]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/3.3.6/grsecurity-2.9-3.3.6-201205131658.patch
Auto commit, grsecurity-3.1-4.9.16-201703180820.patch added.
[thirdparty/grsecurity-scrape.git] / test / 3.3.6 / grsecurity-2.9-3.3.6-201205131658.patch
CommitLineData
2ddef239
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 0c083c5..bf13011 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37@@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41+PERF*
42 SCCS
43 System.map*
44 TAGS
45@@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49+builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55+clut_vga16.c
56+common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63+config.c
64 config.mak
65 config.mak.autogen
66+config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70@@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74+dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78+exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82@@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86+gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93+hash
94+hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98@@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102-kconfig
103+kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107@@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111-linux
112+lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116@@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120-media
121 mconf
122+mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129+mkpiggy
130 mkprep
131 mkregtable
132 mktables
133@@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137+regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141@@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161+vmlinux.bin.bz2
162 vmlinux.lds
163+vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167@@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zconf.lex.c
178 zoffset.h
179diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180index d99fd9c..8689fef 100644
181--- a/Documentation/kernel-parameters.txt
182+++ b/Documentation/kernel-parameters.txt
183@@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188+ virtualization environments that don't cope well with the
189+ expand down segment used by UDEREF on X86-32 or the frequent
190+ page table updates on X86-64.
191+
192+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193+
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197diff --git a/Makefile b/Makefile
198index 9cd6941..92e68ff 100644
199--- a/Makefile
200+++ b/Makefile
201@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206-HOSTCXXFLAGS = -O2
207+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
209+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
214 # Rules shared between *config targets and build targets
215
216 # Basic helpers built in scripts/
217-PHONY += scripts_basic
218-scripts_basic:
219+PHONY += scripts_basic gcc-plugins
220+scripts_basic: gcc-plugins
221 $(Q)$(MAKE) $(build)=scripts/basic
222 $(Q)rm -f .tmp_quiet_recordmcount
223
224@@ -564,6 +565,55 @@ else
225 KBUILD_CFLAGS += -O2
226 endif
227
228+ifndef DISABLE_PAX_PLUGINS
229+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
230+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
231+ifndef CONFIG_UML
232+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
233+endif
234+endif
235+ifdef CONFIG_PAX_MEMORY_STACKLEAK
236+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
237+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
238+endif
239+ifdef CONFIG_KALLOCSTAT_PLUGIN
240+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
241+endif
242+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
243+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
244+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
245+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
246+endif
247+ifdef CONFIG_CHECKER_PLUGIN
248+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
249+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
250+endif
251+endif
252+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
253+ifdef CONFIG_PAX_SIZE_OVERFLOW
254+SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
255+endif
256+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
257+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
258+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
259+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
260+ifeq ($(KBUILD_EXTMOD),)
261+gcc-plugins:
262+ $(Q)$(MAKE) $(build)=tools/gcc
263+else
264+gcc-plugins: ;
265+endif
266+else
267+gcc-plugins:
268+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
269+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
270+else
271+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
272+endif
273+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
274+endif
275+endif
276+
277 include $(srctree)/arch/$(SRCARCH)/Makefile
278
279 ifneq ($(CONFIG_FRAME_WARN),0)
280@@ -708,7 +758,7 @@ export mod_strip_cmd
281
282
283 ifeq ($(KBUILD_EXTMOD),)
284-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
285+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
286
287 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
288 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
289@@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
290
291 # The actual objects are generated when descending,
292 # make sure no implicit rule kicks in
293+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
294+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
295 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
296
297 # Handle descending into subdirectories listed in $(vmlinux-dirs)
298@@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
299 # Error messages still appears in the original language
300
301 PHONY += $(vmlinux-dirs)
302-$(vmlinux-dirs): prepare scripts
303+$(vmlinux-dirs): gcc-plugins prepare scripts
304 $(Q)$(MAKE) $(build)=$@
305
306 # Store (new) KERNELRELASE string in include/config/kernel.release
307@@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
308 $(Q)$(MAKE) $(build)=.
309
310 # All the preparing..
311+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
312 prepare: prepare0
313
314 # Generate some files
315@@ -1089,6 +1142,8 @@ all: modules
316 # using awk while concatenating to the final file.
317
318 PHONY += modules
319+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
320+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
321 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
322 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
323 @$(kecho) ' Building modules, stage 2.';
324@@ -1104,7 +1159,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
325
326 # Target to prepare building external modules
327 PHONY += modules_prepare
328-modules_prepare: prepare scripts
329+modules_prepare: gcc-plugins prepare scripts
330
331 # Target to install modules
332 PHONY += modules_install
333@@ -1201,6 +1256,7 @@ distclean: mrproper
334 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
335 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
336 -o -name '.*.rej' \
337+ -o -name '.*.rej' -o -name '*.so' \
338 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
339 -type f -print | xargs rm -f
340
341@@ -1361,6 +1417,8 @@ PHONY += $(module-dirs) modules
342 $(module-dirs): crmodverdir $(objtree)/Module.symvers
343 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
344
345+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
346+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
347 modules: $(module-dirs)
348 @$(kecho) ' Building modules, stage 2.';
349 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
350@@ -1487,17 +1545,21 @@ else
351 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
352 endif
353
354-%.s: %.c prepare scripts FORCE
355+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357+%.s: %.c gcc-plugins prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 %.i: %.c prepare scripts FORCE
360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
361-%.o: %.c prepare scripts FORCE
362+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
363+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
364+%.o: %.c gcc-plugins prepare scripts FORCE
365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
366 %.lst: %.c prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368-%.s: %.S prepare scripts FORCE
369+%.s: %.S gcc-plugins prepare scripts FORCE
370 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
371-%.o: %.S prepare scripts FORCE
372+%.o: %.S gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.symtypes: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376@@ -1507,11 +1569,15 @@ endif
377 $(cmd_crmodverdir)
378 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
379 $(build)=$(build-dir)
380-%/: prepare scripts FORCE
381+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383+%/: gcc-plugins prepare scripts FORCE
384 $(cmd_crmodverdir)
385 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
386 $(build)=$(build-dir)
387-%.ko: prepare scripts FORCE
388+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390+%.ko: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir) $(@:.ko=.o)
394diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
395index 640f909..48b6597 100644
396--- a/arch/alpha/include/asm/atomic.h
397+++ b/arch/alpha/include/asm/atomic.h
398@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
399 #define atomic_dec(v) atomic_sub(1,(v))
400 #define atomic64_dec(v) atomic64_sub(1,(v))
401
402+#define atomic64_read_unchecked(v) atomic64_read(v)
403+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
404+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
405+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
406+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
407+#define atomic64_inc_unchecked(v) atomic64_inc(v)
408+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
409+#define atomic64_dec_unchecked(v) atomic64_dec(v)
410+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
411+
412 #define smp_mb__before_atomic_dec() smp_mb()
413 #define smp_mb__after_atomic_dec() smp_mb()
414 #define smp_mb__before_atomic_inc() smp_mb()
415diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
416index ad368a9..fbe0f25 100644
417--- a/arch/alpha/include/asm/cache.h
418+++ b/arch/alpha/include/asm/cache.h
419@@ -4,19 +4,19 @@
420 #ifndef __ARCH_ALPHA_CACHE_H
421 #define __ARCH_ALPHA_CACHE_H
422
423+#include <linux/const.h>
424
425 /* Bytes per L1 (data) cache line. */
426 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
427-# define L1_CACHE_BYTES 64
428 # define L1_CACHE_SHIFT 6
429 #else
430 /* Both EV4 and EV5 are write-through, read-allocate,
431 direct-mapped, physical.
432 */
433-# define L1_CACHE_BYTES 32
434 # define L1_CACHE_SHIFT 5
435 #endif
436
437+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
438 #define SMP_CACHE_BYTES L1_CACHE_BYTES
439
440 #endif
441diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
442index da5449e..7418343 100644
443--- a/arch/alpha/include/asm/elf.h
444+++ b/arch/alpha/include/asm/elf.h
445@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
446
447 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
448
449+#ifdef CONFIG_PAX_ASLR
450+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
451+
452+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
453+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
454+#endif
455+
456 /* $0 is set by ld.so to a pointer to a function which might be
457 registered using atexit. This provides a mean for the dynamic
458 linker to call DT_FINI functions for shared libraries that have
459diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
460index bc2a0da..8ad11ee 100644
461--- a/arch/alpha/include/asm/pgalloc.h
462+++ b/arch/alpha/include/asm/pgalloc.h
463@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
464 pgd_set(pgd, pmd);
465 }
466
467+static inline void
468+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
469+{
470+ pgd_populate(mm, pgd, pmd);
471+}
472+
473 extern pgd_t *pgd_alloc(struct mm_struct *mm);
474
475 static inline void
476diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
477index de98a73..bd4f1f8 100644
478--- a/arch/alpha/include/asm/pgtable.h
479+++ b/arch/alpha/include/asm/pgtable.h
480@@ -101,6 +101,17 @@ struct vm_area_struct;
481 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
482 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
483 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
484+
485+#ifdef CONFIG_PAX_PAGEEXEC
486+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
487+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
488+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
489+#else
490+# define PAGE_SHARED_NOEXEC PAGE_SHARED
491+# define PAGE_COPY_NOEXEC PAGE_COPY
492+# define PAGE_READONLY_NOEXEC PAGE_READONLY
493+#endif
494+
495 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
496
497 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
498diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
499index 2fd00b7..cfd5069 100644
500--- a/arch/alpha/kernel/module.c
501+++ b/arch/alpha/kernel/module.c
502@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
503
504 /* The small sections were sorted to the end of the segment.
505 The following should definitely cover them. */
506- gp = (u64)me->module_core + me->core_size - 0x8000;
507+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
508 got = sechdrs[me->arch.gotsecindex].sh_addr;
509
510 for (i = 0; i < n; i++) {
511diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
512index 01e8715..be0e80f 100644
513--- a/arch/alpha/kernel/osf_sys.c
514+++ b/arch/alpha/kernel/osf_sys.c
515@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
516 /* At this point: (!vma || addr < vma->vm_end). */
517 if (limit - len < addr)
518 return -ENOMEM;
519- if (!vma || addr + len <= vma->vm_start)
520+ if (check_heap_stack_gap(vma, addr, len))
521 return addr;
522 addr = vma->vm_end;
523 vma = vma->vm_next;
524@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
525 merely specific addresses, but regions of memory -- perhaps
526 this feature should be incorporated into all ports? */
527
528+#ifdef CONFIG_PAX_RANDMMAP
529+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
530+#endif
531+
532 if (addr) {
533 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
534 if (addr != (unsigned long) -ENOMEM)
535@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
536 }
537
538 /* Next, try allocating at TASK_UNMAPPED_BASE. */
539- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
540- len, limit);
541+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
542+
543 if (addr != (unsigned long) -ENOMEM)
544 return addr;
545
546diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
547index fadd5f8..904e73a 100644
548--- a/arch/alpha/mm/fault.c
549+++ b/arch/alpha/mm/fault.c
550@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
551 __reload_thread(pcb);
552 }
553
554+#ifdef CONFIG_PAX_PAGEEXEC
555+/*
556+ * PaX: decide what to do with offenders (regs->pc = fault address)
557+ *
558+ * returns 1 when task should be killed
559+ * 2 when patched PLT trampoline was detected
560+ * 3 when unpatched PLT trampoline was detected
561+ */
562+static int pax_handle_fetch_fault(struct pt_regs *regs)
563+{
564+
565+#ifdef CONFIG_PAX_EMUPLT
566+ int err;
567+
568+ do { /* PaX: patched PLT emulation #1 */
569+ unsigned int ldah, ldq, jmp;
570+
571+ err = get_user(ldah, (unsigned int *)regs->pc);
572+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
573+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
574+
575+ if (err)
576+ break;
577+
578+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
579+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
580+ jmp == 0x6BFB0000U)
581+ {
582+ unsigned long r27, addr;
583+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
584+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
585+
586+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
587+ err = get_user(r27, (unsigned long *)addr);
588+ if (err)
589+ break;
590+
591+ regs->r27 = r27;
592+ regs->pc = r27;
593+ return 2;
594+ }
595+ } while (0);
596+
597+ do { /* PaX: patched PLT emulation #2 */
598+ unsigned int ldah, lda, br;
599+
600+ err = get_user(ldah, (unsigned int *)regs->pc);
601+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
602+ err |= get_user(br, (unsigned int *)(regs->pc+8));
603+
604+ if (err)
605+ break;
606+
607+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
608+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
609+ (br & 0xFFE00000U) == 0xC3E00000U)
610+ {
611+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
612+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
613+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
614+
615+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
616+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
617+ return 2;
618+ }
619+ } while (0);
620+
621+ do { /* PaX: unpatched PLT emulation */
622+ unsigned int br;
623+
624+ err = get_user(br, (unsigned int *)regs->pc);
625+
626+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
627+ unsigned int br2, ldq, nop, jmp;
628+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
629+
630+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
631+ err = get_user(br2, (unsigned int *)addr);
632+ err |= get_user(ldq, (unsigned int *)(addr+4));
633+ err |= get_user(nop, (unsigned int *)(addr+8));
634+ err |= get_user(jmp, (unsigned int *)(addr+12));
635+ err |= get_user(resolver, (unsigned long *)(addr+16));
636+
637+ if (err)
638+ break;
639+
640+ if (br2 == 0xC3600000U &&
641+ ldq == 0xA77B000CU &&
642+ nop == 0x47FF041FU &&
643+ jmp == 0x6B7B0000U)
644+ {
645+ regs->r28 = regs->pc+4;
646+ regs->r27 = addr+16;
647+ regs->pc = resolver;
648+ return 3;
649+ }
650+ }
651+ } while (0);
652+#endif
653+
654+ return 1;
655+}
656+
657+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
658+{
659+ unsigned long i;
660+
661+ printk(KERN_ERR "PAX: bytes at PC: ");
662+ for (i = 0; i < 5; i++) {
663+ unsigned int c;
664+ if (get_user(c, (unsigned int *)pc+i))
665+ printk(KERN_CONT "???????? ");
666+ else
667+ printk(KERN_CONT "%08x ", c);
668+ }
669+ printk("\n");
670+}
671+#endif
672
673 /*
674 * This routine handles page faults. It determines the address,
675@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
676 good_area:
677 si_code = SEGV_ACCERR;
678 if (cause < 0) {
679- if (!(vma->vm_flags & VM_EXEC))
680+ if (!(vma->vm_flags & VM_EXEC)) {
681+
682+#ifdef CONFIG_PAX_PAGEEXEC
683+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
684+ goto bad_area;
685+
686+ up_read(&mm->mmap_sem);
687+ switch (pax_handle_fetch_fault(regs)) {
688+
689+#ifdef CONFIG_PAX_EMUPLT
690+ case 2:
691+ case 3:
692+ return;
693+#endif
694+
695+ }
696+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
697+ do_group_exit(SIGKILL);
698+#else
699 goto bad_area;
700+#endif
701+
702+ }
703 } else if (!cause) {
704 /* Allow reads even for write-only mappings */
705 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
706diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
707index 86976d0..8e07f84 100644
708--- a/arch/arm/include/asm/atomic.h
709+++ b/arch/arm/include/asm/atomic.h
710@@ -15,6 +15,10 @@
711 #include <linux/types.h>
712 #include <asm/system.h>
713
714+#ifdef CONFIG_GENERIC_ATOMIC64
715+#include <asm-generic/atomic64.h>
716+#endif
717+
718 #define ATOMIC_INIT(i) { (i) }
719
720 #ifdef __KERNEL__
721@@ -25,7 +29,15 @@
722 * atomic_set() is the clrex or dummy strex done on every exception return.
723 */
724 #define atomic_read(v) (*(volatile int *)&(v)->counter)
725+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
726+{
727+ return v->counter;
728+}
729 #define atomic_set(v,i) (((v)->counter) = (i))
730+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
731+{
732+ v->counter = i;
733+}
734
735 #if __LINUX_ARM_ARCH__ >= 6
736
737@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
738 int result;
739
740 __asm__ __volatile__("@ atomic_add\n"
741+"1: ldrex %1, [%3]\n"
742+" adds %0, %1, %4\n"
743+
744+#ifdef CONFIG_PAX_REFCOUNT
745+" bvc 3f\n"
746+"2: bkpt 0xf103\n"
747+"3:\n"
748+#endif
749+
750+" strex %1, %0, [%3]\n"
751+" teq %1, #0\n"
752+" bne 1b"
753+
754+#ifdef CONFIG_PAX_REFCOUNT
755+"\n4:\n"
756+ _ASM_EXTABLE(2b, 4b)
757+#endif
758+
759+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
760+ : "r" (&v->counter), "Ir" (i)
761+ : "cc");
762+}
763+
764+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
765+{
766+ unsigned long tmp;
767+ int result;
768+
769+ __asm__ __volatile__("@ atomic_add_unchecked\n"
770 "1: ldrex %0, [%3]\n"
771 " add %0, %0, %4\n"
772 " strex %1, %0, [%3]\n"
773@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
774 smp_mb();
775
776 __asm__ __volatile__("@ atomic_add_return\n"
777+"1: ldrex %1, [%3]\n"
778+" adds %0, %1, %4\n"
779+
780+#ifdef CONFIG_PAX_REFCOUNT
781+" bvc 3f\n"
782+" mov %0, %1\n"
783+"2: bkpt 0xf103\n"
784+"3:\n"
785+#endif
786+
787+" strex %1, %0, [%3]\n"
788+" teq %1, #0\n"
789+" bne 1b"
790+
791+#ifdef CONFIG_PAX_REFCOUNT
792+"\n4:\n"
793+ _ASM_EXTABLE(2b, 4b)
794+#endif
795+
796+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
797+ : "r" (&v->counter), "Ir" (i)
798+ : "cc");
799+
800+ smp_mb();
801+
802+ return result;
803+}
804+
805+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
806+{
807+ unsigned long tmp;
808+ int result;
809+
810+ smp_mb();
811+
812+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
813 "1: ldrex %0, [%3]\n"
814 " add %0, %0, %4\n"
815 " strex %1, %0, [%3]\n"
816@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
817 int result;
818
819 __asm__ __volatile__("@ atomic_sub\n"
820+"1: ldrex %1, [%3]\n"
821+" subs %0, %1, %4\n"
822+
823+#ifdef CONFIG_PAX_REFCOUNT
824+" bvc 3f\n"
825+"2: bkpt 0xf103\n"
826+"3:\n"
827+#endif
828+
829+" strex %1, %0, [%3]\n"
830+" teq %1, #0\n"
831+" bne 1b"
832+
833+#ifdef CONFIG_PAX_REFCOUNT
834+"\n4:\n"
835+ _ASM_EXTABLE(2b, 4b)
836+#endif
837+
838+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839+ : "r" (&v->counter), "Ir" (i)
840+ : "cc");
841+}
842+
843+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
844+{
845+ unsigned long tmp;
846+ int result;
847+
848+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
849 "1: ldrex %0, [%3]\n"
850 " sub %0, %0, %4\n"
851 " strex %1, %0, [%3]\n"
852@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
853 smp_mb();
854
855 __asm__ __volatile__("@ atomic_sub_return\n"
856-"1: ldrex %0, [%3]\n"
857-" sub %0, %0, %4\n"
858+"1: ldrex %1, [%3]\n"
859+" sub %0, %1, %4\n"
860+
861+#ifdef CONFIG_PAX_REFCOUNT
862+" bvc 3f\n"
863+" mov %0, %1\n"
864+"2: bkpt 0xf103\n"
865+"3:\n"
866+#endif
867+
868 " strex %1, %0, [%3]\n"
869 " teq %1, #0\n"
870 " bne 1b"
871+
872+#ifdef CONFIG_PAX_REFCOUNT
873+"\n4:\n"
874+ _ASM_EXTABLE(2b, 4b)
875+#endif
876+
877 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
878 : "r" (&v->counter), "Ir" (i)
879 : "cc");
880@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
881 return oldval;
882 }
883
884+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
885+{
886+ unsigned long oldval, res;
887+
888+ smp_mb();
889+
890+ do {
891+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
892+ "ldrex %1, [%3]\n"
893+ "mov %0, #0\n"
894+ "teq %1, %4\n"
895+ "strexeq %0, %5, [%3]\n"
896+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
897+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
898+ : "cc");
899+ } while (res);
900+
901+ smp_mb();
902+
903+ return oldval;
904+}
905+
906 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
907 {
908 unsigned long tmp, tmp2;
909@@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
910
911 return val;
912 }
913+#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
914 #define atomic_add(i, v) (void) atomic_add_return(i, v)
915+#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
916
917 static inline int atomic_sub_return(int i, atomic_t *v)
918 {
919@@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
920
921 return val;
922 }
923+#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
924 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
925+#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
926
927 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
928 {
929@@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
930
931 return ret;
932 }
933+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
934
935 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
936 {
937@@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
938 #endif /* __LINUX_ARM_ARCH__ */
939
940 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
941+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
942+{
943+ return xchg(&v->counter, new);
944+}
945
946 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
947 {
948@@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
949 }
950
951 #define atomic_inc(v) atomic_add(1, v)
952+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
953+{
954+ atomic_add_unchecked(1, v);
955+}
956 #define atomic_dec(v) atomic_sub(1, v)
957+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
958+{
959+ atomic_sub_unchecked(1, v);
960+}
961
962 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
963+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
964+{
965+ return atomic_add_return_unchecked(1, v) == 0;
966+}
967 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
968 #define atomic_inc_return(v) (atomic_add_return(1, v))
969+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
970+{
971+ return atomic_add_return_unchecked(1, v);
972+}
973 #define atomic_dec_return(v) (atomic_sub_return(1, v))
974 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
975
976@@ -239,6 +406,14 @@ typedef struct {
977 u64 __aligned(8) counter;
978 } atomic64_t;
979
980+#ifdef CONFIG_PAX_REFCOUNT
981+typedef struct {
982+ u64 __aligned(8) counter;
983+} atomic64_unchecked_t;
984+#else
985+typedef atomic64_t atomic64_unchecked_t;
986+#endif
987+
988 #define ATOMIC64_INIT(i) { (i) }
989
990 static inline u64 atomic64_read(atomic64_t *v)
991@@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
992 return result;
993 }
994
995+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
996+{
997+ u64 result;
998+
999+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1000+" ldrexd %0, %H0, [%1]"
1001+ : "=&r" (result)
1002+ : "r" (&v->counter), "Qo" (v->counter)
1003+ );
1004+
1005+ return result;
1006+}
1007+
1008 static inline void atomic64_set(atomic64_t *v, u64 i)
1009 {
1010 u64 tmp;
1011@@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1012 : "cc");
1013 }
1014
1015+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1016+{
1017+ u64 tmp;
1018+
1019+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1020+"1: ldrexd %0, %H0, [%2]\n"
1021+" strexd %0, %3, %H3, [%2]\n"
1022+" teq %0, #0\n"
1023+" bne 1b"
1024+ : "=&r" (tmp), "=Qo" (v->counter)
1025+ : "r" (&v->counter), "r" (i)
1026+ : "cc");
1027+}
1028+
1029 static inline void atomic64_add(u64 i, atomic64_t *v)
1030 {
1031 u64 result;
1032@@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1033 __asm__ __volatile__("@ atomic64_add\n"
1034 "1: ldrexd %0, %H0, [%3]\n"
1035 " adds %0, %0, %4\n"
1036+" adcs %H0, %H0, %H4\n"
1037+
1038+#ifdef CONFIG_PAX_REFCOUNT
1039+" bvc 3f\n"
1040+"2: bkpt 0xf103\n"
1041+"3:\n"
1042+#endif
1043+
1044+" strexd %1, %0, %H0, [%3]\n"
1045+" teq %1, #0\n"
1046+" bne 1b"
1047+
1048+#ifdef CONFIG_PAX_REFCOUNT
1049+"\n4:\n"
1050+ _ASM_EXTABLE(2b, 4b)
1051+#endif
1052+
1053+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1054+ : "r" (&v->counter), "r" (i)
1055+ : "cc");
1056+}
1057+
1058+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1059+{
1060+ u64 result;
1061+ unsigned long tmp;
1062+
1063+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1064+"1: ldrexd %0, %H0, [%3]\n"
1065+" adds %0, %0, %4\n"
1066 " adc %H0, %H0, %H4\n"
1067 " strexd %1, %0, %H0, [%3]\n"
1068 " teq %1, #0\n"
1069@@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1070
1071 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1072 {
1073- u64 result;
1074- unsigned long tmp;
1075+ u64 result, tmp;
1076
1077 smp_mb();
1078
1079 __asm__ __volatile__("@ atomic64_add_return\n"
1080+"1: ldrexd %1, %H1, [%3]\n"
1081+" adds %0, %1, %4\n"
1082+" adcs %H0, %H1, %H4\n"
1083+
1084+#ifdef CONFIG_PAX_REFCOUNT
1085+" bvc 3f\n"
1086+" mov %0, %1\n"
1087+" mov %H0, %H1\n"
1088+"2: bkpt 0xf103\n"
1089+"3:\n"
1090+#endif
1091+
1092+" strexd %1, %0, %H0, [%3]\n"
1093+" teq %1, #0\n"
1094+" bne 1b"
1095+
1096+#ifdef CONFIG_PAX_REFCOUNT
1097+"\n4:\n"
1098+ _ASM_EXTABLE(2b, 4b)
1099+#endif
1100+
1101+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1102+ : "r" (&v->counter), "r" (i)
1103+ : "cc");
1104+
1105+ smp_mb();
1106+
1107+ return result;
1108+}
1109+
1110+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1111+{
1112+ u64 result;
1113+ unsigned long tmp;
1114+
1115+ smp_mb();
1116+
1117+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1118 "1: ldrexd %0, %H0, [%3]\n"
1119 " adds %0, %0, %4\n"
1120 " adc %H0, %H0, %H4\n"
1121@@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1122 __asm__ __volatile__("@ atomic64_sub\n"
1123 "1: ldrexd %0, %H0, [%3]\n"
1124 " subs %0, %0, %4\n"
1125+" sbcs %H0, %H0, %H4\n"
1126+
1127+#ifdef CONFIG_PAX_REFCOUNT
1128+" bvc 3f\n"
1129+"2: bkpt 0xf103\n"
1130+"3:\n"
1131+#endif
1132+
1133+" strexd %1, %0, %H0, [%3]\n"
1134+" teq %1, #0\n"
1135+" bne 1b"
1136+
1137+#ifdef CONFIG_PAX_REFCOUNT
1138+"\n4:\n"
1139+ _ASM_EXTABLE(2b, 4b)
1140+#endif
1141+
1142+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1143+ : "r" (&v->counter), "r" (i)
1144+ : "cc");
1145+}
1146+
1147+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1148+{
1149+ u64 result;
1150+ unsigned long tmp;
1151+
1152+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1153+"1: ldrexd %0, %H0, [%3]\n"
1154+" subs %0, %0, %4\n"
1155 " sbc %H0, %H0, %H4\n"
1156 " strexd %1, %0, %H0, [%3]\n"
1157 " teq %1, #0\n"
1158@@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1159
1160 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1161 {
1162- u64 result;
1163- unsigned long tmp;
1164+ u64 result, tmp;
1165
1166 smp_mb();
1167
1168 __asm__ __volatile__("@ atomic64_sub_return\n"
1169-"1: ldrexd %0, %H0, [%3]\n"
1170-" subs %0, %0, %4\n"
1171-" sbc %H0, %H0, %H4\n"
1172+"1: ldrexd %1, %H1, [%3]\n"
1173+" subs %0, %1, %4\n"
1174+" sbc %H0, %H1, %H4\n"
1175+
1176+#ifdef CONFIG_PAX_REFCOUNT
1177+" bvc 3f\n"
1178+" mov %0, %1\n"
1179+" mov %H0, %H1\n"
1180+"2: bkpt 0xf103\n"
1181+"3:\n"
1182+#endif
1183+
1184 " strexd %1, %0, %H0, [%3]\n"
1185 " teq %1, #0\n"
1186 " bne 1b"
1187+
1188+#ifdef CONFIG_PAX_REFCOUNT
1189+"\n4:\n"
1190+ _ASM_EXTABLE(2b, 4b)
1191+#endif
1192+
1193 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1194 : "r" (&v->counter), "r" (i)
1195 : "cc");
1196@@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1197 return oldval;
1198 }
1199
1200+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1201+{
1202+ u64 oldval;
1203+ unsigned long res;
1204+
1205+ smp_mb();
1206+
1207+ do {
1208+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1209+ "ldrexd %1, %H1, [%3]\n"
1210+ "mov %0, #0\n"
1211+ "teq %1, %4\n"
1212+ "teqeq %H1, %H4\n"
1213+ "strexdeq %0, %5, %H5, [%3]"
1214+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1215+ : "r" (&ptr->counter), "r" (old), "r" (new)
1216+ : "cc");
1217+ } while (res);
1218+
1219+ smp_mb();
1220+
1221+ return oldval;
1222+}
1223+
1224 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1225 {
1226 u64 result;
1227@@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1228
1229 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1230 {
1231- u64 result;
1232- unsigned long tmp;
1233+ u64 result, tmp;
1234
1235 smp_mb();
1236
1237 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1238-"1: ldrexd %0, %H0, [%3]\n"
1239-" subs %0, %0, #1\n"
1240-" sbc %H0, %H0, #0\n"
1241+"1: ldrexd %1, %H1, [%3]\n"
1242+" subs %0, %1, #1\n"
1243+" sbc %H0, %H1, #0\n"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+" bvc 3f\n"
1247+" mov %0, %1\n"
1248+" mov %H0, %H1\n"
1249+"2: bkpt 0xf103\n"
1250+"3:\n"
1251+#endif
1252+
1253 " teq %H0, #0\n"
1254-" bmi 2f\n"
1255+" bmi 4f\n"
1256 " strexd %1, %0, %H0, [%3]\n"
1257 " teq %1, #0\n"
1258 " bne 1b\n"
1259-"2:"
1260+"4:\n"
1261+
1262+#ifdef CONFIG_PAX_REFCOUNT
1263+ _ASM_EXTABLE(2b, 4b)
1264+#endif
1265+
1266 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1267 : "r" (&v->counter)
1268 : "cc");
1269@@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1270 " teq %0, %5\n"
1271 " teqeq %H0, %H5\n"
1272 " moveq %1, #0\n"
1273-" beq 2f\n"
1274+" beq 4f\n"
1275 " adds %0, %0, %6\n"
1276 " adc %H0, %H0, %H6\n"
1277+
1278+#ifdef CONFIG_PAX_REFCOUNT
1279+" bvc 3f\n"
1280+"2: bkpt 0xf103\n"
1281+"3:\n"
1282+#endif
1283+
1284 " strexd %2, %0, %H0, [%4]\n"
1285 " teq %2, #0\n"
1286 " bne 1b\n"
1287-"2:"
1288+"4:\n"
1289+
1290+#ifdef CONFIG_PAX_REFCOUNT
1291+ _ASM_EXTABLE(2b, 4b)
1292+#endif
1293+
1294 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1295 : "r" (&v->counter), "r" (u), "r" (a)
1296 : "cc");
1297@@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1298
1299 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1300 #define atomic64_inc(v) atomic64_add(1LL, (v))
1301+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1302 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1303+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1304 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1305 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1306 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1307+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1308 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1309 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1310 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1311diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1312index 75fe66b..2255c86 100644
1313--- a/arch/arm/include/asm/cache.h
1314+++ b/arch/arm/include/asm/cache.h
1315@@ -4,8 +4,10 @@
1316 #ifndef __ASMARM_CACHE_H
1317 #define __ASMARM_CACHE_H
1318
1319+#include <linux/const.h>
1320+
1321 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1322-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1323+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1324
1325 /*
1326 * Memory returned by kmalloc() may be used for DMA, so we must make
1327diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1328index d5d8d5c..ad92c96 100644
1329--- a/arch/arm/include/asm/cacheflush.h
1330+++ b/arch/arm/include/asm/cacheflush.h
1331@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1332 void (*dma_unmap_area)(const void *, size_t, int);
1333
1334 void (*dma_flush_range)(const void *, const void *);
1335-};
1336+} __no_const;
1337
1338 /*
1339 * Select the calling method
1340diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1341index 0e9ce8d..6ef1e03 100644
1342--- a/arch/arm/include/asm/elf.h
1343+++ b/arch/arm/include/asm/elf.h
1344@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1345 the loader. We need to make sure that it is out of the way of the program
1346 that it will "exec", and that there is sufficient room for the brk. */
1347
1348-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1349+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1350+
1351+#ifdef CONFIG_PAX_ASLR
1352+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1353+
1354+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1355+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1356+#endif
1357
1358 /* When the program starts, a1 contains a pointer to a function to be
1359 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1360@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1361 extern void elf_set_personality(const struct elf32_hdr *);
1362 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1363
1364-struct mm_struct;
1365-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1366-#define arch_randomize_brk arch_randomize_brk
1367-
1368 extern int vectors_user_mapping(void);
1369 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1370 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1371diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1372index e51b1e8..32a3113 100644
1373--- a/arch/arm/include/asm/kmap_types.h
1374+++ b/arch/arm/include/asm/kmap_types.h
1375@@ -21,6 +21,7 @@ enum km_type {
1376 KM_L1_CACHE,
1377 KM_L2_CACHE,
1378 KM_KDB,
1379+ KM_CLEARPAGE,
1380 KM_TYPE_NR
1381 };
1382
1383diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1384index 53426c6..c7baff3 100644
1385--- a/arch/arm/include/asm/outercache.h
1386+++ b/arch/arm/include/asm/outercache.h
1387@@ -35,7 +35,7 @@ struct outer_cache_fns {
1388 #endif
1389 void (*set_debug)(unsigned long);
1390 void (*resume)(void);
1391-};
1392+} __no_const;
1393
1394 #ifdef CONFIG_OUTER_CACHE
1395
1396diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1397index 97b440c..b7ff179 100644
1398--- a/arch/arm/include/asm/page.h
1399+++ b/arch/arm/include/asm/page.h
1400@@ -123,7 +123,7 @@ struct cpu_user_fns {
1401 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1402 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1403 unsigned long vaddr, struct vm_area_struct *vma);
1404-};
1405+} __no_const;
1406
1407 #ifdef MULTI_USER
1408 extern struct cpu_user_fns cpu_user;
1409diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1410index 943504f..bf8d667 100644
1411--- a/arch/arm/include/asm/pgalloc.h
1412+++ b/arch/arm/include/asm/pgalloc.h
1413@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1414 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1415 }
1416
1417+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1418+{
1419+ pud_populate(mm, pud, pmd);
1420+}
1421+
1422 #else /* !CONFIG_ARM_LPAE */
1423
1424 /*
1425@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1426 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1427 #define pmd_free(mm, pmd) do { } while (0)
1428 #define pud_populate(mm,pmd,pte) BUG()
1429+#define pud_populate_kernel(mm,pmd,pte) BUG()
1430
1431 #endif /* CONFIG_ARM_LPAE */
1432
1433diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1434index e4c96cc..1145653 100644
1435--- a/arch/arm/include/asm/system.h
1436+++ b/arch/arm/include/asm/system.h
1437@@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1438
1439 #define xchg(ptr,x) \
1440 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1441+#define xchg_unchecked(ptr,x) \
1442+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1443
1444 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1445
1446@@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1447
1448 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1449
1450+#define _ASM_EXTABLE(from, to) \
1451+" .pushsection __ex_table,\"a\"\n"\
1452+" .align 3\n" \
1453+" .long " #from ", " #to"\n" \
1454+" .popsection"
1455+
1456+
1457 #endif /* __ASSEMBLY__ */
1458
1459 #define arch_align_stack(x) (x)
1460diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1461index d4c24d4..4ac53e8 100644
1462--- a/arch/arm/include/asm/thread_info.h
1463+++ b/arch/arm/include/asm/thread_info.h
1464@@ -141,6 +141,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
1465 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1466 #define TIF_SYSCALL_TRACE 8
1467 #define TIF_SYSCALL_AUDIT 9
1468+
1469+/* within 8 bits of TIF_SYSCALL_TRACE
1470+ to meet flexible second operand requirements
1471+*/
1472+#define TIF_GRSEC_SETXID 10
1473+
1474 #define TIF_POLLING_NRFLAG 16
1475 #define TIF_USING_IWMMXT 17
1476 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1477@@ -156,9 +162,11 @@ extern void vfp_flush_hwstate(struct thread_info *);
1478 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1479 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1480 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1481+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1482
1483 /* Checks for any syscall work in entry-common.S */
1484-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1485+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1486+ _TIF_GRSEC_SETXID)
1487
1488 /*
1489 * Change these and you break ASM code in entry-common.S
1490diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1491index 2958976..12ccac4 100644
1492--- a/arch/arm/include/asm/uaccess.h
1493+++ b/arch/arm/include/asm/uaccess.h
1494@@ -22,6 +22,8 @@
1495 #define VERIFY_READ 0
1496 #define VERIFY_WRITE 1
1497
1498+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1499+
1500 /*
1501 * The exception table consists of pairs of addresses: the first is the
1502 * address of an instruction that is allowed to fault, and the second is
1503@@ -387,8 +389,23 @@ do { \
1504
1505
1506 #ifdef CONFIG_MMU
1507-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1508-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1509+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1510+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1511+
1512+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1513+{
1514+ if (!__builtin_constant_p(n))
1515+ check_object_size(to, n, false);
1516+ return ___copy_from_user(to, from, n);
1517+}
1518+
1519+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1520+{
1521+ if (!__builtin_constant_p(n))
1522+ check_object_size(from, n, true);
1523+ return ___copy_to_user(to, from, n);
1524+}
1525+
1526 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1527 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1528 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1529@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1530
1531 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1532 {
1533+ if ((long)n < 0)
1534+ return n;
1535+
1536 if (access_ok(VERIFY_READ, from, n))
1537 n = __copy_from_user(to, from, n);
1538 else /* security hole - plug it */
1539@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1540
1541 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1542 {
1543+ if ((long)n < 0)
1544+ return n;
1545+
1546 if (access_ok(VERIFY_WRITE, to, n))
1547 n = __copy_to_user(to, from, n);
1548 return n;
1549diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1550index 5b0bce6..becd81c 100644
1551--- a/arch/arm/kernel/armksyms.c
1552+++ b/arch/arm/kernel/armksyms.c
1553@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1554 #ifdef CONFIG_MMU
1555 EXPORT_SYMBOL(copy_page);
1556
1557-EXPORT_SYMBOL(__copy_from_user);
1558-EXPORT_SYMBOL(__copy_to_user);
1559+EXPORT_SYMBOL(___copy_from_user);
1560+EXPORT_SYMBOL(___copy_to_user);
1561 EXPORT_SYMBOL(__clear_user);
1562
1563 EXPORT_SYMBOL(__get_user_1);
1564diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1565index 971d65c..cc936fb 100644
1566--- a/arch/arm/kernel/process.c
1567+++ b/arch/arm/kernel/process.c
1568@@ -28,7 +28,6 @@
1569 #include <linux/tick.h>
1570 #include <linux/utsname.h>
1571 #include <linux/uaccess.h>
1572-#include <linux/random.h>
1573 #include <linux/hw_breakpoint.h>
1574 #include <linux/cpuidle.h>
1575
1576@@ -273,9 +272,10 @@ void machine_power_off(void)
1577 machine_shutdown();
1578 if (pm_power_off)
1579 pm_power_off();
1580+ BUG();
1581 }
1582
1583-void machine_restart(char *cmd)
1584+__noreturn void machine_restart(char *cmd)
1585 {
1586 machine_shutdown();
1587
1588@@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1589 return 0;
1590 }
1591
1592-unsigned long arch_randomize_brk(struct mm_struct *mm)
1593-{
1594- unsigned long range_end = mm->brk + 0x02000000;
1595- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1596-}
1597-
1598 #ifdef CONFIG_MMU
1599 /*
1600 * The vectors page is always readable from user space for the
1601diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1602index f5ce8ab..4b73893 100644
1603--- a/arch/arm/kernel/ptrace.c
1604+++ b/arch/arm/kernel/ptrace.c
1605@@ -905,10 +905,19 @@ long arch_ptrace(struct task_struct *child, long request,
1606 return ret;
1607 }
1608
1609+#ifdef CONFIG_GRKERNSEC_SETXID
1610+extern void gr_delayed_cred_worker(void);
1611+#endif
1612+
1613 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1614 {
1615 unsigned long ip;
1616
1617+#ifdef CONFIG_GRKERNSEC_SETXID
1618+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1619+ gr_delayed_cred_worker();
1620+#endif
1621+
1622 if (why)
1623 audit_syscall_exit(regs);
1624 else
1625diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1626index a255c39..4a19b25 100644
1627--- a/arch/arm/kernel/setup.c
1628+++ b/arch/arm/kernel/setup.c
1629@@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1630 struct cpu_tlb_fns cpu_tlb __read_mostly;
1631 #endif
1632 #ifdef MULTI_USER
1633-struct cpu_user_fns cpu_user __read_mostly;
1634+struct cpu_user_fns cpu_user __read_only;
1635 #endif
1636 #ifdef MULTI_CACHE
1637-struct cpu_cache_fns cpu_cache __read_mostly;
1638+struct cpu_cache_fns cpu_cache __read_only;
1639 #endif
1640 #ifdef CONFIG_OUTER_CACHE
1641-struct outer_cache_fns outer_cache __read_mostly;
1642+struct outer_cache_fns outer_cache __read_only;
1643 EXPORT_SYMBOL(outer_cache);
1644 #endif
1645
1646diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1647index f84dfe6..13e94f7 100644
1648--- a/arch/arm/kernel/traps.c
1649+++ b/arch/arm/kernel/traps.c
1650@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1651
1652 static DEFINE_RAW_SPINLOCK(die_lock);
1653
1654+extern void gr_handle_kernel_exploit(void);
1655+
1656 /*
1657 * This function is protected against re-entrancy.
1658 */
1659@@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1660 panic("Fatal exception in interrupt");
1661 if (panic_on_oops)
1662 panic("Fatal exception");
1663+
1664+ gr_handle_kernel_exploit();
1665+
1666 if (ret != NOTIFY_STOP)
1667 do_exit(SIGSEGV);
1668 }
1669diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1670index 66a477a..bee61d3 100644
1671--- a/arch/arm/lib/copy_from_user.S
1672+++ b/arch/arm/lib/copy_from_user.S
1673@@ -16,7 +16,7 @@
1674 /*
1675 * Prototype:
1676 *
1677- * size_t __copy_from_user(void *to, const void *from, size_t n)
1678+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1679 *
1680 * Purpose:
1681 *
1682@@ -84,11 +84,11 @@
1683
1684 .text
1685
1686-ENTRY(__copy_from_user)
1687+ENTRY(___copy_from_user)
1688
1689 #include "copy_template.S"
1690
1691-ENDPROC(__copy_from_user)
1692+ENDPROC(___copy_from_user)
1693
1694 .pushsection .fixup,"ax"
1695 .align 0
1696diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1697index 6ee2f67..d1cce76 100644
1698--- a/arch/arm/lib/copy_page.S
1699+++ b/arch/arm/lib/copy_page.S
1700@@ -10,6 +10,7 @@
1701 * ASM optimised string functions
1702 */
1703 #include <linux/linkage.h>
1704+#include <linux/const.h>
1705 #include <asm/assembler.h>
1706 #include <asm/asm-offsets.h>
1707 #include <asm/cache.h>
1708diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1709index d066df6..df28194 100644
1710--- a/arch/arm/lib/copy_to_user.S
1711+++ b/arch/arm/lib/copy_to_user.S
1712@@ -16,7 +16,7 @@
1713 /*
1714 * Prototype:
1715 *
1716- * size_t __copy_to_user(void *to, const void *from, size_t n)
1717+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1718 *
1719 * Purpose:
1720 *
1721@@ -88,11 +88,11 @@
1722 .text
1723
1724 ENTRY(__copy_to_user_std)
1725-WEAK(__copy_to_user)
1726+WEAK(___copy_to_user)
1727
1728 #include "copy_template.S"
1729
1730-ENDPROC(__copy_to_user)
1731+ENDPROC(___copy_to_user)
1732 ENDPROC(__copy_to_user_std)
1733
1734 .pushsection .fixup,"ax"
1735diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1736index 5c908b1..e712687 100644
1737--- a/arch/arm/lib/uaccess.S
1738+++ b/arch/arm/lib/uaccess.S
1739@@ -20,7 +20,7 @@
1740
1741 #define PAGE_SHIFT 12
1742
1743-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1744+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1745 * Purpose : copy a block to user memory from kernel memory
1746 * Params : to - user memory
1747 * : from - kernel memory
1748@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1749 sub r2, r2, ip
1750 b .Lc2u_dest_aligned
1751
1752-ENTRY(__copy_to_user)
1753+ENTRY(___copy_to_user)
1754 stmfd sp!, {r2, r4 - r7, lr}
1755 cmp r2, #4
1756 blt .Lc2u_not_enough
1757@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1758 ldrgtb r3, [r1], #0
1759 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1760 b .Lc2u_finished
1761-ENDPROC(__copy_to_user)
1762+ENDPROC(___copy_to_user)
1763
1764 .pushsection .fixup,"ax"
1765 .align 0
1766 9001: ldmfd sp!, {r0, r4 - r7, pc}
1767 .popsection
1768
1769-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1770+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1771 * Purpose : copy a block from user memory to kernel memory
1772 * Params : to - kernel memory
1773 * : from - user memory
1774@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1775 sub r2, r2, ip
1776 b .Lcfu_dest_aligned
1777
1778-ENTRY(__copy_from_user)
1779+ENTRY(___copy_from_user)
1780 stmfd sp!, {r0, r2, r4 - r7, lr}
1781 cmp r2, #4
1782 blt .Lcfu_not_enough
1783@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1784 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1785 strgtb r3, [r0], #1
1786 b .Lcfu_finished
1787-ENDPROC(__copy_from_user)
1788+ENDPROC(___copy_from_user)
1789
1790 .pushsection .fixup,"ax"
1791 .align 0
1792diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1793index 025f742..8432b08 100644
1794--- a/arch/arm/lib/uaccess_with_memcpy.c
1795+++ b/arch/arm/lib/uaccess_with_memcpy.c
1796@@ -104,7 +104,7 @@ out:
1797 }
1798
1799 unsigned long
1800-__copy_to_user(void __user *to, const void *from, unsigned long n)
1801+___copy_to_user(void __user *to, const void *from, unsigned long n)
1802 {
1803 /*
1804 * This test is stubbed out of the main function above to keep
1805diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1806index 6722627..8f97548c 100644
1807--- a/arch/arm/mach-omap2/board-n8x0.c
1808+++ b/arch/arm/mach-omap2/board-n8x0.c
1809@@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1810 }
1811 #endif
1812
1813-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1814+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1815 .late_init = n8x0_menelaus_late_init,
1816 };
1817
1818diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1819index 2b2d51c..0127490 100644
1820--- a/arch/arm/mach-ux500/mbox-db5500.c
1821+++ b/arch/arm/mach-ux500/mbox-db5500.c
1822@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1823 return sprintf(buf, "0x%X\n", mbox_value);
1824 }
1825
1826-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1827+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1828
1829 static int mbox_show(struct seq_file *s, void *data)
1830 {
1831diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1832index bb7eac3..3bade16 100644
1833--- a/arch/arm/mm/fault.c
1834+++ b/arch/arm/mm/fault.c
1835@@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1836 }
1837 #endif
1838
1839+#ifdef CONFIG_PAX_PAGEEXEC
1840+ if (fsr & FSR_LNX_PF) {
1841+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1842+ do_group_exit(SIGKILL);
1843+ }
1844+#endif
1845+
1846 tsk->thread.address = addr;
1847 tsk->thread.error_code = fsr;
1848 tsk->thread.trap_no = 14;
1849@@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1850 }
1851 #endif /* CONFIG_MMU */
1852
1853+#ifdef CONFIG_PAX_PAGEEXEC
1854+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1855+{
1856+ long i;
1857+
1858+ printk(KERN_ERR "PAX: bytes at PC: ");
1859+ for (i = 0; i < 20; i++) {
1860+ unsigned char c;
1861+ if (get_user(c, (__force unsigned char __user *)pc+i))
1862+ printk(KERN_CONT "?? ");
1863+ else
1864+ printk(KERN_CONT "%02x ", c);
1865+ }
1866+ printk("\n");
1867+
1868+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1869+ for (i = -1; i < 20; i++) {
1870+ unsigned long c;
1871+ if (get_user(c, (__force unsigned long __user *)sp+i))
1872+ printk(KERN_CONT "???????? ");
1873+ else
1874+ printk(KERN_CONT "%08lx ", c);
1875+ }
1876+ printk("\n");
1877+}
1878+#endif
1879+
1880 /*
1881 * First Level Translation Fault Handler
1882 *
1883@@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1884 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1885 struct siginfo info;
1886
1887+#ifdef CONFIG_PAX_REFCOUNT
1888+ if (fsr_fs(ifsr) == 2) {
1889+ unsigned int bkpt;
1890+
1891+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1892+ current->thread.error_code = ifsr;
1893+ current->thread.trap_no = 0;
1894+ pax_report_refcount_overflow(regs);
1895+ fixup_exception(regs);
1896+ return;
1897+ }
1898+ }
1899+#endif
1900+
1901 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1902 return;
1903
1904diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1905index ce8cb19..3ec539d 100644
1906--- a/arch/arm/mm/mmap.c
1907+++ b/arch/arm/mm/mmap.c
1908@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1909 if (len > TASK_SIZE)
1910 return -ENOMEM;
1911
1912+#ifdef CONFIG_PAX_RANDMMAP
1913+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1914+#endif
1915+
1916 if (addr) {
1917 if (do_align)
1918 addr = COLOUR_ALIGN(addr, pgoff);
1919@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1920 addr = PAGE_ALIGN(addr);
1921
1922 vma = find_vma(mm, addr);
1923- if (TASK_SIZE - len >= addr &&
1924- (!vma || addr + len <= vma->vm_start))
1925+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1926 return addr;
1927 }
1928 if (len > mm->cached_hole_size) {
1929- start_addr = addr = mm->free_area_cache;
1930+ start_addr = addr = mm->free_area_cache;
1931 } else {
1932- start_addr = addr = mm->mmap_base;
1933- mm->cached_hole_size = 0;
1934+ start_addr = addr = mm->mmap_base;
1935+ mm->cached_hole_size = 0;
1936 }
1937
1938 full_search:
1939@@ -124,14 +127,14 @@ full_search:
1940 * Start a new search - just in case we missed
1941 * some holes.
1942 */
1943- if (start_addr != TASK_UNMAPPED_BASE) {
1944- start_addr = addr = TASK_UNMAPPED_BASE;
1945+ if (start_addr != mm->mmap_base) {
1946+ start_addr = addr = mm->mmap_base;
1947 mm->cached_hole_size = 0;
1948 goto full_search;
1949 }
1950 return -ENOMEM;
1951 }
1952- if (!vma || addr + len <= vma->vm_start) {
1953+ if (check_heap_stack_gap(vma, addr, len)) {
1954 /*
1955 * Remember the place where we stopped the search:
1956 */
1957@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1958
1959 if (mmap_is_legacy()) {
1960 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1961+
1962+#ifdef CONFIG_PAX_RANDMMAP
1963+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1964+ mm->mmap_base += mm->delta_mmap;
1965+#endif
1966+
1967 mm->get_unmapped_area = arch_get_unmapped_area;
1968 mm->unmap_area = arch_unmap_area;
1969 } else {
1970 mm->mmap_base = mmap_base(random_factor);
1971+
1972+#ifdef CONFIG_PAX_RANDMMAP
1973+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1974+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1975+#endif
1976+
1977 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1978 mm->unmap_area = arch_unmap_area_topdown;
1979 }
1980diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1981index 71a6827..e7fbc23 100644
1982--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1983+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1984@@ -43,7 +43,7 @@ struct samsung_dma_ops {
1985 int (*started)(unsigned ch);
1986 int (*flush)(unsigned ch);
1987 int (*stop)(unsigned ch);
1988-};
1989+} __no_const;
1990
1991 extern void *samsung_dmadev_get_ops(void);
1992 extern void *s3c_dma_get_ops(void);
1993diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1994index 5f28cae..3d23723 100644
1995--- a/arch/arm/plat-samsung/include/plat/ehci.h
1996+++ b/arch/arm/plat-samsung/include/plat/ehci.h
1997@@ -14,7 +14,7 @@
1998 struct s5p_ehci_platdata {
1999 int (*phy_init)(struct platform_device *pdev, int type);
2000 int (*phy_exit)(struct platform_device *pdev, int type);
2001-};
2002+} __no_const;
2003
2004 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2005
2006diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2007index c3a58a1..78fbf54 100644
2008--- a/arch/avr32/include/asm/cache.h
2009+++ b/arch/avr32/include/asm/cache.h
2010@@ -1,8 +1,10 @@
2011 #ifndef __ASM_AVR32_CACHE_H
2012 #define __ASM_AVR32_CACHE_H
2013
2014+#include <linux/const.h>
2015+
2016 #define L1_CACHE_SHIFT 5
2017-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2018+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2019
2020 /*
2021 * Memory returned by kmalloc() may be used for DMA, so we must make
2022diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2023index 3b3159b..425ea94 100644
2024--- a/arch/avr32/include/asm/elf.h
2025+++ b/arch/avr32/include/asm/elf.h
2026@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2027 the loader. We need to make sure that it is out of the way of the program
2028 that it will "exec", and that there is sufficient room for the brk. */
2029
2030-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2031+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2032
2033+#ifdef CONFIG_PAX_ASLR
2034+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2035+
2036+#define PAX_DELTA_MMAP_LEN 15
2037+#define PAX_DELTA_STACK_LEN 15
2038+#endif
2039
2040 /* This yields a mask that user programs can use to figure out what
2041 instruction set this CPU supports. This could be done in user space,
2042diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2043index b7f5c68..556135c 100644
2044--- a/arch/avr32/include/asm/kmap_types.h
2045+++ b/arch/avr32/include/asm/kmap_types.h
2046@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2047 D(11) KM_IRQ1,
2048 D(12) KM_SOFTIRQ0,
2049 D(13) KM_SOFTIRQ1,
2050-D(14) KM_TYPE_NR
2051+D(14) KM_CLEARPAGE,
2052+D(15) KM_TYPE_NR
2053 };
2054
2055 #undef D
2056diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2057index f7040a1..db9f300 100644
2058--- a/arch/avr32/mm/fault.c
2059+++ b/arch/avr32/mm/fault.c
2060@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2061
2062 int exception_trace = 1;
2063
2064+#ifdef CONFIG_PAX_PAGEEXEC
2065+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2066+{
2067+ unsigned long i;
2068+
2069+ printk(KERN_ERR "PAX: bytes at PC: ");
2070+ for (i = 0; i < 20; i++) {
2071+ unsigned char c;
2072+ if (get_user(c, (unsigned char *)pc+i))
2073+ printk(KERN_CONT "???????? ");
2074+ else
2075+ printk(KERN_CONT "%02x ", c);
2076+ }
2077+ printk("\n");
2078+}
2079+#endif
2080+
2081 /*
2082 * This routine handles page faults. It determines the address and the
2083 * problem, and then passes it off to one of the appropriate routines.
2084@@ -156,6 +173,16 @@ bad_area:
2085 up_read(&mm->mmap_sem);
2086
2087 if (user_mode(regs)) {
2088+
2089+#ifdef CONFIG_PAX_PAGEEXEC
2090+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2091+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2092+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2093+ do_group_exit(SIGKILL);
2094+ }
2095+ }
2096+#endif
2097+
2098 if (exception_trace && printk_ratelimit())
2099 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2100 "sp %08lx ecr %lu\n",
2101diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2102index 568885a..f8008df 100644
2103--- a/arch/blackfin/include/asm/cache.h
2104+++ b/arch/blackfin/include/asm/cache.h
2105@@ -7,6 +7,7 @@
2106 #ifndef __ARCH_BLACKFIN_CACHE_H
2107 #define __ARCH_BLACKFIN_CACHE_H
2108
2109+#include <linux/const.h>
2110 #include <linux/linkage.h> /* for asmlinkage */
2111
2112 /*
2113@@ -14,7 +15,7 @@
2114 * Blackfin loads 32 bytes for cache
2115 */
2116 #define L1_CACHE_SHIFT 5
2117-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2118+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2119 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2120
2121 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2122diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2123index aea2718..3639a60 100644
2124--- a/arch/cris/include/arch-v10/arch/cache.h
2125+++ b/arch/cris/include/arch-v10/arch/cache.h
2126@@ -1,8 +1,9 @@
2127 #ifndef _ASM_ARCH_CACHE_H
2128 #define _ASM_ARCH_CACHE_H
2129
2130+#include <linux/const.h>
2131 /* Etrax 100LX have 32-byte cache-lines. */
2132-#define L1_CACHE_BYTES 32
2133 #define L1_CACHE_SHIFT 5
2134+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2135
2136 #endif /* _ASM_ARCH_CACHE_H */
2137diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2138index 1de779f..336fad3 100644
2139--- a/arch/cris/include/arch-v32/arch/cache.h
2140+++ b/arch/cris/include/arch-v32/arch/cache.h
2141@@ -1,11 +1,12 @@
2142 #ifndef _ASM_CRIS_ARCH_CACHE_H
2143 #define _ASM_CRIS_ARCH_CACHE_H
2144
2145+#include <linux/const.h>
2146 #include <arch/hwregs/dma.h>
2147
2148 /* A cache-line is 32 bytes. */
2149-#define L1_CACHE_BYTES 32
2150 #define L1_CACHE_SHIFT 5
2151+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2152
2153 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2154
2155diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2156index 0d8a7d6..d0c9ff5 100644
2157--- a/arch/frv/include/asm/atomic.h
2158+++ b/arch/frv/include/asm/atomic.h
2159@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2160 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2161 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2162
2163+#define atomic64_read_unchecked(v) atomic64_read(v)
2164+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2165+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2166+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2167+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2168+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2169+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2170+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2171+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2172+
2173 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2174 {
2175 int c, old;
2176diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2177index 2797163..c2a401d 100644
2178--- a/arch/frv/include/asm/cache.h
2179+++ b/arch/frv/include/asm/cache.h
2180@@ -12,10 +12,11 @@
2181 #ifndef __ASM_CACHE_H
2182 #define __ASM_CACHE_H
2183
2184+#include <linux/const.h>
2185
2186 /* bytes per L1 cache line */
2187 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2188-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2189+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2190
2191 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2192 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2193diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2194index f8e16b2..c73ff79 100644
2195--- a/arch/frv/include/asm/kmap_types.h
2196+++ b/arch/frv/include/asm/kmap_types.h
2197@@ -23,6 +23,7 @@ enum km_type {
2198 KM_IRQ1,
2199 KM_SOFTIRQ0,
2200 KM_SOFTIRQ1,
2201+ KM_CLEARPAGE,
2202 KM_TYPE_NR
2203 };
2204
2205diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2206index 385fd30..6c3d97e 100644
2207--- a/arch/frv/mm/elf-fdpic.c
2208+++ b/arch/frv/mm/elf-fdpic.c
2209@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2210 if (addr) {
2211 addr = PAGE_ALIGN(addr);
2212 vma = find_vma(current->mm, addr);
2213- if (TASK_SIZE - len >= addr &&
2214- (!vma || addr + len <= vma->vm_start))
2215+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2216 goto success;
2217 }
2218
2219@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2220 for (; vma; vma = vma->vm_next) {
2221 if (addr > limit)
2222 break;
2223- if (addr + len <= vma->vm_start)
2224+ if (check_heap_stack_gap(vma, addr, len))
2225 goto success;
2226 addr = vma->vm_end;
2227 }
2228@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2229 for (; vma; vma = vma->vm_next) {
2230 if (addr > limit)
2231 break;
2232- if (addr + len <= vma->vm_start)
2233+ if (check_heap_stack_gap(vma, addr, len))
2234 goto success;
2235 addr = vma->vm_end;
2236 }
2237diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2238index c635028..6d9445a 100644
2239--- a/arch/h8300/include/asm/cache.h
2240+++ b/arch/h8300/include/asm/cache.h
2241@@ -1,8 +1,10 @@
2242 #ifndef __ARCH_H8300_CACHE_H
2243 #define __ARCH_H8300_CACHE_H
2244
2245+#include <linux/const.h>
2246+
2247 /* bytes per L1 cache line */
2248-#define L1_CACHE_BYTES 4
2249+#define L1_CACHE_BYTES _AC(4,UL)
2250
2251 /* m68k-elf-gcc 2.95.2 doesn't like these */
2252
2253diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2254index 0f01de2..d37d309 100644
2255--- a/arch/hexagon/include/asm/cache.h
2256+++ b/arch/hexagon/include/asm/cache.h
2257@@ -21,9 +21,11 @@
2258 #ifndef __ASM_CACHE_H
2259 #define __ASM_CACHE_H
2260
2261+#include <linux/const.h>
2262+
2263 /* Bytes per L1 cache line */
2264-#define L1_CACHE_SHIFT (5)
2265-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2266+#define L1_CACHE_SHIFT 5
2267+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2268
2269 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2270 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2271diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2272index 3fad89e..3047da5 100644
2273--- a/arch/ia64/include/asm/atomic.h
2274+++ b/arch/ia64/include/asm/atomic.h
2275@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2276 #define atomic64_inc(v) atomic64_add(1, (v))
2277 #define atomic64_dec(v) atomic64_sub(1, (v))
2278
2279+#define atomic64_read_unchecked(v) atomic64_read(v)
2280+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2281+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2282+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2283+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2284+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2285+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2286+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2287+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2288+
2289 /* Atomic operations are already serializing */
2290 #define smp_mb__before_atomic_dec() barrier()
2291 #define smp_mb__after_atomic_dec() barrier()
2292diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2293index 988254a..e1ee885 100644
2294--- a/arch/ia64/include/asm/cache.h
2295+++ b/arch/ia64/include/asm/cache.h
2296@@ -1,6 +1,7 @@
2297 #ifndef _ASM_IA64_CACHE_H
2298 #define _ASM_IA64_CACHE_H
2299
2300+#include <linux/const.h>
2301
2302 /*
2303 * Copyright (C) 1998-2000 Hewlett-Packard Co
2304@@ -9,7 +10,7 @@
2305
2306 /* Bytes per L1 (data) cache line. */
2307 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2308-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2309+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2310
2311 #ifdef CONFIG_SMP
2312 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2313diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2314index b5298eb..67c6e62 100644
2315--- a/arch/ia64/include/asm/elf.h
2316+++ b/arch/ia64/include/asm/elf.h
2317@@ -42,6 +42,13 @@
2318 */
2319 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2320
2321+#ifdef CONFIG_PAX_ASLR
2322+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2323+
2324+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2325+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2326+#endif
2327+
2328 #define PT_IA_64_UNWIND 0x70000001
2329
2330 /* IA-64 relocations: */
2331diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2332index 96a8d92..617a1cf 100644
2333--- a/arch/ia64/include/asm/pgalloc.h
2334+++ b/arch/ia64/include/asm/pgalloc.h
2335@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2336 pgd_val(*pgd_entry) = __pa(pud);
2337 }
2338
2339+static inline void
2340+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2341+{
2342+ pgd_populate(mm, pgd_entry, pud);
2343+}
2344+
2345 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2346 {
2347 return quicklist_alloc(0, GFP_KERNEL, NULL);
2348@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2349 pud_val(*pud_entry) = __pa(pmd);
2350 }
2351
2352+static inline void
2353+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2354+{
2355+ pud_populate(mm, pud_entry, pmd);
2356+}
2357+
2358 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2359 {
2360 return quicklist_alloc(0, GFP_KERNEL, NULL);
2361diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2362index 1a97af3..7529d31 100644
2363--- a/arch/ia64/include/asm/pgtable.h
2364+++ b/arch/ia64/include/asm/pgtable.h
2365@@ -12,7 +12,7 @@
2366 * David Mosberger-Tang <davidm@hpl.hp.com>
2367 */
2368
2369-
2370+#include <linux/const.h>
2371 #include <asm/mman.h>
2372 #include <asm/page.h>
2373 #include <asm/processor.h>
2374@@ -143,6 +143,17 @@
2375 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2376 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2377 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2378+
2379+#ifdef CONFIG_PAX_PAGEEXEC
2380+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2381+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2382+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2383+#else
2384+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2385+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2386+# define PAGE_COPY_NOEXEC PAGE_COPY
2387+#endif
2388+
2389 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2390 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2391 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2392diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2393index b77768d..e0795eb 100644
2394--- a/arch/ia64/include/asm/spinlock.h
2395+++ b/arch/ia64/include/asm/spinlock.h
2396@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2397 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2398
2399 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2400- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2401+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2402 }
2403
2404 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2405diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2406index 449c8c0..432a3d2 100644
2407--- a/arch/ia64/include/asm/uaccess.h
2408+++ b/arch/ia64/include/asm/uaccess.h
2409@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2410 const void *__cu_from = (from); \
2411 long __cu_len = (n); \
2412 \
2413- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2414+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2415 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2416 __cu_len; \
2417 })
2418@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2419 long __cu_len = (n); \
2420 \
2421 __chk_user_ptr(__cu_from); \
2422- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2423+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2424 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2425 __cu_len; \
2426 })
2427diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2428index 24603be..948052d 100644
2429--- a/arch/ia64/kernel/module.c
2430+++ b/arch/ia64/kernel/module.c
2431@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2432 void
2433 module_free (struct module *mod, void *module_region)
2434 {
2435- if (mod && mod->arch.init_unw_table &&
2436- module_region == mod->module_init) {
2437+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2438 unw_remove_unwind_table(mod->arch.init_unw_table);
2439 mod->arch.init_unw_table = NULL;
2440 }
2441@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2442 }
2443
2444 static inline int
2445+in_init_rx (const struct module *mod, uint64_t addr)
2446+{
2447+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2448+}
2449+
2450+static inline int
2451+in_init_rw (const struct module *mod, uint64_t addr)
2452+{
2453+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2454+}
2455+
2456+static inline int
2457 in_init (const struct module *mod, uint64_t addr)
2458 {
2459- return addr - (uint64_t) mod->module_init < mod->init_size;
2460+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2461+}
2462+
2463+static inline int
2464+in_core_rx (const struct module *mod, uint64_t addr)
2465+{
2466+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2467+}
2468+
2469+static inline int
2470+in_core_rw (const struct module *mod, uint64_t addr)
2471+{
2472+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2473 }
2474
2475 static inline int
2476 in_core (const struct module *mod, uint64_t addr)
2477 {
2478- return addr - (uint64_t) mod->module_core < mod->core_size;
2479+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2480 }
2481
2482 static inline int
2483@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2484 break;
2485
2486 case RV_BDREL:
2487- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2488+ if (in_init_rx(mod, val))
2489+ val -= (uint64_t) mod->module_init_rx;
2490+ else if (in_init_rw(mod, val))
2491+ val -= (uint64_t) mod->module_init_rw;
2492+ else if (in_core_rx(mod, val))
2493+ val -= (uint64_t) mod->module_core_rx;
2494+ else if (in_core_rw(mod, val))
2495+ val -= (uint64_t) mod->module_core_rw;
2496 break;
2497
2498 case RV_LTV:
2499@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2500 * addresses have been selected...
2501 */
2502 uint64_t gp;
2503- if (mod->core_size > MAX_LTOFF)
2504+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2505 /*
2506 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2507 * at the end of the module.
2508 */
2509- gp = mod->core_size - MAX_LTOFF / 2;
2510+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2511 else
2512- gp = mod->core_size / 2;
2513- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2514+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2515+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2516 mod->arch.gp = gp;
2517 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2518 }
2519diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2520index 609d500..7dde2a8 100644
2521--- a/arch/ia64/kernel/sys_ia64.c
2522+++ b/arch/ia64/kernel/sys_ia64.c
2523@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2524 if (REGION_NUMBER(addr) == RGN_HPAGE)
2525 addr = 0;
2526 #endif
2527+
2528+#ifdef CONFIG_PAX_RANDMMAP
2529+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2530+ addr = mm->free_area_cache;
2531+ else
2532+#endif
2533+
2534 if (!addr)
2535 addr = mm->free_area_cache;
2536
2537@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2538 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2539 /* At this point: (!vma || addr < vma->vm_end). */
2540 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2541- if (start_addr != TASK_UNMAPPED_BASE) {
2542+ if (start_addr != mm->mmap_base) {
2543 /* Start a new search --- just in case we missed some holes. */
2544- addr = TASK_UNMAPPED_BASE;
2545+ addr = mm->mmap_base;
2546 goto full_search;
2547 }
2548 return -ENOMEM;
2549 }
2550- if (!vma || addr + len <= vma->vm_start) {
2551+ if (check_heap_stack_gap(vma, addr, len)) {
2552 /* Remember the address where we stopped this search: */
2553 mm->free_area_cache = addr + len;
2554 return addr;
2555diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2556index 53c0ba0..2accdde 100644
2557--- a/arch/ia64/kernel/vmlinux.lds.S
2558+++ b/arch/ia64/kernel/vmlinux.lds.S
2559@@ -199,7 +199,7 @@ SECTIONS {
2560 /* Per-cpu data: */
2561 . = ALIGN(PERCPU_PAGE_SIZE);
2562 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2563- __phys_per_cpu_start = __per_cpu_load;
2564+ __phys_per_cpu_start = per_cpu_load;
2565 /*
2566 * ensure percpu data fits
2567 * into percpu page size
2568diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2569index 20b3593..1ce77f0 100644
2570--- a/arch/ia64/mm/fault.c
2571+++ b/arch/ia64/mm/fault.c
2572@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2573 return pte_present(pte);
2574 }
2575
2576+#ifdef CONFIG_PAX_PAGEEXEC
2577+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2578+{
2579+ unsigned long i;
2580+
2581+ printk(KERN_ERR "PAX: bytes at PC: ");
2582+ for (i = 0; i < 8; i++) {
2583+ unsigned int c;
2584+ if (get_user(c, (unsigned int *)pc+i))
2585+ printk(KERN_CONT "???????? ");
2586+ else
2587+ printk(KERN_CONT "%08x ", c);
2588+ }
2589+ printk("\n");
2590+}
2591+#endif
2592+
2593 void __kprobes
2594 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2595 {
2596@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2597 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2598 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2599
2600- if ((vma->vm_flags & mask) != mask)
2601+ if ((vma->vm_flags & mask) != mask) {
2602+
2603+#ifdef CONFIG_PAX_PAGEEXEC
2604+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2605+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2606+ goto bad_area;
2607+
2608+ up_read(&mm->mmap_sem);
2609+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2610+ do_group_exit(SIGKILL);
2611+ }
2612+#endif
2613+
2614 goto bad_area;
2615
2616+ }
2617+
2618 /*
2619 * If for any reason at all we couldn't handle the fault, make
2620 * sure we exit gracefully rather than endlessly redo the
2621diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2622index 5ca674b..e0e1b70 100644
2623--- a/arch/ia64/mm/hugetlbpage.c
2624+++ b/arch/ia64/mm/hugetlbpage.c
2625@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2626 /* At this point: (!vmm || addr < vmm->vm_end). */
2627 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2628 return -ENOMEM;
2629- if (!vmm || (addr + len) <= vmm->vm_start)
2630+ if (check_heap_stack_gap(vmm, addr, len))
2631 return addr;
2632 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2633 }
2634diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2635index 13df239d..cb52116 100644
2636--- a/arch/ia64/mm/init.c
2637+++ b/arch/ia64/mm/init.c
2638@@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2639 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2640 vma->vm_end = vma->vm_start + PAGE_SIZE;
2641 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2642+
2643+#ifdef CONFIG_PAX_PAGEEXEC
2644+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2645+ vma->vm_flags &= ~VM_EXEC;
2646+
2647+#ifdef CONFIG_PAX_MPROTECT
2648+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2649+ vma->vm_flags &= ~VM_MAYEXEC;
2650+#endif
2651+
2652+ }
2653+#endif
2654+
2655 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2656 down_write(&current->mm->mmap_sem);
2657 if (insert_vm_struct(current->mm, vma)) {
2658diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2659index 40b3ee9..8c2c112 100644
2660--- a/arch/m32r/include/asm/cache.h
2661+++ b/arch/m32r/include/asm/cache.h
2662@@ -1,8 +1,10 @@
2663 #ifndef _ASM_M32R_CACHE_H
2664 #define _ASM_M32R_CACHE_H
2665
2666+#include <linux/const.h>
2667+
2668 /* L1 cache line size */
2669 #define L1_CACHE_SHIFT 4
2670-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2671+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2672
2673 #endif /* _ASM_M32R_CACHE_H */
2674diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2675index 82abd15..d95ae5d 100644
2676--- a/arch/m32r/lib/usercopy.c
2677+++ b/arch/m32r/lib/usercopy.c
2678@@ -14,6 +14,9 @@
2679 unsigned long
2680 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2681 {
2682+ if ((long)n < 0)
2683+ return n;
2684+
2685 prefetch(from);
2686 if (access_ok(VERIFY_WRITE, to, n))
2687 __copy_user(to,from,n);
2688@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2689 unsigned long
2690 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2691 {
2692+ if ((long)n < 0)
2693+ return n;
2694+
2695 prefetchw(to);
2696 if (access_ok(VERIFY_READ, from, n))
2697 __copy_user_zeroing(to,from,n);
2698diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2699index 0395c51..5f26031 100644
2700--- a/arch/m68k/include/asm/cache.h
2701+++ b/arch/m68k/include/asm/cache.h
2702@@ -4,9 +4,11 @@
2703 #ifndef __ARCH_M68K_CACHE_H
2704 #define __ARCH_M68K_CACHE_H
2705
2706+#include <linux/const.h>
2707+
2708 /* bytes per L1 cache line */
2709 #define L1_CACHE_SHIFT 4
2710-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2711+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2712
2713 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2714
2715diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2716index 4efe96a..60e8699 100644
2717--- a/arch/microblaze/include/asm/cache.h
2718+++ b/arch/microblaze/include/asm/cache.h
2719@@ -13,11 +13,12 @@
2720 #ifndef _ASM_MICROBLAZE_CACHE_H
2721 #define _ASM_MICROBLAZE_CACHE_H
2722
2723+#include <linux/const.h>
2724 #include <asm/registers.h>
2725
2726 #define L1_CACHE_SHIFT 5
2727 /* word-granular cache in microblaze */
2728-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2729+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2730
2731 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2732
2733diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2734index 1d93f81..67794d0 100644
2735--- a/arch/mips/include/asm/atomic.h
2736+++ b/arch/mips/include/asm/atomic.h
2737@@ -21,6 +21,10 @@
2738 #include <asm/war.h>
2739 #include <asm/system.h>
2740
2741+#ifdef CONFIG_GENERIC_ATOMIC64
2742+#include <asm-generic/atomic64.h>
2743+#endif
2744+
2745 #define ATOMIC_INIT(i) { (i) }
2746
2747 /*
2748@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2749 */
2750 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2751
2752+#define atomic64_read_unchecked(v) atomic64_read(v)
2753+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2754+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2755+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2756+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2757+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2758+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2759+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2760+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2761+
2762 #endif /* CONFIG_64BIT */
2763
2764 /*
2765diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2766index b4db69f..8f3b093 100644
2767--- a/arch/mips/include/asm/cache.h
2768+++ b/arch/mips/include/asm/cache.h
2769@@ -9,10 +9,11 @@
2770 #ifndef _ASM_CACHE_H
2771 #define _ASM_CACHE_H
2772
2773+#include <linux/const.h>
2774 #include <kmalloc.h>
2775
2776 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2777-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2778+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2779
2780 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2781 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2782diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2783index 455c0ac..ad65fbe 100644
2784--- a/arch/mips/include/asm/elf.h
2785+++ b/arch/mips/include/asm/elf.h
2786@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2787 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2788 #endif
2789
2790+#ifdef CONFIG_PAX_ASLR
2791+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2792+
2793+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2794+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2795+#endif
2796+
2797 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2798 struct linux_binprm;
2799 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2800 int uses_interp);
2801
2802-struct mm_struct;
2803-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2804-#define arch_randomize_brk arch_randomize_brk
2805-
2806 #endif /* _ASM_ELF_H */
2807diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2808index da9bd7d..91aa7ab 100644
2809--- a/arch/mips/include/asm/page.h
2810+++ b/arch/mips/include/asm/page.h
2811@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2812 #ifdef CONFIG_CPU_MIPS32
2813 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2814 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2815- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2816+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2817 #else
2818 typedef struct { unsigned long long pte; } pte_t;
2819 #define pte_val(x) ((x).pte)
2820diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2821index 881d18b..cea38bc 100644
2822--- a/arch/mips/include/asm/pgalloc.h
2823+++ b/arch/mips/include/asm/pgalloc.h
2824@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2825 {
2826 set_pud(pud, __pud((unsigned long)pmd));
2827 }
2828+
2829+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2830+{
2831+ pud_populate(mm, pud, pmd);
2832+}
2833 #endif
2834
2835 /*
2836diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2837index 6018c80..7c37203 100644
2838--- a/arch/mips/include/asm/system.h
2839+++ b/arch/mips/include/asm/system.h
2840@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2841 */
2842 #define __ARCH_WANT_UNLOCKED_CTXSW
2843
2844-extern unsigned long arch_align_stack(unsigned long sp);
2845+#define arch_align_stack(x) ((x) & ~0xfUL)
2846
2847 #endif /* _ASM_SYSTEM_H */
2848diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2849index 0d85d8e..ec71487 100644
2850--- a/arch/mips/include/asm/thread_info.h
2851+++ b/arch/mips/include/asm/thread_info.h
2852@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2853 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2854 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2855 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2856+/* li takes a 32bit immediate */
2857+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2858 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2859
2860 #ifdef CONFIG_MIPS32_O32
2861@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2862 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2863 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2864 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2865+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2866+
2867+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2868
2869 /* work to do in syscall_trace_leave() */
2870-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2871+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2872
2873 /* work to do on interrupt/exception return */
2874 #define _TIF_WORK_MASK (0x0000ffef & \
2875 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2876 /* work to do on any return to u-space */
2877-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2878+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2879
2880 #endif /* __KERNEL__ */
2881
2882diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2883index 9fdd8bc..4bd7f1a 100644
2884--- a/arch/mips/kernel/binfmt_elfn32.c
2885+++ b/arch/mips/kernel/binfmt_elfn32.c
2886@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2887 #undef ELF_ET_DYN_BASE
2888 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2889
2890+#ifdef CONFIG_PAX_ASLR
2891+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2892+
2893+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2894+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2895+#endif
2896+
2897 #include <asm/processor.h>
2898 #include <linux/module.h>
2899 #include <linux/elfcore.h>
2900diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2901index ff44823..97f8906 100644
2902--- a/arch/mips/kernel/binfmt_elfo32.c
2903+++ b/arch/mips/kernel/binfmt_elfo32.c
2904@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2905 #undef ELF_ET_DYN_BASE
2906 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2907
2908+#ifdef CONFIG_PAX_ASLR
2909+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2910+
2911+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2912+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2913+#endif
2914+
2915 #include <asm/processor.h>
2916
2917 /*
2918diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2919index 7955409..ceaea7c 100644
2920--- a/arch/mips/kernel/process.c
2921+++ b/arch/mips/kernel/process.c
2922@@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2923 out:
2924 return pc;
2925 }
2926-
2927-/*
2928- * Don't forget that the stack pointer must be aligned on a 8 bytes
2929- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2930- */
2931-unsigned long arch_align_stack(unsigned long sp)
2932-{
2933- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2934- sp -= get_random_int() & ~PAGE_MASK;
2935-
2936- return sp & ALMASK;
2937-}
2938diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2939index 7786b60..3e38c72 100644
2940--- a/arch/mips/kernel/ptrace.c
2941+++ b/arch/mips/kernel/ptrace.c
2942@@ -529,6 +529,10 @@ static inline int audit_arch(void)
2943 return arch;
2944 }
2945
2946+#ifdef CONFIG_GRKERNSEC_SETXID
2947+extern void gr_delayed_cred_worker(void);
2948+#endif
2949+
2950 /*
2951 * Notification of system call entry/exit
2952 * - triggered by current->work.syscall_trace
2953@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2954 /* do the secure computing check first */
2955 secure_computing(regs->regs[2]);
2956
2957+#ifdef CONFIG_GRKERNSEC_SETXID
2958+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2959+ gr_delayed_cred_worker();
2960+#endif
2961+
2962 if (!(current->ptrace & PT_PTRACED))
2963 goto out;
2964
2965diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
2966index a632bc1..0b77c7c 100644
2967--- a/arch/mips/kernel/scall32-o32.S
2968+++ b/arch/mips/kernel/scall32-o32.S
2969@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
2970
2971 stack_done:
2972 lw t0, TI_FLAGS($28) # syscall tracing enabled?
2973- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2974+ li t1, _TIF_SYSCALL_WORK
2975 and t0, t1
2976 bnez t0, syscall_trace_entry # -> yes
2977
2978diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
2979index 3b5a5e9..e1ee86d 100644
2980--- a/arch/mips/kernel/scall64-64.S
2981+++ b/arch/mips/kernel/scall64-64.S
2982@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
2983
2984 sd a3, PT_R26(sp) # save a3 for syscall restarting
2985
2986- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2987+ li t1, _TIF_SYSCALL_WORK
2988 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
2989 and t0, t1, t0
2990 bnez t0, syscall_trace_entry
2991diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
2992index 6be6f70..1859577 100644
2993--- a/arch/mips/kernel/scall64-n32.S
2994+++ b/arch/mips/kernel/scall64-n32.S
2995@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
2996
2997 sd a3, PT_R26(sp) # save a3 for syscall restarting
2998
2999- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3000+ li t1, _TIF_SYSCALL_WORK
3001 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3002 and t0, t1, t0
3003 bnez t0, n32_syscall_trace_entry
3004diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3005index 5422855..74e63a3 100644
3006--- a/arch/mips/kernel/scall64-o32.S
3007+++ b/arch/mips/kernel/scall64-o32.S
3008@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3009 PTR 4b, bad_stack
3010 .previous
3011
3012- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3013+ li t1, _TIF_SYSCALL_WORK
3014 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3015 and t0, t1, t0
3016 bnez t0, trace_a_syscall
3017diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3018index 69ebd58..e4bff83 100644
3019--- a/arch/mips/mm/fault.c
3020+++ b/arch/mips/mm/fault.c
3021@@ -28,6 +28,23 @@
3022 #include <asm/highmem.h> /* For VMALLOC_END */
3023 #include <linux/kdebug.h>
3024
3025+#ifdef CONFIG_PAX_PAGEEXEC
3026+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3027+{
3028+ unsigned long i;
3029+
3030+ printk(KERN_ERR "PAX: bytes at PC: ");
3031+ for (i = 0; i < 5; i++) {
3032+ unsigned int c;
3033+ if (get_user(c, (unsigned int *)pc+i))
3034+ printk(KERN_CONT "???????? ");
3035+ else
3036+ printk(KERN_CONT "%08x ", c);
3037+ }
3038+ printk("\n");
3039+}
3040+#endif
3041+
3042 /*
3043 * This routine handles page faults. It determines the address,
3044 * and the problem, and then passes it off to one of the appropriate
3045diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3046index 302d779..7d35bf8 100644
3047--- a/arch/mips/mm/mmap.c
3048+++ b/arch/mips/mm/mmap.c
3049@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3050 do_color_align = 1;
3051
3052 /* requesting a specific address */
3053+
3054+#ifdef CONFIG_PAX_RANDMMAP
3055+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3056+#endif
3057+
3058 if (addr) {
3059 if (do_color_align)
3060 addr = COLOUR_ALIGN(addr, pgoff);
3061@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3062 addr = PAGE_ALIGN(addr);
3063
3064 vma = find_vma(mm, addr);
3065- if (TASK_SIZE - len >= addr &&
3066- (!vma || addr + len <= vma->vm_start))
3067+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3068 return addr;
3069 }
3070
3071@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3072 /* At this point: (!vma || addr < vma->vm_end). */
3073 if (TASK_SIZE - len < addr)
3074 return -ENOMEM;
3075- if (!vma || addr + len <= vma->vm_start)
3076+ if (check_heap_stack_gap(vma, addr, len))
3077 return addr;
3078 addr = vma->vm_end;
3079 if (do_color_align)
3080@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3081 /* make sure it can fit in the remaining address space */
3082 if (likely(addr > len)) {
3083 vma = find_vma(mm, addr - len);
3084- if (!vma || addr <= vma->vm_start) {
3085+ if (check_heap_stack_gap(vma, addr - len, len)) {
3086 /* cache the address as a hint for next time */
3087 return mm->free_area_cache = addr - len;
3088 }
3089@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3090 * return with success:
3091 */
3092 vma = find_vma(mm, addr);
3093- if (likely(!vma || addr + len <= vma->vm_start)) {
3094+ if (check_heap_stack_gap(vma, addr, len)) {
3095 /* cache the address as a hint for next time */
3096 return mm->free_area_cache = addr;
3097 }
3098@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3099 mm->unmap_area = arch_unmap_area_topdown;
3100 }
3101 }
3102-
3103-static inline unsigned long brk_rnd(void)
3104-{
3105- unsigned long rnd = get_random_int();
3106-
3107- rnd = rnd << PAGE_SHIFT;
3108- /* 8MB for 32bit, 256MB for 64bit */
3109- if (TASK_IS_32BIT_ADDR)
3110- rnd = rnd & 0x7ffffful;
3111- else
3112- rnd = rnd & 0xffffffful;
3113-
3114- return rnd;
3115-}
3116-
3117-unsigned long arch_randomize_brk(struct mm_struct *mm)
3118-{
3119- unsigned long base = mm->brk;
3120- unsigned long ret;
3121-
3122- ret = PAGE_ALIGN(base + brk_rnd());
3123-
3124- if (ret < mm->brk)
3125- return mm->brk;
3126-
3127- return ret;
3128-}
3129diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3130index 967d144..db12197 100644
3131--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3132+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3133@@ -11,12 +11,14 @@
3134 #ifndef _ASM_PROC_CACHE_H
3135 #define _ASM_PROC_CACHE_H
3136
3137+#include <linux/const.h>
3138+
3139 /* L1 cache */
3140
3141 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3142 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3143-#define L1_CACHE_BYTES 16 /* bytes per entry */
3144 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3145+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3146 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3147
3148 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3149diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3150index bcb5df2..84fabd2 100644
3151--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3152+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3153@@ -16,13 +16,15 @@
3154 #ifndef _ASM_PROC_CACHE_H
3155 #define _ASM_PROC_CACHE_H
3156
3157+#include <linux/const.h>
3158+
3159 /*
3160 * L1 cache
3161 */
3162 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3163 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3164-#define L1_CACHE_BYTES 32 /* bytes per entry */
3165 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3166+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3167 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3168
3169 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3170diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3171index 4ce7a01..449202a 100644
3172--- a/arch/openrisc/include/asm/cache.h
3173+++ b/arch/openrisc/include/asm/cache.h
3174@@ -19,11 +19,13 @@
3175 #ifndef __ASM_OPENRISC_CACHE_H
3176 #define __ASM_OPENRISC_CACHE_H
3177
3178+#include <linux/const.h>
3179+
3180 /* FIXME: How can we replace these with values from the CPU...
3181 * they shouldn't be hard-coded!
3182 */
3183
3184-#define L1_CACHE_BYTES 16
3185 #define L1_CACHE_SHIFT 4
3186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3187
3188 #endif /* __ASM_OPENRISC_CACHE_H */
3189diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3190index 4054b31..a10c105 100644
3191--- a/arch/parisc/include/asm/atomic.h
3192+++ b/arch/parisc/include/asm/atomic.h
3193@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3194
3195 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3196
3197+#define atomic64_read_unchecked(v) atomic64_read(v)
3198+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3199+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3200+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3201+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3202+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3203+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3204+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3205+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3206+
3207 #endif /* !CONFIG_64BIT */
3208
3209
3210diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3211index 47f11c7..3420df2 100644
3212--- a/arch/parisc/include/asm/cache.h
3213+++ b/arch/parisc/include/asm/cache.h
3214@@ -5,6 +5,7 @@
3215 #ifndef __ARCH_PARISC_CACHE_H
3216 #define __ARCH_PARISC_CACHE_H
3217
3218+#include <linux/const.h>
3219
3220 /*
3221 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3222@@ -15,13 +16,13 @@
3223 * just ruin performance.
3224 */
3225 #ifdef CONFIG_PA20
3226-#define L1_CACHE_BYTES 64
3227 #define L1_CACHE_SHIFT 6
3228 #else
3229-#define L1_CACHE_BYTES 32
3230 #define L1_CACHE_SHIFT 5
3231 #endif
3232
3233+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3234+
3235 #ifndef __ASSEMBLY__
3236
3237 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3238diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3239index 19f6cb1..6c78cf2 100644
3240--- a/arch/parisc/include/asm/elf.h
3241+++ b/arch/parisc/include/asm/elf.h
3242@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3243
3244 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3245
3246+#ifdef CONFIG_PAX_ASLR
3247+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3248+
3249+#define PAX_DELTA_MMAP_LEN 16
3250+#define PAX_DELTA_STACK_LEN 16
3251+#endif
3252+
3253 /* This yields a mask that user programs can use to figure out what
3254 instruction set this CPU supports. This could be done in user space,
3255 but it's not easy, and we've already done it here. */
3256diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3257index fc987a1..6e068ef 100644
3258--- a/arch/parisc/include/asm/pgalloc.h
3259+++ b/arch/parisc/include/asm/pgalloc.h
3260@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3261 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3262 }
3263
3264+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3265+{
3266+ pgd_populate(mm, pgd, pmd);
3267+}
3268+
3269 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3270 {
3271 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3272@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3273 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3274 #define pmd_free(mm, x) do { } while (0)
3275 #define pgd_populate(mm, pmd, pte) BUG()
3276+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3277
3278 #endif
3279
3280diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3281index 22dadeb..f6c2be4 100644
3282--- a/arch/parisc/include/asm/pgtable.h
3283+++ b/arch/parisc/include/asm/pgtable.h
3284@@ -210,6 +210,17 @@ struct vm_area_struct;
3285 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3286 #define PAGE_COPY PAGE_EXECREAD
3287 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3288+
3289+#ifdef CONFIG_PAX_PAGEEXEC
3290+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3291+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3292+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3293+#else
3294+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3295+# define PAGE_COPY_NOEXEC PAGE_COPY
3296+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3297+#endif
3298+
3299 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3300 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3301 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3302diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3303index 5e34ccf..672bc9c 100644
3304--- a/arch/parisc/kernel/module.c
3305+++ b/arch/parisc/kernel/module.c
3306@@ -98,16 +98,38 @@
3307
3308 /* three functions to determine where in the module core
3309 * or init pieces the location is */
3310+static inline int in_init_rx(struct module *me, void *loc)
3311+{
3312+ return (loc >= me->module_init_rx &&
3313+ loc < (me->module_init_rx + me->init_size_rx));
3314+}
3315+
3316+static inline int in_init_rw(struct module *me, void *loc)
3317+{
3318+ return (loc >= me->module_init_rw &&
3319+ loc < (me->module_init_rw + me->init_size_rw));
3320+}
3321+
3322 static inline int in_init(struct module *me, void *loc)
3323 {
3324- return (loc >= me->module_init &&
3325- loc <= (me->module_init + me->init_size));
3326+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3327+}
3328+
3329+static inline int in_core_rx(struct module *me, void *loc)
3330+{
3331+ return (loc >= me->module_core_rx &&
3332+ loc < (me->module_core_rx + me->core_size_rx));
3333+}
3334+
3335+static inline int in_core_rw(struct module *me, void *loc)
3336+{
3337+ return (loc >= me->module_core_rw &&
3338+ loc < (me->module_core_rw + me->core_size_rw));
3339 }
3340
3341 static inline int in_core(struct module *me, void *loc)
3342 {
3343- return (loc >= me->module_core &&
3344- loc <= (me->module_core + me->core_size));
3345+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3346 }
3347
3348 static inline int in_local(struct module *me, void *loc)
3349@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3350 }
3351
3352 /* align things a bit */
3353- me->core_size = ALIGN(me->core_size, 16);
3354- me->arch.got_offset = me->core_size;
3355- me->core_size += gots * sizeof(struct got_entry);
3356+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3357+ me->arch.got_offset = me->core_size_rw;
3358+ me->core_size_rw += gots * sizeof(struct got_entry);
3359
3360- me->core_size = ALIGN(me->core_size, 16);
3361- me->arch.fdesc_offset = me->core_size;
3362- me->core_size += fdescs * sizeof(Elf_Fdesc);
3363+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3364+ me->arch.fdesc_offset = me->core_size_rw;
3365+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3366
3367 me->arch.got_max = gots;
3368 me->arch.fdesc_max = fdescs;
3369@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3370
3371 BUG_ON(value == 0);
3372
3373- got = me->module_core + me->arch.got_offset;
3374+ got = me->module_core_rw + me->arch.got_offset;
3375 for (i = 0; got[i].addr; i++)
3376 if (got[i].addr == value)
3377 goto out;
3378@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3379 #ifdef CONFIG_64BIT
3380 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3381 {
3382- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3383+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3384
3385 if (!value) {
3386 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3387@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3388
3389 /* Create new one */
3390 fdesc->addr = value;
3391- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3392+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3393 return (Elf_Addr)fdesc;
3394 }
3395 #endif /* CONFIG_64BIT */
3396@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3397
3398 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3399 end = table + sechdrs[me->arch.unwind_section].sh_size;
3400- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3401+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3402
3403 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3404 me->arch.unwind_section, table, end, gp);
3405diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3406index c9b9322..02d8940 100644
3407--- a/arch/parisc/kernel/sys_parisc.c
3408+++ b/arch/parisc/kernel/sys_parisc.c
3409@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3410 /* At this point: (!vma || addr < vma->vm_end). */
3411 if (TASK_SIZE - len < addr)
3412 return -ENOMEM;
3413- if (!vma || addr + len <= vma->vm_start)
3414+ if (check_heap_stack_gap(vma, addr, len))
3415 return addr;
3416 addr = vma->vm_end;
3417 }
3418@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3419 /* At this point: (!vma || addr < vma->vm_end). */
3420 if (TASK_SIZE - len < addr)
3421 return -ENOMEM;
3422- if (!vma || addr + len <= vma->vm_start)
3423+ if (check_heap_stack_gap(vma, addr, len))
3424 return addr;
3425 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3426 if (addr < vma->vm_end) /* handle wraparound */
3427@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3428 if (flags & MAP_FIXED)
3429 return addr;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (filp) {
3435 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3436diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3437index f19e660..414fe24 100644
3438--- a/arch/parisc/kernel/traps.c
3439+++ b/arch/parisc/kernel/traps.c
3440@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3441
3442 down_read(&current->mm->mmap_sem);
3443 vma = find_vma(current->mm,regs->iaoq[0]);
3444- if (vma && (regs->iaoq[0] >= vma->vm_start)
3445- && (vma->vm_flags & VM_EXEC)) {
3446-
3447+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3448 fault_address = regs->iaoq[0];
3449 fault_space = regs->iasq[0];
3450
3451diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3452index 18162ce..94de376 100644
3453--- a/arch/parisc/mm/fault.c
3454+++ b/arch/parisc/mm/fault.c
3455@@ -15,6 +15,7 @@
3456 #include <linux/sched.h>
3457 #include <linux/interrupt.h>
3458 #include <linux/module.h>
3459+#include <linux/unistd.h>
3460
3461 #include <asm/uaccess.h>
3462 #include <asm/traps.h>
3463@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3464 static unsigned long
3465 parisc_acctyp(unsigned long code, unsigned int inst)
3466 {
3467- if (code == 6 || code == 16)
3468+ if (code == 6 || code == 7 || code == 16)
3469 return VM_EXEC;
3470
3471 switch (inst & 0xf0000000) {
3472@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3473 }
3474 #endif
3475
3476+#ifdef CONFIG_PAX_PAGEEXEC
3477+/*
3478+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3479+ *
3480+ * returns 1 when task should be killed
3481+ * 2 when rt_sigreturn trampoline was detected
3482+ * 3 when unpatched PLT trampoline was detected
3483+ */
3484+static int pax_handle_fetch_fault(struct pt_regs *regs)
3485+{
3486+
3487+#ifdef CONFIG_PAX_EMUPLT
3488+ int err;
3489+
3490+ do { /* PaX: unpatched PLT emulation */
3491+ unsigned int bl, depwi;
3492+
3493+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3494+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3495+
3496+ if (err)
3497+ break;
3498+
3499+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3500+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3501+
3502+ err = get_user(ldw, (unsigned int *)addr);
3503+ err |= get_user(bv, (unsigned int *)(addr+4));
3504+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3505+
3506+ if (err)
3507+ break;
3508+
3509+ if (ldw == 0x0E801096U &&
3510+ bv == 0xEAC0C000U &&
3511+ ldw2 == 0x0E881095U)
3512+ {
3513+ unsigned int resolver, map;
3514+
3515+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3516+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3517+ if (err)
3518+ break;
3519+
3520+ regs->gr[20] = instruction_pointer(regs)+8;
3521+ regs->gr[21] = map;
3522+ regs->gr[22] = resolver;
3523+ regs->iaoq[0] = resolver | 3UL;
3524+ regs->iaoq[1] = regs->iaoq[0] + 4;
3525+ return 3;
3526+ }
3527+ }
3528+ } while (0);
3529+#endif
3530+
3531+#ifdef CONFIG_PAX_EMUTRAMP
3532+
3533+#ifndef CONFIG_PAX_EMUSIGRT
3534+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3535+ return 1;
3536+#endif
3537+
3538+ do { /* PaX: rt_sigreturn emulation */
3539+ unsigned int ldi1, ldi2, bel, nop;
3540+
3541+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3542+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3543+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3544+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3545+
3546+ if (err)
3547+ break;
3548+
3549+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3550+ ldi2 == 0x3414015AU &&
3551+ bel == 0xE4008200U &&
3552+ nop == 0x08000240U)
3553+ {
3554+ regs->gr[25] = (ldi1 & 2) >> 1;
3555+ regs->gr[20] = __NR_rt_sigreturn;
3556+ regs->gr[31] = regs->iaoq[1] + 16;
3557+ regs->sr[0] = regs->iasq[1];
3558+ regs->iaoq[0] = 0x100UL;
3559+ regs->iaoq[1] = regs->iaoq[0] + 4;
3560+ regs->iasq[0] = regs->sr[2];
3561+ regs->iasq[1] = regs->sr[2];
3562+ return 2;
3563+ }
3564+ } while (0);
3565+#endif
3566+
3567+ return 1;
3568+}
3569+
3570+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3571+{
3572+ unsigned long i;
3573+
3574+ printk(KERN_ERR "PAX: bytes at PC: ");
3575+ for (i = 0; i < 5; i++) {
3576+ unsigned int c;
3577+ if (get_user(c, (unsigned int *)pc+i))
3578+ printk(KERN_CONT "???????? ");
3579+ else
3580+ printk(KERN_CONT "%08x ", c);
3581+ }
3582+ printk("\n");
3583+}
3584+#endif
3585+
3586 int fixup_exception(struct pt_regs *regs)
3587 {
3588 const struct exception_table_entry *fix;
3589@@ -192,8 +303,33 @@ good_area:
3590
3591 acc_type = parisc_acctyp(code,regs->iir);
3592
3593- if ((vma->vm_flags & acc_type) != acc_type)
3594+ if ((vma->vm_flags & acc_type) != acc_type) {
3595+
3596+#ifdef CONFIG_PAX_PAGEEXEC
3597+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3598+ (address & ~3UL) == instruction_pointer(regs))
3599+ {
3600+ up_read(&mm->mmap_sem);
3601+ switch (pax_handle_fetch_fault(regs)) {
3602+
3603+#ifdef CONFIG_PAX_EMUPLT
3604+ case 3:
3605+ return;
3606+#endif
3607+
3608+#ifdef CONFIG_PAX_EMUTRAMP
3609+ case 2:
3610+ return;
3611+#endif
3612+
3613+ }
3614+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3615+ do_group_exit(SIGKILL);
3616+ }
3617+#endif
3618+
3619 goto bad_area;
3620+ }
3621
3622 /*
3623 * If for any reason at all we couldn't handle the fault, make
3624diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3625index 02e41b5..ec6e26c 100644
3626--- a/arch/powerpc/include/asm/atomic.h
3627+++ b/arch/powerpc/include/asm/atomic.h
3628@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3629
3630 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3631
3632+#define atomic64_read_unchecked(v) atomic64_read(v)
3633+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3634+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3635+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3636+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3637+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3638+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3639+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3640+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3641+
3642 #endif /* __powerpc64__ */
3643
3644 #endif /* __KERNEL__ */
3645diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3646index 4b50941..5605819 100644
3647--- a/arch/powerpc/include/asm/cache.h
3648+++ b/arch/powerpc/include/asm/cache.h
3649@@ -3,6 +3,7 @@
3650
3651 #ifdef __KERNEL__
3652
3653+#include <linux/const.h>
3654
3655 /* bytes per L1 cache line */
3656 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3657@@ -22,7 +23,7 @@
3658 #define L1_CACHE_SHIFT 7
3659 #endif
3660
3661-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3662+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3663
3664 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3665
3666diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3667index 3bf9cca..e7457d0 100644
3668--- a/arch/powerpc/include/asm/elf.h
3669+++ b/arch/powerpc/include/asm/elf.h
3670@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3671 the loader. We need to make sure that it is out of the way of the program
3672 that it will "exec", and that there is sufficient room for the brk. */
3673
3674-extern unsigned long randomize_et_dyn(unsigned long base);
3675-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3676+#define ELF_ET_DYN_BASE (0x20000000)
3677+
3678+#ifdef CONFIG_PAX_ASLR
3679+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3680+
3681+#ifdef __powerpc64__
3682+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3683+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3684+#else
3685+#define PAX_DELTA_MMAP_LEN 15
3686+#define PAX_DELTA_STACK_LEN 15
3687+#endif
3688+#endif
3689
3690 /*
3691 * Our registers are always unsigned longs, whether we're a 32 bit
3692@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3693 (0x7ff >> (PAGE_SHIFT - 12)) : \
3694 (0x3ffff >> (PAGE_SHIFT - 12)))
3695
3696-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3697-#define arch_randomize_brk arch_randomize_brk
3698-
3699 #endif /* __KERNEL__ */
3700
3701 /*
3702diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3703index bca8fdc..61e9580 100644
3704--- a/arch/powerpc/include/asm/kmap_types.h
3705+++ b/arch/powerpc/include/asm/kmap_types.h
3706@@ -27,6 +27,7 @@ enum km_type {
3707 KM_PPC_SYNC_PAGE,
3708 KM_PPC_SYNC_ICACHE,
3709 KM_KDB,
3710+ KM_CLEARPAGE,
3711 KM_TYPE_NR
3712 };
3713
3714diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3715index d4a7f64..451de1c 100644
3716--- a/arch/powerpc/include/asm/mman.h
3717+++ b/arch/powerpc/include/asm/mman.h
3718@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3719 }
3720 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3721
3722-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3723+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3724 {
3725 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3726 }
3727diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3728index f072e97..b436dee 100644
3729--- a/arch/powerpc/include/asm/page.h
3730+++ b/arch/powerpc/include/asm/page.h
3731@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3732 * and needs to be executable. This means the whole heap ends
3733 * up being executable.
3734 */
3735-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3736- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3737+#define VM_DATA_DEFAULT_FLAGS32 \
3738+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3739+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3740
3741 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3742 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3743@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3744 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3745 #endif
3746
3747+#define ktla_ktva(addr) (addr)
3748+#define ktva_ktla(addr) (addr)
3749+
3750 /*
3751 * Use the top bit of the higher-level page table entries to indicate whether
3752 * the entries we point to contain hugepages. This works because we know that
3753diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3754index fed85e6..da5c71b 100644
3755--- a/arch/powerpc/include/asm/page_64.h
3756+++ b/arch/powerpc/include/asm/page_64.h
3757@@ -146,15 +146,18 @@ do { \
3758 * stack by default, so in the absence of a PT_GNU_STACK program header
3759 * we turn execute permission off.
3760 */
3761-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3762- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3763+#define VM_STACK_DEFAULT_FLAGS32 \
3764+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3765+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3766
3767 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3768 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3769
3770+#ifndef CONFIG_PAX_PAGEEXEC
3771 #define VM_STACK_DEFAULT_FLAGS \
3772 (is_32bit_task() ? \
3773 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3774+#endif
3775
3776 #include <asm-generic/getorder.h>
3777
3778diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3779index 292725c..f87ae14 100644
3780--- a/arch/powerpc/include/asm/pgalloc-64.h
3781+++ b/arch/powerpc/include/asm/pgalloc-64.h
3782@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3783 #ifndef CONFIG_PPC_64K_PAGES
3784
3785 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3786+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3787
3788 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3789 {
3790@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3791 pud_set(pud, (unsigned long)pmd);
3792 }
3793
3794+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3795+{
3796+ pud_populate(mm, pud, pmd);
3797+}
3798+
3799 #define pmd_populate(mm, pmd, pte_page) \
3800 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3801 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3802@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3803 #else /* CONFIG_PPC_64K_PAGES */
3804
3805 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3806+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3807
3808 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3809 pte_t *pte)
3810diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3811index 2e0e411..7899c68 100644
3812--- a/arch/powerpc/include/asm/pgtable.h
3813+++ b/arch/powerpc/include/asm/pgtable.h
3814@@ -2,6 +2,7 @@
3815 #define _ASM_POWERPC_PGTABLE_H
3816 #ifdef __KERNEL__
3817
3818+#include <linux/const.h>
3819 #ifndef __ASSEMBLY__
3820 #include <asm/processor.h> /* For TASK_SIZE */
3821 #include <asm/mmu.h>
3822diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3823index 4aad413..85d86bf 100644
3824--- a/arch/powerpc/include/asm/pte-hash32.h
3825+++ b/arch/powerpc/include/asm/pte-hash32.h
3826@@ -21,6 +21,7 @@
3827 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3828 #define _PAGE_USER 0x004 /* usermode access allowed */
3829 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3830+#define _PAGE_EXEC _PAGE_GUARDED
3831 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3832 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3833 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3834diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3835index 7fdc2c0..e47a9b02d3 100644
3836--- a/arch/powerpc/include/asm/reg.h
3837+++ b/arch/powerpc/include/asm/reg.h
3838@@ -212,6 +212,7 @@
3839 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3840 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3841 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3842+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3843 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3844 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3845 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3846diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3847index c377457..3c69fbc 100644
3848--- a/arch/powerpc/include/asm/system.h
3849+++ b/arch/powerpc/include/asm/system.h
3850@@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3851 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3852 #endif
3853
3854-extern unsigned long arch_align_stack(unsigned long sp);
3855+#define arch_align_stack(x) ((x) & ~0xfUL)
3856
3857 /* Used in very early kernel initialization. */
3858 extern unsigned long reloc_offset(void);
3859diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3860index 96471494..60ed5a2 100644
3861--- a/arch/powerpc/include/asm/thread_info.h
3862+++ b/arch/powerpc/include/asm/thread_info.h
3863@@ -104,13 +104,15 @@ static inline struct thread_info *current_thread_info(void)
3864 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3865 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3866 #define TIF_SINGLESTEP 8 /* singlestepping active */
3867-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3868 #define TIF_SECCOMP 10 /* secure computing */
3869 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3870 #define TIF_NOERROR 12 /* Force successful syscall return */
3871 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3872 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3873 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
3874+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
3875+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
3876+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3877
3878 /* as above, but as bit values */
3879 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3880@@ -128,8 +130,11 @@ static inline struct thread_info *current_thread_info(void)
3881 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3882 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3883 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3884+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3885+
3886 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3887- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3888+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3889+ _TIF_GRSEC_SETXID)
3890
3891 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3892 _TIF_NOTIFY_RESUME)
3893diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3894index bd0fb84..a42a14b 100644
3895--- a/arch/powerpc/include/asm/uaccess.h
3896+++ b/arch/powerpc/include/asm/uaccess.h
3897@@ -13,6 +13,8 @@
3898 #define VERIFY_READ 0
3899 #define VERIFY_WRITE 1
3900
3901+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3902+
3903 /*
3904 * The fs value determines whether argument validity checking should be
3905 * performed or not. If get_fs() == USER_DS, checking is performed, with
3906@@ -327,52 +329,6 @@ do { \
3907 extern unsigned long __copy_tofrom_user(void __user *to,
3908 const void __user *from, unsigned long size);
3909
3910-#ifndef __powerpc64__
3911-
3912-static inline unsigned long copy_from_user(void *to,
3913- const void __user *from, unsigned long n)
3914-{
3915- unsigned long over;
3916-
3917- if (access_ok(VERIFY_READ, from, n))
3918- return __copy_tofrom_user((__force void __user *)to, from, n);
3919- if ((unsigned long)from < TASK_SIZE) {
3920- over = (unsigned long)from + n - TASK_SIZE;
3921- return __copy_tofrom_user((__force void __user *)to, from,
3922- n - over) + over;
3923- }
3924- return n;
3925-}
3926-
3927-static inline unsigned long copy_to_user(void __user *to,
3928- const void *from, unsigned long n)
3929-{
3930- unsigned long over;
3931-
3932- if (access_ok(VERIFY_WRITE, to, n))
3933- return __copy_tofrom_user(to, (__force void __user *)from, n);
3934- if ((unsigned long)to < TASK_SIZE) {
3935- over = (unsigned long)to + n - TASK_SIZE;
3936- return __copy_tofrom_user(to, (__force void __user *)from,
3937- n - over) + over;
3938- }
3939- return n;
3940-}
3941-
3942-#else /* __powerpc64__ */
3943-
3944-#define __copy_in_user(to, from, size) \
3945- __copy_tofrom_user((to), (from), (size))
3946-
3947-extern unsigned long copy_from_user(void *to, const void __user *from,
3948- unsigned long n);
3949-extern unsigned long copy_to_user(void __user *to, const void *from,
3950- unsigned long n);
3951-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3952- unsigned long n);
3953-
3954-#endif /* __powerpc64__ */
3955-
3956 static inline unsigned long __copy_from_user_inatomic(void *to,
3957 const void __user *from, unsigned long n)
3958 {
3959@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3960 if (ret == 0)
3961 return 0;
3962 }
3963+
3964+ if (!__builtin_constant_p(n))
3965+ check_object_size(to, n, false);
3966+
3967 return __copy_tofrom_user((__force void __user *)to, from, n);
3968 }
3969
3970@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3971 if (ret == 0)
3972 return 0;
3973 }
3974+
3975+ if (!__builtin_constant_p(n))
3976+ check_object_size(from, n, true);
3977+
3978 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3979 }
3980
3981@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3982 return __copy_to_user_inatomic(to, from, size);
3983 }
3984
3985+#ifndef __powerpc64__
3986+
3987+static inline unsigned long __must_check copy_from_user(void *to,
3988+ const void __user *from, unsigned long n)
3989+{
3990+ unsigned long over;
3991+
3992+ if ((long)n < 0)
3993+ return n;
3994+
3995+ if (access_ok(VERIFY_READ, from, n)) {
3996+ if (!__builtin_constant_p(n))
3997+ check_object_size(to, n, false);
3998+ return __copy_tofrom_user((__force void __user *)to, from, n);
3999+ }
4000+ if ((unsigned long)from < TASK_SIZE) {
4001+ over = (unsigned long)from + n - TASK_SIZE;
4002+ if (!__builtin_constant_p(n - over))
4003+ check_object_size(to, n - over, false);
4004+ return __copy_tofrom_user((__force void __user *)to, from,
4005+ n - over) + over;
4006+ }
4007+ return n;
4008+}
4009+
4010+static inline unsigned long __must_check copy_to_user(void __user *to,
4011+ const void *from, unsigned long n)
4012+{
4013+ unsigned long over;
4014+
4015+ if ((long)n < 0)
4016+ return n;
4017+
4018+ if (access_ok(VERIFY_WRITE, to, n)) {
4019+ if (!__builtin_constant_p(n))
4020+ check_object_size(from, n, true);
4021+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4022+ }
4023+ if ((unsigned long)to < TASK_SIZE) {
4024+ over = (unsigned long)to + n - TASK_SIZE;
4025+ if (!__builtin_constant_p(n - over))
4026+ check_object_size(from, n - over, true);
4027+ return __copy_tofrom_user(to, (__force void __user *)from,
4028+ n - over) + over;
4029+ }
4030+ return n;
4031+}
4032+
4033+#else /* __powerpc64__ */
4034+
4035+#define __copy_in_user(to, from, size) \
4036+ __copy_tofrom_user((to), (from), (size))
4037+
4038+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4039+{
4040+ if ((long)n < 0 || n > INT_MAX)
4041+ return n;
4042+
4043+ if (!__builtin_constant_p(n))
4044+ check_object_size(to, n, false);
4045+
4046+ if (likely(access_ok(VERIFY_READ, from, n)))
4047+ n = __copy_from_user(to, from, n);
4048+ else
4049+ memset(to, 0, n);
4050+ return n;
4051+}
4052+
4053+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4054+{
4055+ if ((long)n < 0 || n > INT_MAX)
4056+ return n;
4057+
4058+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4059+ if (!__builtin_constant_p(n))
4060+ check_object_size(from, n, true);
4061+ n = __copy_to_user(to, from, n);
4062+ }
4063+ return n;
4064+}
4065+
4066+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4067+ unsigned long n);
4068+
4069+#endif /* __powerpc64__ */
4070+
4071 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4072
4073 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4074diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4075index 429983c..7af363b 100644
4076--- a/arch/powerpc/kernel/exceptions-64e.S
4077+++ b/arch/powerpc/kernel/exceptions-64e.S
4078@@ -587,6 +587,7 @@ storage_fault_common:
4079 std r14,_DAR(r1)
4080 std r15,_DSISR(r1)
4081 addi r3,r1,STACK_FRAME_OVERHEAD
4082+ bl .save_nvgprs
4083 mr r4,r14
4084 mr r5,r15
4085 ld r14,PACA_EXGEN+EX_R14(r13)
4086@@ -596,8 +597,7 @@ storage_fault_common:
4087 cmpdi r3,0
4088 bne- 1f
4089 b .ret_from_except_lite
4090-1: bl .save_nvgprs
4091- mr r5,r3
4092+1: mr r5,r3
4093 addi r3,r1,STACK_FRAME_OVERHEAD
4094 ld r4,_DAR(r1)
4095 bl .bad_page_fault
4096diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4097index 15c5a4f..22a4000 100644
4098--- a/arch/powerpc/kernel/exceptions-64s.S
4099+++ b/arch/powerpc/kernel/exceptions-64s.S
4100@@ -1004,10 +1004,10 @@ handle_page_fault:
4101 11: ld r4,_DAR(r1)
4102 ld r5,_DSISR(r1)
4103 addi r3,r1,STACK_FRAME_OVERHEAD
4104+ bl .save_nvgprs
4105 bl .do_page_fault
4106 cmpdi r3,0
4107 beq+ 13f
4108- bl .save_nvgprs
4109 mr r5,r3
4110 addi r3,r1,STACK_FRAME_OVERHEAD
4111 lwz r4,_DAR(r1)
4112diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4113index 01e2877..a1ba360 100644
4114--- a/arch/powerpc/kernel/irq.c
4115+++ b/arch/powerpc/kernel/irq.c
4116@@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4117 host->ops = ops;
4118 host->of_node = of_node_get(of_node);
4119
4120- if (host->ops->match == NULL)
4121- host->ops->match = default_irq_host_match;
4122-
4123 raw_spin_lock_irqsave(&irq_big_lock, flags);
4124
4125 /* If it's a legacy controller, check for duplicates and
4126@@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4127 */
4128 raw_spin_lock_irqsave(&irq_big_lock, flags);
4129 list_for_each_entry(h, &irq_hosts, link)
4130- if (h->ops->match(h, node)) {
4131+ if (h->ops->match) {
4132+ if (h->ops->match(h, node)) {
4133+ found = h;
4134+ break;
4135+ }
4136+ } else if (default_irq_host_match(h, node)) {
4137 found = h;
4138 break;
4139 }
4140diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4141index 0b6d796..d760ddb 100644
4142--- a/arch/powerpc/kernel/module_32.c
4143+++ b/arch/powerpc/kernel/module_32.c
4144@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4145 me->arch.core_plt_section = i;
4146 }
4147 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4148- printk("Module doesn't contain .plt or .init.plt sections.\n");
4149+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4150 return -ENOEXEC;
4151 }
4152
4153@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4154
4155 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4156 /* Init, or core PLT? */
4157- if (location >= mod->module_core
4158- && location < mod->module_core + mod->core_size)
4159+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4160+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4161 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4162- else
4163+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4164+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4165 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4166+ else {
4167+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4168+ return ~0UL;
4169+ }
4170
4171 /* Find this entry, or if that fails, the next avail. entry */
4172 while (entry->jump[0]) {
4173diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4174index d817ab0..b23b18e 100644
4175--- a/arch/powerpc/kernel/process.c
4176+++ b/arch/powerpc/kernel/process.c
4177@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
4178 * Lookup NIP late so we have the best change of getting the
4179 * above info out without failing
4180 */
4181- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4182- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4183+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4184+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4185 #endif
4186 show_stack(current, (unsigned long *) regs->gpr[1]);
4187 if (!user_mode(regs))
4188@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4189 newsp = stack[0];
4190 ip = stack[STACK_FRAME_LR_SAVE];
4191 if (!firstframe || ip != lr) {
4192- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4193+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4195 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4196- printk(" (%pS)",
4197+ printk(" (%pA)",
4198 (void *)current->ret_stack[curr_frame].ret);
4199 curr_frame--;
4200 }
4201@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4202 struct pt_regs *regs = (struct pt_regs *)
4203 (sp + STACK_FRAME_OVERHEAD);
4204 lr = regs->link;
4205- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4206+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4207 regs->trap, (void *)regs->nip, (void *)lr);
4208 firstframe = 1;
4209 }
4210@@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
4211 }
4212
4213 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4214-
4215-unsigned long arch_align_stack(unsigned long sp)
4216-{
4217- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4218- sp -= get_random_int() & ~PAGE_MASK;
4219- return sp & ~0xf;
4220-}
4221-
4222-static inline unsigned long brk_rnd(void)
4223-{
4224- unsigned long rnd = 0;
4225-
4226- /* 8MB for 32bit, 1GB for 64bit */
4227- if (is_32bit_task())
4228- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4229- else
4230- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4231-
4232- return rnd << PAGE_SHIFT;
4233-}
4234-
4235-unsigned long arch_randomize_brk(struct mm_struct *mm)
4236-{
4237- unsigned long base = mm->brk;
4238- unsigned long ret;
4239-
4240-#ifdef CONFIG_PPC_STD_MMU_64
4241- /*
4242- * If we are using 1TB segments and we are allowed to randomise
4243- * the heap, we can put it above 1TB so it is backed by a 1TB
4244- * segment. Otherwise the heap will be in the bottom 1TB
4245- * which always uses 256MB segments and this may result in a
4246- * performance penalty.
4247- */
4248- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4249- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4250-#endif
4251-
4252- ret = PAGE_ALIGN(base + brk_rnd());
4253-
4254- if (ret < mm->brk)
4255- return mm->brk;
4256-
4257- return ret;
4258-}
4259-
4260-unsigned long randomize_et_dyn(unsigned long base)
4261-{
4262- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4263-
4264- if (ret < base)
4265- return base;
4266-
4267- return ret;
4268-}
4269diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4270index 5b43325..94a5bb4 100644
4271--- a/arch/powerpc/kernel/ptrace.c
4272+++ b/arch/powerpc/kernel/ptrace.c
4273@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4274 return ret;
4275 }
4276
4277+#ifdef CONFIG_GRKERNSEC_SETXID
4278+extern void gr_delayed_cred_worker(void);
4279+#endif
4280+
4281 /*
4282 * We must return the syscall number to actually look up in the table.
4283 * This can be -1L to skip running any syscall at all.
4284@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4285
4286 secure_computing(regs->gpr[0]);
4287
4288+#ifdef CONFIG_GRKERNSEC_SETXID
4289+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4290+ gr_delayed_cred_worker();
4291+#endif
4292+
4293 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4294 tracehook_report_syscall_entry(regs))
4295 /*
4296@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4297 {
4298 int step;
4299
4300+#ifdef CONFIG_GRKERNSEC_SETXID
4301+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4302+ gr_delayed_cred_worker();
4303+#endif
4304+
4305 audit_syscall_exit(regs);
4306
4307 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4308diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4309index 836a5a1..27289a3 100644
4310--- a/arch/powerpc/kernel/signal_32.c
4311+++ b/arch/powerpc/kernel/signal_32.c
4312@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4313 /* Save user registers on the stack */
4314 frame = &rt_sf->uc.uc_mcontext;
4315 addr = frame;
4316- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4317+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4318 if (save_user_regs(regs, frame, 0, 1))
4319 goto badframe;
4320 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4321diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4322index a50b5ec..547078a 100644
4323--- a/arch/powerpc/kernel/signal_64.c
4324+++ b/arch/powerpc/kernel/signal_64.c
4325@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4326 current->thread.fpscr.val = 0;
4327
4328 /* Set up to return from userspace. */
4329- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4330+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4331 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4332 } else {
4333 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4334diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4335index c091527..5592625 100644
4336--- a/arch/powerpc/kernel/traps.c
4337+++ b/arch/powerpc/kernel/traps.c
4338@@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4339 return flags;
4340 }
4341
4342+extern void gr_handle_kernel_exploit(void);
4343+
4344 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4345 int signr)
4346 {
4347@@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4348 panic("Fatal exception in interrupt");
4349 if (panic_on_oops)
4350 panic("Fatal exception");
4351+
4352+ gr_handle_kernel_exploit();
4353+
4354 do_exit(signr);
4355 }
4356
4357diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4358index 7d14bb6..1305601 100644
4359--- a/arch/powerpc/kernel/vdso.c
4360+++ b/arch/powerpc/kernel/vdso.c
4361@@ -35,6 +35,7 @@
4362 #include <asm/firmware.h>
4363 #include <asm/vdso.h>
4364 #include <asm/vdso_datapage.h>
4365+#include <asm/mman.h>
4366
4367 #include "setup.h"
4368
4369@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4370 vdso_base = VDSO32_MBASE;
4371 #endif
4372
4373- current->mm->context.vdso_base = 0;
4374+ current->mm->context.vdso_base = ~0UL;
4375
4376 /* vDSO has a problem and was disabled, just don't "enable" it for the
4377 * process
4378@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4379 vdso_base = get_unmapped_area(NULL, vdso_base,
4380 (vdso_pages << PAGE_SHIFT) +
4381 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4382- 0, 0);
4383+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4384 if (IS_ERR_VALUE(vdso_base)) {
4385 rc = vdso_base;
4386 goto fail_mmapsem;
4387diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4388index 5eea6f3..5d10396 100644
4389--- a/arch/powerpc/lib/usercopy_64.c
4390+++ b/arch/powerpc/lib/usercopy_64.c
4391@@ -9,22 +9,6 @@
4392 #include <linux/module.h>
4393 #include <asm/uaccess.h>
4394
4395-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4396-{
4397- if (likely(access_ok(VERIFY_READ, from, n)))
4398- n = __copy_from_user(to, from, n);
4399- else
4400- memset(to, 0, n);
4401- return n;
4402-}
4403-
4404-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4405-{
4406- if (likely(access_ok(VERIFY_WRITE, to, n)))
4407- n = __copy_to_user(to, from, n);
4408- return n;
4409-}
4410-
4411 unsigned long copy_in_user(void __user *to, const void __user *from,
4412 unsigned long n)
4413 {
4414@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4415 return n;
4416 }
4417
4418-EXPORT_SYMBOL(copy_from_user);
4419-EXPORT_SYMBOL(copy_to_user);
4420 EXPORT_SYMBOL(copy_in_user);
4421
4422diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4423index 2f0d1b0..36fb5cc 100644
4424--- a/arch/powerpc/mm/fault.c
4425+++ b/arch/powerpc/mm/fault.c
4426@@ -32,6 +32,10 @@
4427 #include <linux/perf_event.h>
4428 #include <linux/magic.h>
4429 #include <linux/ratelimit.h>
4430+#include <linux/slab.h>
4431+#include <linux/pagemap.h>
4432+#include <linux/compiler.h>
4433+#include <linux/unistd.h>
4434
4435 #include <asm/firmware.h>
4436 #include <asm/page.h>
4437@@ -43,6 +47,7 @@
4438 #include <asm/tlbflush.h>
4439 #include <asm/siginfo.h>
4440 #include <mm/mmu_decl.h>
4441+#include <asm/ptrace.h>
4442
4443 #include "icswx.h"
4444
4445@@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4446 }
4447 #endif
4448
4449+#ifdef CONFIG_PAX_PAGEEXEC
4450+/*
4451+ * PaX: decide what to do with offenders (regs->nip = fault address)
4452+ *
4453+ * returns 1 when task should be killed
4454+ */
4455+static int pax_handle_fetch_fault(struct pt_regs *regs)
4456+{
4457+ return 1;
4458+}
4459+
4460+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4461+{
4462+ unsigned long i;
4463+
4464+ printk(KERN_ERR "PAX: bytes at PC: ");
4465+ for (i = 0; i < 5; i++) {
4466+ unsigned int c;
4467+ if (get_user(c, (unsigned int __user *)pc+i))
4468+ printk(KERN_CONT "???????? ");
4469+ else
4470+ printk(KERN_CONT "%08x ", c);
4471+ }
4472+ printk("\n");
4473+}
4474+#endif
4475+
4476 /*
4477 * Check whether the instruction at regs->nip is a store using
4478 * an update addressing form which will update r1.
4479@@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4480 * indicate errors in DSISR but can validly be set in SRR1.
4481 */
4482 if (trap == 0x400)
4483- error_code &= 0x48200000;
4484+ error_code &= 0x58200000;
4485 else
4486 is_write = error_code & DSISR_ISSTORE;
4487 #else
4488@@ -276,7 +308,7 @@ good_area:
4489 * "undefined". Of those that can be set, this is the only
4490 * one which seems bad.
4491 */
4492- if (error_code & 0x10000000)
4493+ if (error_code & DSISR_GUARDED)
4494 /* Guarded storage error. */
4495 goto bad_area;
4496 #endif /* CONFIG_8xx */
4497@@ -291,7 +323,7 @@ good_area:
4498 * processors use the same I/D cache coherency mechanism
4499 * as embedded.
4500 */
4501- if (error_code & DSISR_PROTFAULT)
4502+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4503 goto bad_area;
4504 #endif /* CONFIG_PPC_STD_MMU */
4505
4506@@ -360,6 +392,23 @@ bad_area:
4507 bad_area_nosemaphore:
4508 /* User mode accesses cause a SIGSEGV */
4509 if (user_mode(regs)) {
4510+
4511+#ifdef CONFIG_PAX_PAGEEXEC
4512+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4513+#ifdef CONFIG_PPC_STD_MMU
4514+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4515+#else
4516+ if (is_exec && regs->nip == address) {
4517+#endif
4518+ switch (pax_handle_fetch_fault(regs)) {
4519+ }
4520+
4521+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4522+ do_group_exit(SIGKILL);
4523+ }
4524+ }
4525+#endif
4526+
4527 _exception(SIGSEGV, regs, code, address);
4528 return 0;
4529 }
4530diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4531index 67a42ed..1c7210c 100644
4532--- a/arch/powerpc/mm/mmap_64.c
4533+++ b/arch/powerpc/mm/mmap_64.c
4534@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4535 */
4536 if (mmap_is_legacy()) {
4537 mm->mmap_base = TASK_UNMAPPED_BASE;
4538+
4539+#ifdef CONFIG_PAX_RANDMMAP
4540+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4541+ mm->mmap_base += mm->delta_mmap;
4542+#endif
4543+
4544 mm->get_unmapped_area = arch_get_unmapped_area;
4545 mm->unmap_area = arch_unmap_area;
4546 } else {
4547 mm->mmap_base = mmap_base();
4548+
4549+#ifdef CONFIG_PAX_RANDMMAP
4550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4551+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4552+#endif
4553+
4554 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4555 mm->unmap_area = arch_unmap_area_topdown;
4556 }
4557diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4558index 73709f7..6b90313 100644
4559--- a/arch/powerpc/mm/slice.c
4560+++ b/arch/powerpc/mm/slice.c
4561@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4562 if ((mm->task_size - len) < addr)
4563 return 0;
4564 vma = find_vma(mm, addr);
4565- return (!vma || (addr + len) <= vma->vm_start);
4566+ return check_heap_stack_gap(vma, addr, len);
4567 }
4568
4569 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4570@@ -256,7 +256,7 @@ full_search:
4571 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4572 continue;
4573 }
4574- if (!vma || addr + len <= vma->vm_start) {
4575+ if (check_heap_stack_gap(vma, addr, len)) {
4576 /*
4577 * Remember the place where we stopped the search:
4578 */
4579@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4580 }
4581 }
4582
4583- addr = mm->mmap_base;
4584- while (addr > len) {
4585+ if (mm->mmap_base < len)
4586+ addr = -ENOMEM;
4587+ else
4588+ addr = mm->mmap_base - len;
4589+
4590+ while (!IS_ERR_VALUE(addr)) {
4591 /* Go down by chunk size */
4592- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4593+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4594
4595 /* Check for hit with different page size */
4596 mask = slice_range_to_mask(addr, len);
4597@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4598 * return with success:
4599 */
4600 vma = find_vma(mm, addr);
4601- if (!vma || (addr + len) <= vma->vm_start) {
4602+ if (check_heap_stack_gap(vma, addr, len)) {
4603 /* remember the address as a hint for next time */
4604 if (use_cache)
4605 mm->free_area_cache = addr;
4606@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4607 mm->cached_hole_size = vma->vm_start - addr;
4608
4609 /* try just below the current vma->vm_start */
4610- addr = vma->vm_start;
4611+ addr = skip_heap_stack_gap(vma, len);
4612 }
4613
4614 /*
4615@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4616 if (fixed && addr > (mm->task_size - len))
4617 return -EINVAL;
4618
4619+#ifdef CONFIG_PAX_RANDMMAP
4620+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4621+ addr = 0;
4622+#endif
4623+
4624 /* If hint, make sure it matches our alignment restrictions */
4625 if (!fixed && addr) {
4626 addr = _ALIGN_UP(addr, 1ul << pshift);
4627diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4628index 8517d2a..d2738d4 100644
4629--- a/arch/s390/include/asm/atomic.h
4630+++ b/arch/s390/include/asm/atomic.h
4631@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4632 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4633 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4634
4635+#define atomic64_read_unchecked(v) atomic64_read(v)
4636+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4637+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4638+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4639+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4640+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4641+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4642+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4643+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4644+
4645 #define smp_mb__before_atomic_dec() smp_mb()
4646 #define smp_mb__after_atomic_dec() smp_mb()
4647 #define smp_mb__before_atomic_inc() smp_mb()
4648diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4649index 2a30d5a..5e5586f 100644
4650--- a/arch/s390/include/asm/cache.h
4651+++ b/arch/s390/include/asm/cache.h
4652@@ -11,8 +11,10 @@
4653 #ifndef __ARCH_S390_CACHE_H
4654 #define __ARCH_S390_CACHE_H
4655
4656-#define L1_CACHE_BYTES 256
4657+#include <linux/const.h>
4658+
4659 #define L1_CACHE_SHIFT 8
4660+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4661 #define NET_SKB_PAD 32
4662
4663 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4664diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4665index 547f1a6..0b22b53 100644
4666--- a/arch/s390/include/asm/elf.h
4667+++ b/arch/s390/include/asm/elf.h
4668@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4669 the loader. We need to make sure that it is out of the way of the program
4670 that it will "exec", and that there is sufficient room for the brk. */
4671
4672-extern unsigned long randomize_et_dyn(unsigned long base);
4673-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4674+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4675+
4676+#ifdef CONFIG_PAX_ASLR
4677+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4678+
4679+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4680+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4681+#endif
4682
4683 /* This yields a mask that user programs can use to figure out what
4684 instruction set this CPU supports. */
4685@@ -211,7 +217,4 @@ struct linux_binprm;
4686 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4687 int arch_setup_additional_pages(struct linux_binprm *, int);
4688
4689-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4690-#define arch_randomize_brk arch_randomize_brk
4691-
4692 #endif
4693diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4694index d73cc6b..1a296ad 100644
4695--- a/arch/s390/include/asm/system.h
4696+++ b/arch/s390/include/asm/system.h
4697@@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4698 extern void (*_machine_halt)(void);
4699 extern void (*_machine_power_off)(void);
4700
4701-extern unsigned long arch_align_stack(unsigned long sp);
4702+#define arch_align_stack(x) ((x) & ~0xfUL)
4703
4704 static inline int tprot(unsigned long addr)
4705 {
4706diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4707index 2b23885..e136e31 100644
4708--- a/arch/s390/include/asm/uaccess.h
4709+++ b/arch/s390/include/asm/uaccess.h
4710@@ -235,6 +235,10 @@ static inline unsigned long __must_check
4711 copy_to_user(void __user *to, const void *from, unsigned long n)
4712 {
4713 might_fault();
4714+
4715+ if ((long)n < 0)
4716+ return n;
4717+
4718 if (access_ok(VERIFY_WRITE, to, n))
4719 n = __copy_to_user(to, from, n);
4720 return n;
4721@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4722 static inline unsigned long __must_check
4723 __copy_from_user(void *to, const void __user *from, unsigned long n)
4724 {
4725+ if ((long)n < 0)
4726+ return n;
4727+
4728 if (__builtin_constant_p(n) && (n <= 256))
4729 return uaccess.copy_from_user_small(n, from, to);
4730 else
4731@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4732 unsigned int sz = __compiletime_object_size(to);
4733
4734 might_fault();
4735+
4736+ if ((long)n < 0)
4737+ return n;
4738+
4739 if (unlikely(sz != -1 && sz < n)) {
4740 copy_from_user_overflow();
4741 return n;
4742diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4743index dfcb343..eda788a 100644
4744--- a/arch/s390/kernel/module.c
4745+++ b/arch/s390/kernel/module.c
4746@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4747
4748 /* Increase core size by size of got & plt and set start
4749 offsets for got and plt. */
4750- me->core_size = ALIGN(me->core_size, 4);
4751- me->arch.got_offset = me->core_size;
4752- me->core_size += me->arch.got_size;
4753- me->arch.plt_offset = me->core_size;
4754- me->core_size += me->arch.plt_size;
4755+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4756+ me->arch.got_offset = me->core_size_rw;
4757+ me->core_size_rw += me->arch.got_size;
4758+ me->arch.plt_offset = me->core_size_rx;
4759+ me->core_size_rx += me->arch.plt_size;
4760 return 0;
4761 }
4762
4763@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4764 if (info->got_initialized == 0) {
4765 Elf_Addr *gotent;
4766
4767- gotent = me->module_core + me->arch.got_offset +
4768+ gotent = me->module_core_rw + me->arch.got_offset +
4769 info->got_offset;
4770 *gotent = val;
4771 info->got_initialized = 1;
4772@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4773 else if (r_type == R_390_GOTENT ||
4774 r_type == R_390_GOTPLTENT)
4775 *(unsigned int *) loc =
4776- (val + (Elf_Addr) me->module_core - loc) >> 1;
4777+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4778 else if (r_type == R_390_GOT64 ||
4779 r_type == R_390_GOTPLT64)
4780 *(unsigned long *) loc = val;
4781@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4782 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4783 if (info->plt_initialized == 0) {
4784 unsigned int *ip;
4785- ip = me->module_core + me->arch.plt_offset +
4786+ ip = me->module_core_rx + me->arch.plt_offset +
4787 info->plt_offset;
4788 #ifndef CONFIG_64BIT
4789 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4790@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4791 val - loc + 0xffffUL < 0x1ffffeUL) ||
4792 (r_type == R_390_PLT32DBL &&
4793 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4794- val = (Elf_Addr) me->module_core +
4795+ val = (Elf_Addr) me->module_core_rx +
4796 me->arch.plt_offset +
4797 info->plt_offset;
4798 val += rela->r_addend - loc;
4799@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4800 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4801 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4802 val = val + rela->r_addend -
4803- ((Elf_Addr) me->module_core + me->arch.got_offset);
4804+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4805 if (r_type == R_390_GOTOFF16)
4806 *(unsigned short *) loc = val;
4807 else if (r_type == R_390_GOTOFF32)
4808@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 break;
4810 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4811 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4812- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4813+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4814 rela->r_addend - loc;
4815 if (r_type == R_390_GOTPC)
4816 *(unsigned int *) loc = val;
4817diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4818index e795933..b32563c 100644
4819--- a/arch/s390/kernel/process.c
4820+++ b/arch/s390/kernel/process.c
4821@@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4822 }
4823 return 0;
4824 }
4825-
4826-unsigned long arch_align_stack(unsigned long sp)
4827-{
4828- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4829- sp -= get_random_int() & ~PAGE_MASK;
4830- return sp & ~0xf;
4831-}
4832-
4833-static inline unsigned long brk_rnd(void)
4834-{
4835- /* 8MB for 32bit, 1GB for 64bit */
4836- if (is_32bit_task())
4837- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4838- else
4839- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4840-}
4841-
4842-unsigned long arch_randomize_brk(struct mm_struct *mm)
4843-{
4844- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4845-
4846- if (ret < mm->brk)
4847- return mm->brk;
4848- return ret;
4849-}
4850-
4851-unsigned long randomize_et_dyn(unsigned long base)
4852-{
4853- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4854-
4855- if (!(current->flags & PF_RANDOMIZE))
4856- return base;
4857- if (ret < base)
4858- return base;
4859- return ret;
4860-}
4861diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4862index a0155c0..34cc491 100644
4863--- a/arch/s390/mm/mmap.c
4864+++ b/arch/s390/mm/mmap.c
4865@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4866 */
4867 if (mmap_is_legacy()) {
4868 mm->mmap_base = TASK_UNMAPPED_BASE;
4869+
4870+#ifdef CONFIG_PAX_RANDMMAP
4871+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4872+ mm->mmap_base += mm->delta_mmap;
4873+#endif
4874+
4875 mm->get_unmapped_area = arch_get_unmapped_area;
4876 mm->unmap_area = arch_unmap_area;
4877 } else {
4878 mm->mmap_base = mmap_base();
4879+
4880+#ifdef CONFIG_PAX_RANDMMAP
4881+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4882+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4883+#endif
4884+
4885 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4886 mm->unmap_area = arch_unmap_area_topdown;
4887 }
4888@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4889 */
4890 if (mmap_is_legacy()) {
4891 mm->mmap_base = TASK_UNMAPPED_BASE;
4892+
4893+#ifdef CONFIG_PAX_RANDMMAP
4894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4895+ mm->mmap_base += mm->delta_mmap;
4896+#endif
4897+
4898 mm->get_unmapped_area = s390_get_unmapped_area;
4899 mm->unmap_area = arch_unmap_area;
4900 } else {
4901 mm->mmap_base = mmap_base();
4902+
4903+#ifdef CONFIG_PAX_RANDMMAP
4904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4905+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4906+#endif
4907+
4908 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4909 mm->unmap_area = arch_unmap_area_topdown;
4910 }
4911diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4912index ae3d59f..f65f075 100644
4913--- a/arch/score/include/asm/cache.h
4914+++ b/arch/score/include/asm/cache.h
4915@@ -1,7 +1,9 @@
4916 #ifndef _ASM_SCORE_CACHE_H
4917 #define _ASM_SCORE_CACHE_H
4918
4919+#include <linux/const.h>
4920+
4921 #define L1_CACHE_SHIFT 4
4922-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4923+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4924
4925 #endif /* _ASM_SCORE_CACHE_H */
4926diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4927index 589d5c7..669e274 100644
4928--- a/arch/score/include/asm/system.h
4929+++ b/arch/score/include/asm/system.h
4930@@ -17,7 +17,7 @@ do { \
4931 #define finish_arch_switch(prev) do {} while (0)
4932
4933 typedef void (*vi_handler_t)(void);
4934-extern unsigned long arch_align_stack(unsigned long sp);
4935+#define arch_align_stack(x) (x)
4936
4937 #define mb() barrier()
4938 #define rmb() barrier()
4939diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4940index 25d0803..d6c8e36 100644
4941--- a/arch/score/kernel/process.c
4942+++ b/arch/score/kernel/process.c
4943@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4944
4945 return task_pt_regs(task)->cp0_epc;
4946 }
4947-
4948-unsigned long arch_align_stack(unsigned long sp)
4949-{
4950- return sp;
4951-}
4952diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4953index ef9e555..331bd29 100644
4954--- a/arch/sh/include/asm/cache.h
4955+++ b/arch/sh/include/asm/cache.h
4956@@ -9,10 +9,11 @@
4957 #define __ASM_SH_CACHE_H
4958 #ifdef __KERNEL__
4959
4960+#include <linux/const.h>
4961 #include <linux/init.h>
4962 #include <cpu/cache.h>
4963
4964-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4965+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4966
4967 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4968
4969diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4970index afeb710..d1d1289 100644
4971--- a/arch/sh/mm/mmap.c
4972+++ b/arch/sh/mm/mmap.c
4973@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4974 addr = PAGE_ALIGN(addr);
4975
4976 vma = find_vma(mm, addr);
4977- if (TASK_SIZE - len >= addr &&
4978- (!vma || addr + len <= vma->vm_start))
4979+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4980 return addr;
4981 }
4982
4983@@ -106,7 +105,7 @@ full_search:
4984 }
4985 return -ENOMEM;
4986 }
4987- if (likely(!vma || addr + len <= vma->vm_start)) {
4988+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4989 /*
4990 * Remember the place where we stopped the search:
4991 */
4992@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4993 addr = PAGE_ALIGN(addr);
4994
4995 vma = find_vma(mm, addr);
4996- if (TASK_SIZE - len >= addr &&
4997- (!vma || addr + len <= vma->vm_start))
4998+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4999 return addr;
5000 }
5001
5002@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5003 /* make sure it can fit in the remaining address space */
5004 if (likely(addr > len)) {
5005 vma = find_vma(mm, addr-len);
5006- if (!vma || addr <= vma->vm_start) {
5007+ if (check_heap_stack_gap(vma, addr - len, len)) {
5008 /* remember the address as a hint for next time */
5009 return (mm->free_area_cache = addr-len);
5010 }
5011@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5012 if (unlikely(mm->mmap_base < len))
5013 goto bottomup;
5014
5015- addr = mm->mmap_base-len;
5016- if (do_colour_align)
5017- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5018+ addr = mm->mmap_base - len;
5019
5020 do {
5021+ if (do_colour_align)
5022+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5023 /*
5024 * Lookup failure means no vma is above this address,
5025 * else if new region fits below vma->vm_start,
5026 * return with success:
5027 */
5028 vma = find_vma(mm, addr);
5029- if (likely(!vma || addr+len <= vma->vm_start)) {
5030+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5031 /* remember the address as a hint for next time */
5032 return (mm->free_area_cache = addr);
5033 }
5034@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5035 mm->cached_hole_size = vma->vm_start - addr;
5036
5037 /* try just below the current vma->vm_start */
5038- addr = vma->vm_start-len;
5039- if (do_colour_align)
5040- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5041- } while (likely(len < vma->vm_start));
5042+ addr = skip_heap_stack_gap(vma, len);
5043+ } while (!IS_ERR_VALUE(addr));
5044
5045 bottomup:
5046 /*
5047diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5048index eddcfb3..b117d90 100644
5049--- a/arch/sparc/Makefile
5050+++ b/arch/sparc/Makefile
5051@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5052 # Export what is needed by arch/sparc/boot/Makefile
5053 export VMLINUX_INIT VMLINUX_MAIN
5054 VMLINUX_INIT := $(head-y) $(init-y)
5055-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5056+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5057 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5058 VMLINUX_MAIN += $(drivers-y) $(net-y)
5059
5060diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5061index 9f421df..b81fc12 100644
5062--- a/arch/sparc/include/asm/atomic_64.h
5063+++ b/arch/sparc/include/asm/atomic_64.h
5064@@ -14,18 +14,40 @@
5065 #define ATOMIC64_INIT(i) { (i) }
5066
5067 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5068+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5069+{
5070+ return v->counter;
5071+}
5072 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5073+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5074+{
5075+ return v->counter;
5076+}
5077
5078 #define atomic_set(v, i) (((v)->counter) = i)
5079+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5080+{
5081+ v->counter = i;
5082+}
5083 #define atomic64_set(v, i) (((v)->counter) = i)
5084+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5085+{
5086+ v->counter = i;
5087+}
5088
5089 extern void atomic_add(int, atomic_t *);
5090+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5091 extern void atomic64_add(long, atomic64_t *);
5092+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5093 extern void atomic_sub(int, atomic_t *);
5094+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5095 extern void atomic64_sub(long, atomic64_t *);
5096+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5097
5098 extern int atomic_add_ret(int, atomic_t *);
5099+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5100 extern long atomic64_add_ret(long, atomic64_t *);
5101+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5102 extern int atomic_sub_ret(int, atomic_t *);
5103 extern long atomic64_sub_ret(long, atomic64_t *);
5104
5105@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5106 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5107
5108 #define atomic_inc_return(v) atomic_add_ret(1, v)
5109+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5110+{
5111+ return atomic_add_ret_unchecked(1, v);
5112+}
5113 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5114+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5115+{
5116+ return atomic64_add_ret_unchecked(1, v);
5117+}
5118
5119 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5120 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5121
5122 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5123+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5124+{
5125+ return atomic_add_ret_unchecked(i, v);
5126+}
5127 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5128+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5129+{
5130+ return atomic64_add_ret_unchecked(i, v);
5131+}
5132
5133 /*
5134 * atomic_inc_and_test - increment and test
5135@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5136 * other cases.
5137 */
5138 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5140+{
5141+ return atomic_inc_return_unchecked(v) == 0;
5142+}
5143 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5144
5145 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5146@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5147 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5148
5149 #define atomic_inc(v) atomic_add(1, v)
5150+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5151+{
5152+ atomic_add_unchecked(1, v);
5153+}
5154 #define atomic64_inc(v) atomic64_add(1, v)
5155+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5156+{
5157+ atomic64_add_unchecked(1, v);
5158+}
5159
5160 #define atomic_dec(v) atomic_sub(1, v)
5161+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5162+{
5163+ atomic_sub_unchecked(1, v);
5164+}
5165 #define atomic64_dec(v) atomic64_sub(1, v)
5166+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5167+{
5168+ atomic64_sub_unchecked(1, v);
5169+}
5170
5171 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5172 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5173
5174 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5175+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5176+{
5177+ return cmpxchg(&v->counter, old, new);
5178+}
5179 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5180+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5181+{
5182+ return xchg(&v->counter, new);
5183+}
5184
5185 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5186 {
5187- int c, old;
5188+ int c, old, new;
5189 c = atomic_read(v);
5190 for (;;) {
5191- if (unlikely(c == (u)))
5192+ if (unlikely(c == u))
5193 break;
5194- old = atomic_cmpxchg((v), c, c + (a));
5195+
5196+ asm volatile("addcc %2, %0, %0\n"
5197+
5198+#ifdef CONFIG_PAX_REFCOUNT
5199+ "tvs %%icc, 6\n"
5200+#endif
5201+
5202+ : "=r" (new)
5203+ : "0" (c), "ir" (a)
5204+ : "cc");
5205+
5206+ old = atomic_cmpxchg(v, c, new);
5207 if (likely(old == c))
5208 break;
5209 c = old;
5210@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5211 #define atomic64_cmpxchg(v, o, n) \
5212 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5213 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5214+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5215+{
5216+ return xchg(&v->counter, new);
5217+}
5218
5219 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5220 {
5221- long c, old;
5222+ long c, old, new;
5223 c = atomic64_read(v);
5224 for (;;) {
5225- if (unlikely(c == (u)))
5226+ if (unlikely(c == u))
5227 break;
5228- old = atomic64_cmpxchg((v), c, c + (a));
5229+
5230+ asm volatile("addcc %2, %0, %0\n"
5231+
5232+#ifdef CONFIG_PAX_REFCOUNT
5233+ "tvs %%xcc, 6\n"
5234+#endif
5235+
5236+ : "=r" (new)
5237+ : "0" (c), "ir" (a)
5238+ : "cc");
5239+
5240+ old = atomic64_cmpxchg(v, c, new);
5241 if (likely(old == c))
5242 break;
5243 c = old;
5244 }
5245- return c != (u);
5246+ return c != u;
5247 }
5248
5249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5250diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5251index 69358b5..9d0d492 100644
5252--- a/arch/sparc/include/asm/cache.h
5253+++ b/arch/sparc/include/asm/cache.h
5254@@ -7,10 +7,12 @@
5255 #ifndef _SPARC_CACHE_H
5256 #define _SPARC_CACHE_H
5257
5258+#include <linux/const.h>
5259+
5260 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5261
5262 #define L1_CACHE_SHIFT 5
5263-#define L1_CACHE_BYTES 32
5264+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5265
5266 #ifdef CONFIG_SPARC32
5267 #define SMP_CACHE_BYTES_SHIFT 5
5268diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5269index 4269ca6..e3da77f 100644
5270--- a/arch/sparc/include/asm/elf_32.h
5271+++ b/arch/sparc/include/asm/elf_32.h
5272@@ -114,6 +114,13 @@ typedef struct {
5273
5274 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5275
5276+#ifdef CONFIG_PAX_ASLR
5277+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5278+
5279+#define PAX_DELTA_MMAP_LEN 16
5280+#define PAX_DELTA_STACK_LEN 16
5281+#endif
5282+
5283 /* This yields a mask that user programs can use to figure out what
5284 instruction set this cpu supports. This can NOT be done in userspace
5285 on Sparc. */
5286diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5287index 7df8b7f..4946269 100644
5288--- a/arch/sparc/include/asm/elf_64.h
5289+++ b/arch/sparc/include/asm/elf_64.h
5290@@ -180,6 +180,13 @@ typedef struct {
5291 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5292 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5293
5294+#ifdef CONFIG_PAX_ASLR
5295+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5296+
5297+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5298+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5299+#endif
5300+
5301 extern unsigned long sparc64_elf_hwcap;
5302 #define ELF_HWCAP sparc64_elf_hwcap
5303
5304diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5305index ca2b344..c6084f89 100644
5306--- a/arch/sparc/include/asm/pgalloc_32.h
5307+++ b/arch/sparc/include/asm/pgalloc_32.h
5308@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5309 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5310 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5311 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5312+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5313
5314 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5315 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5316diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5317index 40b2d7a..22a665b 100644
5318--- a/arch/sparc/include/asm/pgalloc_64.h
5319+++ b/arch/sparc/include/asm/pgalloc_64.h
5320@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5321 }
5322
5323 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5324+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5325
5326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5327 {
5328diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5329index a790cc6..091ed94 100644
5330--- a/arch/sparc/include/asm/pgtable_32.h
5331+++ b/arch/sparc/include/asm/pgtable_32.h
5332@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5333 BTFIXUPDEF_INT(page_none)
5334 BTFIXUPDEF_INT(page_copy)
5335 BTFIXUPDEF_INT(page_readonly)
5336+
5337+#ifdef CONFIG_PAX_PAGEEXEC
5338+BTFIXUPDEF_INT(page_shared_noexec)
5339+BTFIXUPDEF_INT(page_copy_noexec)
5340+BTFIXUPDEF_INT(page_readonly_noexec)
5341+#endif
5342+
5343 BTFIXUPDEF_INT(page_kernel)
5344
5345 #define PMD_SHIFT SUN4C_PMD_SHIFT
5346@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5347 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5348 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5349
5350+#ifdef CONFIG_PAX_PAGEEXEC
5351+extern pgprot_t PAGE_SHARED_NOEXEC;
5352+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5353+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5354+#else
5355+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5356+# define PAGE_COPY_NOEXEC PAGE_COPY
5357+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5358+#endif
5359+
5360 extern unsigned long page_kernel;
5361
5362 #ifdef MODULE
5363diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5364index f6ae2b2..b03ffc7 100644
5365--- a/arch/sparc/include/asm/pgtsrmmu.h
5366+++ b/arch/sparc/include/asm/pgtsrmmu.h
5367@@ -115,6 +115,13 @@
5368 SRMMU_EXEC | SRMMU_REF)
5369 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5370 SRMMU_EXEC | SRMMU_REF)
5371+
5372+#ifdef CONFIG_PAX_PAGEEXEC
5373+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5374+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5375+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5376+#endif
5377+
5378 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5379 SRMMU_DIRTY | SRMMU_REF)
5380
5381diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5382index 9689176..63c18ea 100644
5383--- a/arch/sparc/include/asm/spinlock_64.h
5384+++ b/arch/sparc/include/asm/spinlock_64.h
5385@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5386
5387 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5388
5389-static void inline arch_read_lock(arch_rwlock_t *lock)
5390+static inline void arch_read_lock(arch_rwlock_t *lock)
5391 {
5392 unsigned long tmp1, tmp2;
5393
5394 __asm__ __volatile__ (
5395 "1: ldsw [%2], %0\n"
5396 " brlz,pn %0, 2f\n"
5397-"4: add %0, 1, %1\n"
5398+"4: addcc %0, 1, %1\n"
5399+
5400+#ifdef CONFIG_PAX_REFCOUNT
5401+" tvs %%icc, 6\n"
5402+#endif
5403+
5404 " cas [%2], %0, %1\n"
5405 " cmp %0, %1\n"
5406 " bne,pn %%icc, 1b\n"
5407@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5408 " .previous"
5409 : "=&r" (tmp1), "=&r" (tmp2)
5410 : "r" (lock)
5411- : "memory");
5412+ : "memory", "cc");
5413 }
5414
5415-static int inline arch_read_trylock(arch_rwlock_t *lock)
5416+static inline int arch_read_trylock(arch_rwlock_t *lock)
5417 {
5418 int tmp1, tmp2;
5419
5420@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5421 "1: ldsw [%2], %0\n"
5422 " brlz,a,pn %0, 2f\n"
5423 " mov 0, %0\n"
5424-" add %0, 1, %1\n"
5425+" addcc %0, 1, %1\n"
5426+
5427+#ifdef CONFIG_PAX_REFCOUNT
5428+" tvs %%icc, 6\n"
5429+#endif
5430+
5431 " cas [%2], %0, %1\n"
5432 " cmp %0, %1\n"
5433 " bne,pn %%icc, 1b\n"
5434@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5435 return tmp1;
5436 }
5437
5438-static void inline arch_read_unlock(arch_rwlock_t *lock)
5439+static inline void arch_read_unlock(arch_rwlock_t *lock)
5440 {
5441 unsigned long tmp1, tmp2;
5442
5443 __asm__ __volatile__(
5444 "1: lduw [%2], %0\n"
5445-" sub %0, 1, %1\n"
5446+" subcc %0, 1, %1\n"
5447+
5448+#ifdef CONFIG_PAX_REFCOUNT
5449+" tvs %%icc, 6\n"
5450+#endif
5451+
5452 " cas [%2], %0, %1\n"
5453 " cmp %0, %1\n"
5454 " bne,pn %%xcc, 1b\n"
5455@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5456 : "memory");
5457 }
5458
5459-static void inline arch_write_lock(arch_rwlock_t *lock)
5460+static inline void arch_write_lock(arch_rwlock_t *lock)
5461 {
5462 unsigned long mask, tmp1, tmp2;
5463
5464@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5465 : "memory");
5466 }
5467
5468-static void inline arch_write_unlock(arch_rwlock_t *lock)
5469+static inline void arch_write_unlock(arch_rwlock_t *lock)
5470 {
5471 __asm__ __volatile__(
5472 " stw %%g0, [%0]"
5473@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5474 : "memory");
5475 }
5476
5477-static int inline arch_write_trylock(arch_rwlock_t *lock)
5478+static inline int arch_write_trylock(arch_rwlock_t *lock)
5479 {
5480 unsigned long mask, tmp1, tmp2, result;
5481
5482diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5483index c2a1080..21ed218 100644
5484--- a/arch/sparc/include/asm/thread_info_32.h
5485+++ b/arch/sparc/include/asm/thread_info_32.h
5486@@ -50,6 +50,8 @@ struct thread_info {
5487 unsigned long w_saved;
5488
5489 struct restart_block restart_block;
5490+
5491+ unsigned long lowest_stack;
5492 };
5493
5494 /*
5495diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5496index 01d057f..13a7d2f 100644
5497--- a/arch/sparc/include/asm/thread_info_64.h
5498+++ b/arch/sparc/include/asm/thread_info_64.h
5499@@ -63,6 +63,8 @@ struct thread_info {
5500 struct pt_regs *kern_una_regs;
5501 unsigned int kern_una_insn;
5502
5503+ unsigned long lowest_stack;
5504+
5505 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5506 };
5507
5508@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5509 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5510 /* flag bit 6 is available */
5511 #define TIF_32BIT 7 /* 32-bit binary */
5512-/* flag bit 8 is available */
5513+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5514 #define TIF_SECCOMP 9 /* secure computing */
5515 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5516 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5517+
5518 /* NOTE: Thread flags >= 12 should be ones we have no interest
5519 * in using in assembly, else we can't use the mask as
5520 * an immediate value in instructions such as andcc.
5521@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5522 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5523 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5524 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5525+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5526
5527 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5528 _TIF_DO_NOTIFY_RESUME_MASK | \
5529 _TIF_NEED_RESCHED)
5530 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5531
5532+#define _TIF_WORK_SYSCALL \
5533+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5534+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5535+
5536+
5537 /*
5538 * Thread-synchronous status.
5539 *
5540diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5541index e88fbe5..96b0ce5 100644
5542--- a/arch/sparc/include/asm/uaccess.h
5543+++ b/arch/sparc/include/asm/uaccess.h
5544@@ -1,5 +1,13 @@
5545 #ifndef ___ASM_SPARC_UACCESS_H
5546 #define ___ASM_SPARC_UACCESS_H
5547+
5548+#ifdef __KERNEL__
5549+#ifndef __ASSEMBLY__
5550+#include <linux/types.h>
5551+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5552+#endif
5553+#endif
5554+
5555 #if defined(__sparc__) && defined(__arch64__)
5556 #include <asm/uaccess_64.h>
5557 #else
5558diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5559index 8303ac4..07f333d 100644
5560--- a/arch/sparc/include/asm/uaccess_32.h
5561+++ b/arch/sparc/include/asm/uaccess_32.h
5562@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5563
5564 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5565 {
5566- if (n && __access_ok((unsigned long) to, n))
5567+ if ((long)n < 0)
5568+ return n;
5569+
5570+ if (n && __access_ok((unsigned long) to, n)) {
5571+ if (!__builtin_constant_p(n))
5572+ check_object_size(from, n, true);
5573 return __copy_user(to, (__force void __user *) from, n);
5574- else
5575+ } else
5576 return n;
5577 }
5578
5579 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5580 {
5581+ if ((long)n < 0)
5582+ return n;
5583+
5584+ if (!__builtin_constant_p(n))
5585+ check_object_size(from, n, true);
5586+
5587 return __copy_user(to, (__force void __user *) from, n);
5588 }
5589
5590 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5591 {
5592- if (n && __access_ok((unsigned long) from, n))
5593+ if ((long)n < 0)
5594+ return n;
5595+
5596+ if (n && __access_ok((unsigned long) from, n)) {
5597+ if (!__builtin_constant_p(n))
5598+ check_object_size(to, n, false);
5599 return __copy_user((__force void __user *) to, from, n);
5600- else
5601+ } else
5602 return n;
5603 }
5604
5605 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5606 {
5607+ if ((long)n < 0)
5608+ return n;
5609+
5610 return __copy_user((__force void __user *) to, from, n);
5611 }
5612
5613diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5614index 3e1449f..5293a0e 100644
5615--- a/arch/sparc/include/asm/uaccess_64.h
5616+++ b/arch/sparc/include/asm/uaccess_64.h
5617@@ -10,6 +10,7 @@
5618 #include <linux/compiler.h>
5619 #include <linux/string.h>
5620 #include <linux/thread_info.h>
5621+#include <linux/kernel.h>
5622 #include <asm/asi.h>
5623 #include <asm/system.h>
5624 #include <asm/spitfire.h>
5625@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5626 static inline unsigned long __must_check
5627 copy_from_user(void *to, const void __user *from, unsigned long size)
5628 {
5629- unsigned long ret = ___copy_from_user(to, from, size);
5630+ unsigned long ret;
5631
5632+ if ((long)size < 0 || size > INT_MAX)
5633+ return size;
5634+
5635+ if (!__builtin_constant_p(size))
5636+ check_object_size(to, size, false);
5637+
5638+ ret = ___copy_from_user(to, from, size);
5639 if (unlikely(ret))
5640 ret = copy_from_user_fixup(to, from, size);
5641
5642@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5643 static inline unsigned long __must_check
5644 copy_to_user(void __user *to, const void *from, unsigned long size)
5645 {
5646- unsigned long ret = ___copy_to_user(to, from, size);
5647+ unsigned long ret;
5648
5649+ if ((long)size < 0 || size > INT_MAX)
5650+ return size;
5651+
5652+ if (!__builtin_constant_p(size))
5653+ check_object_size(from, size, true);
5654+
5655+ ret = ___copy_to_user(to, from, size);
5656 if (unlikely(ret))
5657 ret = copy_to_user_fixup(to, from, size);
5658 return ret;
5659diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5660index cb85458..e063f17 100644
5661--- a/arch/sparc/kernel/Makefile
5662+++ b/arch/sparc/kernel/Makefile
5663@@ -3,7 +3,7 @@
5664 #
5665
5666 asflags-y := -ansi
5667-ccflags-y := -Werror
5668+#ccflags-y := -Werror
5669
5670 extra-y := head_$(BITS).o
5671 extra-y += init_task.o
5672diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5673index f793742..4d880af 100644
5674--- a/arch/sparc/kernel/process_32.c
5675+++ b/arch/sparc/kernel/process_32.c
5676@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5677 rw->ins[4], rw->ins[5],
5678 rw->ins[6],
5679 rw->ins[7]);
5680- printk("%pS\n", (void *) rw->ins[7]);
5681+ printk("%pA\n", (void *) rw->ins[7]);
5682 rw = (struct reg_window32 *) rw->ins[6];
5683 }
5684 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5685@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5686
5687 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5688 r->psr, r->pc, r->npc, r->y, print_tainted());
5689- printk("PC: <%pS>\n", (void *) r->pc);
5690+ printk("PC: <%pA>\n", (void *) r->pc);
5691 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5692 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5693 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5694 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5695 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5696 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5697- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5698+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5699
5700 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5701 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5702@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5703 rw = (struct reg_window32 *) fp;
5704 pc = rw->ins[7];
5705 printk("[%08lx : ", pc);
5706- printk("%pS ] ", (void *) pc);
5707+ printk("%pA ] ", (void *) pc);
5708 fp = rw->ins[6];
5709 } while (++count < 16);
5710 printk("\n");
5711diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5712index 39d8b05..d1a7d90 100644
5713--- a/arch/sparc/kernel/process_64.c
5714+++ b/arch/sparc/kernel/process_64.c
5715@@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5716 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5717 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5718 if (regs->tstate & TSTATE_PRIV)
5719- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5720+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5721 }
5722
5723 void show_regs(struct pt_regs *regs)
5724 {
5725 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5726 regs->tpc, regs->tnpc, regs->y, print_tainted());
5727- printk("TPC: <%pS>\n", (void *) regs->tpc);
5728+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5729 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5730 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5731 regs->u_regs[3]);
5732@@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5733 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5734 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5735 regs->u_regs[15]);
5736- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5737+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5738 show_regwindow(regs);
5739 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5740 }
5741@@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5742 ((tp && tp->task) ? tp->task->pid : -1));
5743
5744 if (gp->tstate & TSTATE_PRIV) {
5745- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5746+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5747 (void *) gp->tpc,
5748 (void *) gp->o7,
5749 (void *) gp->i7,
5750diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5751index 9388844..0075fd2 100644
5752--- a/arch/sparc/kernel/ptrace_64.c
5753+++ b/arch/sparc/kernel/ptrace_64.c
5754@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
5755 return ret;
5756 }
5757
5758+#ifdef CONFIG_GRKERNSEC_SETXID
5759+extern void gr_delayed_cred_worker(void);
5760+#endif
5761+
5762 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5763 {
5764 int ret = 0;
5765@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5766 /* do the secure computing check first */
5767 secure_computing(regs->u_regs[UREG_G1]);
5768
5769+#ifdef CONFIG_GRKERNSEC_SETXID
5770+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5771+ gr_delayed_cred_worker();
5772+#endif
5773+
5774 if (test_thread_flag(TIF_SYSCALL_TRACE))
5775 ret = tracehook_report_syscall_entry(regs);
5776
5777@@ -1085,6 +1094,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5778
5779 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5780 {
5781+#ifdef CONFIG_GRKERNSEC_SETXID
5782+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5783+ gr_delayed_cred_worker();
5784+#endif
5785+
5786 audit_syscall_exit(regs);
5787
5788 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5789diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5790index 42b282f..28ce9f2 100644
5791--- a/arch/sparc/kernel/sys_sparc_32.c
5792+++ b/arch/sparc/kernel/sys_sparc_32.c
5793@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5794 if (ARCH_SUN4C && len > 0x20000000)
5795 return -ENOMEM;
5796 if (!addr)
5797- addr = TASK_UNMAPPED_BASE;
5798+ addr = current->mm->mmap_base;
5799
5800 if (flags & MAP_SHARED)
5801 addr = COLOUR_ALIGN(addr);
5802@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5803 }
5804 if (TASK_SIZE - PAGE_SIZE - len < addr)
5805 return -ENOMEM;
5806- if (!vmm || addr + len <= vmm->vm_start)
5807+ if (check_heap_stack_gap(vmm, addr, len))
5808 return addr;
5809 addr = vmm->vm_end;
5810 if (flags & MAP_SHARED)
5811diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5812index 232df99..cee1f9c 100644
5813--- a/arch/sparc/kernel/sys_sparc_64.c
5814+++ b/arch/sparc/kernel/sys_sparc_64.c
5815@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5816 /* We do not accept a shared mapping if it would violate
5817 * cache aliasing constraints.
5818 */
5819- if ((flags & MAP_SHARED) &&
5820+ if ((filp || (flags & MAP_SHARED)) &&
5821 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5822 return -EINVAL;
5823 return addr;
5824@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5825 if (filp || (flags & MAP_SHARED))
5826 do_color_align = 1;
5827
5828+#ifdef CONFIG_PAX_RANDMMAP
5829+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5830+#endif
5831+
5832 if (addr) {
5833 if (do_color_align)
5834 addr = COLOUR_ALIGN(addr, pgoff);
5835@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5836 addr = PAGE_ALIGN(addr);
5837
5838 vma = find_vma(mm, addr);
5839- if (task_size - len >= addr &&
5840- (!vma || addr + len <= vma->vm_start))
5841+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5842 return addr;
5843 }
5844
5845 if (len > mm->cached_hole_size) {
5846- start_addr = addr = mm->free_area_cache;
5847+ start_addr = addr = mm->free_area_cache;
5848 } else {
5849- start_addr = addr = TASK_UNMAPPED_BASE;
5850+ start_addr = addr = mm->mmap_base;
5851 mm->cached_hole_size = 0;
5852 }
5853
5854@@ -174,14 +177,14 @@ full_search:
5855 vma = find_vma(mm, VA_EXCLUDE_END);
5856 }
5857 if (unlikely(task_size < addr)) {
5858- if (start_addr != TASK_UNMAPPED_BASE) {
5859- start_addr = addr = TASK_UNMAPPED_BASE;
5860+ if (start_addr != mm->mmap_base) {
5861+ start_addr = addr = mm->mmap_base;
5862 mm->cached_hole_size = 0;
5863 goto full_search;
5864 }
5865 return -ENOMEM;
5866 }
5867- if (likely(!vma || addr + len <= vma->vm_start)) {
5868+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5869 /*
5870 * Remember the place where we stopped the search:
5871 */
5872@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5873 /* We do not accept a shared mapping if it would violate
5874 * cache aliasing constraints.
5875 */
5876- if ((flags & MAP_SHARED) &&
5877+ if ((filp || (flags & MAP_SHARED)) &&
5878 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5879 return -EINVAL;
5880 return addr;
5881@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5882 addr = PAGE_ALIGN(addr);
5883
5884 vma = find_vma(mm, addr);
5885- if (task_size - len >= addr &&
5886- (!vma || addr + len <= vma->vm_start))
5887+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5888 return addr;
5889 }
5890
5891@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5892 /* make sure it can fit in the remaining address space */
5893 if (likely(addr > len)) {
5894 vma = find_vma(mm, addr-len);
5895- if (!vma || addr <= vma->vm_start) {
5896+ if (check_heap_stack_gap(vma, addr - len, len)) {
5897 /* remember the address as a hint for next time */
5898 return (mm->free_area_cache = addr-len);
5899 }
5900@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5901 if (unlikely(mm->mmap_base < len))
5902 goto bottomup;
5903
5904- addr = mm->mmap_base-len;
5905- if (do_color_align)
5906- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5907+ addr = mm->mmap_base - len;
5908
5909 do {
5910+ if (do_color_align)
5911+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5912 /*
5913 * Lookup failure means no vma is above this address,
5914 * else if new region fits below vma->vm_start,
5915 * return with success:
5916 */
5917 vma = find_vma(mm, addr);
5918- if (likely(!vma || addr+len <= vma->vm_start)) {
5919+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5920 /* remember the address as a hint for next time */
5921 return (mm->free_area_cache = addr);
5922 }
5923@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5924 mm->cached_hole_size = vma->vm_start - addr;
5925
5926 /* try just below the current vma->vm_start */
5927- addr = vma->vm_start-len;
5928- if (do_color_align)
5929- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5930- } while (likely(len < vma->vm_start));
5931+ addr = skip_heap_stack_gap(vma, len);
5932+ } while (!IS_ERR_VALUE(addr));
5933
5934 bottomup:
5935 /*
5936@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5937 gap == RLIM_INFINITY ||
5938 sysctl_legacy_va_layout) {
5939 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5940+
5941+#ifdef CONFIG_PAX_RANDMMAP
5942+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5943+ mm->mmap_base += mm->delta_mmap;
5944+#endif
5945+
5946 mm->get_unmapped_area = arch_get_unmapped_area;
5947 mm->unmap_area = arch_unmap_area;
5948 } else {
5949@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5950 gap = (task_size / 6 * 5);
5951
5952 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5953+
5954+#ifdef CONFIG_PAX_RANDMMAP
5955+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5956+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5957+#endif
5958+
5959 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5960 mm->unmap_area = arch_unmap_area_topdown;
5961 }
5962diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5963index 1d7e274..b39c527 100644
5964--- a/arch/sparc/kernel/syscalls.S
5965+++ b/arch/sparc/kernel/syscalls.S
5966@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5967 #endif
5968 .align 32
5969 1: ldx [%g6 + TI_FLAGS], %l5
5970- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5971+ andcc %l5, _TIF_WORK_SYSCALL, %g0
5972 be,pt %icc, rtrap
5973 nop
5974 call syscall_trace_leave
5975@@ -179,7 +179,7 @@ linux_sparc_syscall32:
5976
5977 srl %i5, 0, %o5 ! IEU1
5978 srl %i2, 0, %o2 ! IEU0 Group
5979- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5980+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5981 bne,pn %icc, linux_syscall_trace32 ! CTI
5982 mov %i0, %l5 ! IEU1
5983 call %l7 ! CTI Group brk forced
5984@@ -202,7 +202,7 @@ linux_sparc_syscall:
5985
5986 mov %i3, %o3 ! IEU1
5987 mov %i4, %o4 ! IEU0 Group
5988- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5989+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5990 bne,pn %icc, linux_syscall_trace ! CTI Group
5991 mov %i0, %l5 ! IEU0
5992 2: call %l7 ! CTI Group brk forced
5993@@ -226,7 +226,7 @@ ret_sys_call:
5994
5995 cmp %o0, -ERESTART_RESTARTBLOCK
5996 bgeu,pn %xcc, 1f
5997- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5998+ andcc %l0, _TIF_WORK_SYSCALL, %l6
5999 80:
6000 /* System call success, clear Carry condition code. */
6001 andn %g3, %g2, %g3
6002@@ -241,7 +241,7 @@ ret_sys_call:
6003 /* System call failure, set Carry condition code.
6004 * Also, get abs(errno) to return to the process.
6005 */
6006- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6007+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6008 sub %g0, %o0, %o0
6009 or %g3, %g2, %g3
6010 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6011diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6012index 591f20c..0f1b925 100644
6013--- a/arch/sparc/kernel/traps_32.c
6014+++ b/arch/sparc/kernel/traps_32.c
6015@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
6016 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6017 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6018
6019+extern void gr_handle_kernel_exploit(void);
6020+
6021 void die_if_kernel(char *str, struct pt_regs *regs)
6022 {
6023 static int die_counter;
6024@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6025 count++ < 30 &&
6026 (((unsigned long) rw) >= PAGE_OFFSET) &&
6027 !(((unsigned long) rw) & 0x7)) {
6028- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6029+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6030 (void *) rw->ins[7]);
6031 rw = (struct reg_window32 *)rw->ins[6];
6032 }
6033 }
6034 printk("Instruction DUMP:");
6035 instruction_dump ((unsigned long *) regs->pc);
6036- if(regs->psr & PSR_PS)
6037+ if(regs->psr & PSR_PS) {
6038+ gr_handle_kernel_exploit();
6039 do_exit(SIGKILL);
6040+ }
6041 do_exit(SIGSEGV);
6042 }
6043
6044diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6045index 0cbdaa4..438e4c9 100644
6046--- a/arch/sparc/kernel/traps_64.c
6047+++ b/arch/sparc/kernel/traps_64.c
6048@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6049 i + 1,
6050 p->trapstack[i].tstate, p->trapstack[i].tpc,
6051 p->trapstack[i].tnpc, p->trapstack[i].tt);
6052- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6053+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6054 }
6055 }
6056
6057@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6058
6059 lvl -= 0x100;
6060 if (regs->tstate & TSTATE_PRIV) {
6061+
6062+#ifdef CONFIG_PAX_REFCOUNT
6063+ if (lvl == 6)
6064+ pax_report_refcount_overflow(regs);
6065+#endif
6066+
6067 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6068 die_if_kernel(buffer, regs);
6069 }
6070@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6071 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6072 {
6073 char buffer[32];
6074-
6075+
6076 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6077 0, lvl, SIGTRAP) == NOTIFY_STOP)
6078 return;
6079
6080+#ifdef CONFIG_PAX_REFCOUNT
6081+ if (lvl == 6)
6082+ pax_report_refcount_overflow(regs);
6083+#endif
6084+
6085 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6086
6087 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6088@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6089 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6090 printk("%s" "ERROR(%d): ",
6091 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6092- printk("TPC<%pS>\n", (void *) regs->tpc);
6093+ printk("TPC<%pA>\n", (void *) regs->tpc);
6094 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6095 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6096 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6097@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6098 smp_processor_id(),
6099 (type & 0x1) ? 'I' : 'D',
6100 regs->tpc);
6101- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6102+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6103 panic("Irrecoverable Cheetah+ parity error.");
6104 }
6105
6106@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6107 smp_processor_id(),
6108 (type & 0x1) ? 'I' : 'D',
6109 regs->tpc);
6110- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6111+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6112 }
6113
6114 struct sun4v_error_entry {
6115@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6116
6117 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6118 regs->tpc, tl);
6119- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6120+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6121 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6122- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6123+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6124 (void *) regs->u_regs[UREG_I7]);
6125 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6126 "pte[%lx] error[%lx]\n",
6127@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6128
6129 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6130 regs->tpc, tl);
6131- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6132+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6133 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6134- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6135+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6136 (void *) regs->u_regs[UREG_I7]);
6137 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6138 "pte[%lx] error[%lx]\n",
6139@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6140 fp = (unsigned long)sf->fp + STACK_BIAS;
6141 }
6142
6143- printk(" [%016lx] %pS\n", pc, (void *) pc);
6144+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6145 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6146 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6147 int index = tsk->curr_ret_stack;
6148 if (tsk->ret_stack && index >= graph) {
6149 pc = tsk->ret_stack[index - graph].ret;
6150- printk(" [%016lx] %pS\n", pc, (void *) pc);
6151+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6152 graph++;
6153 }
6154 }
6155@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6156 return (struct reg_window *) (fp + STACK_BIAS);
6157 }
6158
6159+extern void gr_handle_kernel_exploit(void);
6160+
6161 void die_if_kernel(char *str, struct pt_regs *regs)
6162 {
6163 static int die_counter;
6164@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6165 while (rw &&
6166 count++ < 30 &&
6167 kstack_valid(tp, (unsigned long) rw)) {
6168- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6169+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6170 (void *) rw->ins[7]);
6171
6172 rw = kernel_stack_up(rw);
6173@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6174 }
6175 user_instruction_dump ((unsigned int __user *) regs->tpc);
6176 }
6177- if (regs->tstate & TSTATE_PRIV)
6178+ if (regs->tstate & TSTATE_PRIV) {
6179+ gr_handle_kernel_exploit();
6180 do_exit(SIGKILL);
6181+ }
6182 do_exit(SIGSEGV);
6183 }
6184 EXPORT_SYMBOL(die_if_kernel);
6185diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6186index 76e4ac1..78f8bb1 100644
6187--- a/arch/sparc/kernel/unaligned_64.c
6188+++ b/arch/sparc/kernel/unaligned_64.c
6189@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6190 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6191
6192 if (__ratelimit(&ratelimit)) {
6193- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6194+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6195 regs->tpc, (void *) regs->tpc);
6196 }
6197 }
6198diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6199index a3fc437..fea9957 100644
6200--- a/arch/sparc/lib/Makefile
6201+++ b/arch/sparc/lib/Makefile
6202@@ -2,7 +2,7 @@
6203 #
6204
6205 asflags-y := -ansi -DST_DIV0=0x02
6206-ccflags-y := -Werror
6207+#ccflags-y := -Werror
6208
6209 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6210 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6211diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6212index 59186e0..f747d7a 100644
6213--- a/arch/sparc/lib/atomic_64.S
6214+++ b/arch/sparc/lib/atomic_64.S
6215@@ -18,7 +18,12 @@
6216 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6217 BACKOFF_SETUP(%o2)
6218 1: lduw [%o1], %g1
6219- add %g1, %o0, %g7
6220+ addcc %g1, %o0, %g7
6221+
6222+#ifdef CONFIG_PAX_REFCOUNT
6223+ tvs %icc, 6
6224+#endif
6225+
6226 cas [%o1], %g1, %g7
6227 cmp %g1, %g7
6228 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6229@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6230 2: BACKOFF_SPIN(%o2, %o3, 1b)
6231 .size atomic_add, .-atomic_add
6232
6233+ .globl atomic_add_unchecked
6234+ .type atomic_add_unchecked,#function
6235+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6236+ BACKOFF_SETUP(%o2)
6237+1: lduw [%o1], %g1
6238+ add %g1, %o0, %g7
6239+ cas [%o1], %g1, %g7
6240+ cmp %g1, %g7
6241+ bne,pn %icc, 2f
6242+ nop
6243+ retl
6244+ nop
6245+2: BACKOFF_SPIN(%o2, %o3, 1b)
6246+ .size atomic_add_unchecked, .-atomic_add_unchecked
6247+
6248 .globl atomic_sub
6249 .type atomic_sub,#function
6250 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6251 BACKOFF_SETUP(%o2)
6252 1: lduw [%o1], %g1
6253- sub %g1, %o0, %g7
6254+ subcc %g1, %o0, %g7
6255+
6256+#ifdef CONFIG_PAX_REFCOUNT
6257+ tvs %icc, 6
6258+#endif
6259+
6260 cas [%o1], %g1, %g7
6261 cmp %g1, %g7
6262 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6263@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6264 2: BACKOFF_SPIN(%o2, %o3, 1b)
6265 .size atomic_sub, .-atomic_sub
6266
6267+ .globl atomic_sub_unchecked
6268+ .type atomic_sub_unchecked,#function
6269+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6270+ BACKOFF_SETUP(%o2)
6271+1: lduw [%o1], %g1
6272+ sub %g1, %o0, %g7
6273+ cas [%o1], %g1, %g7
6274+ cmp %g1, %g7
6275+ bne,pn %icc, 2f
6276+ nop
6277+ retl
6278+ nop
6279+2: BACKOFF_SPIN(%o2, %o3, 1b)
6280+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6281+
6282 .globl atomic_add_ret
6283 .type atomic_add_ret,#function
6284 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6285 BACKOFF_SETUP(%o2)
6286 1: lduw [%o1], %g1
6287- add %g1, %o0, %g7
6288+ addcc %g1, %o0, %g7
6289+
6290+#ifdef CONFIG_PAX_REFCOUNT
6291+ tvs %icc, 6
6292+#endif
6293+
6294 cas [%o1], %g1, %g7
6295 cmp %g1, %g7
6296 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6297@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6298 2: BACKOFF_SPIN(%o2, %o3, 1b)
6299 .size atomic_add_ret, .-atomic_add_ret
6300
6301+ .globl atomic_add_ret_unchecked
6302+ .type atomic_add_ret_unchecked,#function
6303+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6304+ BACKOFF_SETUP(%o2)
6305+1: lduw [%o1], %g1
6306+ addcc %g1, %o0, %g7
6307+ cas [%o1], %g1, %g7
6308+ cmp %g1, %g7
6309+ bne,pn %icc, 2f
6310+ add %g7, %o0, %g7
6311+ sra %g7, 0, %o0
6312+ retl
6313+ nop
6314+2: BACKOFF_SPIN(%o2, %o3, 1b)
6315+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6316+
6317 .globl atomic_sub_ret
6318 .type atomic_sub_ret,#function
6319 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6320 BACKOFF_SETUP(%o2)
6321 1: lduw [%o1], %g1
6322- sub %g1, %o0, %g7
6323+ subcc %g1, %o0, %g7
6324+
6325+#ifdef CONFIG_PAX_REFCOUNT
6326+ tvs %icc, 6
6327+#endif
6328+
6329 cas [%o1], %g1, %g7
6330 cmp %g1, %g7
6331 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6332@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6333 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6334 BACKOFF_SETUP(%o2)
6335 1: ldx [%o1], %g1
6336- add %g1, %o0, %g7
6337+ addcc %g1, %o0, %g7
6338+
6339+#ifdef CONFIG_PAX_REFCOUNT
6340+ tvs %xcc, 6
6341+#endif
6342+
6343 casx [%o1], %g1, %g7
6344 cmp %g1, %g7
6345 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6346@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6347 2: BACKOFF_SPIN(%o2, %o3, 1b)
6348 .size atomic64_add, .-atomic64_add
6349
6350+ .globl atomic64_add_unchecked
6351+ .type atomic64_add_unchecked,#function
6352+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6353+ BACKOFF_SETUP(%o2)
6354+1: ldx [%o1], %g1
6355+ addcc %g1, %o0, %g7
6356+ casx [%o1], %g1, %g7
6357+ cmp %g1, %g7
6358+ bne,pn %xcc, 2f
6359+ nop
6360+ retl
6361+ nop
6362+2: BACKOFF_SPIN(%o2, %o3, 1b)
6363+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6364+
6365 .globl atomic64_sub
6366 .type atomic64_sub,#function
6367 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6368 BACKOFF_SETUP(%o2)
6369 1: ldx [%o1], %g1
6370- sub %g1, %o0, %g7
6371+ subcc %g1, %o0, %g7
6372+
6373+#ifdef CONFIG_PAX_REFCOUNT
6374+ tvs %xcc, 6
6375+#endif
6376+
6377 casx [%o1], %g1, %g7
6378 cmp %g1, %g7
6379 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6380@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6381 2: BACKOFF_SPIN(%o2, %o3, 1b)
6382 .size atomic64_sub, .-atomic64_sub
6383
6384+ .globl atomic64_sub_unchecked
6385+ .type atomic64_sub_unchecked,#function
6386+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6387+ BACKOFF_SETUP(%o2)
6388+1: ldx [%o1], %g1
6389+ subcc %g1, %o0, %g7
6390+ casx [%o1], %g1, %g7
6391+ cmp %g1, %g7
6392+ bne,pn %xcc, 2f
6393+ nop
6394+ retl
6395+ nop
6396+2: BACKOFF_SPIN(%o2, %o3, 1b)
6397+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6398+
6399 .globl atomic64_add_ret
6400 .type atomic64_add_ret,#function
6401 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6402 BACKOFF_SETUP(%o2)
6403 1: ldx [%o1], %g1
6404- add %g1, %o0, %g7
6405+ addcc %g1, %o0, %g7
6406+
6407+#ifdef CONFIG_PAX_REFCOUNT
6408+ tvs %xcc, 6
6409+#endif
6410+
6411 casx [%o1], %g1, %g7
6412 cmp %g1, %g7
6413 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6414@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6415 2: BACKOFF_SPIN(%o2, %o3, 1b)
6416 .size atomic64_add_ret, .-atomic64_add_ret
6417
6418+ .globl atomic64_add_ret_unchecked
6419+ .type atomic64_add_ret_unchecked,#function
6420+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6421+ BACKOFF_SETUP(%o2)
6422+1: ldx [%o1], %g1
6423+ addcc %g1, %o0, %g7
6424+ casx [%o1], %g1, %g7
6425+ cmp %g1, %g7
6426+ bne,pn %xcc, 2f
6427+ add %g7, %o0, %g7
6428+ mov %g7, %o0
6429+ retl
6430+ nop
6431+2: BACKOFF_SPIN(%o2, %o3, 1b)
6432+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6433+
6434 .globl atomic64_sub_ret
6435 .type atomic64_sub_ret,#function
6436 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6437 BACKOFF_SETUP(%o2)
6438 1: ldx [%o1], %g1
6439- sub %g1, %o0, %g7
6440+ subcc %g1, %o0, %g7
6441+
6442+#ifdef CONFIG_PAX_REFCOUNT
6443+ tvs %xcc, 6
6444+#endif
6445+
6446 casx [%o1], %g1, %g7
6447 cmp %g1, %g7
6448 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6449diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6450index f73c224..662af10 100644
6451--- a/arch/sparc/lib/ksyms.c
6452+++ b/arch/sparc/lib/ksyms.c
6453@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6454
6455 /* Atomic counter implementation. */
6456 EXPORT_SYMBOL(atomic_add);
6457+EXPORT_SYMBOL(atomic_add_unchecked);
6458 EXPORT_SYMBOL(atomic_add_ret);
6459+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6460 EXPORT_SYMBOL(atomic_sub);
6461+EXPORT_SYMBOL(atomic_sub_unchecked);
6462 EXPORT_SYMBOL(atomic_sub_ret);
6463 EXPORT_SYMBOL(atomic64_add);
6464+EXPORT_SYMBOL(atomic64_add_unchecked);
6465 EXPORT_SYMBOL(atomic64_add_ret);
6466+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6467 EXPORT_SYMBOL(atomic64_sub);
6468+EXPORT_SYMBOL(atomic64_sub_unchecked);
6469 EXPORT_SYMBOL(atomic64_sub_ret);
6470
6471 /* Atomic bit operations. */
6472diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6473index 301421c..e2535d1 100644
6474--- a/arch/sparc/mm/Makefile
6475+++ b/arch/sparc/mm/Makefile
6476@@ -2,7 +2,7 @@
6477 #
6478
6479 asflags-y := -ansi
6480-ccflags-y := -Werror
6481+#ccflags-y := -Werror
6482
6483 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6484 obj-y += fault_$(BITS).o
6485diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6486index 8023fd7..c8e89e9 100644
6487--- a/arch/sparc/mm/fault_32.c
6488+++ b/arch/sparc/mm/fault_32.c
6489@@ -21,6 +21,9 @@
6490 #include <linux/perf_event.h>
6491 #include <linux/interrupt.h>
6492 #include <linux/kdebug.h>
6493+#include <linux/slab.h>
6494+#include <linux/pagemap.h>
6495+#include <linux/compiler.h>
6496
6497 #include <asm/system.h>
6498 #include <asm/page.h>
6499@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6500 return safe_compute_effective_address(regs, insn);
6501 }
6502
6503+#ifdef CONFIG_PAX_PAGEEXEC
6504+#ifdef CONFIG_PAX_DLRESOLVE
6505+static void pax_emuplt_close(struct vm_area_struct *vma)
6506+{
6507+ vma->vm_mm->call_dl_resolve = 0UL;
6508+}
6509+
6510+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6511+{
6512+ unsigned int *kaddr;
6513+
6514+ vmf->page = alloc_page(GFP_HIGHUSER);
6515+ if (!vmf->page)
6516+ return VM_FAULT_OOM;
6517+
6518+ kaddr = kmap(vmf->page);
6519+ memset(kaddr, 0, PAGE_SIZE);
6520+ kaddr[0] = 0x9DE3BFA8U; /* save */
6521+ flush_dcache_page(vmf->page);
6522+ kunmap(vmf->page);
6523+ return VM_FAULT_MAJOR;
6524+}
6525+
6526+static const struct vm_operations_struct pax_vm_ops = {
6527+ .close = pax_emuplt_close,
6528+ .fault = pax_emuplt_fault
6529+};
6530+
6531+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6532+{
6533+ int ret;
6534+
6535+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6536+ vma->vm_mm = current->mm;
6537+ vma->vm_start = addr;
6538+ vma->vm_end = addr + PAGE_SIZE;
6539+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6540+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6541+ vma->vm_ops = &pax_vm_ops;
6542+
6543+ ret = insert_vm_struct(current->mm, vma);
6544+ if (ret)
6545+ return ret;
6546+
6547+ ++current->mm->total_vm;
6548+ return 0;
6549+}
6550+#endif
6551+
6552+/*
6553+ * PaX: decide what to do with offenders (regs->pc = fault address)
6554+ *
6555+ * returns 1 when task should be killed
6556+ * 2 when patched PLT trampoline was detected
6557+ * 3 when unpatched PLT trampoline was detected
6558+ */
6559+static int pax_handle_fetch_fault(struct pt_regs *regs)
6560+{
6561+
6562+#ifdef CONFIG_PAX_EMUPLT
6563+ int err;
6564+
6565+ do { /* PaX: patched PLT emulation #1 */
6566+ unsigned int sethi1, sethi2, jmpl;
6567+
6568+ err = get_user(sethi1, (unsigned int *)regs->pc);
6569+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6570+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6571+
6572+ if (err)
6573+ break;
6574+
6575+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6576+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6577+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6578+ {
6579+ unsigned int addr;
6580+
6581+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6582+ addr = regs->u_regs[UREG_G1];
6583+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6584+ regs->pc = addr;
6585+ regs->npc = addr+4;
6586+ return 2;
6587+ }
6588+ } while (0);
6589+
6590+ { /* PaX: patched PLT emulation #2 */
6591+ unsigned int ba;
6592+
6593+ err = get_user(ba, (unsigned int *)regs->pc);
6594+
6595+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6596+ unsigned int addr;
6597+
6598+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6599+ regs->pc = addr;
6600+ regs->npc = addr+4;
6601+ return 2;
6602+ }
6603+ }
6604+
6605+ do { /* PaX: patched PLT emulation #3 */
6606+ unsigned int sethi, jmpl, nop;
6607+
6608+ err = get_user(sethi, (unsigned int *)regs->pc);
6609+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6610+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6611+
6612+ if (err)
6613+ break;
6614+
6615+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6616+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6617+ nop == 0x01000000U)
6618+ {
6619+ unsigned int addr;
6620+
6621+ addr = (sethi & 0x003FFFFFU) << 10;
6622+ regs->u_regs[UREG_G1] = addr;
6623+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6624+ regs->pc = addr;
6625+ regs->npc = addr+4;
6626+ return 2;
6627+ }
6628+ } while (0);
6629+
6630+ do { /* PaX: unpatched PLT emulation step 1 */
6631+ unsigned int sethi, ba, nop;
6632+
6633+ err = get_user(sethi, (unsigned int *)regs->pc);
6634+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6635+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6636+
6637+ if (err)
6638+ break;
6639+
6640+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6641+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6642+ nop == 0x01000000U)
6643+ {
6644+ unsigned int addr, save, call;
6645+
6646+ if ((ba & 0xFFC00000U) == 0x30800000U)
6647+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6648+ else
6649+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6650+
6651+ err = get_user(save, (unsigned int *)addr);
6652+ err |= get_user(call, (unsigned int *)(addr+4));
6653+ err |= get_user(nop, (unsigned int *)(addr+8));
6654+ if (err)
6655+ break;
6656+
6657+#ifdef CONFIG_PAX_DLRESOLVE
6658+ if (save == 0x9DE3BFA8U &&
6659+ (call & 0xC0000000U) == 0x40000000U &&
6660+ nop == 0x01000000U)
6661+ {
6662+ struct vm_area_struct *vma;
6663+ unsigned long call_dl_resolve;
6664+
6665+ down_read(&current->mm->mmap_sem);
6666+ call_dl_resolve = current->mm->call_dl_resolve;
6667+ up_read(&current->mm->mmap_sem);
6668+ if (likely(call_dl_resolve))
6669+ goto emulate;
6670+
6671+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6672+
6673+ down_write(&current->mm->mmap_sem);
6674+ if (current->mm->call_dl_resolve) {
6675+ call_dl_resolve = current->mm->call_dl_resolve;
6676+ up_write(&current->mm->mmap_sem);
6677+ if (vma)
6678+ kmem_cache_free(vm_area_cachep, vma);
6679+ goto emulate;
6680+ }
6681+
6682+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6683+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6684+ up_write(&current->mm->mmap_sem);
6685+ if (vma)
6686+ kmem_cache_free(vm_area_cachep, vma);
6687+ return 1;
6688+ }
6689+
6690+ if (pax_insert_vma(vma, call_dl_resolve)) {
6691+ up_write(&current->mm->mmap_sem);
6692+ kmem_cache_free(vm_area_cachep, vma);
6693+ return 1;
6694+ }
6695+
6696+ current->mm->call_dl_resolve = call_dl_resolve;
6697+ up_write(&current->mm->mmap_sem);
6698+
6699+emulate:
6700+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6701+ regs->pc = call_dl_resolve;
6702+ regs->npc = addr+4;
6703+ return 3;
6704+ }
6705+#endif
6706+
6707+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6708+ if ((save & 0xFFC00000U) == 0x05000000U &&
6709+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6710+ nop == 0x01000000U)
6711+ {
6712+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6713+ regs->u_regs[UREG_G2] = addr + 4;
6714+ addr = (save & 0x003FFFFFU) << 10;
6715+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6716+ regs->pc = addr;
6717+ regs->npc = addr+4;
6718+ return 3;
6719+ }
6720+ }
6721+ } while (0);
6722+
6723+ do { /* PaX: unpatched PLT emulation step 2 */
6724+ unsigned int save, call, nop;
6725+
6726+ err = get_user(save, (unsigned int *)(regs->pc-4));
6727+ err |= get_user(call, (unsigned int *)regs->pc);
6728+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6729+ if (err)
6730+ break;
6731+
6732+ if (save == 0x9DE3BFA8U &&
6733+ (call & 0xC0000000U) == 0x40000000U &&
6734+ nop == 0x01000000U)
6735+ {
6736+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6737+
6738+ regs->u_regs[UREG_RETPC] = regs->pc;
6739+ regs->pc = dl_resolve;
6740+ regs->npc = dl_resolve+4;
6741+ return 3;
6742+ }
6743+ } while (0);
6744+#endif
6745+
6746+ return 1;
6747+}
6748+
6749+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6750+{
6751+ unsigned long i;
6752+
6753+ printk(KERN_ERR "PAX: bytes at PC: ");
6754+ for (i = 0; i < 8; i++) {
6755+ unsigned int c;
6756+ if (get_user(c, (unsigned int *)pc+i))
6757+ printk(KERN_CONT "???????? ");
6758+ else
6759+ printk(KERN_CONT "%08x ", c);
6760+ }
6761+ printk("\n");
6762+}
6763+#endif
6764+
6765 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6766 int text_fault)
6767 {
6768@@ -280,6 +545,24 @@ good_area:
6769 if(!(vma->vm_flags & VM_WRITE))
6770 goto bad_area;
6771 } else {
6772+
6773+#ifdef CONFIG_PAX_PAGEEXEC
6774+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6775+ up_read(&mm->mmap_sem);
6776+ switch (pax_handle_fetch_fault(regs)) {
6777+
6778+#ifdef CONFIG_PAX_EMUPLT
6779+ case 2:
6780+ case 3:
6781+ return;
6782+#endif
6783+
6784+ }
6785+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6786+ do_group_exit(SIGKILL);
6787+ }
6788+#endif
6789+
6790 /* Allow reads even for write-only mappings */
6791 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6792 goto bad_area;
6793diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6794index 504c062..6fcb9c6 100644
6795--- a/arch/sparc/mm/fault_64.c
6796+++ b/arch/sparc/mm/fault_64.c
6797@@ -21,6 +21,9 @@
6798 #include <linux/kprobes.h>
6799 #include <linux/kdebug.h>
6800 #include <linux/percpu.h>
6801+#include <linux/slab.h>
6802+#include <linux/pagemap.h>
6803+#include <linux/compiler.h>
6804
6805 #include <asm/page.h>
6806 #include <asm/pgtable.h>
6807@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6808 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6809 regs->tpc);
6810 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6811- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6812+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6813 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6814 dump_stack();
6815 unhandled_fault(regs->tpc, current, regs);
6816@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6817 show_regs(regs);
6818 }
6819
6820+#ifdef CONFIG_PAX_PAGEEXEC
6821+#ifdef CONFIG_PAX_DLRESOLVE
6822+static void pax_emuplt_close(struct vm_area_struct *vma)
6823+{
6824+ vma->vm_mm->call_dl_resolve = 0UL;
6825+}
6826+
6827+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6828+{
6829+ unsigned int *kaddr;
6830+
6831+ vmf->page = alloc_page(GFP_HIGHUSER);
6832+ if (!vmf->page)
6833+ return VM_FAULT_OOM;
6834+
6835+ kaddr = kmap(vmf->page);
6836+ memset(kaddr, 0, PAGE_SIZE);
6837+ kaddr[0] = 0x9DE3BFA8U; /* save */
6838+ flush_dcache_page(vmf->page);
6839+ kunmap(vmf->page);
6840+ return VM_FAULT_MAJOR;
6841+}
6842+
6843+static const struct vm_operations_struct pax_vm_ops = {
6844+ .close = pax_emuplt_close,
6845+ .fault = pax_emuplt_fault
6846+};
6847+
6848+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6849+{
6850+ int ret;
6851+
6852+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6853+ vma->vm_mm = current->mm;
6854+ vma->vm_start = addr;
6855+ vma->vm_end = addr + PAGE_SIZE;
6856+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6857+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6858+ vma->vm_ops = &pax_vm_ops;
6859+
6860+ ret = insert_vm_struct(current->mm, vma);
6861+ if (ret)
6862+ return ret;
6863+
6864+ ++current->mm->total_vm;
6865+ return 0;
6866+}
6867+#endif
6868+
6869+/*
6870+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6871+ *
6872+ * returns 1 when task should be killed
6873+ * 2 when patched PLT trampoline was detected
6874+ * 3 when unpatched PLT trampoline was detected
6875+ */
6876+static int pax_handle_fetch_fault(struct pt_regs *regs)
6877+{
6878+
6879+#ifdef CONFIG_PAX_EMUPLT
6880+ int err;
6881+
6882+ do { /* PaX: patched PLT emulation #1 */
6883+ unsigned int sethi1, sethi2, jmpl;
6884+
6885+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6886+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6887+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6888+
6889+ if (err)
6890+ break;
6891+
6892+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6893+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6894+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6895+ {
6896+ unsigned long addr;
6897+
6898+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6899+ addr = regs->u_regs[UREG_G1];
6900+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6901+
6902+ if (test_thread_flag(TIF_32BIT))
6903+ addr &= 0xFFFFFFFFUL;
6904+
6905+ regs->tpc = addr;
6906+ regs->tnpc = addr+4;
6907+ return 2;
6908+ }
6909+ } while (0);
6910+
6911+ { /* PaX: patched PLT emulation #2 */
6912+ unsigned int ba;
6913+
6914+ err = get_user(ba, (unsigned int *)regs->tpc);
6915+
6916+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6917+ unsigned long addr;
6918+
6919+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6920+
6921+ if (test_thread_flag(TIF_32BIT))
6922+ addr &= 0xFFFFFFFFUL;
6923+
6924+ regs->tpc = addr;
6925+ regs->tnpc = addr+4;
6926+ return 2;
6927+ }
6928+ }
6929+
6930+ do { /* PaX: patched PLT emulation #3 */
6931+ unsigned int sethi, jmpl, nop;
6932+
6933+ err = get_user(sethi, (unsigned int *)regs->tpc);
6934+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6935+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6936+
6937+ if (err)
6938+ break;
6939+
6940+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6941+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6942+ nop == 0x01000000U)
6943+ {
6944+ unsigned long addr;
6945+
6946+ addr = (sethi & 0x003FFFFFU) << 10;
6947+ regs->u_regs[UREG_G1] = addr;
6948+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6949+
6950+ if (test_thread_flag(TIF_32BIT))
6951+ addr &= 0xFFFFFFFFUL;
6952+
6953+ regs->tpc = addr;
6954+ regs->tnpc = addr+4;
6955+ return 2;
6956+ }
6957+ } while (0);
6958+
6959+ do { /* PaX: patched PLT emulation #4 */
6960+ unsigned int sethi, mov1, call, mov2;
6961+
6962+ err = get_user(sethi, (unsigned int *)regs->tpc);
6963+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6964+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6965+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6966+
6967+ if (err)
6968+ break;
6969+
6970+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6971+ mov1 == 0x8210000FU &&
6972+ (call & 0xC0000000U) == 0x40000000U &&
6973+ mov2 == 0x9E100001U)
6974+ {
6975+ unsigned long addr;
6976+
6977+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6978+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6979+
6980+ if (test_thread_flag(TIF_32BIT))
6981+ addr &= 0xFFFFFFFFUL;
6982+
6983+ regs->tpc = addr;
6984+ regs->tnpc = addr+4;
6985+ return 2;
6986+ }
6987+ } while (0);
6988+
6989+ do { /* PaX: patched PLT emulation #5 */
6990+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6991+
6992+ err = get_user(sethi, (unsigned int *)regs->tpc);
6993+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6994+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6995+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6996+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6997+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6998+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6999+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7000+
7001+ if (err)
7002+ break;
7003+
7004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7005+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7006+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7007+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7008+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7009+ sllx == 0x83287020U &&
7010+ jmpl == 0x81C04005U &&
7011+ nop == 0x01000000U)
7012+ {
7013+ unsigned long addr;
7014+
7015+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7016+ regs->u_regs[UREG_G1] <<= 32;
7017+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7018+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7019+ regs->tpc = addr;
7020+ regs->tnpc = addr+4;
7021+ return 2;
7022+ }
7023+ } while (0);
7024+
7025+ do { /* PaX: patched PLT emulation #6 */
7026+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7027+
7028+ err = get_user(sethi, (unsigned int *)regs->tpc);
7029+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7030+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7031+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7032+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7033+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7034+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7035+
7036+ if (err)
7037+ break;
7038+
7039+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7040+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7041+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7042+ sllx == 0x83287020U &&
7043+ (or & 0xFFFFE000U) == 0x8A116000U &&
7044+ jmpl == 0x81C04005U &&
7045+ nop == 0x01000000U)
7046+ {
7047+ unsigned long addr;
7048+
7049+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7050+ regs->u_regs[UREG_G1] <<= 32;
7051+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7052+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7053+ regs->tpc = addr;
7054+ regs->tnpc = addr+4;
7055+ return 2;
7056+ }
7057+ } while (0);
7058+
7059+ do { /* PaX: unpatched PLT emulation step 1 */
7060+ unsigned int sethi, ba, nop;
7061+
7062+ err = get_user(sethi, (unsigned int *)regs->tpc);
7063+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7064+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7065+
7066+ if (err)
7067+ break;
7068+
7069+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7070+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7071+ nop == 0x01000000U)
7072+ {
7073+ unsigned long addr;
7074+ unsigned int save, call;
7075+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7076+
7077+ if ((ba & 0xFFC00000U) == 0x30800000U)
7078+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7079+ else
7080+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7081+
7082+ if (test_thread_flag(TIF_32BIT))
7083+ addr &= 0xFFFFFFFFUL;
7084+
7085+ err = get_user(save, (unsigned int *)addr);
7086+ err |= get_user(call, (unsigned int *)(addr+4));
7087+ err |= get_user(nop, (unsigned int *)(addr+8));
7088+ if (err)
7089+ break;
7090+
7091+#ifdef CONFIG_PAX_DLRESOLVE
7092+ if (save == 0x9DE3BFA8U &&
7093+ (call & 0xC0000000U) == 0x40000000U &&
7094+ nop == 0x01000000U)
7095+ {
7096+ struct vm_area_struct *vma;
7097+ unsigned long call_dl_resolve;
7098+
7099+ down_read(&current->mm->mmap_sem);
7100+ call_dl_resolve = current->mm->call_dl_resolve;
7101+ up_read(&current->mm->mmap_sem);
7102+ if (likely(call_dl_resolve))
7103+ goto emulate;
7104+
7105+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7106+
7107+ down_write(&current->mm->mmap_sem);
7108+ if (current->mm->call_dl_resolve) {
7109+ call_dl_resolve = current->mm->call_dl_resolve;
7110+ up_write(&current->mm->mmap_sem);
7111+ if (vma)
7112+ kmem_cache_free(vm_area_cachep, vma);
7113+ goto emulate;
7114+ }
7115+
7116+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7117+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7118+ up_write(&current->mm->mmap_sem);
7119+ if (vma)
7120+ kmem_cache_free(vm_area_cachep, vma);
7121+ return 1;
7122+ }
7123+
7124+ if (pax_insert_vma(vma, call_dl_resolve)) {
7125+ up_write(&current->mm->mmap_sem);
7126+ kmem_cache_free(vm_area_cachep, vma);
7127+ return 1;
7128+ }
7129+
7130+ current->mm->call_dl_resolve = call_dl_resolve;
7131+ up_write(&current->mm->mmap_sem);
7132+
7133+emulate:
7134+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7135+ regs->tpc = call_dl_resolve;
7136+ regs->tnpc = addr+4;
7137+ return 3;
7138+ }
7139+#endif
7140+
7141+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7142+ if ((save & 0xFFC00000U) == 0x05000000U &&
7143+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7144+ nop == 0x01000000U)
7145+ {
7146+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7147+ regs->u_regs[UREG_G2] = addr + 4;
7148+ addr = (save & 0x003FFFFFU) << 10;
7149+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7150+
7151+ if (test_thread_flag(TIF_32BIT))
7152+ addr &= 0xFFFFFFFFUL;
7153+
7154+ regs->tpc = addr;
7155+ regs->tnpc = addr+4;
7156+ return 3;
7157+ }
7158+
7159+ /* PaX: 64-bit PLT stub */
7160+ err = get_user(sethi1, (unsigned int *)addr);
7161+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7162+ err |= get_user(or1, (unsigned int *)(addr+8));
7163+ err |= get_user(or2, (unsigned int *)(addr+12));
7164+ err |= get_user(sllx, (unsigned int *)(addr+16));
7165+ err |= get_user(add, (unsigned int *)(addr+20));
7166+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7167+ err |= get_user(nop, (unsigned int *)(addr+28));
7168+ if (err)
7169+ break;
7170+
7171+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7172+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7173+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7174+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7175+ sllx == 0x89293020U &&
7176+ add == 0x8A010005U &&
7177+ jmpl == 0x89C14000U &&
7178+ nop == 0x01000000U)
7179+ {
7180+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7181+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7182+ regs->u_regs[UREG_G4] <<= 32;
7183+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7184+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7185+ regs->u_regs[UREG_G4] = addr + 24;
7186+ addr = regs->u_regs[UREG_G5];
7187+ regs->tpc = addr;
7188+ regs->tnpc = addr+4;
7189+ return 3;
7190+ }
7191+ }
7192+ } while (0);
7193+
7194+#ifdef CONFIG_PAX_DLRESOLVE
7195+ do { /* PaX: unpatched PLT emulation step 2 */
7196+ unsigned int save, call, nop;
7197+
7198+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7199+ err |= get_user(call, (unsigned int *)regs->tpc);
7200+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7201+ if (err)
7202+ break;
7203+
7204+ if (save == 0x9DE3BFA8U &&
7205+ (call & 0xC0000000U) == 0x40000000U &&
7206+ nop == 0x01000000U)
7207+ {
7208+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7209+
7210+ if (test_thread_flag(TIF_32BIT))
7211+ dl_resolve &= 0xFFFFFFFFUL;
7212+
7213+ regs->u_regs[UREG_RETPC] = regs->tpc;
7214+ regs->tpc = dl_resolve;
7215+ regs->tnpc = dl_resolve+4;
7216+ return 3;
7217+ }
7218+ } while (0);
7219+#endif
7220+
7221+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7222+ unsigned int sethi, ba, nop;
7223+
7224+ err = get_user(sethi, (unsigned int *)regs->tpc);
7225+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7226+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7227+
7228+ if (err)
7229+ break;
7230+
7231+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7232+ (ba & 0xFFF00000U) == 0x30600000U &&
7233+ nop == 0x01000000U)
7234+ {
7235+ unsigned long addr;
7236+
7237+ addr = (sethi & 0x003FFFFFU) << 10;
7238+ regs->u_regs[UREG_G1] = addr;
7239+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7240+
7241+ if (test_thread_flag(TIF_32BIT))
7242+ addr &= 0xFFFFFFFFUL;
7243+
7244+ regs->tpc = addr;
7245+ regs->tnpc = addr+4;
7246+ return 2;
7247+ }
7248+ } while (0);
7249+
7250+#endif
7251+
7252+ return 1;
7253+}
7254+
7255+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7256+{
7257+ unsigned long i;
7258+
7259+ printk(KERN_ERR "PAX: bytes at PC: ");
7260+ for (i = 0; i < 8; i++) {
7261+ unsigned int c;
7262+ if (get_user(c, (unsigned int *)pc+i))
7263+ printk(KERN_CONT "???????? ");
7264+ else
7265+ printk(KERN_CONT "%08x ", c);
7266+ }
7267+ printk("\n");
7268+}
7269+#endif
7270+
7271 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7272 {
7273 struct mm_struct *mm = current->mm;
7274@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7275 if (!vma)
7276 goto bad_area;
7277
7278+#ifdef CONFIG_PAX_PAGEEXEC
7279+ /* PaX: detect ITLB misses on non-exec pages */
7280+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7281+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7282+ {
7283+ if (address != regs->tpc)
7284+ goto good_area;
7285+
7286+ up_read(&mm->mmap_sem);
7287+ switch (pax_handle_fetch_fault(regs)) {
7288+
7289+#ifdef CONFIG_PAX_EMUPLT
7290+ case 2:
7291+ case 3:
7292+ return;
7293+#endif
7294+
7295+ }
7296+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7297+ do_group_exit(SIGKILL);
7298+ }
7299+#endif
7300+
7301 /* Pure DTLB misses do not tell us whether the fault causing
7302 * load/store/atomic was a write or not, it only says that there
7303 * was no match. So in such a case we (carefully) read the
7304diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7305index 07e1453..0a7d9e9 100644
7306--- a/arch/sparc/mm/hugetlbpage.c
7307+++ b/arch/sparc/mm/hugetlbpage.c
7308@@ -67,7 +67,7 @@ full_search:
7309 }
7310 return -ENOMEM;
7311 }
7312- if (likely(!vma || addr + len <= vma->vm_start)) {
7313+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7314 /*
7315 * Remember the place where we stopped the search:
7316 */
7317@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7318 /* make sure it can fit in the remaining address space */
7319 if (likely(addr > len)) {
7320 vma = find_vma(mm, addr-len);
7321- if (!vma || addr <= vma->vm_start) {
7322+ if (check_heap_stack_gap(vma, addr - len, len)) {
7323 /* remember the address as a hint for next time */
7324 return (mm->free_area_cache = addr-len);
7325 }
7326@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7327 if (unlikely(mm->mmap_base < len))
7328 goto bottomup;
7329
7330- addr = (mm->mmap_base-len) & HPAGE_MASK;
7331+ addr = mm->mmap_base - len;
7332
7333 do {
7334+ addr &= HPAGE_MASK;
7335 /*
7336 * Lookup failure means no vma is above this address,
7337 * else if new region fits below vma->vm_start,
7338 * return with success:
7339 */
7340 vma = find_vma(mm, addr);
7341- if (likely(!vma || addr+len <= vma->vm_start)) {
7342+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7343 /* remember the address as a hint for next time */
7344 return (mm->free_area_cache = addr);
7345 }
7346@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7347 mm->cached_hole_size = vma->vm_start - addr;
7348
7349 /* try just below the current vma->vm_start */
7350- addr = (vma->vm_start-len) & HPAGE_MASK;
7351- } while (likely(len < vma->vm_start));
7352+ addr = skip_heap_stack_gap(vma, len);
7353+ } while (!IS_ERR_VALUE(addr));
7354
7355 bottomup:
7356 /*
7357@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7358 if (addr) {
7359 addr = ALIGN(addr, HPAGE_SIZE);
7360 vma = find_vma(mm, addr);
7361- if (task_size - len >= addr &&
7362- (!vma || addr + len <= vma->vm_start))
7363+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7364 return addr;
7365 }
7366 if (mm->get_unmapped_area == arch_get_unmapped_area)
7367diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7368index 7b00de6..78239f4 100644
7369--- a/arch/sparc/mm/init_32.c
7370+++ b/arch/sparc/mm/init_32.c
7371@@ -316,6 +316,9 @@ extern void device_scan(void);
7372 pgprot_t PAGE_SHARED __read_mostly;
7373 EXPORT_SYMBOL(PAGE_SHARED);
7374
7375+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7376+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7377+
7378 void __init paging_init(void)
7379 {
7380 switch(sparc_cpu_model) {
7381@@ -344,17 +347,17 @@ void __init paging_init(void)
7382
7383 /* Initialize the protection map with non-constant, MMU dependent values. */
7384 protection_map[0] = PAGE_NONE;
7385- protection_map[1] = PAGE_READONLY;
7386- protection_map[2] = PAGE_COPY;
7387- protection_map[3] = PAGE_COPY;
7388+ protection_map[1] = PAGE_READONLY_NOEXEC;
7389+ protection_map[2] = PAGE_COPY_NOEXEC;
7390+ protection_map[3] = PAGE_COPY_NOEXEC;
7391 protection_map[4] = PAGE_READONLY;
7392 protection_map[5] = PAGE_READONLY;
7393 protection_map[6] = PAGE_COPY;
7394 protection_map[7] = PAGE_COPY;
7395 protection_map[8] = PAGE_NONE;
7396- protection_map[9] = PAGE_READONLY;
7397- protection_map[10] = PAGE_SHARED;
7398- protection_map[11] = PAGE_SHARED;
7399+ protection_map[9] = PAGE_READONLY_NOEXEC;
7400+ protection_map[10] = PAGE_SHARED_NOEXEC;
7401+ protection_map[11] = PAGE_SHARED_NOEXEC;
7402 protection_map[12] = PAGE_READONLY;
7403 protection_map[13] = PAGE_READONLY;
7404 protection_map[14] = PAGE_SHARED;
7405diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7406index cbef74e..c38fead 100644
7407--- a/arch/sparc/mm/srmmu.c
7408+++ b/arch/sparc/mm/srmmu.c
7409@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7410 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7411 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7412 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7413+
7414+#ifdef CONFIG_PAX_PAGEEXEC
7415+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7416+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7417+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7418+#endif
7419+
7420 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7421 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7422
7423diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7424index 27fe667..36d474c 100644
7425--- a/arch/tile/include/asm/atomic_64.h
7426+++ b/arch/tile/include/asm/atomic_64.h
7427@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7428
7429 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7430
7431+#define atomic64_read_unchecked(v) atomic64_read(v)
7432+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7433+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7434+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7435+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7436+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7437+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7438+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7439+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7440+
7441 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7442 #define smp_mb__before_atomic_dec() smp_mb()
7443 #define smp_mb__after_atomic_dec() smp_mb()
7444diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7445index 392e533..536b092 100644
7446--- a/arch/tile/include/asm/cache.h
7447+++ b/arch/tile/include/asm/cache.h
7448@@ -15,11 +15,12 @@
7449 #ifndef _ASM_TILE_CACHE_H
7450 #define _ASM_TILE_CACHE_H
7451
7452+#include <linux/const.h>
7453 #include <arch/chip.h>
7454
7455 /* bytes per L1 data cache line */
7456 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7457-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7458+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7459
7460 /* bytes per L2 cache line */
7461 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7462diff --git a/arch/um/Makefile b/arch/um/Makefile
7463index 28688e6..4c0aa1c 100644
7464--- a/arch/um/Makefile
7465+++ b/arch/um/Makefile
7466@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7467 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7468 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7469
7470+ifdef CONSTIFY_PLUGIN
7471+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7472+endif
7473+
7474 #This will adjust *FLAGS accordingly to the platform.
7475 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7476
7477diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7478index 19e1bdd..3665b77 100644
7479--- a/arch/um/include/asm/cache.h
7480+++ b/arch/um/include/asm/cache.h
7481@@ -1,6 +1,7 @@
7482 #ifndef __UM_CACHE_H
7483 #define __UM_CACHE_H
7484
7485+#include <linux/const.h>
7486
7487 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7488 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7489@@ -12,6 +13,6 @@
7490 # define L1_CACHE_SHIFT 5
7491 #endif
7492
7493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7494+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7495
7496 #endif
7497diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7498index 6c03acd..a5e0215 100644
7499--- a/arch/um/include/asm/kmap_types.h
7500+++ b/arch/um/include/asm/kmap_types.h
7501@@ -23,6 +23,7 @@ enum km_type {
7502 KM_IRQ1,
7503 KM_SOFTIRQ0,
7504 KM_SOFTIRQ1,
7505+ KM_CLEARPAGE,
7506 KM_TYPE_NR
7507 };
7508
7509diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7510index 7cfc3ce..cbd1a58 100644
7511--- a/arch/um/include/asm/page.h
7512+++ b/arch/um/include/asm/page.h
7513@@ -14,6 +14,9 @@
7514 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7515 #define PAGE_MASK (~(PAGE_SIZE-1))
7516
7517+#define ktla_ktva(addr) (addr)
7518+#define ktva_ktla(addr) (addr)
7519+
7520 #ifndef __ASSEMBLY__
7521
7522 struct page;
7523diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7524index 0032f92..cd151e0 100644
7525--- a/arch/um/include/asm/pgtable-3level.h
7526+++ b/arch/um/include/asm/pgtable-3level.h
7527@@ -58,6 +58,7 @@
7528 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7529 #define pud_populate(mm, pud, pmd) \
7530 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7531+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7532
7533 #ifdef CONFIG_64BIT
7534 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7535diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7536index 69f2490..2634831 100644
7537--- a/arch/um/kernel/process.c
7538+++ b/arch/um/kernel/process.c
7539@@ -408,22 +408,6 @@ int singlestepping(void * t)
7540 return 2;
7541 }
7542
7543-/*
7544- * Only x86 and x86_64 have an arch_align_stack().
7545- * All other arches have "#define arch_align_stack(x) (x)"
7546- * in their asm/system.h
7547- * As this is included in UML from asm-um/system-generic.h,
7548- * we can use it to behave as the subarch does.
7549- */
7550-#ifndef arch_align_stack
7551-unsigned long arch_align_stack(unsigned long sp)
7552-{
7553- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7554- sp -= get_random_int() % 8192;
7555- return sp & ~0xf;
7556-}
7557-#endif
7558-
7559 unsigned long get_wchan(struct task_struct *p)
7560 {
7561 unsigned long stack_page, sp, ip;
7562diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7563index ad8f795..2c7eec6 100644
7564--- a/arch/unicore32/include/asm/cache.h
7565+++ b/arch/unicore32/include/asm/cache.h
7566@@ -12,8 +12,10 @@
7567 #ifndef __UNICORE_CACHE_H__
7568 #define __UNICORE_CACHE_H__
7569
7570-#define L1_CACHE_SHIFT (5)
7571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7572+#include <linux/const.h>
7573+
7574+#define L1_CACHE_SHIFT 5
7575+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7576
7577 /*
7578 * Memory returned by kmalloc() may be used for DMA, so we must make
7579diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7580index 5bed94e..fbcf200 100644
7581--- a/arch/x86/Kconfig
7582+++ b/arch/x86/Kconfig
7583@@ -226,7 +226,7 @@ config X86_HT
7584
7585 config X86_32_LAZY_GS
7586 def_bool y
7587- depends on X86_32 && !CC_STACKPROTECTOR
7588+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7589
7590 config ARCH_HWEIGHT_CFLAGS
7591 string
7592@@ -1058,7 +1058,7 @@ choice
7593
7594 config NOHIGHMEM
7595 bool "off"
7596- depends on !X86_NUMAQ
7597+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7598 ---help---
7599 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7600 However, the address space of 32-bit x86 processors is only 4
7601@@ -1095,7 +1095,7 @@ config NOHIGHMEM
7602
7603 config HIGHMEM4G
7604 bool "4GB"
7605- depends on !X86_NUMAQ
7606+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7607 ---help---
7608 Select this if you have a 32-bit processor and between 1 and 4
7609 gigabytes of physical RAM.
7610@@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7611 hex
7612 default 0xB0000000 if VMSPLIT_3G_OPT
7613 default 0x80000000 if VMSPLIT_2G
7614- default 0x78000000 if VMSPLIT_2G_OPT
7615+ default 0x70000000 if VMSPLIT_2G_OPT
7616 default 0x40000000 if VMSPLIT_1G
7617 default 0xC0000000
7618 depends on X86_32
7619@@ -1539,6 +1539,7 @@ config SECCOMP
7620
7621 config CC_STACKPROTECTOR
7622 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7623+ depends on X86_64 || !PAX_MEMORY_UDEREF
7624 ---help---
7625 This option turns on the -fstack-protector GCC feature. This
7626 feature puts, at the beginning of functions, a canary value on
7627@@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7628 config PHYSICAL_START
7629 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7630 default "0x1000000"
7631+ range 0x400000 0x40000000
7632 ---help---
7633 This gives the physical address where the kernel is loaded.
7634
7635@@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7636 config PHYSICAL_ALIGN
7637 hex "Alignment value to which kernel should be aligned" if X86_32
7638 default "0x1000000"
7639+ range 0x400000 0x1000000 if PAX_KERNEXEC
7640 range 0x2000 0x1000000
7641 ---help---
7642 This value puts the alignment restrictions on physical address
7643@@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7644 Say N if you want to disable CPU hotplug.
7645
7646 config COMPAT_VDSO
7647- def_bool y
7648+ def_bool n
7649 prompt "Compat VDSO support"
7650 depends on X86_32 || IA32_EMULATION
7651+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7652 ---help---
7653 Map the 32-bit VDSO to the predictable old-style address too.
7654
7655diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7656index 3c57033..22d44aa 100644
7657--- a/arch/x86/Kconfig.cpu
7658+++ b/arch/x86/Kconfig.cpu
7659@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7660
7661 config X86_F00F_BUG
7662 def_bool y
7663- depends on M586MMX || M586TSC || M586 || M486 || M386
7664+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7665
7666 config X86_INVD_BUG
7667 def_bool y
7668@@ -359,7 +359,7 @@ config X86_POPAD_OK
7669
7670 config X86_ALIGNMENT_16
7671 def_bool y
7672- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7673+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7674
7675 config X86_INTEL_USERCOPY
7676 def_bool y
7677@@ -405,7 +405,7 @@ config X86_CMPXCHG64
7678 # generates cmov.
7679 config X86_CMOV
7680 def_bool y
7681- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7682+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7683
7684 config X86_MINIMUM_CPU_FAMILY
7685 int
7686diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7687index e46c214..7c72b55 100644
7688--- a/arch/x86/Kconfig.debug
7689+++ b/arch/x86/Kconfig.debug
7690@@ -84,7 +84,7 @@ config X86_PTDUMP
7691 config DEBUG_RODATA
7692 bool "Write protect kernel read-only data structures"
7693 default y
7694- depends on DEBUG_KERNEL
7695+ depends on DEBUG_KERNEL && BROKEN
7696 ---help---
7697 Mark the kernel read-only data as write-protected in the pagetables,
7698 in order to catch accidental (and incorrect) writes to such const
7699@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7700
7701 config DEBUG_SET_MODULE_RONX
7702 bool "Set loadable kernel module data as NX and text as RO"
7703- depends on MODULES
7704+ depends on MODULES && BROKEN
7705 ---help---
7706 This option helps catch unintended modifications to loadable
7707 kernel module's text and read-only data. It also prevents execution
7708diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7709index 209ba12..15140db 100644
7710--- a/arch/x86/Makefile
7711+++ b/arch/x86/Makefile
7712@@ -46,6 +46,7 @@ else
7713 UTS_MACHINE := x86_64
7714 CHECKFLAGS += -D__x86_64__ -m64
7715
7716+ biarch := $(call cc-option,-m64)
7717 KBUILD_AFLAGS += -m64
7718 KBUILD_CFLAGS += -m64
7719
7720@@ -201,3 +202,12 @@ define archhelp
7721 echo ' FDARGS="..." arguments for the booted kernel'
7722 echo ' FDINITRD=file initrd for the booted kernel'
7723 endef
7724+
7725+define OLD_LD
7726+
7727+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7728+*** Please upgrade your binutils to 2.18 or newer
7729+endef
7730+
7731+archprepare:
7732+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7733diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7734index 5a747dd..ff7b12c 100644
7735--- a/arch/x86/boot/Makefile
7736+++ b/arch/x86/boot/Makefile
7737@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7738 $(call cc-option, -fno-stack-protector) \
7739 $(call cc-option, -mpreferred-stack-boundary=2)
7740 KBUILD_CFLAGS += $(call cc-option, -m32)
7741+ifdef CONSTIFY_PLUGIN
7742+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7743+endif
7744 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7745 GCOV_PROFILE := n
7746
7747diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7748index 878e4b9..20537ab 100644
7749--- a/arch/x86/boot/bitops.h
7750+++ b/arch/x86/boot/bitops.h
7751@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7752 u8 v;
7753 const u32 *p = (const u32 *)addr;
7754
7755- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7756+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7757 return v;
7758 }
7759
7760@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7761
7762 static inline void set_bit(int nr, void *addr)
7763 {
7764- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7765+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7766 }
7767
7768 #endif /* BOOT_BITOPS_H */
7769diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7770index c7093bd..d4247ffe0 100644
7771--- a/arch/x86/boot/boot.h
7772+++ b/arch/x86/boot/boot.h
7773@@ -85,7 +85,7 @@ static inline void io_delay(void)
7774 static inline u16 ds(void)
7775 {
7776 u16 seg;
7777- asm("movw %%ds,%0" : "=rm" (seg));
7778+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7779 return seg;
7780 }
7781
7782@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7783 static inline int memcmp(const void *s1, const void *s2, size_t len)
7784 {
7785 u8 diff;
7786- asm("repe; cmpsb; setnz %0"
7787+ asm volatile("repe; cmpsb; setnz %0"
7788 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7789 return diff;
7790 }
7791diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7792index fd55a2f..217b501 100644
7793--- a/arch/x86/boot/compressed/Makefile
7794+++ b/arch/x86/boot/compressed/Makefile
7795@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7796 KBUILD_CFLAGS += $(cflags-y)
7797 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7798 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7799+ifdef CONSTIFY_PLUGIN
7800+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7801+endif
7802
7803 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7804 GCOV_PROFILE := n
7805diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7806index c85e3ac..6f5aa80 100644
7807--- a/arch/x86/boot/compressed/head_32.S
7808+++ b/arch/x86/boot/compressed/head_32.S
7809@@ -106,7 +106,7 @@ preferred_addr:
7810 notl %eax
7811 andl %eax, %ebx
7812 #else
7813- movl $LOAD_PHYSICAL_ADDR, %ebx
7814+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7815 #endif
7816
7817 /* Target address to relocate to for decompression */
7818@@ -192,7 +192,7 @@ relocated:
7819 * and where it was actually loaded.
7820 */
7821 movl %ebp, %ebx
7822- subl $LOAD_PHYSICAL_ADDR, %ebx
7823+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7824 jz 2f /* Nothing to be done if loaded at compiled addr. */
7825 /*
7826 * Process relocations.
7827@@ -200,8 +200,7 @@ relocated:
7828
7829 1: subl $4, %edi
7830 movl (%edi), %ecx
7831- testl %ecx, %ecx
7832- jz 2f
7833+ jecxz 2f
7834 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7835 jmp 1b
7836 2:
7837diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7838index 87e03a1..0d94c76 100644
7839--- a/arch/x86/boot/compressed/head_64.S
7840+++ b/arch/x86/boot/compressed/head_64.S
7841@@ -91,7 +91,7 @@ ENTRY(startup_32)
7842 notl %eax
7843 andl %eax, %ebx
7844 #else
7845- movl $LOAD_PHYSICAL_ADDR, %ebx
7846+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7847 #endif
7848
7849 /* Target address to relocate to for decompression */
7850@@ -263,7 +263,7 @@ preferred_addr:
7851 notq %rax
7852 andq %rax, %rbp
7853 #else
7854- movq $LOAD_PHYSICAL_ADDR, %rbp
7855+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7856 #endif
7857
7858 /* Target address to relocate to for decompression */
7859diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7860index 7116dcb..d9ae1d7 100644
7861--- a/arch/x86/boot/compressed/misc.c
7862+++ b/arch/x86/boot/compressed/misc.c
7863@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7864 case PT_LOAD:
7865 #ifdef CONFIG_RELOCATABLE
7866 dest = output;
7867- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7868+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7869 #else
7870 dest = (void *)(phdr->p_paddr);
7871 #endif
7872@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7873 error("Destination address too large");
7874 #endif
7875 #ifndef CONFIG_RELOCATABLE
7876- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7877+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7878 error("Wrong destination address");
7879 #endif
7880
7881diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7882index e77f4e4..17e511f 100644
7883--- a/arch/x86/boot/compressed/relocs.c
7884+++ b/arch/x86/boot/compressed/relocs.c
7885@@ -13,8 +13,11 @@
7886
7887 static void die(char *fmt, ...);
7888
7889+#include "../../../../include/generated/autoconf.h"
7890+
7891 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7892 static Elf32_Ehdr ehdr;
7893+static Elf32_Phdr *phdr;
7894 static unsigned long reloc_count, reloc_idx;
7895 static unsigned long *relocs;
7896
7897@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7898 }
7899 }
7900
7901+static void read_phdrs(FILE *fp)
7902+{
7903+ unsigned int i;
7904+
7905+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7906+ if (!phdr) {
7907+ die("Unable to allocate %d program headers\n",
7908+ ehdr.e_phnum);
7909+ }
7910+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7911+ die("Seek to %d failed: %s\n",
7912+ ehdr.e_phoff, strerror(errno));
7913+ }
7914+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7915+ die("Cannot read ELF program headers: %s\n",
7916+ strerror(errno));
7917+ }
7918+ for(i = 0; i < ehdr.e_phnum; i++) {
7919+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7920+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7921+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7922+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7923+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7924+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7925+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7926+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7927+ }
7928+
7929+}
7930+
7931 static void read_shdrs(FILE *fp)
7932 {
7933- int i;
7934+ unsigned int i;
7935 Elf32_Shdr shdr;
7936
7937 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7938@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7939
7940 static void read_strtabs(FILE *fp)
7941 {
7942- int i;
7943+ unsigned int i;
7944 for (i = 0; i < ehdr.e_shnum; i++) {
7945 struct section *sec = &secs[i];
7946 if (sec->shdr.sh_type != SHT_STRTAB) {
7947@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7948
7949 static void read_symtabs(FILE *fp)
7950 {
7951- int i,j;
7952+ unsigned int i,j;
7953 for (i = 0; i < ehdr.e_shnum; i++) {
7954 struct section *sec = &secs[i];
7955 if (sec->shdr.sh_type != SHT_SYMTAB) {
7956@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7957
7958 static void read_relocs(FILE *fp)
7959 {
7960- int i,j;
7961+ unsigned int i,j;
7962+ uint32_t base;
7963+
7964 for (i = 0; i < ehdr.e_shnum; i++) {
7965 struct section *sec = &secs[i];
7966 if (sec->shdr.sh_type != SHT_REL) {
7967@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7968 die("Cannot read symbol table: %s\n",
7969 strerror(errno));
7970 }
7971+ base = 0;
7972+ for (j = 0; j < ehdr.e_phnum; j++) {
7973+ if (phdr[j].p_type != PT_LOAD )
7974+ continue;
7975+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7976+ continue;
7977+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7978+ break;
7979+ }
7980 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7981 Elf32_Rel *rel = &sec->reltab[j];
7982- rel->r_offset = elf32_to_cpu(rel->r_offset);
7983+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7984 rel->r_info = elf32_to_cpu(rel->r_info);
7985 }
7986 }
7987@@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
7988
7989 static void print_absolute_symbols(void)
7990 {
7991- int i;
7992+ unsigned int i;
7993 printf("Absolute symbols\n");
7994 printf(" Num: Value Size Type Bind Visibility Name\n");
7995 for (i = 0; i < ehdr.e_shnum; i++) {
7996 struct section *sec = &secs[i];
7997 char *sym_strtab;
7998- int j;
7999+ unsigned int j;
8000
8001 if (sec->shdr.sh_type != SHT_SYMTAB) {
8002 continue;
8003@@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
8004
8005 static void print_absolute_relocs(void)
8006 {
8007- int i, printed = 0;
8008+ unsigned int i, printed = 0;
8009
8010 for (i = 0; i < ehdr.e_shnum; i++) {
8011 struct section *sec = &secs[i];
8012 struct section *sec_applies, *sec_symtab;
8013 char *sym_strtab;
8014 Elf32_Sym *sh_symtab;
8015- int j;
8016+ unsigned int j;
8017 if (sec->shdr.sh_type != SHT_REL) {
8018 continue;
8019 }
8020@@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
8021
8022 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8023 {
8024- int i;
8025+ unsigned int i;
8026 /* Walk through the relocations */
8027 for (i = 0; i < ehdr.e_shnum; i++) {
8028 char *sym_strtab;
8029 Elf32_Sym *sh_symtab;
8030 struct section *sec_applies, *sec_symtab;
8031- int j;
8032+ unsigned int j;
8033 struct section *sec = &secs[i];
8034
8035 if (sec->shdr.sh_type != SHT_REL) {
8036@@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8037 !is_rel_reloc(sym_name(sym_strtab, sym))) {
8038 continue;
8039 }
8040+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8041+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8042+ continue;
8043+
8044+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8045+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8046+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8047+ continue;
8048+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8049+ continue;
8050+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8051+ continue;
8052+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8053+ continue;
8054+#endif
8055+
8056 switch (r_type) {
8057 case R_386_NONE:
8058 case R_386_PC32:
8059@@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
8060
8061 static void emit_relocs(int as_text)
8062 {
8063- int i;
8064+ unsigned int i;
8065 /* Count how many relocations I have and allocate space for them. */
8066 reloc_count = 0;
8067 walk_relocs(count_reloc);
8068@@ -663,6 +723,7 @@ int main(int argc, char **argv)
8069 fname, strerror(errno));
8070 }
8071 read_ehdr(fp);
8072+ read_phdrs(fp);
8073 read_shdrs(fp);
8074 read_strtabs(fp);
8075 read_symtabs(fp);
8076diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8077index 4d3ff03..e4972ff 100644
8078--- a/arch/x86/boot/cpucheck.c
8079+++ b/arch/x86/boot/cpucheck.c
8080@@ -74,7 +74,7 @@ static int has_fpu(void)
8081 u16 fcw = -1, fsw = -1;
8082 u32 cr0;
8083
8084- asm("movl %%cr0,%0" : "=r" (cr0));
8085+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8086 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8087 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8088 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8089@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8090 {
8091 u32 f0, f1;
8092
8093- asm("pushfl ; "
8094+ asm volatile("pushfl ; "
8095 "pushfl ; "
8096 "popl %0 ; "
8097 "movl %0,%1 ; "
8098@@ -115,7 +115,7 @@ static void get_flags(void)
8099 set_bit(X86_FEATURE_FPU, cpu.flags);
8100
8101 if (has_eflag(X86_EFLAGS_ID)) {
8102- asm("cpuid"
8103+ asm volatile("cpuid"
8104 : "=a" (max_intel_level),
8105 "=b" (cpu_vendor[0]),
8106 "=d" (cpu_vendor[1]),
8107@@ -124,7 +124,7 @@ static void get_flags(void)
8108
8109 if (max_intel_level >= 0x00000001 &&
8110 max_intel_level <= 0x0000ffff) {
8111- asm("cpuid"
8112+ asm volatile("cpuid"
8113 : "=a" (tfms),
8114 "=c" (cpu.flags[4]),
8115 "=d" (cpu.flags[0])
8116@@ -136,7 +136,7 @@ static void get_flags(void)
8117 cpu.model += ((tfms >> 16) & 0xf) << 4;
8118 }
8119
8120- asm("cpuid"
8121+ asm volatile("cpuid"
8122 : "=a" (max_amd_level)
8123 : "a" (0x80000000)
8124 : "ebx", "ecx", "edx");
8125@@ -144,7 +144,7 @@ static void get_flags(void)
8126 if (max_amd_level >= 0x80000001 &&
8127 max_amd_level <= 0x8000ffff) {
8128 u32 eax = 0x80000001;
8129- asm("cpuid"
8130+ asm volatile("cpuid"
8131 : "+a" (eax),
8132 "=c" (cpu.flags[6]),
8133 "=d" (cpu.flags[1])
8134@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8135 u32 ecx = MSR_K7_HWCR;
8136 u32 eax, edx;
8137
8138- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8139+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8140 eax &= ~(1 << 15);
8141- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8142+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8143
8144 get_flags(); /* Make sure it really did something */
8145 err = check_flags();
8146@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8147 u32 ecx = MSR_VIA_FCR;
8148 u32 eax, edx;
8149
8150- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8151+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8152 eax |= (1<<1)|(1<<7);
8153- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8154+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8155
8156 set_bit(X86_FEATURE_CX8, cpu.flags);
8157 err = check_flags();
8158@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8159 u32 eax, edx;
8160 u32 level = 1;
8161
8162- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8163- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8164- asm("cpuid"
8165+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8166+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8167+ asm volatile("cpuid"
8168 : "+a" (level), "=d" (cpu.flags[0])
8169 : : "ecx", "ebx");
8170- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8171+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8172
8173 err = check_flags();
8174 }
8175diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8176index f1bbeeb..aff09cb 100644
8177--- a/arch/x86/boot/header.S
8178+++ b/arch/x86/boot/header.S
8179@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8180 # single linked list of
8181 # struct setup_data
8182
8183-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8184+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8185
8186 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8187 #define VO_INIT_SIZE (VO__end - VO__text)
8188diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8189index db75d07..8e6d0af 100644
8190--- a/arch/x86/boot/memory.c
8191+++ b/arch/x86/boot/memory.c
8192@@ -19,7 +19,7 @@
8193
8194 static int detect_memory_e820(void)
8195 {
8196- int count = 0;
8197+ unsigned int count = 0;
8198 struct biosregs ireg, oreg;
8199 struct e820entry *desc = boot_params.e820_map;
8200 static struct e820entry buf; /* static so it is zeroed */
8201diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8202index 11e8c6e..fdbb1ed 100644
8203--- a/arch/x86/boot/video-vesa.c
8204+++ b/arch/x86/boot/video-vesa.c
8205@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8206
8207 boot_params.screen_info.vesapm_seg = oreg.es;
8208 boot_params.screen_info.vesapm_off = oreg.di;
8209+ boot_params.screen_info.vesapm_size = oreg.cx;
8210 }
8211
8212 /*
8213diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8214index 43eda28..5ab5fdb 100644
8215--- a/arch/x86/boot/video.c
8216+++ b/arch/x86/boot/video.c
8217@@ -96,7 +96,7 @@ static void store_mode_params(void)
8218 static unsigned int get_entry(void)
8219 {
8220 char entry_buf[4];
8221- int i, len = 0;
8222+ unsigned int i, len = 0;
8223 int key;
8224 unsigned int v;
8225
8226diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8227index 5b577d5..3c1fed4 100644
8228--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8229+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8230@@ -8,6 +8,8 @@
8231 * including this sentence is retained in full.
8232 */
8233
8234+#include <asm/alternative-asm.h>
8235+
8236 .extern crypto_ft_tab
8237 .extern crypto_it_tab
8238 .extern crypto_fl_tab
8239@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8240 je B192; \
8241 leaq 32(r9),r9;
8242
8243+#define ret pax_force_retaddr 0, 1; ret
8244+
8245 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8246 movq r1,r2; \
8247 movq r3,r4; \
8248diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8249index be6d9e3..21fbbca 100644
8250--- a/arch/x86/crypto/aesni-intel_asm.S
8251+++ b/arch/x86/crypto/aesni-intel_asm.S
8252@@ -31,6 +31,7 @@
8253
8254 #include <linux/linkage.h>
8255 #include <asm/inst.h>
8256+#include <asm/alternative-asm.h>
8257
8258 #ifdef __x86_64__
8259 .data
8260@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8261 pop %r14
8262 pop %r13
8263 pop %r12
8264+ pax_force_retaddr 0, 1
8265 ret
8266+ENDPROC(aesni_gcm_dec)
8267
8268
8269 /*****************************************************************************
8270@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8271 pop %r14
8272 pop %r13
8273 pop %r12
8274+ pax_force_retaddr 0, 1
8275 ret
8276+ENDPROC(aesni_gcm_enc)
8277
8278 #endif
8279
8280@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8281 pxor %xmm1, %xmm0
8282 movaps %xmm0, (TKEYP)
8283 add $0x10, TKEYP
8284+ pax_force_retaddr_bts
8285 ret
8286
8287 .align 4
8288@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8289 shufps $0b01001110, %xmm2, %xmm1
8290 movaps %xmm1, 0x10(TKEYP)
8291 add $0x20, TKEYP
8292+ pax_force_retaddr_bts
8293 ret
8294
8295 .align 4
8296@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8297
8298 movaps %xmm0, (TKEYP)
8299 add $0x10, TKEYP
8300+ pax_force_retaddr_bts
8301 ret
8302
8303 .align 4
8304@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8305 pxor %xmm1, %xmm2
8306 movaps %xmm2, (TKEYP)
8307 add $0x10, TKEYP
8308+ pax_force_retaddr_bts
8309 ret
8310
8311 /*
8312@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8313 #ifndef __x86_64__
8314 popl KEYP
8315 #endif
8316+ pax_force_retaddr 0, 1
8317 ret
8318+ENDPROC(aesni_set_key)
8319
8320 /*
8321 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8322@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8323 popl KLEN
8324 popl KEYP
8325 #endif
8326+ pax_force_retaddr 0, 1
8327 ret
8328+ENDPROC(aesni_enc)
8329
8330 /*
8331 * _aesni_enc1: internal ABI
8332@@ -1959,6 +1972,7 @@ _aesni_enc1:
8333 AESENC KEY STATE
8334 movaps 0x70(TKEYP), KEY
8335 AESENCLAST KEY STATE
8336+ pax_force_retaddr_bts
8337 ret
8338
8339 /*
8340@@ -2067,6 +2081,7 @@ _aesni_enc4:
8341 AESENCLAST KEY STATE2
8342 AESENCLAST KEY STATE3
8343 AESENCLAST KEY STATE4
8344+ pax_force_retaddr_bts
8345 ret
8346
8347 /*
8348@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8349 popl KLEN
8350 popl KEYP
8351 #endif
8352+ pax_force_retaddr 0, 1
8353 ret
8354+ENDPROC(aesni_dec)
8355
8356 /*
8357 * _aesni_dec1: internal ABI
8358@@ -2146,6 +2163,7 @@ _aesni_dec1:
8359 AESDEC KEY STATE
8360 movaps 0x70(TKEYP), KEY
8361 AESDECLAST KEY STATE
8362+ pax_force_retaddr_bts
8363 ret
8364
8365 /*
8366@@ -2254,6 +2272,7 @@ _aesni_dec4:
8367 AESDECLAST KEY STATE2
8368 AESDECLAST KEY STATE3
8369 AESDECLAST KEY STATE4
8370+ pax_force_retaddr_bts
8371 ret
8372
8373 /*
8374@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8375 popl KEYP
8376 popl LEN
8377 #endif
8378+ pax_force_retaddr 0, 1
8379 ret
8380+ENDPROC(aesni_ecb_enc)
8381
8382 /*
8383 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8384@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8385 popl KEYP
8386 popl LEN
8387 #endif
8388+ pax_force_retaddr 0, 1
8389 ret
8390+ENDPROC(aesni_ecb_dec)
8391
8392 /*
8393 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8394@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8395 popl LEN
8396 popl IVP
8397 #endif
8398+ pax_force_retaddr 0, 1
8399 ret
8400+ENDPROC(aesni_cbc_enc)
8401
8402 /*
8403 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8404@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8405 popl LEN
8406 popl IVP
8407 #endif
8408+ pax_force_retaddr 0, 1
8409 ret
8410+ENDPROC(aesni_cbc_dec)
8411
8412 #ifdef __x86_64__
8413 .align 16
8414@@ -2524,6 +2551,7 @@ _aesni_inc_init:
8415 mov $1, TCTR_LOW
8416 MOVQ_R64_XMM TCTR_LOW INC
8417 MOVQ_R64_XMM CTR TCTR_LOW
8418+ pax_force_retaddr_bts
8419 ret
8420
8421 /*
8422@@ -2552,6 +2580,7 @@ _aesni_inc:
8423 .Linc_low:
8424 movaps CTR, IV
8425 PSHUFB_XMM BSWAP_MASK IV
8426+ pax_force_retaddr_bts
8427 ret
8428
8429 /*
8430@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8431 .Lctr_enc_ret:
8432 movups IV, (IVP)
8433 .Lctr_enc_just_ret:
8434+ pax_force_retaddr 0, 1
8435 ret
8436+ENDPROC(aesni_ctr_enc)
8437 #endif
8438diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
8439index 545d0ce..14841a6 100644
8440--- a/arch/x86/crypto/aesni-intel_glue.c
8441+++ b/arch/x86/crypto/aesni-intel_glue.c
8442@@ -929,6 +929,8 @@ out_free_ablkcipher:
8443 }
8444
8445 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8446+ unsigned int key_len) __size_overflow(3);
8447+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8448 unsigned int key_len)
8449 {
8450 int ret = 0;
8451diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8452index 391d245..67f35c2 100644
8453--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8454+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8455@@ -20,6 +20,8 @@
8456 *
8457 */
8458
8459+#include <asm/alternative-asm.h>
8460+
8461 .file "blowfish-x86_64-asm.S"
8462 .text
8463
8464@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8465 jnz __enc_xor;
8466
8467 write_block();
8468+ pax_force_retaddr 0, 1
8469 ret;
8470 __enc_xor:
8471 xor_block();
8472+ pax_force_retaddr 0, 1
8473 ret;
8474
8475 .align 8
8476@@ -188,6 +192,7 @@ blowfish_dec_blk:
8477
8478 movq %r11, %rbp;
8479
8480+ pax_force_retaddr 0, 1
8481 ret;
8482
8483 /**********************************************************************
8484@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8485
8486 popq %rbx;
8487 popq %rbp;
8488+ pax_force_retaddr 0, 1
8489 ret;
8490
8491 __enc_xor4:
8492@@ -349,6 +355,7 @@ __enc_xor4:
8493
8494 popq %rbx;
8495 popq %rbp;
8496+ pax_force_retaddr 0, 1
8497 ret;
8498
8499 .align 8
8500@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8501 popq %rbx;
8502 popq %rbp;
8503
8504+ pax_force_retaddr 0, 1
8505 ret;
8506
8507diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8508index 6214a9b..1f4fc9a 100644
8509--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8510+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8511@@ -1,3 +1,5 @@
8512+#include <asm/alternative-asm.h>
8513+
8514 # enter ECRYPT_encrypt_bytes
8515 .text
8516 .p2align 5
8517@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8518 add %r11,%rsp
8519 mov %rdi,%rax
8520 mov %rsi,%rdx
8521+ pax_force_retaddr 0, 1
8522 ret
8523 # bytesatleast65:
8524 ._bytesatleast65:
8525@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8526 add %r11,%rsp
8527 mov %rdi,%rax
8528 mov %rsi,%rdx
8529+ pax_force_retaddr
8530 ret
8531 # enter ECRYPT_ivsetup
8532 .text
8533@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8534 add %r11,%rsp
8535 mov %rdi,%rax
8536 mov %rsi,%rdx
8537+ pax_force_retaddr
8538 ret
8539diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8540index 7f24a15..9cd3ffe 100644
8541--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8542+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8543@@ -24,6 +24,8 @@
8544 *
8545 */
8546
8547+#include <asm/alternative-asm.h>
8548+
8549 .file "serpent-sse2-x86_64-asm_64.S"
8550 .text
8551
8552@@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8553 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8554 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8555
8556+ pax_force_retaddr
8557 ret;
8558
8559 __enc_xor8:
8560 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8561 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8562
8563+ pax_force_retaddr
8564 ret;
8565
8566 .align 8
8567@@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8568 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8569 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8570
8571+ pax_force_retaddr
8572 ret;
8573diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8574index b2c2f57..8470cab 100644
8575--- a/arch/x86/crypto/sha1_ssse3_asm.S
8576+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8577@@ -28,6 +28,8 @@
8578 * (at your option) any later version.
8579 */
8580
8581+#include <asm/alternative-asm.h>
8582+
8583 #define CTX %rdi // arg1
8584 #define BUF %rsi // arg2
8585 #define CNT %rdx // arg3
8586@@ -104,6 +106,7 @@
8587 pop %r12
8588 pop %rbp
8589 pop %rbx
8590+ pax_force_retaddr 0, 1
8591 ret
8592
8593 .size \name, .-\name
8594diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8595index 5b012a2..36d5364 100644
8596--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8597+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8598@@ -20,6 +20,8 @@
8599 *
8600 */
8601
8602+#include <asm/alternative-asm.h>
8603+
8604 .file "twofish-x86_64-asm-3way.S"
8605 .text
8606
8607@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8608 popq %r13;
8609 popq %r14;
8610 popq %r15;
8611+ pax_force_retaddr 0, 1
8612 ret;
8613
8614 __enc_xor3:
8615@@ -271,6 +274,7 @@ __enc_xor3:
8616 popq %r13;
8617 popq %r14;
8618 popq %r15;
8619+ pax_force_retaddr 0, 1
8620 ret;
8621
8622 .global twofish_dec_blk_3way
8623@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8624 popq %r13;
8625 popq %r14;
8626 popq %r15;
8627+ pax_force_retaddr 0, 1
8628 ret;
8629
8630diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8631index 7bcf3fc..f53832f 100644
8632--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8633+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8634@@ -21,6 +21,7 @@
8635 .text
8636
8637 #include <asm/asm-offsets.h>
8638+#include <asm/alternative-asm.h>
8639
8640 #define a_offset 0
8641 #define b_offset 4
8642@@ -268,6 +269,7 @@ twofish_enc_blk:
8643
8644 popq R1
8645 movq $1,%rax
8646+ pax_force_retaddr 0, 1
8647 ret
8648
8649 twofish_dec_blk:
8650@@ -319,4 +321,5 @@ twofish_dec_blk:
8651
8652 popq R1
8653 movq $1,%rax
8654+ pax_force_retaddr 0, 1
8655 ret
8656diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8657index 39e4909..887aa7e 100644
8658--- a/arch/x86/ia32/ia32_aout.c
8659+++ b/arch/x86/ia32/ia32_aout.c
8660@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8661 unsigned long dump_start, dump_size;
8662 struct user32 dump;
8663
8664+ memset(&dump, 0, sizeof(dump));
8665+
8666 fs = get_fs();
8667 set_fs(KERNEL_DS);
8668 has_dumped = 1;
8669diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8670index 6557769..ef6ae89 100644
8671--- a/arch/x86/ia32/ia32_signal.c
8672+++ b/arch/x86/ia32/ia32_signal.c
8673@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8674 }
8675 seg = get_fs();
8676 set_fs(KERNEL_DS);
8677- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8678+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8679 set_fs(seg);
8680 if (ret >= 0 && uoss_ptr) {
8681 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8682@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8683 */
8684 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8685 size_t frame_size,
8686- void **fpstate)
8687+ void __user **fpstate)
8688 {
8689 unsigned long sp;
8690
8691@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8692
8693 if (used_math()) {
8694 sp = sp - sig_xstate_ia32_size;
8695- *fpstate = (struct _fpstate_ia32 *) sp;
8696+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8697 if (save_i387_xstate_ia32(*fpstate) < 0)
8698 return (void __user *) -1L;
8699 }
8700@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8701 sp -= frame_size;
8702 /* Align the stack pointer according to the i386 ABI,
8703 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8704- sp = ((sp + 4) & -16ul) - 4;
8705+ sp = ((sp - 12) & -16ul) - 4;
8706 return (void __user *) sp;
8707 }
8708
8709@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8710 * These are actually not used anymore, but left because some
8711 * gdb versions depend on them as a marker.
8712 */
8713- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8714+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8715 } put_user_catch(err);
8716
8717 if (err)
8718@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8719 0xb8,
8720 __NR_ia32_rt_sigreturn,
8721 0x80cd,
8722- 0,
8723+ 0
8724 };
8725
8726 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8727@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8728
8729 if (ka->sa.sa_flags & SA_RESTORER)
8730 restorer = ka->sa.sa_restorer;
8731+ else if (current->mm->context.vdso)
8732+ /* Return stub is in 32bit vsyscall page */
8733+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8734 else
8735- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8736- rt_sigreturn);
8737+ restorer = &frame->retcode;
8738 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8739
8740 /*
8741 * Not actually used anymore, but left because some gdb
8742 * versions need it.
8743 */
8744- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8745+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8746 } put_user_catch(err);
8747
8748 if (err)
8749diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8750index e3e7340..05ed805 100644
8751--- a/arch/x86/ia32/ia32entry.S
8752+++ b/arch/x86/ia32/ia32entry.S
8753@@ -13,8 +13,10 @@
8754 #include <asm/thread_info.h>
8755 #include <asm/segment.h>
8756 #include <asm/irqflags.h>
8757+#include <asm/pgtable.h>
8758 #include <linux/linkage.h>
8759 #include <linux/err.h>
8760+#include <asm/alternative-asm.h>
8761
8762 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8763 #include <linux/elf-em.h>
8764@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8765 ENDPROC(native_irq_enable_sysexit)
8766 #endif
8767
8768+ .macro pax_enter_kernel_user
8769+ pax_set_fptr_mask
8770+#ifdef CONFIG_PAX_MEMORY_UDEREF
8771+ call pax_enter_kernel_user
8772+#endif
8773+ .endm
8774+
8775+ .macro pax_exit_kernel_user
8776+#ifdef CONFIG_PAX_MEMORY_UDEREF
8777+ call pax_exit_kernel_user
8778+#endif
8779+#ifdef CONFIG_PAX_RANDKSTACK
8780+ pushq %rax
8781+ pushq %r11
8782+ call pax_randomize_kstack
8783+ popq %r11
8784+ popq %rax
8785+#endif
8786+ .endm
8787+
8788+.macro pax_erase_kstack
8789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8790+ call pax_erase_kstack
8791+#endif
8792+.endm
8793+
8794 /*
8795 * 32bit SYSENTER instruction entry.
8796 *
8797@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8798 CFI_REGISTER rsp,rbp
8799 SWAPGS_UNSAFE_STACK
8800 movq PER_CPU_VAR(kernel_stack), %rsp
8801- addq $(KERNEL_STACK_OFFSET),%rsp
8802- /*
8803- * No need to follow this irqs on/off section: the syscall
8804- * disabled irqs, here we enable it straight after entry:
8805- */
8806- ENABLE_INTERRUPTS(CLBR_NONE)
8807 movl %ebp,%ebp /* zero extension */
8808 pushq_cfi $__USER32_DS
8809 /*CFI_REL_OFFSET ss,0*/
8810@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8811 CFI_REL_OFFSET rsp,0
8812 pushfq_cfi
8813 /*CFI_REL_OFFSET rflags,0*/
8814- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8815- CFI_REGISTER rip,r10
8816+ orl $X86_EFLAGS_IF,(%rsp)
8817+ GET_THREAD_INFO(%r11)
8818+ movl TI_sysenter_return(%r11), %r11d
8819+ CFI_REGISTER rip,r11
8820 pushq_cfi $__USER32_CS
8821 /*CFI_REL_OFFSET cs,0*/
8822 movl %eax, %eax
8823- pushq_cfi %r10
8824+ pushq_cfi %r11
8825 CFI_REL_OFFSET rip,0
8826 pushq_cfi %rax
8827 cld
8828 SAVE_ARGS 0,1,0
8829+ pax_enter_kernel_user
8830+ /*
8831+ * No need to follow this irqs on/off section: the syscall
8832+ * disabled irqs, here we enable it straight after entry:
8833+ */
8834+ ENABLE_INTERRUPTS(CLBR_NONE)
8835 /* no need to do an access_ok check here because rbp has been
8836 32bit zero extended */
8837+
8838+#ifdef CONFIG_PAX_MEMORY_UDEREF
8839+ mov $PAX_USER_SHADOW_BASE,%r11
8840+ add %r11,%rbp
8841+#endif
8842+
8843 1: movl (%rbp),%ebp
8844 .section __ex_table,"a"
8845 .quad 1b,ia32_badarg
8846 .previous
8847- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8848- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8849+ GET_THREAD_INFO(%r11)
8850+ orl $TS_COMPAT,TI_status(%r11)
8851+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8852 CFI_REMEMBER_STATE
8853 jnz sysenter_tracesys
8854 cmpq $(IA32_NR_syscalls-1),%rax
8855@@ -160,12 +197,15 @@ sysenter_do_call:
8856 sysenter_dispatch:
8857 call *ia32_sys_call_table(,%rax,8)
8858 movq %rax,RAX-ARGOFFSET(%rsp)
8859+ GET_THREAD_INFO(%r11)
8860 DISABLE_INTERRUPTS(CLBR_NONE)
8861 TRACE_IRQS_OFF
8862- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8863+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8864 jnz sysexit_audit
8865 sysexit_from_sys_call:
8866- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8867+ pax_exit_kernel_user
8868+ pax_erase_kstack
8869+ andl $~TS_COMPAT,TI_status(%r11)
8870 /* clear IF, that popfq doesn't enable interrupts early */
8871 andl $~0x200,EFLAGS-R11(%rsp)
8872 movl RIP-R11(%rsp),%edx /* User %eip */
8873@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8874 movl %eax,%esi /* 2nd arg: syscall number */
8875 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8876 call __audit_syscall_entry
8877+
8878+ pax_erase_kstack
8879+
8880 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8881 cmpq $(IA32_NR_syscalls-1),%rax
8882 ja ia32_badsys
8883@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8884 .endm
8885
8886 .macro auditsys_exit exit
8887- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8888+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8889 jnz ia32_ret_from_sys_call
8890 TRACE_IRQS_ON
8891 sti
8892@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8893 1: setbe %al /* 1 if error, 0 if not */
8894 movzbl %al,%edi /* zero-extend that into %edi */
8895 call __audit_syscall_exit
8896+ GET_THREAD_INFO(%r11)
8897 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8898 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8899 cli
8900 TRACE_IRQS_OFF
8901- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8902+ testl %edi,TI_flags(%r11)
8903 jz \exit
8904 CLEAR_RREGS -ARGOFFSET
8905 jmp int_with_check
8906@@ -235,7 +279,7 @@ sysexit_audit:
8907
8908 sysenter_tracesys:
8909 #ifdef CONFIG_AUDITSYSCALL
8910- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8911+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8912 jz sysenter_auditsys
8913 #endif
8914 SAVE_REST
8915@@ -243,6 +287,9 @@ sysenter_tracesys:
8916 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8917 movq %rsp,%rdi /* &pt_regs -> arg1 */
8918 call syscall_trace_enter
8919+
8920+ pax_erase_kstack
8921+
8922 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8923 RESTORE_REST
8924 cmpq $(IA32_NR_syscalls-1),%rax
8925@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8926 ENTRY(ia32_cstar_target)
8927 CFI_STARTPROC32 simple
8928 CFI_SIGNAL_FRAME
8929- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8930+ CFI_DEF_CFA rsp,0
8931 CFI_REGISTER rip,rcx
8932 /*CFI_REGISTER rflags,r11*/
8933 SWAPGS_UNSAFE_STACK
8934 movl %esp,%r8d
8935 CFI_REGISTER rsp,r8
8936 movq PER_CPU_VAR(kernel_stack),%rsp
8937+ SAVE_ARGS 8*6,0,0
8938+ pax_enter_kernel_user
8939 /*
8940 * No need to follow this irqs on/off section: the syscall
8941 * disabled irqs and here we enable it straight after entry:
8942 */
8943 ENABLE_INTERRUPTS(CLBR_NONE)
8944- SAVE_ARGS 8,0,0
8945 movl %eax,%eax /* zero extension */
8946 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8947 movq %rcx,RIP-ARGOFFSET(%rsp)
8948@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8949 /* no need to do an access_ok check here because r8 has been
8950 32bit zero extended */
8951 /* hardware stack frame is complete now */
8952+
8953+#ifdef CONFIG_PAX_MEMORY_UDEREF
8954+ mov $PAX_USER_SHADOW_BASE,%r11
8955+ add %r11,%r8
8956+#endif
8957+
8958 1: movl (%r8),%r9d
8959 .section __ex_table,"a"
8960 .quad 1b,ia32_badarg
8961 .previous
8962- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8963- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8964+ GET_THREAD_INFO(%r11)
8965+ orl $TS_COMPAT,TI_status(%r11)
8966+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8967 CFI_REMEMBER_STATE
8968 jnz cstar_tracesys
8969 cmpq $IA32_NR_syscalls-1,%rax
8970@@ -317,12 +372,15 @@ cstar_do_call:
8971 cstar_dispatch:
8972 call *ia32_sys_call_table(,%rax,8)
8973 movq %rax,RAX-ARGOFFSET(%rsp)
8974+ GET_THREAD_INFO(%r11)
8975 DISABLE_INTERRUPTS(CLBR_NONE)
8976 TRACE_IRQS_OFF
8977- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8978+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8979 jnz sysretl_audit
8980 sysretl_from_sys_call:
8981- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8982+ pax_exit_kernel_user
8983+ pax_erase_kstack
8984+ andl $~TS_COMPAT,TI_status(%r11)
8985 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8986 movl RIP-ARGOFFSET(%rsp),%ecx
8987 CFI_REGISTER rip,rcx
8988@@ -350,7 +408,7 @@ sysretl_audit:
8989
8990 cstar_tracesys:
8991 #ifdef CONFIG_AUDITSYSCALL
8992- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8993+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8994 jz cstar_auditsys
8995 #endif
8996 xchgl %r9d,%ebp
8997@@ -359,6 +417,9 @@ cstar_tracesys:
8998 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8999 movq %rsp,%rdi /* &pt_regs -> arg1 */
9000 call syscall_trace_enter
9001+
9002+ pax_erase_kstack
9003+
9004 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9005 RESTORE_REST
9006 xchgl %ebp,%r9d
9007@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
9008 CFI_REL_OFFSET rip,RIP-RIP
9009 PARAVIRT_ADJUST_EXCEPTION_FRAME
9010 SWAPGS
9011- /*
9012- * No need to follow this irqs on/off section: the syscall
9013- * disabled irqs and here we enable it straight after entry:
9014- */
9015- ENABLE_INTERRUPTS(CLBR_NONE)
9016 movl %eax,%eax
9017 pushq_cfi %rax
9018 cld
9019 /* note the registers are not zero extended to the sf.
9020 this could be a problem. */
9021 SAVE_ARGS 0,1,0
9022- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9023- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9024+ pax_enter_kernel_user
9025+ /*
9026+ * No need to follow this irqs on/off section: the syscall
9027+ * disabled irqs and here we enable it straight after entry:
9028+ */
9029+ ENABLE_INTERRUPTS(CLBR_NONE)
9030+ GET_THREAD_INFO(%r11)
9031+ orl $TS_COMPAT,TI_status(%r11)
9032+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9033 jnz ia32_tracesys
9034 cmpq $(IA32_NR_syscalls-1),%rax
9035 ja ia32_badsys
9036@@ -435,6 +498,9 @@ ia32_tracesys:
9037 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9038 movq %rsp,%rdi /* &pt_regs -> arg1 */
9039 call syscall_trace_enter
9040+
9041+ pax_erase_kstack
9042+
9043 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9044 RESTORE_REST
9045 cmpq $(IA32_NR_syscalls-1),%rax
9046diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9047index f6f5c53..b358b28 100644
9048--- a/arch/x86/ia32/sys_ia32.c
9049+++ b/arch/x86/ia32/sys_ia32.c
9050@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9051 */
9052 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9053 {
9054- typeof(ubuf->st_uid) uid = 0;
9055- typeof(ubuf->st_gid) gid = 0;
9056+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9057+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9058 SET_UID(uid, stat->uid);
9059 SET_GID(gid, stat->gid);
9060 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9061@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9062 }
9063 set_fs(KERNEL_DS);
9064 ret = sys_rt_sigprocmask(how,
9065- set ? (sigset_t __user *)&s : NULL,
9066- oset ? (sigset_t __user *)&s : NULL,
9067+ set ? (sigset_t __force_user *)&s : NULL,
9068+ oset ? (sigset_t __force_user *)&s : NULL,
9069 sigsetsize);
9070 set_fs(old_fs);
9071 if (ret)
9072@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9073 return alarm_setitimer(seconds);
9074 }
9075
9076-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9077+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9078 int options)
9079 {
9080 return compat_sys_wait4(pid, stat_addr, options, NULL);
9081@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9082 mm_segment_t old_fs = get_fs();
9083
9084 set_fs(KERNEL_DS);
9085- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9086+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9087 set_fs(old_fs);
9088 if (put_compat_timespec(&t, interval))
9089 return -EFAULT;
9090@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9091 mm_segment_t old_fs = get_fs();
9092
9093 set_fs(KERNEL_DS);
9094- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9095+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9096 set_fs(old_fs);
9097 if (!ret) {
9098 switch (_NSIG_WORDS) {
9099@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9100 if (copy_siginfo_from_user32(&info, uinfo))
9101 return -EFAULT;
9102 set_fs(KERNEL_DS);
9103- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9104+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9105 set_fs(old_fs);
9106 return ret;
9107 }
9108@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9109 return -EFAULT;
9110
9111 set_fs(KERNEL_DS);
9112- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9113+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9114 count);
9115 set_fs(old_fs);
9116
9117diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9118index 952bd01..7692c6f 100644
9119--- a/arch/x86/include/asm/alternative-asm.h
9120+++ b/arch/x86/include/asm/alternative-asm.h
9121@@ -15,6 +15,45 @@
9122 .endm
9123 #endif
9124
9125+#ifdef KERNEXEC_PLUGIN
9126+ .macro pax_force_retaddr_bts rip=0
9127+ btsq $63,\rip(%rsp)
9128+ .endm
9129+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9130+ .macro pax_force_retaddr rip=0, reload=0
9131+ btsq $63,\rip(%rsp)
9132+ .endm
9133+ .macro pax_force_fptr ptr
9134+ btsq $63,\ptr
9135+ .endm
9136+ .macro pax_set_fptr_mask
9137+ .endm
9138+#endif
9139+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9140+ .macro pax_force_retaddr rip=0, reload=0
9141+ .if \reload
9142+ pax_set_fptr_mask
9143+ .endif
9144+ orq %r10,\rip(%rsp)
9145+ .endm
9146+ .macro pax_force_fptr ptr
9147+ orq %r10,\ptr
9148+ .endm
9149+ .macro pax_set_fptr_mask
9150+ movabs $0x8000000000000000,%r10
9151+ .endm
9152+#endif
9153+#else
9154+ .macro pax_force_retaddr rip=0, reload=0
9155+ .endm
9156+ .macro pax_force_fptr ptr
9157+ .endm
9158+ .macro pax_force_retaddr_bts rip=0
9159+ .endm
9160+ .macro pax_set_fptr_mask
9161+ .endm
9162+#endif
9163+
9164 .macro altinstruction_entry orig alt feature orig_len alt_len
9165 .long \orig - .
9166 .long \alt - .
9167diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9168index 37ad100..7d47faa 100644
9169--- a/arch/x86/include/asm/alternative.h
9170+++ b/arch/x86/include/asm/alternative.h
9171@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9172 ".section .discard,\"aw\",@progbits\n" \
9173 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9174 ".previous\n" \
9175- ".section .altinstr_replacement, \"ax\"\n" \
9176+ ".section .altinstr_replacement, \"a\"\n" \
9177 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9178 ".previous"
9179
9180diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9181index 3ab9bdd..238033e 100644
9182--- a/arch/x86/include/asm/apic.h
9183+++ b/arch/x86/include/asm/apic.h
9184@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
9185
9186 #ifdef CONFIG_X86_LOCAL_APIC
9187
9188-extern unsigned int apic_verbosity;
9189+extern int apic_verbosity;
9190 extern int local_apic_timer_c2_ok;
9191
9192 extern int disable_apic;
9193diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9194index 20370c6..a2eb9b0 100644
9195--- a/arch/x86/include/asm/apm.h
9196+++ b/arch/x86/include/asm/apm.h
9197@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9198 __asm__ __volatile__(APM_DO_ZERO_SEGS
9199 "pushl %%edi\n\t"
9200 "pushl %%ebp\n\t"
9201- "lcall *%%cs:apm_bios_entry\n\t"
9202+ "lcall *%%ss:apm_bios_entry\n\t"
9203 "setc %%al\n\t"
9204 "popl %%ebp\n\t"
9205 "popl %%edi\n\t"
9206@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9207 __asm__ __volatile__(APM_DO_ZERO_SEGS
9208 "pushl %%edi\n\t"
9209 "pushl %%ebp\n\t"
9210- "lcall *%%cs:apm_bios_entry\n\t"
9211+ "lcall *%%ss:apm_bios_entry\n\t"
9212 "setc %%bl\n\t"
9213 "popl %%ebp\n\t"
9214 "popl %%edi\n\t"
9215diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9216index 58cb6d4..ca9010d 100644
9217--- a/arch/x86/include/asm/atomic.h
9218+++ b/arch/x86/include/asm/atomic.h
9219@@ -22,7 +22,18 @@
9220 */
9221 static inline int atomic_read(const atomic_t *v)
9222 {
9223- return (*(volatile int *)&(v)->counter);
9224+ return (*(volatile const int *)&(v)->counter);
9225+}
9226+
9227+/**
9228+ * atomic_read_unchecked - read atomic variable
9229+ * @v: pointer of type atomic_unchecked_t
9230+ *
9231+ * Atomically reads the value of @v.
9232+ */
9233+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9234+{
9235+ return (*(volatile const int *)&(v)->counter);
9236 }
9237
9238 /**
9239@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9240 }
9241
9242 /**
9243+ * atomic_set_unchecked - set atomic variable
9244+ * @v: pointer of type atomic_unchecked_t
9245+ * @i: required value
9246+ *
9247+ * Atomically sets the value of @v to @i.
9248+ */
9249+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9250+{
9251+ v->counter = i;
9252+}
9253+
9254+/**
9255 * atomic_add - add integer to atomic variable
9256 * @i: integer value to add
9257 * @v: pointer of type atomic_t
9258@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9259 */
9260 static inline void atomic_add(int i, atomic_t *v)
9261 {
9262- asm volatile(LOCK_PREFIX "addl %1,%0"
9263+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9264+
9265+#ifdef CONFIG_PAX_REFCOUNT
9266+ "jno 0f\n"
9267+ LOCK_PREFIX "subl %1,%0\n"
9268+ "int $4\n0:\n"
9269+ _ASM_EXTABLE(0b, 0b)
9270+#endif
9271+
9272+ : "+m" (v->counter)
9273+ : "ir" (i));
9274+}
9275+
9276+/**
9277+ * atomic_add_unchecked - add integer to atomic variable
9278+ * @i: integer value to add
9279+ * @v: pointer of type atomic_unchecked_t
9280+ *
9281+ * Atomically adds @i to @v.
9282+ */
9283+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9284+{
9285+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9286 : "+m" (v->counter)
9287 : "ir" (i));
9288 }
9289@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9290 */
9291 static inline void atomic_sub(int i, atomic_t *v)
9292 {
9293- asm volatile(LOCK_PREFIX "subl %1,%0"
9294+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9295+
9296+#ifdef CONFIG_PAX_REFCOUNT
9297+ "jno 0f\n"
9298+ LOCK_PREFIX "addl %1,%0\n"
9299+ "int $4\n0:\n"
9300+ _ASM_EXTABLE(0b, 0b)
9301+#endif
9302+
9303+ : "+m" (v->counter)
9304+ : "ir" (i));
9305+}
9306+
9307+/**
9308+ * atomic_sub_unchecked - subtract integer from atomic variable
9309+ * @i: integer value to subtract
9310+ * @v: pointer of type atomic_unchecked_t
9311+ *
9312+ * Atomically subtracts @i from @v.
9313+ */
9314+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9315+{
9316+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9317 : "+m" (v->counter)
9318 : "ir" (i));
9319 }
9320@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9321 {
9322 unsigned char c;
9323
9324- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9325+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9326+
9327+#ifdef CONFIG_PAX_REFCOUNT
9328+ "jno 0f\n"
9329+ LOCK_PREFIX "addl %2,%0\n"
9330+ "int $4\n0:\n"
9331+ _ASM_EXTABLE(0b, 0b)
9332+#endif
9333+
9334+ "sete %1\n"
9335 : "+m" (v->counter), "=qm" (c)
9336 : "ir" (i) : "memory");
9337 return c;
9338@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9339 */
9340 static inline void atomic_inc(atomic_t *v)
9341 {
9342- asm volatile(LOCK_PREFIX "incl %0"
9343+ asm volatile(LOCK_PREFIX "incl %0\n"
9344+
9345+#ifdef CONFIG_PAX_REFCOUNT
9346+ "jno 0f\n"
9347+ LOCK_PREFIX "decl %0\n"
9348+ "int $4\n0:\n"
9349+ _ASM_EXTABLE(0b, 0b)
9350+#endif
9351+
9352+ : "+m" (v->counter));
9353+}
9354+
9355+/**
9356+ * atomic_inc_unchecked - increment atomic variable
9357+ * @v: pointer of type atomic_unchecked_t
9358+ *
9359+ * Atomically increments @v by 1.
9360+ */
9361+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9362+{
9363+ asm volatile(LOCK_PREFIX "incl %0\n"
9364 : "+m" (v->counter));
9365 }
9366
9367@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9368 */
9369 static inline void atomic_dec(atomic_t *v)
9370 {
9371- asm volatile(LOCK_PREFIX "decl %0"
9372+ asm volatile(LOCK_PREFIX "decl %0\n"
9373+
9374+#ifdef CONFIG_PAX_REFCOUNT
9375+ "jno 0f\n"
9376+ LOCK_PREFIX "incl %0\n"
9377+ "int $4\n0:\n"
9378+ _ASM_EXTABLE(0b, 0b)
9379+#endif
9380+
9381+ : "+m" (v->counter));
9382+}
9383+
9384+/**
9385+ * atomic_dec_unchecked - decrement atomic variable
9386+ * @v: pointer of type atomic_unchecked_t
9387+ *
9388+ * Atomically decrements @v by 1.
9389+ */
9390+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9391+{
9392+ asm volatile(LOCK_PREFIX "decl %0\n"
9393 : "+m" (v->counter));
9394 }
9395
9396@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9397 {
9398 unsigned char c;
9399
9400- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9401+ asm volatile(LOCK_PREFIX "decl %0\n"
9402+
9403+#ifdef CONFIG_PAX_REFCOUNT
9404+ "jno 0f\n"
9405+ LOCK_PREFIX "incl %0\n"
9406+ "int $4\n0:\n"
9407+ _ASM_EXTABLE(0b, 0b)
9408+#endif
9409+
9410+ "sete %1\n"
9411 : "+m" (v->counter), "=qm" (c)
9412 : : "memory");
9413 return c != 0;
9414@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9415 {
9416 unsigned char c;
9417
9418- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9419+ asm volatile(LOCK_PREFIX "incl %0\n"
9420+
9421+#ifdef CONFIG_PAX_REFCOUNT
9422+ "jno 0f\n"
9423+ LOCK_PREFIX "decl %0\n"
9424+ "int $4\n0:\n"
9425+ _ASM_EXTABLE(0b, 0b)
9426+#endif
9427+
9428+ "sete %1\n"
9429+ : "+m" (v->counter), "=qm" (c)
9430+ : : "memory");
9431+ return c != 0;
9432+}
9433+
9434+/**
9435+ * atomic_inc_and_test_unchecked - increment and test
9436+ * @v: pointer of type atomic_unchecked_t
9437+ *
9438+ * Atomically increments @v by 1
9439+ * and returns true if the result is zero, or false for all
9440+ * other cases.
9441+ */
9442+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9443+{
9444+ unsigned char c;
9445+
9446+ asm volatile(LOCK_PREFIX "incl %0\n"
9447+ "sete %1\n"
9448 : "+m" (v->counter), "=qm" (c)
9449 : : "memory");
9450 return c != 0;
9451@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9452 {
9453 unsigned char c;
9454
9455- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9456+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9457+
9458+#ifdef CONFIG_PAX_REFCOUNT
9459+ "jno 0f\n"
9460+ LOCK_PREFIX "subl %2,%0\n"
9461+ "int $4\n0:\n"
9462+ _ASM_EXTABLE(0b, 0b)
9463+#endif
9464+
9465+ "sets %1\n"
9466 : "+m" (v->counter), "=qm" (c)
9467 : "ir" (i) : "memory");
9468 return c;
9469@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9470 goto no_xadd;
9471 #endif
9472 /* Modern 486+ processor */
9473- return i + xadd(&v->counter, i);
9474+ return i + xadd_check_overflow(&v->counter, i);
9475
9476 #ifdef CONFIG_M386
9477 no_xadd: /* Legacy 386 processor */
9478@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9479 }
9480
9481 /**
9482+ * atomic_add_return_unchecked - add integer and return
9483+ * @i: integer value to add
9484+ * @v: pointer of type atomic_unchecked_t
9485+ *
9486+ * Atomically adds @i to @v and returns @i + @v
9487+ */
9488+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9489+{
9490+#ifdef CONFIG_M386
9491+ int __i;
9492+ unsigned long flags;
9493+ if (unlikely(boot_cpu_data.x86 <= 3))
9494+ goto no_xadd;
9495+#endif
9496+ /* Modern 486+ processor */
9497+ return i + xadd(&v->counter, i);
9498+
9499+#ifdef CONFIG_M386
9500+no_xadd: /* Legacy 386 processor */
9501+ raw_local_irq_save(flags);
9502+ __i = atomic_read_unchecked(v);
9503+ atomic_set_unchecked(v, i + __i);
9504+ raw_local_irq_restore(flags);
9505+ return i + __i;
9506+#endif
9507+}
9508+
9509+/**
9510 * atomic_sub_return - subtract integer and return
9511 * @v: pointer of type atomic_t
9512 * @i: integer value to subtract
9513@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9514 }
9515
9516 #define atomic_inc_return(v) (atomic_add_return(1, v))
9517+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9518+{
9519+ return atomic_add_return_unchecked(1, v);
9520+}
9521 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9522
9523 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9524@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9525 return cmpxchg(&v->counter, old, new);
9526 }
9527
9528+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9529+{
9530+ return cmpxchg(&v->counter, old, new);
9531+}
9532+
9533 static inline int atomic_xchg(atomic_t *v, int new)
9534 {
9535 return xchg(&v->counter, new);
9536 }
9537
9538+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9539+{
9540+ return xchg(&v->counter, new);
9541+}
9542+
9543 /**
9544 * __atomic_add_unless - add unless the number is already a given value
9545 * @v: pointer of type atomic_t
9546@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9547 */
9548 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9549 {
9550- int c, old;
9551+ int c, old, new;
9552 c = atomic_read(v);
9553 for (;;) {
9554- if (unlikely(c == (u)))
9555+ if (unlikely(c == u))
9556 break;
9557- old = atomic_cmpxchg((v), c, c + (a));
9558+
9559+ asm volatile("addl %2,%0\n"
9560+
9561+#ifdef CONFIG_PAX_REFCOUNT
9562+ "jno 0f\n"
9563+ "subl %2,%0\n"
9564+ "int $4\n0:\n"
9565+ _ASM_EXTABLE(0b, 0b)
9566+#endif
9567+
9568+ : "=r" (new)
9569+ : "0" (c), "ir" (a));
9570+
9571+ old = atomic_cmpxchg(v, c, new);
9572 if (likely(old == c))
9573 break;
9574 c = old;
9575@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9576 return c;
9577 }
9578
9579+/**
9580+ * atomic_inc_not_zero_hint - increment if not null
9581+ * @v: pointer of type atomic_t
9582+ * @hint: probable value of the atomic before the increment
9583+ *
9584+ * This version of atomic_inc_not_zero() gives a hint of probable
9585+ * value of the atomic. This helps processor to not read the memory
9586+ * before doing the atomic read/modify/write cycle, lowering
9587+ * number of bus transactions on some arches.
9588+ *
9589+ * Returns: 0 if increment was not done, 1 otherwise.
9590+ */
9591+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9592+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9593+{
9594+ int val, c = hint, new;
9595+
9596+ /* sanity test, should be removed by compiler if hint is a constant */
9597+ if (!hint)
9598+ return __atomic_add_unless(v, 1, 0);
9599+
9600+ do {
9601+ asm volatile("incl %0\n"
9602+
9603+#ifdef CONFIG_PAX_REFCOUNT
9604+ "jno 0f\n"
9605+ "decl %0\n"
9606+ "int $4\n0:\n"
9607+ _ASM_EXTABLE(0b, 0b)
9608+#endif
9609+
9610+ : "=r" (new)
9611+ : "0" (c));
9612+
9613+ val = atomic_cmpxchg(v, c, new);
9614+ if (val == c)
9615+ return 1;
9616+ c = val;
9617+ } while (c);
9618+
9619+ return 0;
9620+}
9621
9622 /*
9623 * atomic_dec_if_positive - decrement by 1 if old value positive
9624diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9625index fa13f0e..27c2e08 100644
9626--- a/arch/x86/include/asm/atomic64_32.h
9627+++ b/arch/x86/include/asm/atomic64_32.h
9628@@ -12,6 +12,14 @@ typedef struct {
9629 u64 __aligned(8) counter;
9630 } atomic64_t;
9631
9632+#ifdef CONFIG_PAX_REFCOUNT
9633+typedef struct {
9634+ u64 __aligned(8) counter;
9635+} atomic64_unchecked_t;
9636+#else
9637+typedef atomic64_t atomic64_unchecked_t;
9638+#endif
9639+
9640 #define ATOMIC64_INIT(val) { (val) }
9641
9642 #ifdef CONFIG_X86_CMPXCHG64
9643@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9644 }
9645
9646 /**
9647+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9648+ * @p: pointer to type atomic64_unchecked_t
9649+ * @o: expected value
9650+ * @n: new value
9651+ *
9652+ * Atomically sets @v to @n if it was equal to @o and returns
9653+ * the old value.
9654+ */
9655+
9656+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9657+{
9658+ return cmpxchg64(&v->counter, o, n);
9659+}
9660+
9661+/**
9662 * atomic64_xchg - xchg atomic64 variable
9663 * @v: pointer to type atomic64_t
9664 * @n: value to assign
9665@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9666 }
9667
9668 /**
9669+ * atomic64_set_unchecked - set atomic64 variable
9670+ * @v: pointer to type atomic64_unchecked_t
9671+ * @n: value to assign
9672+ *
9673+ * Atomically sets the value of @v to @n.
9674+ */
9675+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9676+{
9677+ unsigned high = (unsigned)(i >> 32);
9678+ unsigned low = (unsigned)i;
9679+ asm volatile(ATOMIC64_ALTERNATIVE(set)
9680+ : "+b" (low), "+c" (high)
9681+ : "S" (v)
9682+ : "eax", "edx", "memory"
9683+ );
9684+}
9685+
9686+/**
9687 * atomic64_read - read atomic64 variable
9688 * @v: pointer to type atomic64_t
9689 *
9690@@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9691 }
9692
9693 /**
9694+ * atomic64_read_unchecked - read atomic64 variable
9695+ * @v: pointer to type atomic64_unchecked_t
9696+ *
9697+ * Atomically reads the value of @v and returns it.
9698+ */
9699+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9700+{
9701+ long long r;
9702+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9703+ : "=A" (r), "+c" (v)
9704+ : : "memory"
9705+ );
9706+ return r;
9707+ }
9708+
9709+/**
9710 * atomic64_add_return - add and return
9711 * @i: integer value to add
9712 * @v: pointer to type atomic64_t
9713@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9714 return i;
9715 }
9716
9717+/**
9718+ * atomic64_add_return_unchecked - add and return
9719+ * @i: integer value to add
9720+ * @v: pointer to type atomic64_unchecked_t
9721+ *
9722+ * Atomically adds @i to @v and returns @i + *@v
9723+ */
9724+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9725+{
9726+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9727+ : "+A" (i), "+c" (v)
9728+ : : "memory"
9729+ );
9730+ return i;
9731+}
9732+
9733 /*
9734 * Other variants with different arithmetic operators:
9735 */
9736@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9737 return a;
9738 }
9739
9740+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9741+{
9742+ long long a;
9743+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9744+ : "=A" (a)
9745+ : "S" (v)
9746+ : "memory", "ecx"
9747+ );
9748+ return a;
9749+}
9750+
9751 static inline long long atomic64_dec_return(atomic64_t *v)
9752 {
9753 long long a;
9754@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9755 }
9756
9757 /**
9758+ * atomic64_add_unchecked - add integer to atomic64 variable
9759+ * @i: integer value to add
9760+ * @v: pointer to type atomic64_unchecked_t
9761+ *
9762+ * Atomically adds @i to @v.
9763+ */
9764+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9765+{
9766+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9767+ : "+A" (i), "+c" (v)
9768+ : : "memory"
9769+ );
9770+ return i;
9771+}
9772+
9773+/**
9774 * atomic64_sub - subtract the atomic64 variable
9775 * @i: integer value to subtract
9776 * @v: pointer to type atomic64_t
9777diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9778index 0e1cbfc..5623683 100644
9779--- a/arch/x86/include/asm/atomic64_64.h
9780+++ b/arch/x86/include/asm/atomic64_64.h
9781@@ -18,7 +18,19 @@
9782 */
9783 static inline long atomic64_read(const atomic64_t *v)
9784 {
9785- return (*(volatile long *)&(v)->counter);
9786+ return (*(volatile const long *)&(v)->counter);
9787+}
9788+
9789+/**
9790+ * atomic64_read_unchecked - read atomic64 variable
9791+ * @v: pointer of type atomic64_unchecked_t
9792+ *
9793+ * Atomically reads the value of @v.
9794+ * Doesn't imply a read memory barrier.
9795+ */
9796+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9797+{
9798+ return (*(volatile const long *)&(v)->counter);
9799 }
9800
9801 /**
9802@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9803 }
9804
9805 /**
9806+ * atomic64_set_unchecked - set atomic64 variable
9807+ * @v: pointer to type atomic64_unchecked_t
9808+ * @i: required value
9809+ *
9810+ * Atomically sets the value of @v to @i.
9811+ */
9812+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9813+{
9814+ v->counter = i;
9815+}
9816+
9817+/**
9818 * atomic64_add - add integer to atomic64 variable
9819 * @i: integer value to add
9820 * @v: pointer to type atomic64_t
9821@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9822 */
9823 static inline void atomic64_add(long i, atomic64_t *v)
9824 {
9825+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9826+
9827+#ifdef CONFIG_PAX_REFCOUNT
9828+ "jno 0f\n"
9829+ LOCK_PREFIX "subq %1,%0\n"
9830+ "int $4\n0:\n"
9831+ _ASM_EXTABLE(0b, 0b)
9832+#endif
9833+
9834+ : "=m" (v->counter)
9835+ : "er" (i), "m" (v->counter));
9836+}
9837+
9838+/**
9839+ * atomic64_add_unchecked - add integer to atomic64 variable
9840+ * @i: integer value to add
9841+ * @v: pointer to type atomic64_unchecked_t
9842+ *
9843+ * Atomically adds @i to @v.
9844+ */
9845+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9846+{
9847 asm volatile(LOCK_PREFIX "addq %1,%0"
9848 : "=m" (v->counter)
9849 : "er" (i), "m" (v->counter));
9850@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9851 */
9852 static inline void atomic64_sub(long i, atomic64_t *v)
9853 {
9854- asm volatile(LOCK_PREFIX "subq %1,%0"
9855+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9856+
9857+#ifdef CONFIG_PAX_REFCOUNT
9858+ "jno 0f\n"
9859+ LOCK_PREFIX "addq %1,%0\n"
9860+ "int $4\n0:\n"
9861+ _ASM_EXTABLE(0b, 0b)
9862+#endif
9863+
9864+ : "=m" (v->counter)
9865+ : "er" (i), "m" (v->counter));
9866+}
9867+
9868+/**
9869+ * atomic64_sub_unchecked - subtract the atomic64 variable
9870+ * @i: integer value to subtract
9871+ * @v: pointer to type atomic64_unchecked_t
9872+ *
9873+ * Atomically subtracts @i from @v.
9874+ */
9875+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9876+{
9877+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9878 : "=m" (v->counter)
9879 : "er" (i), "m" (v->counter));
9880 }
9881@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9882 {
9883 unsigned char c;
9884
9885- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9886+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9887+
9888+#ifdef CONFIG_PAX_REFCOUNT
9889+ "jno 0f\n"
9890+ LOCK_PREFIX "addq %2,%0\n"
9891+ "int $4\n0:\n"
9892+ _ASM_EXTABLE(0b, 0b)
9893+#endif
9894+
9895+ "sete %1\n"
9896 : "=m" (v->counter), "=qm" (c)
9897 : "er" (i), "m" (v->counter) : "memory");
9898 return c;
9899@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9900 */
9901 static inline void atomic64_inc(atomic64_t *v)
9902 {
9903+ asm volatile(LOCK_PREFIX "incq %0\n"
9904+
9905+#ifdef CONFIG_PAX_REFCOUNT
9906+ "jno 0f\n"
9907+ LOCK_PREFIX "decq %0\n"
9908+ "int $4\n0:\n"
9909+ _ASM_EXTABLE(0b, 0b)
9910+#endif
9911+
9912+ : "=m" (v->counter)
9913+ : "m" (v->counter));
9914+}
9915+
9916+/**
9917+ * atomic64_inc_unchecked - increment atomic64 variable
9918+ * @v: pointer to type atomic64_unchecked_t
9919+ *
9920+ * Atomically increments @v by 1.
9921+ */
9922+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9923+{
9924 asm volatile(LOCK_PREFIX "incq %0"
9925 : "=m" (v->counter)
9926 : "m" (v->counter));
9927@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9928 */
9929 static inline void atomic64_dec(atomic64_t *v)
9930 {
9931- asm volatile(LOCK_PREFIX "decq %0"
9932+ asm volatile(LOCK_PREFIX "decq %0\n"
9933+
9934+#ifdef CONFIG_PAX_REFCOUNT
9935+ "jno 0f\n"
9936+ LOCK_PREFIX "incq %0\n"
9937+ "int $4\n0:\n"
9938+ _ASM_EXTABLE(0b, 0b)
9939+#endif
9940+
9941+ : "=m" (v->counter)
9942+ : "m" (v->counter));
9943+}
9944+
9945+/**
9946+ * atomic64_dec_unchecked - decrement atomic64 variable
9947+ * @v: pointer to type atomic64_t
9948+ *
9949+ * Atomically decrements @v by 1.
9950+ */
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ asm volatile(LOCK_PREFIX "decq %0\n"
9954 : "=m" (v->counter)
9955 : "m" (v->counter));
9956 }
9957@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9958 {
9959 unsigned char c;
9960
9961- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9962+ asm volatile(LOCK_PREFIX "decq %0\n"
9963+
9964+#ifdef CONFIG_PAX_REFCOUNT
9965+ "jno 0f\n"
9966+ LOCK_PREFIX "incq %0\n"
9967+ "int $4\n0:\n"
9968+ _ASM_EXTABLE(0b, 0b)
9969+#endif
9970+
9971+ "sete %1\n"
9972 : "=m" (v->counter), "=qm" (c)
9973 : "m" (v->counter) : "memory");
9974 return c != 0;
9975@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9976 {
9977 unsigned char c;
9978
9979- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9980+ asm volatile(LOCK_PREFIX "incq %0\n"
9981+
9982+#ifdef CONFIG_PAX_REFCOUNT
9983+ "jno 0f\n"
9984+ LOCK_PREFIX "decq %0\n"
9985+ "int $4\n0:\n"
9986+ _ASM_EXTABLE(0b, 0b)
9987+#endif
9988+
9989+ "sete %1\n"
9990 : "=m" (v->counter), "=qm" (c)
9991 : "m" (v->counter) : "memory");
9992 return c != 0;
9993@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9994 {
9995 unsigned char c;
9996
9997- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9998+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9999+
10000+#ifdef CONFIG_PAX_REFCOUNT
10001+ "jno 0f\n"
10002+ LOCK_PREFIX "subq %2,%0\n"
10003+ "int $4\n0:\n"
10004+ _ASM_EXTABLE(0b, 0b)
10005+#endif
10006+
10007+ "sets %1\n"
10008 : "=m" (v->counter), "=qm" (c)
10009 : "er" (i), "m" (v->counter) : "memory");
10010 return c;
10011@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10012 */
10013 static inline long atomic64_add_return(long i, atomic64_t *v)
10014 {
10015+ return i + xadd_check_overflow(&v->counter, i);
10016+}
10017+
10018+/**
10019+ * atomic64_add_return_unchecked - add and return
10020+ * @i: integer value to add
10021+ * @v: pointer to type atomic64_unchecked_t
10022+ *
10023+ * Atomically adds @i to @v and returns @i + @v
10024+ */
10025+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10026+{
10027 return i + xadd(&v->counter, i);
10028 }
10029
10030@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10031 }
10032
10033 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10034+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10035+{
10036+ return atomic64_add_return_unchecked(1, v);
10037+}
10038 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10039
10040 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10041@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10042 return cmpxchg(&v->counter, old, new);
10043 }
10044
10045+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10046+{
10047+ return cmpxchg(&v->counter, old, new);
10048+}
10049+
10050 static inline long atomic64_xchg(atomic64_t *v, long new)
10051 {
10052 return xchg(&v->counter, new);
10053@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10054 */
10055 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10056 {
10057- long c, old;
10058+ long c, old, new;
10059 c = atomic64_read(v);
10060 for (;;) {
10061- if (unlikely(c == (u)))
10062+ if (unlikely(c == u))
10063 break;
10064- old = atomic64_cmpxchg((v), c, c + (a));
10065+
10066+ asm volatile("add %2,%0\n"
10067+
10068+#ifdef CONFIG_PAX_REFCOUNT
10069+ "jno 0f\n"
10070+ "sub %2,%0\n"
10071+ "int $4\n0:\n"
10072+ _ASM_EXTABLE(0b, 0b)
10073+#endif
10074+
10075+ : "=r" (new)
10076+ : "0" (c), "ir" (a));
10077+
10078+ old = atomic64_cmpxchg(v, c, new);
10079 if (likely(old == c))
10080 break;
10081 c = old;
10082 }
10083- return c != (u);
10084+ return c != u;
10085 }
10086
10087 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10088diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10089index b97596e..9bd48b06 100644
10090--- a/arch/x86/include/asm/bitops.h
10091+++ b/arch/x86/include/asm/bitops.h
10092@@ -38,7 +38,7 @@
10093 * a mask operation on a byte.
10094 */
10095 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10096-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10097+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10098 #define CONST_MASK(nr) (1 << ((nr) & 7))
10099
10100 /**
10101diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10102index 5e1a2ee..c9f9533 100644
10103--- a/arch/x86/include/asm/boot.h
10104+++ b/arch/x86/include/asm/boot.h
10105@@ -11,10 +11,15 @@
10106 #include <asm/pgtable_types.h>
10107
10108 /* Physical address where kernel should be loaded. */
10109-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10110+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10111 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10112 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10113
10114+#ifndef __ASSEMBLY__
10115+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10116+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10117+#endif
10118+
10119 /* Minimum kernel alignment, as a power of two */
10120 #ifdef CONFIG_X86_64
10121 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10122diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10123index 48f99f1..d78ebf9 100644
10124--- a/arch/x86/include/asm/cache.h
10125+++ b/arch/x86/include/asm/cache.h
10126@@ -5,12 +5,13 @@
10127
10128 /* L1 cache line size */
10129 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10130-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10131+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10132
10133 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10134+#define __read_only __attribute__((__section__(".data..read_only")))
10135
10136 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10137-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10138+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10139
10140 #ifdef CONFIG_X86_VSMP
10141 #ifdef CONFIG_SMP
10142diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10143index 4e12668..501d239 100644
10144--- a/arch/x86/include/asm/cacheflush.h
10145+++ b/arch/x86/include/asm/cacheflush.h
10146@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10147 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10148
10149 if (pg_flags == _PGMT_DEFAULT)
10150- return -1;
10151+ return ~0UL;
10152 else if (pg_flags == _PGMT_WC)
10153 return _PAGE_CACHE_WC;
10154 else if (pg_flags == _PGMT_UC_MINUS)
10155diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10156index 46fc474..b02b0f9 100644
10157--- a/arch/x86/include/asm/checksum_32.h
10158+++ b/arch/x86/include/asm/checksum_32.h
10159@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10160 int len, __wsum sum,
10161 int *src_err_ptr, int *dst_err_ptr);
10162
10163+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10164+ int len, __wsum sum,
10165+ int *src_err_ptr, int *dst_err_ptr);
10166+
10167+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10168+ int len, __wsum sum,
10169+ int *src_err_ptr, int *dst_err_ptr);
10170+
10171 /*
10172 * Note: when you get a NULL pointer exception here this means someone
10173 * passed in an incorrect kernel address to one of these functions.
10174@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10175 int *err_ptr)
10176 {
10177 might_sleep();
10178- return csum_partial_copy_generic((__force void *)src, dst,
10179+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10180 len, sum, err_ptr, NULL);
10181 }
10182
10183@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10184 {
10185 might_sleep();
10186 if (access_ok(VERIFY_WRITE, dst, len))
10187- return csum_partial_copy_generic(src, (__force void *)dst,
10188+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10189 len, sum, NULL, err_ptr);
10190
10191 if (len)
10192diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10193index 99480e5..d81165b 100644
10194--- a/arch/x86/include/asm/cmpxchg.h
10195+++ b/arch/x86/include/asm/cmpxchg.h
10196@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10197 __compiletime_error("Bad argument size for cmpxchg");
10198 extern void __xadd_wrong_size(void)
10199 __compiletime_error("Bad argument size for xadd");
10200+extern void __xadd_check_overflow_wrong_size(void)
10201+ __compiletime_error("Bad argument size for xadd_check_overflow");
10202 extern void __add_wrong_size(void)
10203 __compiletime_error("Bad argument size for add");
10204+extern void __add_check_overflow_wrong_size(void)
10205+ __compiletime_error("Bad argument size for add_check_overflow");
10206
10207 /*
10208 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10209@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10210 __ret; \
10211 })
10212
10213+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10214+ ({ \
10215+ __typeof__ (*(ptr)) __ret = (arg); \
10216+ switch (sizeof(*(ptr))) { \
10217+ case __X86_CASE_L: \
10218+ asm volatile (lock #op "l %0, %1\n" \
10219+ "jno 0f\n" \
10220+ "mov %0,%1\n" \
10221+ "int $4\n0:\n" \
10222+ _ASM_EXTABLE(0b, 0b) \
10223+ : "+r" (__ret), "+m" (*(ptr)) \
10224+ : : "memory", "cc"); \
10225+ break; \
10226+ case __X86_CASE_Q: \
10227+ asm volatile (lock #op "q %q0, %1\n" \
10228+ "jno 0f\n" \
10229+ "mov %0,%1\n" \
10230+ "int $4\n0:\n" \
10231+ _ASM_EXTABLE(0b, 0b) \
10232+ : "+r" (__ret), "+m" (*(ptr)) \
10233+ : : "memory", "cc"); \
10234+ break; \
10235+ default: \
10236+ __ ## op ## _check_overflow_wrong_size(); \
10237+ } \
10238+ __ret; \
10239+ })
10240+
10241 /*
10242 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10243 * Since this is generally used to protect other memory information, we
10244@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10245 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10246 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10247
10248+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10249+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10250+
10251 #define __add(ptr, inc, lock) \
10252 ({ \
10253 __typeof__ (*(ptr)) __ret = (inc); \
10254diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10255index 8d67d42..183d0eb 100644
10256--- a/arch/x86/include/asm/cpufeature.h
10257+++ b/arch/x86/include/asm/cpufeature.h
10258@@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10259 ".section .discard,\"aw\",@progbits\n"
10260 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10261 ".previous\n"
10262- ".section .altinstr_replacement,\"ax\"\n"
10263+ ".section .altinstr_replacement,\"a\"\n"
10264 "3: movb $1,%0\n"
10265 "4:\n"
10266 ".previous\n"
10267diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10268index e95822d..a90010e 100644
10269--- a/arch/x86/include/asm/desc.h
10270+++ b/arch/x86/include/asm/desc.h
10271@@ -4,6 +4,7 @@
10272 #include <asm/desc_defs.h>
10273 #include <asm/ldt.h>
10274 #include <asm/mmu.h>
10275+#include <asm/pgtable.h>
10276
10277 #include <linux/smp.h>
10278
10279@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10280
10281 desc->type = (info->read_exec_only ^ 1) << 1;
10282 desc->type |= info->contents << 2;
10283+ desc->type |= info->seg_not_present ^ 1;
10284
10285 desc->s = 1;
10286 desc->dpl = 0x3;
10287@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10288 }
10289
10290 extern struct desc_ptr idt_descr;
10291-extern gate_desc idt_table[];
10292 extern struct desc_ptr nmi_idt_descr;
10293-extern gate_desc nmi_idt_table[];
10294-
10295-struct gdt_page {
10296- struct desc_struct gdt[GDT_ENTRIES];
10297-} __attribute__((aligned(PAGE_SIZE)));
10298-
10299-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10300+extern gate_desc idt_table[256];
10301+extern gate_desc nmi_idt_table[256];
10302
10303+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10304 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10305 {
10306- return per_cpu(gdt_page, cpu).gdt;
10307+ return cpu_gdt_table[cpu];
10308 }
10309
10310 #ifdef CONFIG_X86_64
10311@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10312 unsigned long base, unsigned dpl, unsigned flags,
10313 unsigned short seg)
10314 {
10315- gate->a = (seg << 16) | (base & 0xffff);
10316- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10317+ gate->gate.offset_low = base;
10318+ gate->gate.seg = seg;
10319+ gate->gate.reserved = 0;
10320+ gate->gate.type = type;
10321+ gate->gate.s = 0;
10322+ gate->gate.dpl = dpl;
10323+ gate->gate.p = 1;
10324+ gate->gate.offset_high = base >> 16;
10325 }
10326
10327 #endif
10328@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10329
10330 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10331 {
10332+ pax_open_kernel();
10333 memcpy(&idt[entry], gate, sizeof(*gate));
10334+ pax_close_kernel();
10335 }
10336
10337 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10338 {
10339+ pax_open_kernel();
10340 memcpy(&ldt[entry], desc, 8);
10341+ pax_close_kernel();
10342 }
10343
10344 static inline void
10345@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10346 default: size = sizeof(*gdt); break;
10347 }
10348
10349+ pax_open_kernel();
10350 memcpy(&gdt[entry], desc, size);
10351+ pax_close_kernel();
10352 }
10353
10354 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10355@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10356
10357 static inline void native_load_tr_desc(void)
10358 {
10359+ pax_open_kernel();
10360 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10361+ pax_close_kernel();
10362 }
10363
10364 static inline void native_load_gdt(const struct desc_ptr *dtr)
10365@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10366 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10367 unsigned int i;
10368
10369+ pax_open_kernel();
10370 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10371 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10372+ pax_close_kernel();
10373 }
10374
10375 #define _LDT_empty(info) \
10376@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10377 }
10378
10379 #ifdef CONFIG_X86_64
10380-static inline void set_nmi_gate(int gate, void *addr)
10381+static inline void set_nmi_gate(int gate, const void *addr)
10382 {
10383 gate_desc s;
10384
10385@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10386 }
10387 #endif
10388
10389-static inline void _set_gate(int gate, unsigned type, void *addr,
10390+static inline void _set_gate(int gate, unsigned type, const void *addr,
10391 unsigned dpl, unsigned ist, unsigned seg)
10392 {
10393 gate_desc s;
10394@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10395 * Pentium F0 0F bugfix can have resulted in the mapped
10396 * IDT being write-protected.
10397 */
10398-static inline void set_intr_gate(unsigned int n, void *addr)
10399+static inline void set_intr_gate(unsigned int n, const void *addr)
10400 {
10401 BUG_ON((unsigned)n > 0xFF);
10402 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10403@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10404 /*
10405 * This routine sets up an interrupt gate at directory privilege level 3.
10406 */
10407-static inline void set_system_intr_gate(unsigned int n, void *addr)
10408+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10409 {
10410 BUG_ON((unsigned)n > 0xFF);
10411 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10412 }
10413
10414-static inline void set_system_trap_gate(unsigned int n, void *addr)
10415+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10416 {
10417 BUG_ON((unsigned)n > 0xFF);
10418 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10419 }
10420
10421-static inline void set_trap_gate(unsigned int n, void *addr)
10422+static inline void set_trap_gate(unsigned int n, const void *addr)
10423 {
10424 BUG_ON((unsigned)n > 0xFF);
10425 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10426@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10427 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10428 {
10429 BUG_ON((unsigned)n > 0xFF);
10430- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10431+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10432 }
10433
10434-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10435+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10436 {
10437 BUG_ON((unsigned)n > 0xFF);
10438 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10439 }
10440
10441-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10442+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10443 {
10444 BUG_ON((unsigned)n > 0xFF);
10445 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10446 }
10447
10448+#ifdef CONFIG_X86_32
10449+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10450+{
10451+ struct desc_struct d;
10452+
10453+ if (likely(limit))
10454+ limit = (limit - 1UL) >> PAGE_SHIFT;
10455+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10456+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10457+}
10458+#endif
10459+
10460 #endif /* _ASM_X86_DESC_H */
10461diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10462index 278441f..b95a174 100644
10463--- a/arch/x86/include/asm/desc_defs.h
10464+++ b/arch/x86/include/asm/desc_defs.h
10465@@ -31,6 +31,12 @@ struct desc_struct {
10466 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10467 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10468 };
10469+ struct {
10470+ u16 offset_low;
10471+ u16 seg;
10472+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10473+ unsigned offset_high: 16;
10474+ } gate;
10475 };
10476 } __attribute__((packed));
10477
10478diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10479index 3778256..c5d4fce 100644
10480--- a/arch/x86/include/asm/e820.h
10481+++ b/arch/x86/include/asm/e820.h
10482@@ -69,7 +69,7 @@ struct e820map {
10483 #define ISA_START_ADDRESS 0xa0000
10484 #define ISA_END_ADDRESS 0x100000
10485
10486-#define BIOS_BEGIN 0x000a0000
10487+#define BIOS_BEGIN 0x000c0000
10488 #define BIOS_END 0x00100000
10489
10490 #define BIOS_ROM_BASE 0xffe00000
10491diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10492index 5f962df..7289f09 100644
10493--- a/arch/x86/include/asm/elf.h
10494+++ b/arch/x86/include/asm/elf.h
10495@@ -238,7 +238,25 @@ extern int force_personality32;
10496 the loader. We need to make sure that it is out of the way of the program
10497 that it will "exec", and that there is sufficient room for the brk. */
10498
10499+#ifdef CONFIG_PAX_SEGMEXEC
10500+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10501+#else
10502 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10503+#endif
10504+
10505+#ifdef CONFIG_PAX_ASLR
10506+#ifdef CONFIG_X86_32
10507+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10508+
10509+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10510+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10511+#else
10512+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10513+
10514+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10515+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10516+#endif
10517+#endif
10518
10519 /* This yields a mask that user programs can use to figure out what
10520 instruction set this CPU supports. This could be done in user space,
10521@@ -291,9 +309,7 @@ do { \
10522
10523 #define ARCH_DLINFO \
10524 do { \
10525- if (vdso_enabled) \
10526- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10527- (unsigned long)current->mm->context.vdso); \
10528+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10529 } while (0)
10530
10531 #define AT_SYSINFO 32
10532@@ -304,7 +320,7 @@ do { \
10533
10534 #endif /* !CONFIG_X86_32 */
10535
10536-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10537+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10538
10539 #define VDSO_ENTRY \
10540 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10541@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10542 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10543 #define compat_arch_setup_additional_pages syscall32_setup_pages
10544
10545-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10546-#define arch_randomize_brk arch_randomize_brk
10547-
10548 /*
10549 * True on X86_32 or when emulating IA32 on X86_64
10550 */
10551diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10552index cc70c1c..d96d011 100644
10553--- a/arch/x86/include/asm/emergency-restart.h
10554+++ b/arch/x86/include/asm/emergency-restart.h
10555@@ -15,6 +15,6 @@ enum reboot_type {
10556
10557 extern enum reboot_type reboot_type;
10558
10559-extern void machine_emergency_restart(void);
10560+extern void machine_emergency_restart(void) __noreturn;
10561
10562 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10563diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
10564index dbe82a5..c6d8a00 100644
10565--- a/arch/x86/include/asm/floppy.h
10566+++ b/arch/x86/include/asm/floppy.h
10567@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
10568 }
10569
10570
10571+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
10572 static unsigned long vdma_mem_alloc(unsigned long size)
10573 {
10574 return (unsigned long)vmalloc(size);
10575diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10576index d09bb03..4ea4194 100644
10577--- a/arch/x86/include/asm/futex.h
10578+++ b/arch/x86/include/asm/futex.h
10579@@ -12,16 +12,18 @@
10580 #include <asm/system.h>
10581
10582 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10583+ typecheck(u32 __user *, uaddr); \
10584 asm volatile("1:\t" insn "\n" \
10585 "2:\t.section .fixup,\"ax\"\n" \
10586 "3:\tmov\t%3, %1\n" \
10587 "\tjmp\t2b\n" \
10588 "\t.previous\n" \
10589 _ASM_EXTABLE(1b, 3b) \
10590- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10591+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10592 : "i" (-EFAULT), "0" (oparg), "1" (0))
10593
10594 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10595+ typecheck(u32 __user *, uaddr); \
10596 asm volatile("1:\tmovl %2, %0\n" \
10597 "\tmovl\t%0, %3\n" \
10598 "\t" insn "\n" \
10599@@ -34,7 +36,7 @@
10600 _ASM_EXTABLE(1b, 4b) \
10601 _ASM_EXTABLE(2b, 4b) \
10602 : "=&a" (oldval), "=&r" (ret), \
10603- "+m" (*uaddr), "=&r" (tem) \
10604+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10605 : "r" (oparg), "i" (-EFAULT), "1" (0))
10606
10607 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10608@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10609
10610 switch (op) {
10611 case FUTEX_OP_SET:
10612- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10613+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10614 break;
10615 case FUTEX_OP_ADD:
10616- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10617+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10618 uaddr, oparg);
10619 break;
10620 case FUTEX_OP_OR:
10621@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10622 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10623 return -EFAULT;
10624
10625- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10626+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10627 "2:\t.section .fixup, \"ax\"\n"
10628 "3:\tmov %3, %0\n"
10629 "\tjmp 2b\n"
10630 "\t.previous\n"
10631 _ASM_EXTABLE(1b, 3b)
10632- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10633+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10634 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10635 : "memory"
10636 );
10637diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10638index eb92a6e..b98b2f4 100644
10639--- a/arch/x86/include/asm/hw_irq.h
10640+++ b/arch/x86/include/asm/hw_irq.h
10641@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10642 extern void enable_IO_APIC(void);
10643
10644 /* Statistics */
10645-extern atomic_t irq_err_count;
10646-extern atomic_t irq_mis_count;
10647+extern atomic_unchecked_t irq_err_count;
10648+extern atomic_unchecked_t irq_mis_count;
10649
10650 /* EISA */
10651 extern void eisa_set_level_irq(unsigned int irq);
10652diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10653index 2479049..3fb9795 100644
10654--- a/arch/x86/include/asm/i387.h
10655+++ b/arch/x86/include/asm/i387.h
10656@@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10657 {
10658 int err;
10659
10660+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10661+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10662+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10663+#endif
10664+
10665 /* See comment in fxsave() below. */
10666 #ifdef CONFIG_AS_FXSAVEQ
10667 asm volatile("1: fxrstorq %[fx]\n\t"
10668@@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10669 {
10670 int err;
10671
10672+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10673+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10674+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10675+#endif
10676+
10677 /*
10678 * Clear the bytes not touched by the fxsave and reserved
10679 * for the SW usage.
10680@@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10681 "emms\n\t" /* clear stack tags */
10682 "fildl %P[addr]", /* set F?P to defined value */
10683 X86_FEATURE_FXSAVE_LEAK,
10684- [addr] "m" (tsk->thread.fpu.has_fpu));
10685+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10686
10687 return fpu_restore_checking(&tsk->thread.fpu);
10688 }
10689@@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10690 static inline bool interrupted_user_mode(void)
10691 {
10692 struct pt_regs *regs = get_irq_regs();
10693- return regs && user_mode_vm(regs);
10694+ return regs && user_mode(regs);
10695 }
10696
10697 /*
10698diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10699index d8e8eef..99f81ae 100644
10700--- a/arch/x86/include/asm/io.h
10701+++ b/arch/x86/include/asm/io.h
10702@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10703
10704 #include <linux/vmalloc.h>
10705
10706+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10707+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10708+{
10709+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10710+}
10711+
10712+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10713+{
10714+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10715+}
10716+
10717 /*
10718 * Convert a virtual cached pointer to an uncached pointer
10719 */
10720diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10721index bba3cf8..06bc8da 100644
10722--- a/arch/x86/include/asm/irqflags.h
10723+++ b/arch/x86/include/asm/irqflags.h
10724@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10725 sti; \
10726 sysexit
10727
10728+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10729+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10730+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10731+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10732+
10733 #else
10734 #define INTERRUPT_RETURN iret
10735 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10736diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10737index 5478825..839e88c 100644
10738--- a/arch/x86/include/asm/kprobes.h
10739+++ b/arch/x86/include/asm/kprobes.h
10740@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10741 #define RELATIVEJUMP_SIZE 5
10742 #define RELATIVECALL_OPCODE 0xe8
10743 #define RELATIVE_ADDR_SIZE 4
10744-#define MAX_STACK_SIZE 64
10745-#define MIN_STACK_SIZE(ADDR) \
10746- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10747- THREAD_SIZE - (unsigned long)(ADDR))) \
10748- ? (MAX_STACK_SIZE) \
10749- : (((unsigned long)current_thread_info()) + \
10750- THREAD_SIZE - (unsigned long)(ADDR)))
10751+#define MAX_STACK_SIZE 64UL
10752+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10753
10754 #define flush_insn_slot(p) do { } while (0)
10755
10756diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10757index 52d6640..136b3bd 100644
10758--- a/arch/x86/include/asm/kvm_host.h
10759+++ b/arch/x86/include/asm/kvm_host.h
10760@@ -663,7 +663,7 @@ struct kvm_x86_ops {
10761 int (*check_intercept)(struct kvm_vcpu *vcpu,
10762 struct x86_instruction_info *info,
10763 enum x86_intercept_stage stage);
10764-};
10765+} __do_const;
10766
10767 struct kvm_arch_async_pf {
10768 u32 token;
10769@@ -694,7 +694,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
10770 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
10771
10772 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10773- const void *val, int bytes);
10774+ const void *val, int bytes) __size_overflow(2);
10775 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
10776
10777 extern bool tdp_enabled;
10778@@ -781,7 +781,7 @@ int fx_init(struct kvm_vcpu *vcpu);
10779
10780 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
10781 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
10782- const u8 *new, int bytes);
10783+ const u8 *new, int bytes) __size_overflow(2);
10784 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
10785 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
10786 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
10787diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10788index 9cdae5d..300d20f 100644
10789--- a/arch/x86/include/asm/local.h
10790+++ b/arch/x86/include/asm/local.h
10791@@ -18,26 +18,58 @@ typedef struct {
10792
10793 static inline void local_inc(local_t *l)
10794 {
10795- asm volatile(_ASM_INC "%0"
10796+ asm volatile(_ASM_INC "%0\n"
10797+
10798+#ifdef CONFIG_PAX_REFCOUNT
10799+ "jno 0f\n"
10800+ _ASM_DEC "%0\n"
10801+ "int $4\n0:\n"
10802+ _ASM_EXTABLE(0b, 0b)
10803+#endif
10804+
10805 : "+m" (l->a.counter));
10806 }
10807
10808 static inline void local_dec(local_t *l)
10809 {
10810- asm volatile(_ASM_DEC "%0"
10811+ asm volatile(_ASM_DEC "%0\n"
10812+
10813+#ifdef CONFIG_PAX_REFCOUNT
10814+ "jno 0f\n"
10815+ _ASM_INC "%0\n"
10816+ "int $4\n0:\n"
10817+ _ASM_EXTABLE(0b, 0b)
10818+#endif
10819+
10820 : "+m" (l->a.counter));
10821 }
10822
10823 static inline void local_add(long i, local_t *l)
10824 {
10825- asm volatile(_ASM_ADD "%1,%0"
10826+ asm volatile(_ASM_ADD "%1,%0\n"
10827+
10828+#ifdef CONFIG_PAX_REFCOUNT
10829+ "jno 0f\n"
10830+ _ASM_SUB "%1,%0\n"
10831+ "int $4\n0:\n"
10832+ _ASM_EXTABLE(0b, 0b)
10833+#endif
10834+
10835 : "+m" (l->a.counter)
10836 : "ir" (i));
10837 }
10838
10839 static inline void local_sub(long i, local_t *l)
10840 {
10841- asm volatile(_ASM_SUB "%1,%0"
10842+ asm volatile(_ASM_SUB "%1,%0\n"
10843+
10844+#ifdef CONFIG_PAX_REFCOUNT
10845+ "jno 0f\n"
10846+ _ASM_ADD "%1,%0\n"
10847+ "int $4\n0:\n"
10848+ _ASM_EXTABLE(0b, 0b)
10849+#endif
10850+
10851 : "+m" (l->a.counter)
10852 : "ir" (i));
10853 }
10854@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10855 {
10856 unsigned char c;
10857
10858- asm volatile(_ASM_SUB "%2,%0; sete %1"
10859+ asm volatile(_ASM_SUB "%2,%0\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
10862+ "jno 0f\n"
10863+ _ASM_ADD "%2,%0\n"
10864+ "int $4\n0:\n"
10865+ _ASM_EXTABLE(0b, 0b)
10866+#endif
10867+
10868+ "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : "ir" (i) : "memory");
10871 return c;
10872@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10873 {
10874 unsigned char c;
10875
10876- asm volatile(_ASM_DEC "%0; sete %1"
10877+ asm volatile(_ASM_DEC "%0\n"
10878+
10879+#ifdef CONFIG_PAX_REFCOUNT
10880+ "jno 0f\n"
10881+ _ASM_INC "%0\n"
10882+ "int $4\n0:\n"
10883+ _ASM_EXTABLE(0b, 0b)
10884+#endif
10885+
10886+ "sete %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : : "memory");
10889 return c != 0;
10890@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10891 {
10892 unsigned char c;
10893
10894- asm volatile(_ASM_INC "%0; sete %1"
10895+ asm volatile(_ASM_INC "%0\n"
10896+
10897+#ifdef CONFIG_PAX_REFCOUNT
10898+ "jno 0f\n"
10899+ _ASM_DEC "%0\n"
10900+ "int $4\n0:\n"
10901+ _ASM_EXTABLE(0b, 0b)
10902+#endif
10903+
10904+ "sete %1\n"
10905 : "+m" (l->a.counter), "=qm" (c)
10906 : : "memory");
10907 return c != 0;
10908@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10909 {
10910 unsigned char c;
10911
10912- asm volatile(_ASM_ADD "%2,%0; sets %1"
10913+ asm volatile(_ASM_ADD "%2,%0\n"
10914+
10915+#ifdef CONFIG_PAX_REFCOUNT
10916+ "jno 0f\n"
10917+ _ASM_SUB "%2,%0\n"
10918+ "int $4\n0:\n"
10919+ _ASM_EXTABLE(0b, 0b)
10920+#endif
10921+
10922+ "sets %1\n"
10923 : "+m" (l->a.counter), "=qm" (c)
10924 : "ir" (i) : "memory");
10925 return c;
10926@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10927 #endif
10928 /* Modern 486+ processor */
10929 __i = i;
10930- asm volatile(_ASM_XADD "%0, %1;"
10931+ asm volatile(_ASM_XADD "%0, %1\n"
10932+
10933+#ifdef CONFIG_PAX_REFCOUNT
10934+ "jno 0f\n"
10935+ _ASM_MOV "%0,%1\n"
10936+ "int $4\n0:\n"
10937+ _ASM_EXTABLE(0b, 0b)
10938+#endif
10939+
10940 : "+r" (i), "+m" (l->a.counter)
10941 : : "memory");
10942 return i + __i;
10943diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10944index 593e51d..fa69c9a 100644
10945--- a/arch/x86/include/asm/mman.h
10946+++ b/arch/x86/include/asm/mman.h
10947@@ -5,4 +5,14 @@
10948
10949 #include <asm-generic/mman.h>
10950
10951+#ifdef __KERNEL__
10952+#ifndef __ASSEMBLY__
10953+#ifdef CONFIG_X86_32
10954+#define arch_mmap_check i386_mmap_check
10955+int i386_mmap_check(unsigned long addr, unsigned long len,
10956+ unsigned long flags);
10957+#endif
10958+#endif
10959+#endif
10960+
10961 #endif /* _ASM_X86_MMAN_H */
10962diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10963index 5f55e69..e20bfb1 100644
10964--- a/arch/x86/include/asm/mmu.h
10965+++ b/arch/x86/include/asm/mmu.h
10966@@ -9,7 +9,7 @@
10967 * we put the segment information here.
10968 */
10969 typedef struct {
10970- void *ldt;
10971+ struct desc_struct *ldt;
10972 int size;
10973
10974 #ifdef CONFIG_X86_64
10975@@ -18,7 +18,19 @@ typedef struct {
10976 #endif
10977
10978 struct mutex lock;
10979- void *vdso;
10980+ unsigned long vdso;
10981+
10982+#ifdef CONFIG_X86_32
10983+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10984+ unsigned long user_cs_base;
10985+ unsigned long user_cs_limit;
10986+
10987+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10988+ cpumask_t cpu_user_cs_mask;
10989+#endif
10990+
10991+#endif
10992+#endif
10993 } mm_context_t;
10994
10995 #ifdef CONFIG_SMP
10996diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10997index 6902152..399f3a2 100644
10998--- a/arch/x86/include/asm/mmu_context.h
10999+++ b/arch/x86/include/asm/mmu_context.h
11000@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11001
11002 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11003 {
11004+
11005+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11006+ unsigned int i;
11007+ pgd_t *pgd;
11008+
11009+ pax_open_kernel();
11010+ pgd = get_cpu_pgd(smp_processor_id());
11011+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11012+ set_pgd_batched(pgd+i, native_make_pgd(0));
11013+ pax_close_kernel();
11014+#endif
11015+
11016 #ifdef CONFIG_SMP
11017 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11018 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11019@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11020 struct task_struct *tsk)
11021 {
11022 unsigned cpu = smp_processor_id();
11023+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11024+ int tlbstate = TLBSTATE_OK;
11025+#endif
11026
11027 if (likely(prev != next)) {
11028 #ifdef CONFIG_SMP
11029+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11030+ tlbstate = percpu_read(cpu_tlbstate.state);
11031+#endif
11032 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11033 percpu_write(cpu_tlbstate.active_mm, next);
11034 #endif
11035 cpumask_set_cpu(cpu, mm_cpumask(next));
11036
11037 /* Re-load page tables */
11038+#ifdef CONFIG_PAX_PER_CPU_PGD
11039+ pax_open_kernel();
11040+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11041+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11042+ pax_close_kernel();
11043+ load_cr3(get_cpu_pgd(cpu));
11044+#else
11045 load_cr3(next->pgd);
11046+#endif
11047
11048 /* stop flush ipis for the previous mm */
11049 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11050@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11051 */
11052 if (unlikely(prev->context.ldt != next->context.ldt))
11053 load_LDT_nolock(&next->context);
11054- }
11055+
11056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11057+ if (!(__supported_pte_mask & _PAGE_NX)) {
11058+ smp_mb__before_clear_bit();
11059+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11060+ smp_mb__after_clear_bit();
11061+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11062+ }
11063+#endif
11064+
11065+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11066+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11067+ prev->context.user_cs_limit != next->context.user_cs_limit))
11068+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11069 #ifdef CONFIG_SMP
11070+ else if (unlikely(tlbstate != TLBSTATE_OK))
11071+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11072+#endif
11073+#endif
11074+
11075+ }
11076 else {
11077+
11078+#ifdef CONFIG_PAX_PER_CPU_PGD
11079+ pax_open_kernel();
11080+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11081+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11082+ pax_close_kernel();
11083+ load_cr3(get_cpu_pgd(cpu));
11084+#endif
11085+
11086+#ifdef CONFIG_SMP
11087 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11088 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11089
11090@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11091 * tlb flush IPI delivery. We must reload CR3
11092 * to make sure to use no freed page tables.
11093 */
11094+
11095+#ifndef CONFIG_PAX_PER_CPU_PGD
11096 load_cr3(next->pgd);
11097+#endif
11098+
11099 load_LDT_nolock(&next->context);
11100+
11101+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11102+ if (!(__supported_pte_mask & _PAGE_NX))
11103+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11104+#endif
11105+
11106+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11107+#ifdef CONFIG_PAX_PAGEEXEC
11108+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11109+#endif
11110+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11111+#endif
11112+
11113 }
11114+#endif
11115 }
11116-#endif
11117 }
11118
11119 #define activate_mm(prev, next) \
11120diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11121index 9eae775..c914fea 100644
11122--- a/arch/x86/include/asm/module.h
11123+++ b/arch/x86/include/asm/module.h
11124@@ -5,6 +5,7 @@
11125
11126 #ifdef CONFIG_X86_64
11127 /* X86_64 does not define MODULE_PROC_FAMILY */
11128+#define MODULE_PROC_FAMILY ""
11129 #elif defined CONFIG_M386
11130 #define MODULE_PROC_FAMILY "386 "
11131 #elif defined CONFIG_M486
11132@@ -59,8 +60,20 @@
11133 #error unknown processor family
11134 #endif
11135
11136-#ifdef CONFIG_X86_32
11137-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11138+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11139+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11140+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11141+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11142+#else
11143+#define MODULE_PAX_KERNEXEC ""
11144 #endif
11145
11146+#ifdef CONFIG_PAX_MEMORY_UDEREF
11147+#define MODULE_PAX_UDEREF "UDEREF "
11148+#else
11149+#define MODULE_PAX_UDEREF ""
11150+#endif
11151+
11152+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11153+
11154 #endif /* _ASM_X86_MODULE_H */
11155diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11156index 7639dbf..e08a58c 100644
11157--- a/arch/x86/include/asm/page_64_types.h
11158+++ b/arch/x86/include/asm/page_64_types.h
11159@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11160
11161 /* duplicated to the one in bootmem.h */
11162 extern unsigned long max_pfn;
11163-extern unsigned long phys_base;
11164+extern const unsigned long phys_base;
11165
11166 extern unsigned long __phys_addr(unsigned long);
11167 #define __phys_reloc_hide(x) (x)
11168diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11169index a7d2db9..edb023e 100644
11170--- a/arch/x86/include/asm/paravirt.h
11171+++ b/arch/x86/include/asm/paravirt.h
11172@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11173 val);
11174 }
11175
11176+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11177+{
11178+ pgdval_t val = native_pgd_val(pgd);
11179+
11180+ if (sizeof(pgdval_t) > sizeof(long))
11181+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11182+ val, (u64)val >> 32);
11183+ else
11184+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11185+ val);
11186+}
11187+
11188 static inline void pgd_clear(pgd_t *pgdp)
11189 {
11190 set_pgd(pgdp, __pgd(0));
11191@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11192 pv_mmu_ops.set_fixmap(idx, phys, flags);
11193 }
11194
11195+#ifdef CONFIG_PAX_KERNEXEC
11196+static inline unsigned long pax_open_kernel(void)
11197+{
11198+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11199+}
11200+
11201+static inline unsigned long pax_close_kernel(void)
11202+{
11203+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11204+}
11205+#else
11206+static inline unsigned long pax_open_kernel(void) { return 0; }
11207+static inline unsigned long pax_close_kernel(void) { return 0; }
11208+#endif
11209+
11210 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11211
11212 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11213@@ -964,7 +991,7 @@ extern void default_banner(void);
11214
11215 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11216 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11217-#define PARA_INDIRECT(addr) *%cs:addr
11218+#define PARA_INDIRECT(addr) *%ss:addr
11219 #endif
11220
11221 #define INTERRUPT_RETURN \
11222@@ -1041,6 +1068,21 @@ extern void default_banner(void);
11223 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11224 CLBR_NONE, \
11225 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11226+
11227+#define GET_CR0_INTO_RDI \
11228+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11229+ mov %rax,%rdi
11230+
11231+#define SET_RDI_INTO_CR0 \
11232+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11233+
11234+#define GET_CR3_INTO_RDI \
11235+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11236+ mov %rax,%rdi
11237+
11238+#define SET_RDI_INTO_CR3 \
11239+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11240+
11241 #endif /* CONFIG_X86_32 */
11242
11243 #endif /* __ASSEMBLY__ */
11244diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11245index 8e8b9a4..f07d725 100644
11246--- a/arch/x86/include/asm/paravirt_types.h
11247+++ b/arch/x86/include/asm/paravirt_types.h
11248@@ -84,20 +84,20 @@ struct pv_init_ops {
11249 */
11250 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11251 unsigned long addr, unsigned len);
11252-};
11253+} __no_const;
11254
11255
11256 struct pv_lazy_ops {
11257 /* Set deferred update mode, used for batching operations. */
11258 void (*enter)(void);
11259 void (*leave)(void);
11260-};
11261+} __no_const;
11262
11263 struct pv_time_ops {
11264 unsigned long long (*sched_clock)(void);
11265 unsigned long long (*steal_clock)(int cpu);
11266 unsigned long (*get_tsc_khz)(void);
11267-};
11268+} __no_const;
11269
11270 struct pv_cpu_ops {
11271 /* hooks for various privileged instructions */
11272@@ -193,7 +193,7 @@ struct pv_cpu_ops {
11273
11274 void (*start_context_switch)(struct task_struct *prev);
11275 void (*end_context_switch)(struct task_struct *next);
11276-};
11277+} __no_const;
11278
11279 struct pv_irq_ops {
11280 /*
11281@@ -224,7 +224,7 @@ struct pv_apic_ops {
11282 unsigned long start_eip,
11283 unsigned long start_esp);
11284 #endif
11285-};
11286+} __no_const;
11287
11288 struct pv_mmu_ops {
11289 unsigned long (*read_cr2)(void);
11290@@ -313,6 +313,7 @@ struct pv_mmu_ops {
11291 struct paravirt_callee_save make_pud;
11292
11293 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11294+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11295 #endif /* PAGETABLE_LEVELS == 4 */
11296 #endif /* PAGETABLE_LEVELS >= 3 */
11297
11298@@ -324,6 +325,12 @@ struct pv_mmu_ops {
11299 an mfn. We can tell which is which from the index. */
11300 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11301 phys_addr_t phys, pgprot_t flags);
11302+
11303+#ifdef CONFIG_PAX_KERNEXEC
11304+ unsigned long (*pax_open_kernel)(void);
11305+ unsigned long (*pax_close_kernel)(void);
11306+#endif
11307+
11308 };
11309
11310 struct arch_spinlock;
11311@@ -334,7 +341,7 @@ struct pv_lock_ops {
11312 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11313 int (*spin_trylock)(struct arch_spinlock *lock);
11314 void (*spin_unlock)(struct arch_spinlock *lock);
11315-};
11316+} __no_const;
11317
11318 /* This contains all the paravirt structures: we get a convenient
11319 * number for each function using the offset which we use to indicate
11320diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11321index b4389a4..7024269 100644
11322--- a/arch/x86/include/asm/pgalloc.h
11323+++ b/arch/x86/include/asm/pgalloc.h
11324@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11325 pmd_t *pmd, pte_t *pte)
11326 {
11327 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11328+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11329+}
11330+
11331+static inline void pmd_populate_user(struct mm_struct *mm,
11332+ pmd_t *pmd, pte_t *pte)
11333+{
11334+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11335 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11336 }
11337
11338@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11339
11340 #ifdef CONFIG_X86_PAE
11341 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11342+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11343+{
11344+ pud_populate(mm, pudp, pmd);
11345+}
11346 #else /* !CONFIG_X86_PAE */
11347 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11348 {
11349 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11350 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11351 }
11352+
11353+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11354+{
11355+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11356+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11357+}
11358 #endif /* CONFIG_X86_PAE */
11359
11360 #if PAGETABLE_LEVELS > 3
11361@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11362 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11363 }
11364
11365+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11366+{
11367+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11368+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11369+}
11370+
11371 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11372 {
11373 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11374diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11375index 98391db..8f6984e 100644
11376--- a/arch/x86/include/asm/pgtable-2level.h
11377+++ b/arch/x86/include/asm/pgtable-2level.h
11378@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11379
11380 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11381 {
11382+ pax_open_kernel();
11383 *pmdp = pmd;
11384+ pax_close_kernel();
11385 }
11386
11387 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11388diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11389index effff47..f9e4035 100644
11390--- a/arch/x86/include/asm/pgtable-3level.h
11391+++ b/arch/x86/include/asm/pgtable-3level.h
11392@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11393
11394 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11395 {
11396+ pax_open_kernel();
11397 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11398+ pax_close_kernel();
11399 }
11400
11401 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11402 {
11403+ pax_open_kernel();
11404 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11405+ pax_close_kernel();
11406 }
11407
11408 /*
11409diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11410index 49afb3f..ed14d07 100644
11411--- a/arch/x86/include/asm/pgtable.h
11412+++ b/arch/x86/include/asm/pgtable.h
11413@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11414
11415 #ifndef __PAGETABLE_PUD_FOLDED
11416 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11417+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11418 #define pgd_clear(pgd) native_pgd_clear(pgd)
11419 #endif
11420
11421@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11422
11423 #define arch_end_context_switch(prev) do {} while(0)
11424
11425+#define pax_open_kernel() native_pax_open_kernel()
11426+#define pax_close_kernel() native_pax_close_kernel()
11427 #endif /* CONFIG_PARAVIRT */
11428
11429+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11430+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11431+
11432+#ifdef CONFIG_PAX_KERNEXEC
11433+static inline unsigned long native_pax_open_kernel(void)
11434+{
11435+ unsigned long cr0;
11436+
11437+ preempt_disable();
11438+ barrier();
11439+ cr0 = read_cr0() ^ X86_CR0_WP;
11440+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11441+ write_cr0(cr0);
11442+ return cr0 ^ X86_CR0_WP;
11443+}
11444+
11445+static inline unsigned long native_pax_close_kernel(void)
11446+{
11447+ unsigned long cr0;
11448+
11449+ cr0 = read_cr0() ^ X86_CR0_WP;
11450+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11451+ write_cr0(cr0);
11452+ barrier();
11453+ preempt_enable_no_resched();
11454+ return cr0 ^ X86_CR0_WP;
11455+}
11456+#else
11457+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11458+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11459+#endif
11460+
11461 /*
11462 * The following only work if pte_present() is true.
11463 * Undefined behaviour if not..
11464 */
11465+static inline int pte_user(pte_t pte)
11466+{
11467+ return pte_val(pte) & _PAGE_USER;
11468+}
11469+
11470 static inline int pte_dirty(pte_t pte)
11471 {
11472 return pte_flags(pte) & _PAGE_DIRTY;
11473@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11474 return pte_clear_flags(pte, _PAGE_RW);
11475 }
11476
11477+static inline pte_t pte_mkread(pte_t pte)
11478+{
11479+ return __pte(pte_val(pte) | _PAGE_USER);
11480+}
11481+
11482 static inline pte_t pte_mkexec(pte_t pte)
11483 {
11484- return pte_clear_flags(pte, _PAGE_NX);
11485+#ifdef CONFIG_X86_PAE
11486+ if (__supported_pte_mask & _PAGE_NX)
11487+ return pte_clear_flags(pte, _PAGE_NX);
11488+ else
11489+#endif
11490+ return pte_set_flags(pte, _PAGE_USER);
11491+}
11492+
11493+static inline pte_t pte_exprotect(pte_t pte)
11494+{
11495+#ifdef CONFIG_X86_PAE
11496+ if (__supported_pte_mask & _PAGE_NX)
11497+ return pte_set_flags(pte, _PAGE_NX);
11498+ else
11499+#endif
11500+ return pte_clear_flags(pte, _PAGE_USER);
11501 }
11502
11503 static inline pte_t pte_mkdirty(pte_t pte)
11504@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11505 #endif
11506
11507 #ifndef __ASSEMBLY__
11508+
11509+#ifdef CONFIG_PAX_PER_CPU_PGD
11510+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11511+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11512+{
11513+ return cpu_pgd[cpu];
11514+}
11515+#endif
11516+
11517 #include <linux/mm_types.h>
11518
11519 static inline int pte_none(pte_t pte)
11520@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11521
11522 static inline int pgd_bad(pgd_t pgd)
11523 {
11524- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11525+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11526 }
11527
11528 static inline int pgd_none(pgd_t pgd)
11529@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11530 * pgd_offset() returns a (pgd_t *)
11531 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11532 */
11533-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11534+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11535+
11536+#ifdef CONFIG_PAX_PER_CPU_PGD
11537+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11538+#endif
11539+
11540 /*
11541 * a shortcut which implies the use of the kernel's pgd, instead
11542 * of a process's
11543@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11544 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11545 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11546
11547+#ifdef CONFIG_X86_32
11548+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11549+#else
11550+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11551+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11552+
11553+#ifdef CONFIG_PAX_MEMORY_UDEREF
11554+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11555+#else
11556+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11557+#endif
11558+
11559+#endif
11560+
11561 #ifndef __ASSEMBLY__
11562
11563 extern int direct_gbpages;
11564@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11565 * dst and src can be on the same page, but the range must not overlap,
11566 * and must not cross a page boundary.
11567 */
11568-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11569+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11570 {
11571- memcpy(dst, src, count * sizeof(pgd_t));
11572+ pax_open_kernel();
11573+ while (count--)
11574+ *dst++ = *src++;
11575+ pax_close_kernel();
11576 }
11577
11578+#ifdef CONFIG_PAX_PER_CPU_PGD
11579+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11580+#endif
11581+
11582+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11583+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11584+#else
11585+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11586+#endif
11587
11588 #include <asm-generic/pgtable.h>
11589 #endif /* __ASSEMBLY__ */
11590diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11591index 0c92113..34a77c6 100644
11592--- a/arch/x86/include/asm/pgtable_32.h
11593+++ b/arch/x86/include/asm/pgtable_32.h
11594@@ -25,9 +25,6 @@
11595 struct mm_struct;
11596 struct vm_area_struct;
11597
11598-extern pgd_t swapper_pg_dir[1024];
11599-extern pgd_t initial_page_table[1024];
11600-
11601 static inline void pgtable_cache_init(void) { }
11602 static inline void check_pgt_cache(void) { }
11603 void paging_init(void);
11604@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11605 # include <asm/pgtable-2level.h>
11606 #endif
11607
11608+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11609+extern pgd_t initial_page_table[PTRS_PER_PGD];
11610+#ifdef CONFIG_X86_PAE
11611+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11612+#endif
11613+
11614 #if defined(CONFIG_HIGHPTE)
11615 #define pte_offset_map(dir, address) \
11616 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11617@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11618 /* Clear a kernel PTE and flush it from the TLB */
11619 #define kpte_clear_flush(ptep, vaddr) \
11620 do { \
11621+ pax_open_kernel(); \
11622 pte_clear(&init_mm, (vaddr), (ptep)); \
11623+ pax_close_kernel(); \
11624 __flush_tlb_one((vaddr)); \
11625 } while (0)
11626
11627@@ -74,6 +79,9 @@ do { \
11628
11629 #endif /* !__ASSEMBLY__ */
11630
11631+#define HAVE_ARCH_UNMAPPED_AREA
11632+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11633+
11634 /*
11635 * kern_addr_valid() is (1) for FLATMEM and (0) for
11636 * SPARSEMEM and DISCONTIGMEM
11637diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11638index ed5903b..c7fe163 100644
11639--- a/arch/x86/include/asm/pgtable_32_types.h
11640+++ b/arch/x86/include/asm/pgtable_32_types.h
11641@@ -8,7 +8,7 @@
11642 */
11643 #ifdef CONFIG_X86_PAE
11644 # include <asm/pgtable-3level_types.h>
11645-# define PMD_SIZE (1UL << PMD_SHIFT)
11646+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11647 # define PMD_MASK (~(PMD_SIZE - 1))
11648 #else
11649 # include <asm/pgtable-2level_types.h>
11650@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11651 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11652 #endif
11653
11654+#ifdef CONFIG_PAX_KERNEXEC
11655+#ifndef __ASSEMBLY__
11656+extern unsigned char MODULES_EXEC_VADDR[];
11657+extern unsigned char MODULES_EXEC_END[];
11658+#endif
11659+#include <asm/boot.h>
11660+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11661+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11662+#else
11663+#define ktla_ktva(addr) (addr)
11664+#define ktva_ktla(addr) (addr)
11665+#endif
11666+
11667 #define MODULES_VADDR VMALLOC_START
11668 #define MODULES_END VMALLOC_END
11669 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11670diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11671index 975f709..9f779c9 100644
11672--- a/arch/x86/include/asm/pgtable_64.h
11673+++ b/arch/x86/include/asm/pgtable_64.h
11674@@ -16,10 +16,14 @@
11675
11676 extern pud_t level3_kernel_pgt[512];
11677 extern pud_t level3_ident_pgt[512];
11678+extern pud_t level3_vmalloc_start_pgt[512];
11679+extern pud_t level3_vmalloc_end_pgt[512];
11680+extern pud_t level3_vmemmap_pgt[512];
11681+extern pud_t level2_vmemmap_pgt[512];
11682 extern pmd_t level2_kernel_pgt[512];
11683 extern pmd_t level2_fixmap_pgt[512];
11684-extern pmd_t level2_ident_pgt[512];
11685-extern pgd_t init_level4_pgt[];
11686+extern pmd_t level2_ident_pgt[512*2];
11687+extern pgd_t init_level4_pgt[512];
11688
11689 #define swapper_pg_dir init_level4_pgt
11690
11691@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11692
11693 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11694 {
11695+ pax_open_kernel();
11696 *pmdp = pmd;
11697+ pax_close_kernel();
11698 }
11699
11700 static inline void native_pmd_clear(pmd_t *pmd)
11701@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11702
11703 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11704 {
11705+ pax_open_kernel();
11706 *pudp = pud;
11707+ pax_close_kernel();
11708 }
11709
11710 static inline void native_pud_clear(pud_t *pud)
11711@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11712
11713 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11714 {
11715+ pax_open_kernel();
11716+ *pgdp = pgd;
11717+ pax_close_kernel();
11718+}
11719+
11720+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11721+{
11722 *pgdp = pgd;
11723 }
11724
11725diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11726index 766ea16..5b96cb3 100644
11727--- a/arch/x86/include/asm/pgtable_64_types.h
11728+++ b/arch/x86/include/asm/pgtable_64_types.h
11729@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11730 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11731 #define MODULES_END _AC(0xffffffffff000000, UL)
11732 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11733+#define MODULES_EXEC_VADDR MODULES_VADDR
11734+#define MODULES_EXEC_END MODULES_END
11735+
11736+#define ktla_ktva(addr) (addr)
11737+#define ktva_ktla(addr) (addr)
11738
11739 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11740diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11741index 013286a..8b42f4f 100644
11742--- a/arch/x86/include/asm/pgtable_types.h
11743+++ b/arch/x86/include/asm/pgtable_types.h
11744@@ -16,13 +16,12 @@
11745 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11746 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11747 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11748-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11749+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11750 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11751 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11752 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11753-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11754-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11755-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11756+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11757+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11758 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11759
11760 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11761@@ -40,7 +39,6 @@
11762 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11763 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11764 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11765-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11766 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11767 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11768 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11769@@ -57,8 +55,10 @@
11770
11771 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11772 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11773-#else
11774+#elif defined(CONFIG_KMEMCHECK)
11775 #define _PAGE_NX (_AT(pteval_t, 0))
11776+#else
11777+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11778 #endif
11779
11780 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11781@@ -96,6 +96,9 @@
11782 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11783 _PAGE_ACCESSED)
11784
11785+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11786+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11787+
11788 #define __PAGE_KERNEL_EXEC \
11789 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11790 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11791@@ -106,7 +109,7 @@
11792 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11793 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11794 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11795-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11796+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11797 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11798 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11799 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11800@@ -168,8 +171,8 @@
11801 * bits are combined, this will alow user to access the high address mapped
11802 * VDSO in the presence of CONFIG_COMPAT_VDSO
11803 */
11804-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11805-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11806+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11807+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11808 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11809 #endif
11810
11811@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11812 {
11813 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11814 }
11815+#endif
11816
11817+#if PAGETABLE_LEVELS == 3
11818+#include <asm-generic/pgtable-nopud.h>
11819+#endif
11820+
11821+#if PAGETABLE_LEVELS == 2
11822+#include <asm-generic/pgtable-nopmd.h>
11823+#endif
11824+
11825+#ifndef __ASSEMBLY__
11826 #if PAGETABLE_LEVELS > 3
11827 typedef struct { pudval_t pud; } pud_t;
11828
11829@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11830 return pud.pud;
11831 }
11832 #else
11833-#include <asm-generic/pgtable-nopud.h>
11834-
11835 static inline pudval_t native_pud_val(pud_t pud)
11836 {
11837 return native_pgd_val(pud.pgd);
11838@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11839 return pmd.pmd;
11840 }
11841 #else
11842-#include <asm-generic/pgtable-nopmd.h>
11843-
11844 static inline pmdval_t native_pmd_val(pmd_t pmd)
11845 {
11846 return native_pgd_val(pmd.pud.pgd);
11847@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11848
11849 extern pteval_t __supported_pte_mask;
11850 extern void set_nx(void);
11851-extern int nx_enabled;
11852
11853 #define pgprot_writecombine pgprot_writecombine
11854 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11855diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11856index 58545c9..fe6fc38e 100644
11857--- a/arch/x86/include/asm/processor.h
11858+++ b/arch/x86/include/asm/processor.h
11859@@ -266,7 +266,7 @@ struct tss_struct {
11860
11861 } ____cacheline_aligned;
11862
11863-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11864+extern struct tss_struct init_tss[NR_CPUS];
11865
11866 /*
11867 * Save the original ist values for checking stack pointers during debugging
11868@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11869 */
11870 #define TASK_SIZE PAGE_OFFSET
11871 #define TASK_SIZE_MAX TASK_SIZE
11872+
11873+#ifdef CONFIG_PAX_SEGMEXEC
11874+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11875+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11876+#else
11877 #define STACK_TOP TASK_SIZE
11878-#define STACK_TOP_MAX STACK_TOP
11879+#endif
11880+
11881+#define STACK_TOP_MAX TASK_SIZE
11882
11883 #define INIT_THREAD { \
11884- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11885+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11886 .vm86_info = NULL, \
11887 .sysenter_cs = __KERNEL_CS, \
11888 .io_bitmap_ptr = NULL, \
11889@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11890 */
11891 #define INIT_TSS { \
11892 .x86_tss = { \
11893- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11894+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11895 .ss0 = __KERNEL_DS, \
11896 .ss1 = __KERNEL_CS, \
11897 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11898@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11899 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11900
11901 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11902-#define KSTK_TOP(info) \
11903-({ \
11904- unsigned long *__ptr = (unsigned long *)(info); \
11905- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11906-})
11907+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11908
11909 /*
11910 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11911@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11912 #define task_pt_regs(task) \
11913 ({ \
11914 struct pt_regs *__regs__; \
11915- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11916+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11917 __regs__ - 1; \
11918 })
11919
11920@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11921 /*
11922 * User space process size. 47bits minus one guard page.
11923 */
11924-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11925+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11926
11927 /* This decides where the kernel will search for a free chunk of vm
11928 * space during mmap's.
11929 */
11930 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11931- 0xc0000000 : 0xFFFFe000)
11932+ 0xc0000000 : 0xFFFFf000)
11933
11934 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11935 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11936@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11937 #define STACK_TOP_MAX TASK_SIZE_MAX
11938
11939 #define INIT_THREAD { \
11940- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11941+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11942 }
11943
11944 #define INIT_TSS { \
11945- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11946+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11947 }
11948
11949 /*
11950@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11951 */
11952 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11953
11954+#ifdef CONFIG_PAX_SEGMEXEC
11955+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11956+#endif
11957+
11958 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11959
11960 /* Get/set a process' ability to use the timestamp counter instruction */
11961diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11962index 3566454..4bdfb8c 100644
11963--- a/arch/x86/include/asm/ptrace.h
11964+++ b/arch/x86/include/asm/ptrace.h
11965@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11966 }
11967
11968 /*
11969- * user_mode_vm(regs) determines whether a register set came from user mode.
11970+ * user_mode(regs) determines whether a register set came from user mode.
11971 * This is true if V8086 mode was enabled OR if the register set was from
11972 * protected mode with RPL-3 CS value. This tricky test checks that with
11973 * one comparison. Many places in the kernel can bypass this full check
11974- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11975+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11976+ * be used.
11977 */
11978-static inline int user_mode(struct pt_regs *regs)
11979+static inline int user_mode_novm(struct pt_regs *regs)
11980 {
11981 #ifdef CONFIG_X86_32
11982 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11983 #else
11984- return !!(regs->cs & 3);
11985+ return !!(regs->cs & SEGMENT_RPL_MASK);
11986 #endif
11987 }
11988
11989-static inline int user_mode_vm(struct pt_regs *regs)
11990+static inline int user_mode(struct pt_regs *regs)
11991 {
11992 #ifdef CONFIG_X86_32
11993 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11994 USER_RPL;
11995 #else
11996- return user_mode(regs);
11997+ return user_mode_novm(regs);
11998 #endif
11999 }
12000
12001@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
12002 #ifdef CONFIG_X86_64
12003 static inline bool user_64bit_mode(struct pt_regs *regs)
12004 {
12005+ unsigned long cs = regs->cs & 0xffff;
12006 #ifndef CONFIG_PARAVIRT
12007 /*
12008 * On non-paravirt systems, this is the only long mode CPL 3
12009 * selector. We do not allow long mode selectors in the LDT.
12010 */
12011- return regs->cs == __USER_CS;
12012+ return cs == __USER_CS;
12013 #else
12014 /* Headers are too twisted for this to go in paravirt.h. */
12015- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
12016+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
12017 #endif
12018 }
12019 #endif
12020diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12021index 92f29706..a79cbbb 100644
12022--- a/arch/x86/include/asm/reboot.h
12023+++ b/arch/x86/include/asm/reboot.h
12024@@ -6,19 +6,19 @@
12025 struct pt_regs;
12026
12027 struct machine_ops {
12028- void (*restart)(char *cmd);
12029- void (*halt)(void);
12030- void (*power_off)(void);
12031+ void (* __noreturn restart)(char *cmd);
12032+ void (* __noreturn halt)(void);
12033+ void (* __noreturn power_off)(void);
12034 void (*shutdown)(void);
12035 void (*crash_shutdown)(struct pt_regs *);
12036- void (*emergency_restart)(void);
12037-};
12038+ void (* __noreturn emergency_restart)(void);
12039+} __no_const;
12040
12041 extern struct machine_ops machine_ops;
12042
12043 void native_machine_crash_shutdown(struct pt_regs *regs);
12044 void native_machine_shutdown(void);
12045-void machine_real_restart(unsigned int type);
12046+void machine_real_restart(unsigned int type) __noreturn;
12047 /* These must match dispatch_table in reboot_32.S */
12048 #define MRR_BIOS 0
12049 #define MRR_APM 1
12050diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12051index 2dbe4a7..ce1db00 100644
12052--- a/arch/x86/include/asm/rwsem.h
12053+++ b/arch/x86/include/asm/rwsem.h
12054@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12055 {
12056 asm volatile("# beginning down_read\n\t"
12057 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12058+
12059+#ifdef CONFIG_PAX_REFCOUNT
12060+ "jno 0f\n"
12061+ LOCK_PREFIX _ASM_DEC "(%1)\n"
12062+ "int $4\n0:\n"
12063+ _ASM_EXTABLE(0b, 0b)
12064+#endif
12065+
12066 /* adds 0x00000001 */
12067 " jns 1f\n"
12068 " call call_rwsem_down_read_failed\n"
12069@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12070 "1:\n\t"
12071 " mov %1,%2\n\t"
12072 " add %3,%2\n\t"
12073+
12074+#ifdef CONFIG_PAX_REFCOUNT
12075+ "jno 0f\n"
12076+ "sub %3,%2\n"
12077+ "int $4\n0:\n"
12078+ _ASM_EXTABLE(0b, 0b)
12079+#endif
12080+
12081 " jle 2f\n\t"
12082 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12083 " jnz 1b\n\t"
12084@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12085 long tmp;
12086 asm volatile("# beginning down_write\n\t"
12087 LOCK_PREFIX " xadd %1,(%2)\n\t"
12088+
12089+#ifdef CONFIG_PAX_REFCOUNT
12090+ "jno 0f\n"
12091+ "mov %1,(%2)\n"
12092+ "int $4\n0:\n"
12093+ _ASM_EXTABLE(0b, 0b)
12094+#endif
12095+
12096 /* adds 0xffff0001, returns the old value */
12097 " test %1,%1\n\t"
12098 /* was the count 0 before? */
12099@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12100 long tmp;
12101 asm volatile("# beginning __up_read\n\t"
12102 LOCK_PREFIX " xadd %1,(%2)\n\t"
12103+
12104+#ifdef CONFIG_PAX_REFCOUNT
12105+ "jno 0f\n"
12106+ "mov %1,(%2)\n"
12107+ "int $4\n0:\n"
12108+ _ASM_EXTABLE(0b, 0b)
12109+#endif
12110+
12111 /* subtracts 1, returns the old value */
12112 " jns 1f\n\t"
12113 " call call_rwsem_wake\n" /* expects old value in %edx */
12114@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12115 long tmp;
12116 asm volatile("# beginning __up_write\n\t"
12117 LOCK_PREFIX " xadd %1,(%2)\n\t"
12118+
12119+#ifdef CONFIG_PAX_REFCOUNT
12120+ "jno 0f\n"
12121+ "mov %1,(%2)\n"
12122+ "int $4\n0:\n"
12123+ _ASM_EXTABLE(0b, 0b)
12124+#endif
12125+
12126 /* subtracts 0xffff0001, returns the old value */
12127 " jns 1f\n\t"
12128 " call call_rwsem_wake\n" /* expects old value in %edx */
12129@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12130 {
12131 asm volatile("# beginning __downgrade_write\n\t"
12132 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12133+
12134+#ifdef CONFIG_PAX_REFCOUNT
12135+ "jno 0f\n"
12136+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12137+ "int $4\n0:\n"
12138+ _ASM_EXTABLE(0b, 0b)
12139+#endif
12140+
12141 /*
12142 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12143 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12144@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12145 */
12146 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12147 {
12148- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12149+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12150+
12151+#ifdef CONFIG_PAX_REFCOUNT
12152+ "jno 0f\n"
12153+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12154+ "int $4\n0:\n"
12155+ _ASM_EXTABLE(0b, 0b)
12156+#endif
12157+
12158 : "+m" (sem->count)
12159 : "er" (delta));
12160 }
12161@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12162 */
12163 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12164 {
12165- return delta + xadd(&sem->count, delta);
12166+ return delta + xadd_check_overflow(&sem->count, delta);
12167 }
12168
12169 #endif /* __KERNEL__ */
12170diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12171index 5e64171..f58957e 100644
12172--- a/arch/x86/include/asm/segment.h
12173+++ b/arch/x86/include/asm/segment.h
12174@@ -64,10 +64,15 @@
12175 * 26 - ESPFIX small SS
12176 * 27 - per-cpu [ offset to per-cpu data area ]
12177 * 28 - stack_canary-20 [ for stack protector ]
12178- * 29 - unused
12179- * 30 - unused
12180+ * 29 - PCI BIOS CS
12181+ * 30 - PCI BIOS DS
12182 * 31 - TSS for double fault handler
12183 */
12184+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12185+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12186+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12187+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12188+
12189 #define GDT_ENTRY_TLS_MIN 6
12190 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12191
12192@@ -79,6 +84,8 @@
12193
12194 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12195
12196+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12197+
12198 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12199
12200 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12201@@ -104,6 +111,12 @@
12202 #define __KERNEL_STACK_CANARY 0
12203 #endif
12204
12205+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12206+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12207+
12208+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12209+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12210+
12211 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12212
12213 /*
12214@@ -141,7 +154,7 @@
12215 */
12216
12217 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12218-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12219+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12220
12221
12222 #else
12223@@ -165,6 +178,8 @@
12224 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12225 #define __USER32_DS __USER_DS
12226
12227+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12228+
12229 #define GDT_ENTRY_TSS 8 /* needs two entries */
12230 #define GDT_ENTRY_LDT 10 /* needs two entries */
12231 #define GDT_ENTRY_TLS_MIN 12
12232@@ -185,6 +200,7 @@
12233 #endif
12234
12235 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12236+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12237 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12238 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12239 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12240diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12241index 0434c40..1714bf0 100644
12242--- a/arch/x86/include/asm/smp.h
12243+++ b/arch/x86/include/asm/smp.h
12244@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12245 /* cpus sharing the last level cache: */
12246 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12247 DECLARE_PER_CPU(u16, cpu_llc_id);
12248-DECLARE_PER_CPU(int, cpu_number);
12249+DECLARE_PER_CPU(unsigned int, cpu_number);
12250
12251 static inline struct cpumask *cpu_sibling_mask(int cpu)
12252 {
12253@@ -77,7 +77,7 @@ struct smp_ops {
12254
12255 void (*send_call_func_ipi)(const struct cpumask *mask);
12256 void (*send_call_func_single_ipi)(int cpu);
12257-};
12258+} __no_const;
12259
12260 /* Globals due to paravirt */
12261 extern void set_cpu_sibling_map(int cpu);
12262@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12263 extern int safe_smp_processor_id(void);
12264
12265 #elif defined(CONFIG_X86_64_SMP)
12266-#define raw_smp_processor_id() (percpu_read(cpu_number))
12267-
12268-#define stack_smp_processor_id() \
12269-({ \
12270- struct thread_info *ti; \
12271- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12272- ti->cpu; \
12273-})
12274+#define raw_smp_processor_id() (percpu_read(cpu_number))
12275+#define stack_smp_processor_id() raw_smp_processor_id()
12276 #define safe_smp_processor_id() smp_processor_id()
12277
12278 #endif
12279diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12280index a82c2bf..2198f61 100644
12281--- a/arch/x86/include/asm/spinlock.h
12282+++ b/arch/x86/include/asm/spinlock.h
12283@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12284 static inline void arch_read_lock(arch_rwlock_t *rw)
12285 {
12286 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12287+
12288+#ifdef CONFIG_PAX_REFCOUNT
12289+ "jno 0f\n"
12290+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12291+ "int $4\n0:\n"
12292+ _ASM_EXTABLE(0b, 0b)
12293+#endif
12294+
12295 "jns 1f\n"
12296 "call __read_lock_failed\n\t"
12297 "1:\n"
12298@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12299 static inline void arch_write_lock(arch_rwlock_t *rw)
12300 {
12301 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12302+
12303+#ifdef CONFIG_PAX_REFCOUNT
12304+ "jno 0f\n"
12305+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12306+ "int $4\n0:\n"
12307+ _ASM_EXTABLE(0b, 0b)
12308+#endif
12309+
12310 "jz 1f\n"
12311 "call __write_lock_failed\n\t"
12312 "1:\n"
12313@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12314
12315 static inline void arch_read_unlock(arch_rwlock_t *rw)
12316 {
12317- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12318+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12319+
12320+#ifdef CONFIG_PAX_REFCOUNT
12321+ "jno 0f\n"
12322+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12323+ "int $4\n0:\n"
12324+ _ASM_EXTABLE(0b, 0b)
12325+#endif
12326+
12327 :"+m" (rw->lock) : : "memory");
12328 }
12329
12330 static inline void arch_write_unlock(arch_rwlock_t *rw)
12331 {
12332- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12333+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12334+
12335+#ifdef CONFIG_PAX_REFCOUNT
12336+ "jno 0f\n"
12337+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12338+ "int $4\n0:\n"
12339+ _ASM_EXTABLE(0b, 0b)
12340+#endif
12341+
12342 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12343 }
12344
12345diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12346index 1575177..cb23f52 100644
12347--- a/arch/x86/include/asm/stackprotector.h
12348+++ b/arch/x86/include/asm/stackprotector.h
12349@@ -48,7 +48,7 @@
12350 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12351 */
12352 #define GDT_STACK_CANARY_INIT \
12353- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12354+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12355
12356 /*
12357 * Initialize the stackprotector canary value.
12358@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12359
12360 static inline void load_stack_canary_segment(void)
12361 {
12362-#ifdef CONFIG_X86_32
12363+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12364 asm volatile ("mov %0, %%gs" : : "r" (0));
12365 #endif
12366 }
12367diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12368index 70bbe39..4ae2bd4 100644
12369--- a/arch/x86/include/asm/stacktrace.h
12370+++ b/arch/x86/include/asm/stacktrace.h
12371@@ -11,28 +11,20 @@
12372
12373 extern int kstack_depth_to_print;
12374
12375-struct thread_info;
12376+struct task_struct;
12377 struct stacktrace_ops;
12378
12379-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12380- unsigned long *stack,
12381- unsigned long bp,
12382- const struct stacktrace_ops *ops,
12383- void *data,
12384- unsigned long *end,
12385- int *graph);
12386+typedef unsigned long walk_stack_t(struct task_struct *task,
12387+ void *stack_start,
12388+ unsigned long *stack,
12389+ unsigned long bp,
12390+ const struct stacktrace_ops *ops,
12391+ void *data,
12392+ unsigned long *end,
12393+ int *graph);
12394
12395-extern unsigned long
12396-print_context_stack(struct thread_info *tinfo,
12397- unsigned long *stack, unsigned long bp,
12398- const struct stacktrace_ops *ops, void *data,
12399- unsigned long *end, int *graph);
12400-
12401-extern unsigned long
12402-print_context_stack_bp(struct thread_info *tinfo,
12403- unsigned long *stack, unsigned long bp,
12404- const struct stacktrace_ops *ops, void *data,
12405- unsigned long *end, int *graph);
12406+extern walk_stack_t print_context_stack;
12407+extern walk_stack_t print_context_stack_bp;
12408
12409 /* Generic stack tracer with callbacks */
12410
12411@@ -40,7 +32,7 @@ struct stacktrace_ops {
12412 void (*address)(void *data, unsigned long address, int reliable);
12413 /* On negative return stop dumping */
12414 int (*stack)(void *data, char *name);
12415- walk_stack_t walk_stack;
12416+ walk_stack_t *walk_stack;
12417 };
12418
12419 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12420diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12421index cb23852..2dde194 100644
12422--- a/arch/x86/include/asm/sys_ia32.h
12423+++ b/arch/x86/include/asm/sys_ia32.h
12424@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
12425 compat_sigset_t __user *, unsigned int);
12426 asmlinkage long sys32_alarm(unsigned int);
12427
12428-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12429+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12430 asmlinkage long sys32_sysfs(int, u32, u32);
12431
12432 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12433diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
12434index f1d8b44..a4de8b7 100644
12435--- a/arch/x86/include/asm/syscalls.h
12436+++ b/arch/x86/include/asm/syscalls.h
12437@@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *,
12438 void __user *, struct pt_regs *);
12439
12440 /* kernel/ldt.c */
12441-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
12442+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
12443
12444 /* kernel/signal.c */
12445 long sys_rt_sigreturn(struct pt_regs *);
12446diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12447index 2d2f01c..f985723 100644
12448--- a/arch/x86/include/asm/system.h
12449+++ b/arch/x86/include/asm/system.h
12450@@ -129,7 +129,7 @@ do { \
12451 "call __switch_to\n\t" \
12452 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12453 __switch_canary \
12454- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12455+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12456 "movq %%rax,%%rdi\n\t" \
12457 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12458 "jnz ret_from_fork\n\t" \
12459@@ -140,7 +140,7 @@ do { \
12460 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12461 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12462 [_tif_fork] "i" (_TIF_FORK), \
12463- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12464+ [thread_info] "m" (current_tinfo), \
12465 [current_task] "m" (current_task) \
12466 __switch_canary_iparam \
12467 : "memory", "cc" __EXTRA_CLOBBER)
12468@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12469 {
12470 unsigned long __limit;
12471 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12472- return __limit + 1;
12473+ return __limit;
12474 }
12475
12476 static inline void native_clts(void)
12477@@ -397,13 +397,13 @@ void enable_hlt(void);
12478
12479 void cpu_idle_wait(void);
12480
12481-extern unsigned long arch_align_stack(unsigned long sp);
12482+#define arch_align_stack(x) ((x) & ~0xfUL)
12483 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12484
12485 void default_idle(void);
12486 bool set_pm_idle_to_default(void);
12487
12488-void stop_this_cpu(void *dummy);
12489+void stop_this_cpu(void *dummy) __noreturn;
12490
12491 /*
12492 * Force strict CPU ordering.
12493diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12494index cfd8144..664ac89 100644
12495--- a/arch/x86/include/asm/thread_info.h
12496+++ b/arch/x86/include/asm/thread_info.h
12497@@ -10,6 +10,7 @@
12498 #include <linux/compiler.h>
12499 #include <asm/page.h>
12500 #include <asm/types.h>
12501+#include <asm/percpu.h>
12502
12503 /*
12504 * low level task data that entry.S needs immediate access to
12505@@ -24,7 +25,6 @@ struct exec_domain;
12506 #include <linux/atomic.h>
12507
12508 struct thread_info {
12509- struct task_struct *task; /* main task structure */
12510 struct exec_domain *exec_domain; /* execution domain */
12511 __u32 flags; /* low level flags */
12512 __u32 status; /* thread synchronous flags */
12513@@ -34,19 +34,13 @@ struct thread_info {
12514 mm_segment_t addr_limit;
12515 struct restart_block restart_block;
12516 void __user *sysenter_return;
12517-#ifdef CONFIG_X86_32
12518- unsigned long previous_esp; /* ESP of the previous stack in
12519- case of nested (IRQ) stacks
12520- */
12521- __u8 supervisor_stack[0];
12522-#endif
12523+ unsigned long lowest_stack;
12524 unsigned int sig_on_uaccess_error:1;
12525 unsigned int uaccess_err:1; /* uaccess failed */
12526 };
12527
12528-#define INIT_THREAD_INFO(tsk) \
12529+#define INIT_THREAD_INFO \
12530 { \
12531- .task = &tsk, \
12532 .exec_domain = &default_exec_domain, \
12533 .flags = 0, \
12534 .cpu = 0, \
12535@@ -57,7 +51,7 @@ struct thread_info {
12536 }, \
12537 }
12538
12539-#define init_thread_info (init_thread_union.thread_info)
12540+#define init_thread_info (init_thread_union.stack)
12541 #define init_stack (init_thread_union.stack)
12542
12543 #else /* !__ASSEMBLY__ */
12544@@ -95,6 +89,7 @@ struct thread_info {
12545 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
12546 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
12547 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12548+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
12549
12550 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12551 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12552@@ -116,16 +111,17 @@ struct thread_info {
12553 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
12554 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
12555 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12556+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12557
12558 /* work to do in syscall_trace_enter() */
12559 #define _TIF_WORK_SYSCALL_ENTRY \
12560 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12561- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12562+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12563
12564 /* work to do in syscall_trace_leave() */
12565 #define _TIF_WORK_SYSCALL_EXIT \
12566 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12567- _TIF_SYSCALL_TRACEPOINT)
12568+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12569
12570 /* work to do on interrupt/exception return */
12571 #define _TIF_WORK_MASK \
12572@@ -135,7 +131,8 @@ struct thread_info {
12573
12574 /* work to do on any return to user space */
12575 #define _TIF_ALLWORK_MASK \
12576- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12577+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12578+ _TIF_GRSEC_SETXID)
12579
12580 /* Only used for 64 bit */
12581 #define _TIF_DO_NOTIFY_MASK \
12582@@ -169,45 +166,40 @@ struct thread_info {
12583 ret; \
12584 })
12585
12586-#ifdef CONFIG_X86_32
12587-
12588-#define STACK_WARN (THREAD_SIZE/8)
12589-/*
12590- * macros/functions for gaining access to the thread information structure
12591- *
12592- * preempt_count needs to be 1 initially, until the scheduler is functional.
12593- */
12594-#ifndef __ASSEMBLY__
12595-
12596-
12597-/* how to get the current stack pointer from C */
12598-register unsigned long current_stack_pointer asm("esp") __used;
12599-
12600-/* how to get the thread information struct from C */
12601-static inline struct thread_info *current_thread_info(void)
12602-{
12603- return (struct thread_info *)
12604- (current_stack_pointer & ~(THREAD_SIZE - 1));
12605-}
12606-
12607-#else /* !__ASSEMBLY__ */
12608-
12609+#ifdef __ASSEMBLY__
12610 /* how to get the thread information struct from ASM */
12611 #define GET_THREAD_INFO(reg) \
12612- movl $-THREAD_SIZE, reg; \
12613- andl %esp, reg
12614+ mov PER_CPU_VAR(current_tinfo), reg
12615
12616 /* use this one if reg already contains %esp */
12617-#define GET_THREAD_INFO_WITH_ESP(reg) \
12618- andl $-THREAD_SIZE, reg
12619+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12620+#else
12621+/* how to get the thread information struct from C */
12622+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12623+
12624+static __always_inline struct thread_info *current_thread_info(void)
12625+{
12626+ return percpu_read_stable(current_tinfo);
12627+}
12628+#endif
12629+
12630+#ifdef CONFIG_X86_32
12631+
12632+#define STACK_WARN (THREAD_SIZE/8)
12633+/*
12634+ * macros/functions for gaining access to the thread information structure
12635+ *
12636+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12637+ */
12638+#ifndef __ASSEMBLY__
12639+
12640+/* how to get the current stack pointer from C */
12641+register unsigned long current_stack_pointer asm("esp") __used;
12642
12643 #endif
12644
12645 #else /* X86_32 */
12646
12647-#include <asm/percpu.h>
12648-#define KERNEL_STACK_OFFSET (5*8)
12649-
12650 /*
12651 * macros/functions for gaining access to the thread information structure
12652 * preempt_count needs to be 1 initially, until the scheduler is functional.
12653@@ -215,27 +207,8 @@ static inline struct thread_info *current_thread_info(void)
12654 #ifndef __ASSEMBLY__
12655 DECLARE_PER_CPU(unsigned long, kernel_stack);
12656
12657-static inline struct thread_info *current_thread_info(void)
12658-{
12659- struct thread_info *ti;
12660- ti = (void *)(percpu_read_stable(kernel_stack) +
12661- KERNEL_STACK_OFFSET - THREAD_SIZE);
12662- return ti;
12663-}
12664-
12665-#else /* !__ASSEMBLY__ */
12666-
12667-/* how to get the thread information struct from ASM */
12668-#define GET_THREAD_INFO(reg) \
12669- movq PER_CPU_VAR(kernel_stack),reg ; \
12670- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12671-
12672-/*
12673- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12674- * a certain register (to be used in assembler memory operands).
12675- */
12676-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12677-
12678+/* how to get the current stack pointer from C */
12679+register unsigned long current_stack_pointer asm("rsp") __used;
12680 #endif
12681
12682 #endif /* !X86_32 */
12683@@ -269,5 +242,16 @@ extern void arch_task_cache_init(void);
12684 extern void free_thread_info(struct thread_info *ti);
12685 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12686 #define arch_task_cache_init arch_task_cache_init
12687+
12688+#define __HAVE_THREAD_FUNCTIONS
12689+#define task_thread_info(task) (&(task)->tinfo)
12690+#define task_stack_page(task) ((task)->stack)
12691+#define setup_thread_stack(p, org) do {} while (0)
12692+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12693+
12694+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12695+extern struct task_struct *alloc_task_struct_node(int node);
12696+extern void free_task_struct(struct task_struct *);
12697+
12698 #endif
12699 #endif /* _ASM_X86_THREAD_INFO_H */
12700diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12701index 8be5f54..7ae826d 100644
12702--- a/arch/x86/include/asm/uaccess.h
12703+++ b/arch/x86/include/asm/uaccess.h
12704@@ -7,12 +7,15 @@
12705 #include <linux/compiler.h>
12706 #include <linux/thread_info.h>
12707 #include <linux/string.h>
12708+#include <linux/sched.h>
12709 #include <asm/asm.h>
12710 #include <asm/page.h>
12711
12712 #define VERIFY_READ 0
12713 #define VERIFY_WRITE 1
12714
12715+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12716+
12717 /*
12718 * The fs value determines whether argument validity checking should be
12719 * performed or not. If get_fs() == USER_DS, checking is performed, with
12720@@ -28,7 +31,12 @@
12721
12722 #define get_ds() (KERNEL_DS)
12723 #define get_fs() (current_thread_info()->addr_limit)
12724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12725+void __set_fs(mm_segment_t x);
12726+void set_fs(mm_segment_t x);
12727+#else
12728 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12729+#endif
12730
12731 #define segment_eq(a, b) ((a).seg == (b).seg)
12732
12733@@ -76,7 +84,33 @@
12734 * checks that the pointer is in the user space range - after calling
12735 * this function, memory access functions may still return -EFAULT.
12736 */
12737-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12738+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12739+#define access_ok(type, addr, size) \
12740+({ \
12741+ long __size = size; \
12742+ unsigned long __addr = (unsigned long)addr; \
12743+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12744+ unsigned long __end_ao = __addr + __size - 1; \
12745+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12746+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12747+ while(__addr_ao <= __end_ao) { \
12748+ char __c_ao; \
12749+ __addr_ao += PAGE_SIZE; \
12750+ if (__size > PAGE_SIZE) \
12751+ cond_resched(); \
12752+ if (__get_user(__c_ao, (char __user *)__addr)) \
12753+ break; \
12754+ if (type != VERIFY_WRITE) { \
12755+ __addr = __addr_ao; \
12756+ continue; \
12757+ } \
12758+ if (__put_user(__c_ao, (char __user *)__addr)) \
12759+ break; \
12760+ __addr = __addr_ao; \
12761+ } \
12762+ } \
12763+ __ret_ao; \
12764+})
12765
12766 /*
12767 * The exception table consists of pairs of addresses: the first is the
12768@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12769 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12770 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12771
12772-
12773+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12774+#define __copyuser_seg "gs;"
12775+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12776+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12777+#else
12778+#define __copyuser_seg
12779+#define __COPYUSER_SET_ES
12780+#define __COPYUSER_RESTORE_ES
12781+#endif
12782
12783 #ifdef CONFIG_X86_32
12784 #define __put_user_asm_u64(x, addr, err, errret) \
12785- asm volatile("1: movl %%eax,0(%2)\n" \
12786- "2: movl %%edx,4(%2)\n" \
12787+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12788+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12789 "3:\n" \
12790 ".section .fixup,\"ax\"\n" \
12791 "4: movl %3,%0\n" \
12792@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12793 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12794
12795 #define __put_user_asm_ex_u64(x, addr) \
12796- asm volatile("1: movl %%eax,0(%1)\n" \
12797- "2: movl %%edx,4(%1)\n" \
12798+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12799+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12800 "3:\n" \
12801 _ASM_EXTABLE(1b, 2b - 1b) \
12802 _ASM_EXTABLE(2b, 3b - 2b) \
12803@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12804 __typeof__(*(ptr)) __pu_val; \
12805 __chk_user_ptr(ptr); \
12806 might_fault(); \
12807- __pu_val = x; \
12808+ __pu_val = (x); \
12809 switch (sizeof(*(ptr))) { \
12810 case 1: \
12811 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12812@@ -373,7 +415,7 @@ do { \
12813 } while (0)
12814
12815 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12816- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12817+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12818 "2:\n" \
12819 ".section .fixup,\"ax\"\n" \
12820 "3: mov %3,%0\n" \
12821@@ -381,7 +423,7 @@ do { \
12822 " jmp 2b\n" \
12823 ".previous\n" \
12824 _ASM_EXTABLE(1b, 3b) \
12825- : "=r" (err), ltype(x) \
12826+ : "=r" (err), ltype (x) \
12827 : "m" (__m(addr)), "i" (errret), "0" (err))
12828
12829 #define __get_user_size_ex(x, ptr, size) \
12830@@ -406,7 +448,7 @@ do { \
12831 } while (0)
12832
12833 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12834- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12835+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12836 "2:\n" \
12837 _ASM_EXTABLE(1b, 2b - 1b) \
12838 : ltype(x) : "m" (__m(addr)))
12839@@ -423,13 +465,24 @@ do { \
12840 int __gu_err; \
12841 unsigned long __gu_val; \
12842 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12843- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12844+ (x) = (__typeof__(*(ptr)))__gu_val; \
12845 __gu_err; \
12846 })
12847
12848 /* FIXME: this hack is definitely wrong -AK */
12849 struct __large_struct { unsigned long buf[100]; };
12850-#define __m(x) (*(struct __large_struct __user *)(x))
12851+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12852+#define ____m(x) \
12853+({ \
12854+ unsigned long ____x = (unsigned long)(x); \
12855+ if (____x < PAX_USER_SHADOW_BASE) \
12856+ ____x += PAX_USER_SHADOW_BASE; \
12857+ (void __user *)____x; \
12858+})
12859+#else
12860+#define ____m(x) (x)
12861+#endif
12862+#define __m(x) (*(struct __large_struct __user *)____m(x))
12863
12864 /*
12865 * Tell gcc we read from memory instead of writing: this is because
12866@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12867 * aliasing issues.
12868 */
12869 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12870- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12871+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12872 "2:\n" \
12873 ".section .fixup,\"ax\"\n" \
12874 "3: mov %3,%0\n" \
12875@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12876 ".previous\n" \
12877 _ASM_EXTABLE(1b, 3b) \
12878 : "=r"(err) \
12879- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12880+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12881
12882 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12883- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12884+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12885 "2:\n" \
12886 _ASM_EXTABLE(1b, 2b - 1b) \
12887 : : ltype(x), "m" (__m(addr)))
12888@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12889 * On error, the variable @x is set to zero.
12890 */
12891
12892+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12893+#define __get_user(x, ptr) get_user((x), (ptr))
12894+#else
12895 #define __get_user(x, ptr) \
12896 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12897+#endif
12898
12899 /**
12900 * __put_user: - Write a simple value into user space, with less checking.
12901@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12902 * Returns zero on success, or -EFAULT on error.
12903 */
12904
12905+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12906+#define __put_user(x, ptr) put_user((x), (ptr))
12907+#else
12908 #define __put_user(x, ptr) \
12909 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12910+#endif
12911
12912 #define __get_user_unaligned __get_user
12913 #define __put_user_unaligned __put_user
12914@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12915 #define get_user_ex(x, ptr) do { \
12916 unsigned long __gue_val; \
12917 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12918- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12919+ (x) = (__typeof__(*(ptr)))__gue_val; \
12920 } while (0)
12921
12922 #ifdef CONFIG_X86_WP_WORKS_OK
12923diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12924index 566e803..7183d0b 100644
12925--- a/arch/x86/include/asm/uaccess_32.h
12926+++ b/arch/x86/include/asm/uaccess_32.h
12927@@ -11,15 +11,15 @@
12928 #include <asm/page.h>
12929
12930 unsigned long __must_check __copy_to_user_ll
12931- (void __user *to, const void *from, unsigned long n);
12932+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12933 unsigned long __must_check __copy_from_user_ll
12934- (void *to, const void __user *from, unsigned long n);
12935+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12936 unsigned long __must_check __copy_from_user_ll_nozero
12937- (void *to, const void __user *from, unsigned long n);
12938+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12939 unsigned long __must_check __copy_from_user_ll_nocache
12940- (void *to, const void __user *from, unsigned long n);
12941+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12942 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12943- (void *to, const void __user *from, unsigned long n);
12944+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12945
12946 /**
12947 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12948@@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12949 */
12950
12951 static __always_inline unsigned long __must_check
12952+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12953+static __always_inline unsigned long __must_check
12954 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12955 {
12956+ if ((long)n < 0)
12957+ return n;
12958+
12959 if (__builtin_constant_p(n)) {
12960 unsigned long ret;
12961
12962@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12963 return ret;
12964 }
12965 }
12966+ if (!__builtin_constant_p(n))
12967+ check_object_size(from, n, true);
12968 return __copy_to_user_ll(to, from, n);
12969 }
12970
12971@@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12972 * On success, this will be zero.
12973 */
12974 static __always_inline unsigned long __must_check
12975+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12976+static __always_inline unsigned long __must_check
12977 __copy_to_user(void __user *to, const void *from, unsigned long n)
12978 {
12979 might_fault();
12980+
12981 return __copy_to_user_inatomic(to, from, n);
12982 }
12983
12984 static __always_inline unsigned long
12985+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12986+static __always_inline unsigned long
12987 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12988 {
12989+ if ((long)n < 0)
12990+ return n;
12991+
12992 /* Avoid zeroing the tail if the copy fails..
12993 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12994 * but as the zeroing behaviour is only significant when n is not
12995@@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12996 * for explanation of why this is needed.
12997 */
12998 static __always_inline unsigned long
12999+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13000+static __always_inline unsigned long
13001 __copy_from_user(void *to, const void __user *from, unsigned long n)
13002 {
13003 might_fault();
13004+
13005+ if ((long)n < 0)
13006+ return n;
13007+
13008 if (__builtin_constant_p(n)) {
13009 unsigned long ret;
13010
13011@@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13012 return ret;
13013 }
13014 }
13015+ if (!__builtin_constant_p(n))
13016+ check_object_size(to, n, false);
13017 return __copy_from_user_ll(to, from, n);
13018 }
13019
13020 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13021+ const void __user *from, unsigned long n) __size_overflow(3);
13022+static __always_inline unsigned long __copy_from_user_nocache(void *to,
13023 const void __user *from, unsigned long n)
13024 {
13025 might_fault();
13026+
13027+ if ((long)n < 0)
13028+ return n;
13029+
13030 if (__builtin_constant_p(n)) {
13031 unsigned long ret;
13032
13033@@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13034
13035 static __always_inline unsigned long
13036 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13037+ unsigned long n) __size_overflow(3);
13038+static __always_inline unsigned long
13039+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13040 unsigned long n)
13041 {
13042- return __copy_from_user_ll_nocache_nozero(to, from, n);
13043+ if ((long)n < 0)
13044+ return n;
13045+
13046+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13047 }
13048
13049-unsigned long __must_check copy_to_user(void __user *to,
13050- const void *from, unsigned long n);
13051-unsigned long __must_check _copy_from_user(void *to,
13052- const void __user *from,
13053- unsigned long n);
13054-
13055+extern void copy_to_user_overflow(void)
13056+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13057+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13058+#else
13059+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13060+#endif
13061+;
13062
13063 extern void copy_from_user_overflow(void)
13064 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13065@@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void)
13066 #endif
13067 ;
13068
13069-static inline unsigned long __must_check copy_from_user(void *to,
13070- const void __user *from,
13071- unsigned long n)
13072+/**
13073+ * copy_to_user: - Copy a block of data into user space.
13074+ * @to: Destination address, in user space.
13075+ * @from: Source address, in kernel space.
13076+ * @n: Number of bytes to copy.
13077+ *
13078+ * Context: User context only. This function may sleep.
13079+ *
13080+ * Copy data from kernel space to user space.
13081+ *
13082+ * Returns number of bytes that could not be copied.
13083+ * On success, this will be zero.
13084+ */
13085+static inline unsigned long __must_check
13086+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13087+static inline unsigned long __must_check
13088+copy_to_user(void __user *to, const void *from, unsigned long n)
13089+{
13090+ int sz = __compiletime_object_size(from);
13091+
13092+ if (unlikely(sz != -1 && sz < n))
13093+ copy_to_user_overflow();
13094+ else if (access_ok(VERIFY_WRITE, to, n))
13095+ n = __copy_to_user(to, from, n);
13096+ return n;
13097+}
13098+
13099+/**
13100+ * copy_from_user: - Copy a block of data from user space.
13101+ * @to: Destination address, in kernel space.
13102+ * @from: Source address, in user space.
13103+ * @n: Number of bytes to copy.
13104+ *
13105+ * Context: User context only. This function may sleep.
13106+ *
13107+ * Copy data from user space to kernel space.
13108+ *
13109+ * Returns number of bytes that could not be copied.
13110+ * On success, this will be zero.
13111+ *
13112+ * If some data could not be copied, this function will pad the copied
13113+ * data to the requested size using zero bytes.
13114+ */
13115+static inline unsigned long __must_check
13116+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13117+static inline unsigned long __must_check
13118+copy_from_user(void *to, const void __user *from, unsigned long n)
13119 {
13120 int sz = __compiletime_object_size(to);
13121
13122- if (likely(sz == -1 || sz >= n))
13123- n = _copy_from_user(to, from, n);
13124- else
13125+ if (unlikely(sz != -1 && sz < n))
13126 copy_from_user_overflow();
13127-
13128+ else if (access_ok(VERIFY_READ, from, n))
13129+ n = __copy_from_user(to, from, n);
13130+ else if ((long)n > 0) {
13131+ if (!__builtin_constant_p(n))
13132+ check_object_size(to, n, false);
13133+ memset(to, 0, n);
13134+ }
13135 return n;
13136 }
13137
13138@@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst,
13139 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13140
13141 long strnlen_user(const char __user *str, long n);
13142-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13143-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13144+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13145+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13146
13147 #endif /* _ASM_X86_UACCESS_32_H */
13148diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13149index 1c66d30..e294b5f 100644
13150--- a/arch/x86/include/asm/uaccess_64.h
13151+++ b/arch/x86/include/asm/uaccess_64.h
13152@@ -10,6 +10,9 @@
13153 #include <asm/alternative.h>
13154 #include <asm/cpufeature.h>
13155 #include <asm/page.h>
13156+#include <asm/pgtable.h>
13157+
13158+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13159
13160 /*
13161 * Copy To/From Userspace
13162@@ -17,12 +20,14 @@
13163
13164 /* Handles exceptions in both to and from, but doesn't do access_ok */
13165 __must_check unsigned long
13166-copy_user_generic_string(void *to, const void *from, unsigned len);
13167+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13168 __must_check unsigned long
13169-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13170+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13171
13172 static __always_inline __must_check unsigned long
13173-copy_user_generic(void *to, const void *from, unsigned len)
13174+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13175+static __always_inline __must_check unsigned long
13176+copy_user_generic(void *to, const void *from, unsigned long len)
13177 {
13178 unsigned ret;
13179
13180@@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len)
13181 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13182 "=d" (len)),
13183 "1" (to), "2" (from), "3" (len)
13184- : "memory", "rcx", "r8", "r9", "r10", "r11");
13185+ : "memory", "rcx", "r8", "r9", "r11");
13186 return ret;
13187 }
13188
13189+static __always_inline __must_check unsigned long
13190+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13191+static __always_inline __must_check unsigned long
13192+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13193 __must_check unsigned long
13194-_copy_to_user(void __user *to, const void *from, unsigned len);
13195-__must_check unsigned long
13196-_copy_from_user(void *to, const void __user *from, unsigned len);
13197-__must_check unsigned long
13198-copy_in_user(void __user *to, const void __user *from, unsigned len);
13199+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13200
13201 static inline unsigned long __must_check copy_from_user(void *to,
13202 const void __user *from,
13203+ unsigned long n) __size_overflow(3);
13204+static inline unsigned long __must_check copy_from_user(void *to,
13205+ const void __user *from,
13206 unsigned long n)
13207 {
13208- int sz = __compiletime_object_size(to);
13209-
13210 might_fault();
13211- if (likely(sz == -1 || sz >= n))
13212- n = _copy_from_user(to, from, n);
13213-#ifdef CONFIG_DEBUG_VM
13214- else
13215- WARN(1, "Buffer overflow detected!\n");
13216-#endif
13217+
13218+ if (access_ok(VERIFY_READ, from, n))
13219+ n = __copy_from_user(to, from, n);
13220+ else if (n < INT_MAX) {
13221+ if (!__builtin_constant_p(n))
13222+ check_object_size(to, n, false);
13223+ memset(to, 0, n);
13224+ }
13225 return n;
13226 }
13227
13228 static __always_inline __must_check
13229-int copy_to_user(void __user *dst, const void *src, unsigned size)
13230+int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13231+static __always_inline __must_check
13232+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13233 {
13234 might_fault();
13235
13236- return _copy_to_user(dst, src, size);
13237+ if (access_ok(VERIFY_WRITE, dst, size))
13238+ size = __copy_to_user(dst, src, size);
13239+ return size;
13240 }
13241
13242 static __always_inline __must_check
13243-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13244+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13245+static __always_inline __must_check
13246+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13247 {
13248- int ret = 0;
13249+ int sz = __compiletime_object_size(dst);
13250+ unsigned ret = 0;
13251
13252 might_fault();
13253- if (!__builtin_constant_p(size))
13254- return copy_user_generic(dst, (__force void *)src, size);
13255+
13256+ if (size > INT_MAX)
13257+ return size;
13258+
13259+#ifdef CONFIG_PAX_MEMORY_UDEREF
13260+ if (!__access_ok(VERIFY_READ, src, size))
13261+ return size;
13262+#endif
13263+
13264+ if (unlikely(sz != -1 && sz < size)) {
13265+#ifdef CONFIG_DEBUG_VM
13266+ WARN(1, "Buffer overflow detected!\n");
13267+#endif
13268+ return size;
13269+ }
13270+
13271+ if (!__builtin_constant_p(size)) {
13272+ check_object_size(dst, size, false);
13273+
13274+#ifdef CONFIG_PAX_MEMORY_UDEREF
13275+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13276+ src += PAX_USER_SHADOW_BASE;
13277+#endif
13278+
13279+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13280+ }
13281 switch (size) {
13282- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13283+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13284 ret, "b", "b", "=q", 1);
13285 return ret;
13286- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13287+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13288 ret, "w", "w", "=r", 2);
13289 return ret;
13290- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13291+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13292 ret, "l", "k", "=r", 4);
13293 return ret;
13294- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13295+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13296 ret, "q", "", "=r", 8);
13297 return ret;
13298 case 10:
13299- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13300+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13301 ret, "q", "", "=r", 10);
13302 if (unlikely(ret))
13303 return ret;
13304 __get_user_asm(*(u16 *)(8 + (char *)dst),
13305- (u16 __user *)(8 + (char __user *)src),
13306+ (const u16 __user *)(8 + (const char __user *)src),
13307 ret, "w", "w", "=r", 2);
13308 return ret;
13309 case 16:
13310- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13311+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13312 ret, "q", "", "=r", 16);
13313 if (unlikely(ret))
13314 return ret;
13315 __get_user_asm(*(u64 *)(8 + (char *)dst),
13316- (u64 __user *)(8 + (char __user *)src),
13317+ (const u64 __user *)(8 + (const char __user *)src),
13318 ret, "q", "", "=r", 8);
13319 return ret;
13320 default:
13321- return copy_user_generic(dst, (__force void *)src, size);
13322+
13323+#ifdef CONFIG_PAX_MEMORY_UDEREF
13324+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13325+ src += PAX_USER_SHADOW_BASE;
13326+#endif
13327+
13328+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13329 }
13330 }
13331
13332 static __always_inline __must_check
13333-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13334+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13335+static __always_inline __must_check
13336+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13337 {
13338- int ret = 0;
13339+ int sz = __compiletime_object_size(src);
13340+ unsigned ret = 0;
13341
13342 might_fault();
13343- if (!__builtin_constant_p(size))
13344- return copy_user_generic((__force void *)dst, src, size);
13345+
13346+ if (size > INT_MAX)
13347+ return size;
13348+
13349+#ifdef CONFIG_PAX_MEMORY_UDEREF
13350+ if (!__access_ok(VERIFY_WRITE, dst, size))
13351+ return size;
13352+#endif
13353+
13354+ if (unlikely(sz != -1 && sz < size)) {
13355+#ifdef CONFIG_DEBUG_VM
13356+ WARN(1, "Buffer overflow detected!\n");
13357+#endif
13358+ return size;
13359+ }
13360+
13361+ if (!__builtin_constant_p(size)) {
13362+ check_object_size(src, size, true);
13363+
13364+#ifdef CONFIG_PAX_MEMORY_UDEREF
13365+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13366+ dst += PAX_USER_SHADOW_BASE;
13367+#endif
13368+
13369+ return copy_user_generic((__force_kernel void *)dst, src, size);
13370+ }
13371 switch (size) {
13372- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13373+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13374 ret, "b", "b", "iq", 1);
13375 return ret;
13376- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13377+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13378 ret, "w", "w", "ir", 2);
13379 return ret;
13380- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13381+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13382 ret, "l", "k", "ir", 4);
13383 return ret;
13384- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13385+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13386 ret, "q", "", "er", 8);
13387 return ret;
13388 case 10:
13389- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13390+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13391 ret, "q", "", "er", 10);
13392 if (unlikely(ret))
13393 return ret;
13394 asm("":::"memory");
13395- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13396+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13397 ret, "w", "w", "ir", 2);
13398 return ret;
13399 case 16:
13400- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13401+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13402 ret, "q", "", "er", 16);
13403 if (unlikely(ret))
13404 return ret;
13405 asm("":::"memory");
13406- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13407+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13408 ret, "q", "", "er", 8);
13409 return ret;
13410 default:
13411- return copy_user_generic((__force void *)dst, src, size);
13412+
13413+#ifdef CONFIG_PAX_MEMORY_UDEREF
13414+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13415+ dst += PAX_USER_SHADOW_BASE;
13416+#endif
13417+
13418+ return copy_user_generic((__force_kernel void *)dst, src, size);
13419 }
13420 }
13421
13422 static __always_inline __must_check
13423-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13424+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13425+static __always_inline __must_check
13426+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13427 {
13428- int ret = 0;
13429+ unsigned ret = 0;
13430
13431 might_fault();
13432- if (!__builtin_constant_p(size))
13433- return copy_user_generic((__force void *)dst,
13434- (__force void *)src, size);
13435+
13436+ if (size > INT_MAX)
13437+ return size;
13438+
13439+#ifdef CONFIG_PAX_MEMORY_UDEREF
13440+ if (!__access_ok(VERIFY_READ, src, size))
13441+ return size;
13442+ if (!__access_ok(VERIFY_WRITE, dst, size))
13443+ return size;
13444+#endif
13445+
13446+ if (!__builtin_constant_p(size)) {
13447+
13448+#ifdef CONFIG_PAX_MEMORY_UDEREF
13449+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13450+ src += PAX_USER_SHADOW_BASE;
13451+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13452+ dst += PAX_USER_SHADOW_BASE;
13453+#endif
13454+
13455+ return copy_user_generic((__force_kernel void *)dst,
13456+ (__force_kernel const void *)src, size);
13457+ }
13458 switch (size) {
13459 case 1: {
13460 u8 tmp;
13461- __get_user_asm(tmp, (u8 __user *)src,
13462+ __get_user_asm(tmp, (const u8 __user *)src,
13463 ret, "b", "b", "=q", 1);
13464 if (likely(!ret))
13465 __put_user_asm(tmp, (u8 __user *)dst,
13466@@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13467 }
13468 case 2: {
13469 u16 tmp;
13470- __get_user_asm(tmp, (u16 __user *)src,
13471+ __get_user_asm(tmp, (const u16 __user *)src,
13472 ret, "w", "w", "=r", 2);
13473 if (likely(!ret))
13474 __put_user_asm(tmp, (u16 __user *)dst,
13475@@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13476
13477 case 4: {
13478 u32 tmp;
13479- __get_user_asm(tmp, (u32 __user *)src,
13480+ __get_user_asm(tmp, (const u32 __user *)src,
13481 ret, "l", "k", "=r", 4);
13482 if (likely(!ret))
13483 __put_user_asm(tmp, (u32 __user *)dst,
13484@@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13485 }
13486 case 8: {
13487 u64 tmp;
13488- __get_user_asm(tmp, (u64 __user *)src,
13489+ __get_user_asm(tmp, (const u64 __user *)src,
13490 ret, "q", "", "=r", 8);
13491 if (likely(!ret))
13492 __put_user_asm(tmp, (u64 __user *)dst,
13493@@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13494 return ret;
13495 }
13496 default:
13497- return copy_user_generic((__force void *)dst,
13498- (__force void *)src, size);
13499+
13500+#ifdef CONFIG_PAX_MEMORY_UDEREF
13501+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13502+ src += PAX_USER_SHADOW_BASE;
13503+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13504+ dst += PAX_USER_SHADOW_BASE;
13505+#endif
13506+
13507+ return copy_user_generic((__force_kernel void *)dst,
13508+ (__force_kernel const void *)src, size);
13509 }
13510 }
13511
13512@@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
13513 __must_check long strnlen_user(const char __user *str, long n);
13514 __must_check long __strnlen_user(const char __user *str, long n);
13515 __must_check long strlen_user(const char __user *str);
13516-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13517-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13518+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13519+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13520
13521 static __must_check __always_inline int
13522-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13523+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13524+static __must_check __always_inline int
13525+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13526 {
13527- return copy_user_generic(dst, (__force const void *)src, size);
13528+ if (size > INT_MAX)
13529+ return size;
13530+
13531+#ifdef CONFIG_PAX_MEMORY_UDEREF
13532+ if (!__access_ok(VERIFY_READ, src, size))
13533+ return size;
13534+
13535+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13536+ src += PAX_USER_SHADOW_BASE;
13537+#endif
13538+
13539+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13540 }
13541
13542-static __must_check __always_inline int
13543-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13544+static __must_check __always_inline unsigned long
13545+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13546+static __must_check __always_inline unsigned long
13547+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13548 {
13549- return copy_user_generic((__force void *)dst, src, size);
13550+ if (size > INT_MAX)
13551+ return size;
13552+
13553+#ifdef CONFIG_PAX_MEMORY_UDEREF
13554+ if (!__access_ok(VERIFY_WRITE, dst, size))
13555+ return size;
13556+
13557+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13558+ dst += PAX_USER_SHADOW_BASE;
13559+#endif
13560+
13561+ return copy_user_generic((__force_kernel void *)dst, src, size);
13562 }
13563
13564-extern long __copy_user_nocache(void *dst, const void __user *src,
13565- unsigned size, int zerorest);
13566+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13567+ unsigned long size, int zerorest) __size_overflow(3);
13568
13569-static inline int
13570-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13571+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13572+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13573 {
13574 might_sleep();
13575+
13576+ if (size > INT_MAX)
13577+ return size;
13578+
13579+#ifdef CONFIG_PAX_MEMORY_UDEREF
13580+ if (!__access_ok(VERIFY_READ, src, size))
13581+ return size;
13582+#endif
13583+
13584 return __copy_user_nocache(dst, src, size, 1);
13585 }
13586
13587-static inline int
13588-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13589- unsigned size)
13590+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13591+ unsigned long size) __size_overflow(3);
13592+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13593+ unsigned long size)
13594 {
13595+ if (size > INT_MAX)
13596+ return size;
13597+
13598+#ifdef CONFIG_PAX_MEMORY_UDEREF
13599+ if (!__access_ok(VERIFY_READ, src, size))
13600+ return size;
13601+#endif
13602+
13603 return __copy_user_nocache(dst, src, size, 0);
13604 }
13605
13606-unsigned long
13607-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13608+extern unsigned long
13609+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13610
13611 #endif /* _ASM_X86_UACCESS_64_H */
13612diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13613index bb05228..d763d5b 100644
13614--- a/arch/x86/include/asm/vdso.h
13615+++ b/arch/x86/include/asm/vdso.h
13616@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13617 #define VDSO32_SYMBOL(base, name) \
13618 ({ \
13619 extern const char VDSO32_##name[]; \
13620- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13621+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13622 })
13623 #endif
13624
13625diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13626index a609c39..7a68dc7 100644
13627--- a/arch/x86/include/asm/x86_init.h
13628+++ b/arch/x86/include/asm/x86_init.h
13629@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13630 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13631 void (*find_smp_config)(void);
13632 void (*get_smp_config)(unsigned int early);
13633-};
13634+} __no_const;
13635
13636 /**
13637 * struct x86_init_resources - platform specific resource related ops
13638@@ -43,7 +43,7 @@ struct x86_init_resources {
13639 void (*probe_roms)(void);
13640 void (*reserve_resources)(void);
13641 char *(*memory_setup)(void);
13642-};
13643+} __no_const;
13644
13645 /**
13646 * struct x86_init_irqs - platform specific interrupt setup
13647@@ -56,7 +56,7 @@ struct x86_init_irqs {
13648 void (*pre_vector_init)(void);
13649 void (*intr_init)(void);
13650 void (*trap_init)(void);
13651-};
13652+} __no_const;
13653
13654 /**
13655 * struct x86_init_oem - oem platform specific customizing functions
13656@@ -66,7 +66,7 @@ struct x86_init_irqs {
13657 struct x86_init_oem {
13658 void (*arch_setup)(void);
13659 void (*banner)(void);
13660-};
13661+} __no_const;
13662
13663 /**
13664 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13665@@ -77,7 +77,7 @@ struct x86_init_oem {
13666 */
13667 struct x86_init_mapping {
13668 void (*pagetable_reserve)(u64 start, u64 end);
13669-};
13670+} __no_const;
13671
13672 /**
13673 * struct x86_init_paging - platform specific paging functions
13674@@ -87,7 +87,7 @@ struct x86_init_mapping {
13675 struct x86_init_paging {
13676 void (*pagetable_setup_start)(pgd_t *base);
13677 void (*pagetable_setup_done)(pgd_t *base);
13678-};
13679+} __no_const;
13680
13681 /**
13682 * struct x86_init_timers - platform specific timer setup
13683@@ -102,7 +102,7 @@ struct x86_init_timers {
13684 void (*tsc_pre_init)(void);
13685 void (*timer_init)(void);
13686 void (*wallclock_init)(void);
13687-};
13688+} __no_const;
13689
13690 /**
13691 * struct x86_init_iommu - platform specific iommu setup
13692@@ -110,7 +110,7 @@ struct x86_init_timers {
13693 */
13694 struct x86_init_iommu {
13695 int (*iommu_init)(void);
13696-};
13697+} __no_const;
13698
13699 /**
13700 * struct x86_init_pci - platform specific pci init functions
13701@@ -124,7 +124,7 @@ struct x86_init_pci {
13702 int (*init)(void);
13703 void (*init_irq)(void);
13704 void (*fixup_irqs)(void);
13705-};
13706+} __no_const;
13707
13708 /**
13709 * struct x86_init_ops - functions for platform specific setup
13710@@ -140,7 +140,7 @@ struct x86_init_ops {
13711 struct x86_init_timers timers;
13712 struct x86_init_iommu iommu;
13713 struct x86_init_pci pci;
13714-};
13715+} __no_const;
13716
13717 /**
13718 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13719@@ -149,7 +149,7 @@ struct x86_init_ops {
13720 struct x86_cpuinit_ops {
13721 void (*setup_percpu_clockev)(void);
13722 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13723-};
13724+} __no_const;
13725
13726 /**
13727 * struct x86_platform_ops - platform specific runtime functions
13728@@ -171,7 +171,7 @@ struct x86_platform_ops {
13729 void (*nmi_init)(void);
13730 unsigned char (*get_nmi_reason)(void);
13731 int (*i8042_detect)(void);
13732-};
13733+} __no_const;
13734
13735 struct pci_dev;
13736
13737@@ -180,7 +180,7 @@ struct x86_msi_ops {
13738 void (*teardown_msi_irq)(unsigned int irq);
13739 void (*teardown_msi_irqs)(struct pci_dev *dev);
13740 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13741-};
13742+} __no_const;
13743
13744 extern struct x86_init_ops x86_init;
13745 extern struct x86_cpuinit_ops x86_cpuinit;
13746diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13747index c6ce245..ffbdab7 100644
13748--- a/arch/x86/include/asm/xsave.h
13749+++ b/arch/x86/include/asm/xsave.h
13750@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13751 {
13752 int err;
13753
13754+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13755+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13756+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13757+#endif
13758+
13759 /*
13760 * Clear the xsave header first, so that reserved fields are
13761 * initialized to zero.
13762@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13763 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13764 {
13765 int err;
13766- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13767+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13768 u32 lmask = mask;
13769 u32 hmask = mask >> 32;
13770
13771+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13772+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13773+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13774+#endif
13775+
13776 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13777 "2:\n"
13778 ".section .fixup,\"ax\"\n"
13779diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13780index 6a564ac..9b1340c 100644
13781--- a/arch/x86/kernel/acpi/realmode/Makefile
13782+++ b/arch/x86/kernel/acpi/realmode/Makefile
13783@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13784 $(call cc-option, -fno-stack-protector) \
13785 $(call cc-option, -mpreferred-stack-boundary=2)
13786 KBUILD_CFLAGS += $(call cc-option, -m32)
13787+ifdef CONSTIFY_PLUGIN
13788+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13789+endif
13790 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13791 GCOV_PROFILE := n
13792
13793diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13794index b4fd836..4358fe3 100644
13795--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13796+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13797@@ -108,6 +108,9 @@ wakeup_code:
13798 /* Do any other stuff... */
13799
13800 #ifndef CONFIG_64BIT
13801+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13802+ call verify_cpu
13803+
13804 /* This could also be done in C code... */
13805 movl pmode_cr3, %eax
13806 movl %eax, %cr3
13807@@ -131,6 +134,7 @@ wakeup_code:
13808 movl pmode_cr0, %eax
13809 movl %eax, %cr0
13810 jmp pmode_return
13811+# include "../../verify_cpu.S"
13812 #else
13813 pushw $0
13814 pushw trampoline_segment
13815diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13816index 103b6ab..2004d0a 100644
13817--- a/arch/x86/kernel/acpi/sleep.c
13818+++ b/arch/x86/kernel/acpi/sleep.c
13819@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13820 header->trampoline_segment = trampoline_address() >> 4;
13821 #ifdef CONFIG_SMP
13822 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13823+
13824+ pax_open_kernel();
13825 early_gdt_descr.address =
13826 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13827+ pax_close_kernel();
13828+
13829 initial_gs = per_cpu_offset(smp_processor_id());
13830 #endif
13831 initial_code = (unsigned long)wakeup_long64;
13832diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13833index 13ab720..95d5442 100644
13834--- a/arch/x86/kernel/acpi/wakeup_32.S
13835+++ b/arch/x86/kernel/acpi/wakeup_32.S
13836@@ -30,13 +30,11 @@ wakeup_pmode_return:
13837 # and restore the stack ... but you need gdt for this to work
13838 movl saved_context_esp, %esp
13839
13840- movl %cs:saved_magic, %eax
13841- cmpl $0x12345678, %eax
13842+ cmpl $0x12345678, saved_magic
13843 jne bogus_magic
13844
13845 # jump to place where we left off
13846- movl saved_eip, %eax
13847- jmp *%eax
13848+ jmp *(saved_eip)
13849
13850 bogus_magic:
13851 jmp bogus_magic
13852diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13853index 1f84794..e23f862 100644
13854--- a/arch/x86/kernel/alternative.c
13855+++ b/arch/x86/kernel/alternative.c
13856@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13857 */
13858 for (a = start; a < end; a++) {
13859 instr = (u8 *)&a->instr_offset + a->instr_offset;
13860+
13861+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13862+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13863+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13864+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13865+#endif
13866+
13867 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13868 BUG_ON(a->replacementlen > a->instrlen);
13869 BUG_ON(a->instrlen > sizeof(insnbuf));
13870@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13871 for (poff = start; poff < end; poff++) {
13872 u8 *ptr = (u8 *)poff + *poff;
13873
13874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13875+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13876+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13877+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13878+#endif
13879+
13880 if (!*poff || ptr < text || ptr >= text_end)
13881 continue;
13882 /* turn DS segment override prefix into lock prefix */
13883- if (*ptr == 0x3e)
13884+ if (*ktla_ktva(ptr) == 0x3e)
13885 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13886 };
13887 mutex_unlock(&text_mutex);
13888@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13889 for (poff = start; poff < end; poff++) {
13890 u8 *ptr = (u8 *)poff + *poff;
13891
13892+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13893+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13894+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13895+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13896+#endif
13897+
13898 if (!*poff || ptr < text || ptr >= text_end)
13899 continue;
13900 /* turn lock prefix into DS segment override prefix */
13901- if (*ptr == 0xf0)
13902+ if (*ktla_ktva(ptr) == 0xf0)
13903 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13904 };
13905 mutex_unlock(&text_mutex);
13906@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13907
13908 BUG_ON(p->len > MAX_PATCH_LEN);
13909 /* prep the buffer with the original instructions */
13910- memcpy(insnbuf, p->instr, p->len);
13911+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13912 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13913 (unsigned long)p->instr, p->len);
13914
13915@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13916 if (smp_alt_once)
13917 free_init_pages("SMP alternatives",
13918 (unsigned long)__smp_locks,
13919- (unsigned long)__smp_locks_end);
13920+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13921
13922 restart_nmi();
13923 }
13924@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13925 * instructions. And on the local CPU you need to be protected again NMI or MCE
13926 * handlers seeing an inconsistent instruction while you patch.
13927 */
13928-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13929+void *__kprobes text_poke_early(void *addr, const void *opcode,
13930 size_t len)
13931 {
13932 unsigned long flags;
13933 local_irq_save(flags);
13934- memcpy(addr, opcode, len);
13935+
13936+ pax_open_kernel();
13937+ memcpy(ktla_ktva(addr), opcode, len);
13938 sync_core();
13939+ pax_close_kernel();
13940+
13941 local_irq_restore(flags);
13942 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13943 that causes hangs on some VIA CPUs. */
13944@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13945 */
13946 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13947 {
13948- unsigned long flags;
13949- char *vaddr;
13950+ unsigned char *vaddr = ktla_ktva(addr);
13951 struct page *pages[2];
13952- int i;
13953+ size_t i;
13954
13955 if (!core_kernel_text((unsigned long)addr)) {
13956- pages[0] = vmalloc_to_page(addr);
13957- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13958+ pages[0] = vmalloc_to_page(vaddr);
13959+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13960 } else {
13961- pages[0] = virt_to_page(addr);
13962+ pages[0] = virt_to_page(vaddr);
13963 WARN_ON(!PageReserved(pages[0]));
13964- pages[1] = virt_to_page(addr + PAGE_SIZE);
13965+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13966 }
13967 BUG_ON(!pages[0]);
13968- local_irq_save(flags);
13969- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13970- if (pages[1])
13971- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13972- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13973- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13974- clear_fixmap(FIX_TEXT_POKE0);
13975- if (pages[1])
13976- clear_fixmap(FIX_TEXT_POKE1);
13977- local_flush_tlb();
13978- sync_core();
13979- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13980- that causes hangs on some VIA CPUs. */
13981+ text_poke_early(addr, opcode, len);
13982 for (i = 0; i < len; i++)
13983- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13984- local_irq_restore(flags);
13985+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13986 return addr;
13987 }
13988
13989diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13990index 5b3f88e..61232b4 100644
13991--- a/arch/x86/kernel/apic/apic.c
13992+++ b/arch/x86/kernel/apic/apic.c
13993@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13994 /*
13995 * Debug level, exported for io_apic.c
13996 */
13997-unsigned int apic_verbosity;
13998+int apic_verbosity;
13999
14000 int pic_mode;
14001
14002@@ -1912,7 +1912,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14003 apic_write(APIC_ESR, 0);
14004 v1 = apic_read(APIC_ESR);
14005 ack_APIC_irq();
14006- atomic_inc(&irq_err_count);
14007+ atomic_inc_unchecked(&irq_err_count);
14008
14009 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
14010 smp_processor_id(), v0 , v1);
14011diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14012index fb07275..e06bb59 100644
14013--- a/arch/x86/kernel/apic/io_apic.c
14014+++ b/arch/x86/kernel/apic/io_apic.c
14015@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14016 }
14017 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14018
14019-void lock_vector_lock(void)
14020+void lock_vector_lock(void) __acquires(vector_lock)
14021 {
14022 /* Used to the online set of cpus does not change
14023 * during assign_irq_vector.
14024@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
14025 raw_spin_lock(&vector_lock);
14026 }
14027
14028-void unlock_vector_lock(void)
14029+void unlock_vector_lock(void) __releases(vector_lock)
14030 {
14031 raw_spin_unlock(&vector_lock);
14032 }
14033@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
14034 ack_APIC_irq();
14035 }
14036
14037-atomic_t irq_mis_count;
14038+atomic_unchecked_t irq_mis_count;
14039
14040 static void ack_apic_level(struct irq_data *data)
14041 {
14042@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
14043 * at the cpu.
14044 */
14045 if (!(v & (1 << (i & 0x1f)))) {
14046- atomic_inc(&irq_mis_count);
14047+ atomic_inc_unchecked(&irq_mis_count);
14048
14049 eoi_ioapic_irq(irq, cfg);
14050 }
14051diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14052index f76623c..aab694f 100644
14053--- a/arch/x86/kernel/apm_32.c
14054+++ b/arch/x86/kernel/apm_32.c
14055@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
14056 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14057 * even though they are called in protected mode.
14058 */
14059-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14060+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14061 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14062
14063 static const char driver_version[] = "1.16ac"; /* no spaces */
14064@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
14065 BUG_ON(cpu != 0);
14066 gdt = get_cpu_gdt_table(cpu);
14067 save_desc_40 = gdt[0x40 / 8];
14068+
14069+ pax_open_kernel();
14070 gdt[0x40 / 8] = bad_bios_desc;
14071+ pax_close_kernel();
14072
14073 apm_irq_save(flags);
14074 APM_DO_SAVE_SEGS;
14075@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
14076 &call->esi);
14077 APM_DO_RESTORE_SEGS;
14078 apm_irq_restore(flags);
14079+
14080+ pax_open_kernel();
14081 gdt[0x40 / 8] = save_desc_40;
14082+ pax_close_kernel();
14083+
14084 put_cpu();
14085
14086 return call->eax & 0xff;
14087@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
14088 BUG_ON(cpu != 0);
14089 gdt = get_cpu_gdt_table(cpu);
14090 save_desc_40 = gdt[0x40 / 8];
14091+
14092+ pax_open_kernel();
14093 gdt[0x40 / 8] = bad_bios_desc;
14094+ pax_close_kernel();
14095
14096 apm_irq_save(flags);
14097 APM_DO_SAVE_SEGS;
14098@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
14099 &call->eax);
14100 APM_DO_RESTORE_SEGS;
14101 apm_irq_restore(flags);
14102+
14103+ pax_open_kernel();
14104 gdt[0x40 / 8] = save_desc_40;
14105+ pax_close_kernel();
14106+
14107 put_cpu();
14108 return error;
14109 }
14110@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
14111 * code to that CPU.
14112 */
14113 gdt = get_cpu_gdt_table(0);
14114+
14115+ pax_open_kernel();
14116 set_desc_base(&gdt[APM_CS >> 3],
14117 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14118 set_desc_base(&gdt[APM_CS_16 >> 3],
14119 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14120 set_desc_base(&gdt[APM_DS >> 3],
14121 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14122+ pax_close_kernel();
14123
14124 proc_create("apm", 0, NULL, &apm_file_ops);
14125
14126diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14127index 68de2dc..1f3c720 100644
14128--- a/arch/x86/kernel/asm-offsets.c
14129+++ b/arch/x86/kernel/asm-offsets.c
14130@@ -33,6 +33,8 @@ void common(void) {
14131 OFFSET(TI_status, thread_info, status);
14132 OFFSET(TI_addr_limit, thread_info, addr_limit);
14133 OFFSET(TI_preempt_count, thread_info, preempt_count);
14134+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14135+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14136
14137 BLANK();
14138 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14139@@ -53,8 +55,26 @@ void common(void) {
14140 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14141 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14142 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14143+
14144+#ifdef CONFIG_PAX_KERNEXEC
14145+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14146 #endif
14147
14148+#ifdef CONFIG_PAX_MEMORY_UDEREF
14149+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14150+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14151+#ifdef CONFIG_X86_64
14152+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14153+#endif
14154+#endif
14155+
14156+#endif
14157+
14158+ BLANK();
14159+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14160+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14161+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14162+
14163 #ifdef CONFIG_XEN
14164 BLANK();
14165 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14166diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14167index 834e897..dacddc8 100644
14168--- a/arch/x86/kernel/asm-offsets_64.c
14169+++ b/arch/x86/kernel/asm-offsets_64.c
14170@@ -70,6 +70,7 @@ int main(void)
14171 BLANK();
14172 #undef ENTRY
14173
14174+ DEFINE(TSS_size, sizeof(struct tss_struct));
14175 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14176 BLANK();
14177
14178diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14179index 25f24dc..4094a7f 100644
14180--- a/arch/x86/kernel/cpu/Makefile
14181+++ b/arch/x86/kernel/cpu/Makefile
14182@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14183 CFLAGS_REMOVE_perf_event.o = -pg
14184 endif
14185
14186-# Make sure load_percpu_segment has no stackprotector
14187-nostackp := $(call cc-option, -fno-stack-protector)
14188-CFLAGS_common.o := $(nostackp)
14189-
14190 obj-y := intel_cacheinfo.o scattered.o topology.o
14191 obj-y += proc.o capflags.o powerflags.o common.o
14192 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14193diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14194index 80ab83d..0a7b34e 100644
14195--- a/arch/x86/kernel/cpu/amd.c
14196+++ b/arch/x86/kernel/cpu/amd.c
14197@@ -670,7 +670,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14198 unsigned int size)
14199 {
14200 /* AMD errata T13 (order #21922) */
14201- if ((c->x86 == 6)) {
14202+ if (c->x86 == 6) {
14203 /* Duron Rev A0 */
14204 if (c->x86_model == 3 && c->x86_mask == 0)
14205 size = 64;
14206diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14207index 1a810e4..9fa8201 100644
14208--- a/arch/x86/kernel/cpu/common.c
14209+++ b/arch/x86/kernel/cpu/common.c
14210@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14211
14212 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14213
14214-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14215-#ifdef CONFIG_X86_64
14216- /*
14217- * We need valid kernel segments for data and code in long mode too
14218- * IRET will check the segment types kkeil 2000/10/28
14219- * Also sysret mandates a special GDT layout
14220- *
14221- * TLS descriptors are currently at a different place compared to i386.
14222- * Hopefully nobody expects them at a fixed place (Wine?)
14223- */
14224- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14225- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14226- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14227- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14228- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14229- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14230-#else
14231- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14232- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14233- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14234- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14235- /*
14236- * Segments used for calling PnP BIOS have byte granularity.
14237- * They code segments and data segments have fixed 64k limits,
14238- * the transfer segment sizes are set at run time.
14239- */
14240- /* 32-bit code */
14241- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14242- /* 16-bit code */
14243- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14244- /* 16-bit data */
14245- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14246- /* 16-bit data */
14247- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14248- /* 16-bit data */
14249- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14250- /*
14251- * The APM segments have byte granularity and their bases
14252- * are set at run time. All have 64k limits.
14253- */
14254- /* 32-bit code */
14255- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14256- /* 16-bit code */
14257- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14258- /* data */
14259- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14260-
14261- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14262- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14263- GDT_STACK_CANARY_INIT
14264-#endif
14265-} };
14266-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14267-
14268 static int __init x86_xsave_setup(char *s)
14269 {
14270 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14271@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
14272 {
14273 struct desc_ptr gdt_descr;
14274
14275- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14276+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14277 gdt_descr.size = GDT_SIZE - 1;
14278 load_gdt(&gdt_descr);
14279 /* Reload the per-cpu base */
14280@@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14281 /* Filter out anything that depends on CPUID levels we don't have */
14282 filter_cpuid_features(c, true);
14283
14284+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14285+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14286+#endif
14287+
14288 /* If the model name is still unset, do table lookup. */
14289 if (!c->x86_model_id[0]) {
14290 const char *p;
14291@@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
14292 }
14293 __setup("clearcpuid=", setup_disablecpuid);
14294
14295+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14296+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14297+
14298 #ifdef CONFIG_X86_64
14299 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14300-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14301- (unsigned long) nmi_idt_table };
14302+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14303
14304 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14305 irq_stack_union) __aligned(PAGE_SIZE);
14306@@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14307 EXPORT_PER_CPU_SYMBOL(current_task);
14308
14309 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14310- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14311+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14312 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14313
14314 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14315@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14316 {
14317 memset(regs, 0, sizeof(struct pt_regs));
14318 regs->fs = __KERNEL_PERCPU;
14319- regs->gs = __KERNEL_STACK_CANARY;
14320+ savesegment(gs, regs->gs);
14321
14322 return regs;
14323 }
14324@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14325 int i;
14326
14327 cpu = stack_smp_processor_id();
14328- t = &per_cpu(init_tss, cpu);
14329+ t = init_tss + cpu;
14330 oist = &per_cpu(orig_ist, cpu);
14331
14332 #ifdef CONFIG_NUMA
14333@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14334 switch_to_new_gdt(cpu);
14335 loadsegment(fs, 0);
14336
14337- load_idt((const struct desc_ptr *)&idt_descr);
14338+ load_idt(&idt_descr);
14339
14340 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14341 syscall_init();
14342@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14343 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14344 barrier();
14345
14346- x86_configure_nx();
14347 if (cpu != 0)
14348 enable_x2apic();
14349
14350@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14351 {
14352 int cpu = smp_processor_id();
14353 struct task_struct *curr = current;
14354- struct tss_struct *t = &per_cpu(init_tss, cpu);
14355+ struct tss_struct *t = init_tss + cpu;
14356 struct thread_struct *thread = &curr->thread;
14357
14358 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14359diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14360index 3e6ff6c..54b4992 100644
14361--- a/arch/x86/kernel/cpu/intel.c
14362+++ b/arch/x86/kernel/cpu/intel.c
14363@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14364 * Update the IDT descriptor and reload the IDT so that
14365 * it uses the read-only mapped virtual address.
14366 */
14367- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14368+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14369 load_idt(&idt_descr);
14370 }
14371 #endif
14372diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14373index fc4beb3..f20a5a7 100644
14374--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14375+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14376@@ -199,6 +199,8 @@ static void raise_mce(struct mce *m)
14377
14378 /* Error injection interface */
14379 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14380+ size_t usize, loff_t *off) __size_overflow(3);
14381+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14382 size_t usize, loff_t *off)
14383 {
14384 struct mce m;
14385diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14386index 5a11ae2..a1a1c8a 100644
14387--- a/arch/x86/kernel/cpu/mcheck/mce.c
14388+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14389@@ -42,6 +42,7 @@
14390 #include <asm/processor.h>
14391 #include <asm/mce.h>
14392 #include <asm/msr.h>
14393+#include <asm/local.h>
14394
14395 #include "mce-internal.h"
14396
14397@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14398 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14399 m->cs, m->ip);
14400
14401- if (m->cs == __KERNEL_CS)
14402+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14403 print_symbol("{%s}", m->ip);
14404 pr_cont("\n");
14405 }
14406@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14407
14408 #define PANIC_TIMEOUT 5 /* 5 seconds */
14409
14410-static atomic_t mce_paniced;
14411+static atomic_unchecked_t mce_paniced;
14412
14413 static int fake_panic;
14414-static atomic_t mce_fake_paniced;
14415+static atomic_unchecked_t mce_fake_paniced;
14416
14417 /* Panic in progress. Enable interrupts and wait for final IPI */
14418 static void wait_for_panic(void)
14419@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14420 /*
14421 * Make sure only one CPU runs in machine check panic
14422 */
14423- if (atomic_inc_return(&mce_paniced) > 1)
14424+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14425 wait_for_panic();
14426 barrier();
14427
14428@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14429 console_verbose();
14430 } else {
14431 /* Don't log too much for fake panic */
14432- if (atomic_inc_return(&mce_fake_paniced) > 1)
14433+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14434 return;
14435 }
14436 /* First print corrected ones that are still unlogged */
14437@@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
14438 * might have been modified by someone else.
14439 */
14440 rmb();
14441- if (atomic_read(&mce_paniced))
14442+ if (atomic_read_unchecked(&mce_paniced))
14443 wait_for_panic();
14444 if (!monarch_timeout)
14445 goto out;
14446@@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14447 }
14448
14449 /* Call the installed machine check handler for this CPU setup. */
14450-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14451+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14452 unexpected_machine_check;
14453
14454 /*
14455@@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14456 return;
14457 }
14458
14459+ pax_open_kernel();
14460 machine_check_vector = do_machine_check;
14461+ pax_close_kernel();
14462
14463 __mcheck_cpu_init_generic();
14464 __mcheck_cpu_init_vendor(c);
14465@@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14466 */
14467
14468 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14469-static int mce_chrdev_open_count; /* #times opened */
14470+static local_t mce_chrdev_open_count; /* #times opened */
14471 static int mce_chrdev_open_exclu; /* already open exclusive? */
14472
14473 static int mce_chrdev_open(struct inode *inode, struct file *file)
14474@@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14475 spin_lock(&mce_chrdev_state_lock);
14476
14477 if (mce_chrdev_open_exclu ||
14478- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14479+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14480 spin_unlock(&mce_chrdev_state_lock);
14481
14482 return -EBUSY;
14483@@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14484
14485 if (file->f_flags & O_EXCL)
14486 mce_chrdev_open_exclu = 1;
14487- mce_chrdev_open_count++;
14488+ local_inc(&mce_chrdev_open_count);
14489
14490 spin_unlock(&mce_chrdev_state_lock);
14491
14492@@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14493 {
14494 spin_lock(&mce_chrdev_state_lock);
14495
14496- mce_chrdev_open_count--;
14497+ local_dec(&mce_chrdev_open_count);
14498 mce_chrdev_open_exclu = 0;
14499
14500 spin_unlock(&mce_chrdev_state_lock);
14501@@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
14502 static void mce_reset(void)
14503 {
14504 cpu_missing = 0;
14505- atomic_set(&mce_fake_paniced, 0);
14506+ atomic_set_unchecked(&mce_fake_paniced, 0);
14507 atomic_set(&mce_executing, 0);
14508 atomic_set(&mce_callin, 0);
14509 atomic_set(&global_nwo, 0);
14510diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14511index 5c0e653..0882b0a 100644
14512--- a/arch/x86/kernel/cpu/mcheck/p5.c
14513+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14514@@ -12,6 +12,7 @@
14515 #include <asm/system.h>
14516 #include <asm/mce.h>
14517 #include <asm/msr.h>
14518+#include <asm/pgtable.h>
14519
14520 /* By default disabled */
14521 int mce_p5_enabled __read_mostly;
14522@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14523 if (!cpu_has(c, X86_FEATURE_MCE))
14524 return;
14525
14526+ pax_open_kernel();
14527 machine_check_vector = pentium_machine_check;
14528+ pax_close_kernel();
14529 /* Make sure the vector pointer is visible before we enable MCEs: */
14530 wmb();
14531
14532diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14533index 54060f5..c1a7577 100644
14534--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14535+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14536@@ -11,6 +11,7 @@
14537 #include <asm/system.h>
14538 #include <asm/mce.h>
14539 #include <asm/msr.h>
14540+#include <asm/pgtable.h>
14541
14542 /* Machine check handler for WinChip C6: */
14543 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14544@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14545 {
14546 u32 lo, hi;
14547
14548+ pax_open_kernel();
14549 machine_check_vector = winchip_machine_check;
14550+ pax_close_kernel();
14551 /* Make sure the vector pointer is visible before we enable MCEs: */
14552 wmb();
14553
14554diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
14555index 7928963..1b16001 100644
14556--- a/arch/x86/kernel/cpu/mtrr/if.c
14557+++ b/arch/x86/kernel/cpu/mtrr/if.c
14558@@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
14559 * "base=%Lx size=%Lx type=%s" or "disable=%d"
14560 */
14561 static ssize_t
14562+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
14563+static ssize_t
14564 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
14565 {
14566 int i, err;
14567diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14568index 6b96110..0da73eb 100644
14569--- a/arch/x86/kernel/cpu/mtrr/main.c
14570+++ b/arch/x86/kernel/cpu/mtrr/main.c
14571@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14572 u64 size_or_mask, size_and_mask;
14573 static bool mtrr_aps_delayed_init;
14574
14575-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14576+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14577
14578 const struct mtrr_ops *mtrr_if;
14579
14580diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14581index df5e41f..816c719 100644
14582--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14583+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14584@@ -25,7 +25,7 @@ struct mtrr_ops {
14585 int (*validate_add_page)(unsigned long base, unsigned long size,
14586 unsigned int type);
14587 int (*have_wrcomb)(void);
14588-};
14589+} __do_const;
14590
14591 extern int generic_get_free_region(unsigned long base, unsigned long size,
14592 int replace_reg);
14593diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14594index 5adce10..99284ec 100644
14595--- a/arch/x86/kernel/cpu/perf_event.c
14596+++ b/arch/x86/kernel/cpu/perf_event.c
14597@@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14598 break;
14599
14600 perf_callchain_store(entry, frame.return_address);
14601- fp = frame.next_frame;
14602+ fp = (const void __force_user *)frame.next_frame;
14603 }
14604 }
14605
14606diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14607index 13ad899..f642b9a 100644
14608--- a/arch/x86/kernel/crash.c
14609+++ b/arch/x86/kernel/crash.c
14610@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14611 {
14612 #ifdef CONFIG_X86_32
14613 struct pt_regs fixed_regs;
14614-#endif
14615
14616-#ifdef CONFIG_X86_32
14617- if (!user_mode_vm(regs)) {
14618+ if (!user_mode(regs)) {
14619 crash_fixup_ss_esp(&fixed_regs, regs);
14620 regs = &fixed_regs;
14621 }
14622diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14623index 37250fe..bf2ec74 100644
14624--- a/arch/x86/kernel/doublefault_32.c
14625+++ b/arch/x86/kernel/doublefault_32.c
14626@@ -11,7 +11,7 @@
14627
14628 #define DOUBLEFAULT_STACKSIZE (1024)
14629 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14630-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14631+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14632
14633 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14634
14635@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14636 unsigned long gdt, tss;
14637
14638 store_gdt(&gdt_desc);
14639- gdt = gdt_desc.address;
14640+ gdt = (unsigned long)gdt_desc.address;
14641
14642 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14643
14644@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14645 /* 0x2 bit is always set */
14646 .flags = X86_EFLAGS_SF | 0x2,
14647 .sp = STACK_START,
14648- .es = __USER_DS,
14649+ .es = __KERNEL_DS,
14650 .cs = __KERNEL_CS,
14651 .ss = __KERNEL_DS,
14652- .ds = __USER_DS,
14653+ .ds = __KERNEL_DS,
14654 .fs = __KERNEL_PERCPU,
14655
14656 .__cr3 = __pa_nodebug(swapper_pg_dir),
14657diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14658index 4025fe4..d8451c6 100644
14659--- a/arch/x86/kernel/dumpstack.c
14660+++ b/arch/x86/kernel/dumpstack.c
14661@@ -2,6 +2,9 @@
14662 * Copyright (C) 1991, 1992 Linus Torvalds
14663 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14664 */
14665+#ifdef CONFIG_GRKERNSEC_HIDESYM
14666+#define __INCLUDED_BY_HIDESYM 1
14667+#endif
14668 #include <linux/kallsyms.h>
14669 #include <linux/kprobes.h>
14670 #include <linux/uaccess.h>
14671@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14672 static void
14673 print_ftrace_graph_addr(unsigned long addr, void *data,
14674 const struct stacktrace_ops *ops,
14675- struct thread_info *tinfo, int *graph)
14676+ struct task_struct *task, int *graph)
14677 {
14678- struct task_struct *task = tinfo->task;
14679 unsigned long ret_addr;
14680 int index = task->curr_ret_stack;
14681
14682@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14683 static inline void
14684 print_ftrace_graph_addr(unsigned long addr, void *data,
14685 const struct stacktrace_ops *ops,
14686- struct thread_info *tinfo, int *graph)
14687+ struct task_struct *task, int *graph)
14688 { }
14689 #endif
14690
14691@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14692 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14693 */
14694
14695-static inline int valid_stack_ptr(struct thread_info *tinfo,
14696- void *p, unsigned int size, void *end)
14697+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14698 {
14699- void *t = tinfo;
14700 if (end) {
14701 if (p < end && p >= (end-THREAD_SIZE))
14702 return 1;
14703@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14704 }
14705
14706 unsigned long
14707-print_context_stack(struct thread_info *tinfo,
14708+print_context_stack(struct task_struct *task, void *stack_start,
14709 unsigned long *stack, unsigned long bp,
14710 const struct stacktrace_ops *ops, void *data,
14711 unsigned long *end, int *graph)
14712 {
14713 struct stack_frame *frame = (struct stack_frame *)bp;
14714
14715- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14716+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14717 unsigned long addr;
14718
14719 addr = *stack;
14720@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14721 } else {
14722 ops->address(data, addr, 0);
14723 }
14724- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14725+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14726 }
14727 stack++;
14728 }
14729@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14730 EXPORT_SYMBOL_GPL(print_context_stack);
14731
14732 unsigned long
14733-print_context_stack_bp(struct thread_info *tinfo,
14734+print_context_stack_bp(struct task_struct *task, void *stack_start,
14735 unsigned long *stack, unsigned long bp,
14736 const struct stacktrace_ops *ops, void *data,
14737 unsigned long *end, int *graph)
14738@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14739 struct stack_frame *frame = (struct stack_frame *)bp;
14740 unsigned long *ret_addr = &frame->return_address;
14741
14742- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14743+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14744 unsigned long addr = *ret_addr;
14745
14746 if (!__kernel_text_address(addr))
14747@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14748 ops->address(data, addr, 1);
14749 frame = frame->next_frame;
14750 ret_addr = &frame->return_address;
14751- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14752+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14753 }
14754
14755 return (unsigned long)frame;
14756@@ -186,7 +186,7 @@ void dump_stack(void)
14757
14758 bp = stack_frame(current, NULL);
14759 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14760- current->pid, current->comm, print_tainted(),
14761+ task_pid_nr(current), current->comm, print_tainted(),
14762 init_utsname()->release,
14763 (int)strcspn(init_utsname()->version, " "),
14764 init_utsname()->version);
14765@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14766 }
14767 EXPORT_SYMBOL_GPL(oops_begin);
14768
14769+extern void gr_handle_kernel_exploit(void);
14770+
14771 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14772 {
14773 if (regs && kexec_should_crash(current))
14774@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14775 panic("Fatal exception in interrupt");
14776 if (panic_on_oops)
14777 panic("Fatal exception");
14778- do_exit(signr);
14779+
14780+ gr_handle_kernel_exploit();
14781+
14782+ do_group_exit(signr);
14783 }
14784
14785 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14786@@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14787
14788 show_registers(regs);
14789 #ifdef CONFIG_X86_32
14790- if (user_mode_vm(regs)) {
14791+ if (user_mode(regs)) {
14792 sp = regs->sp;
14793 ss = regs->ss & 0xffff;
14794 } else {
14795@@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14796 unsigned long flags = oops_begin();
14797 int sig = SIGSEGV;
14798
14799- if (!user_mode_vm(regs))
14800+ if (!user_mode(regs))
14801 report_bug(regs->ip, regs);
14802
14803 if (__die(str, regs, err))
14804diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14805index c99f9ed..2a15d80 100644
14806--- a/arch/x86/kernel/dumpstack_32.c
14807+++ b/arch/x86/kernel/dumpstack_32.c
14808@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14809 bp = stack_frame(task, regs);
14810
14811 for (;;) {
14812- struct thread_info *context;
14813+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14814
14815- context = (struct thread_info *)
14816- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14817- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14818+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14819
14820- stack = (unsigned long *)context->previous_esp;
14821- if (!stack)
14822+ if (stack_start == task_stack_page(task))
14823 break;
14824+ stack = *(unsigned long **)stack_start;
14825 if (ops->stack(data, "IRQ") < 0)
14826 break;
14827 touch_nmi_watchdog();
14828@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14829 * When in-kernel, we also print out the stack and code at the
14830 * time of the fault..
14831 */
14832- if (!user_mode_vm(regs)) {
14833+ if (!user_mode(regs)) {
14834 unsigned int code_prologue = code_bytes * 43 / 64;
14835 unsigned int code_len = code_bytes;
14836 unsigned char c;
14837 u8 *ip;
14838+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14839
14840 printk(KERN_EMERG "Stack:\n");
14841 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14842
14843 printk(KERN_EMERG "Code: ");
14844
14845- ip = (u8 *)regs->ip - code_prologue;
14846+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14847 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14848 /* try starting at IP */
14849- ip = (u8 *)regs->ip;
14850+ ip = (u8 *)regs->ip + cs_base;
14851 code_len = code_len - code_prologue + 1;
14852 }
14853 for (i = 0; i < code_len; i++, ip++) {
14854@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14855 printk(KERN_CONT " Bad EIP value.");
14856 break;
14857 }
14858- if (ip == (u8 *)regs->ip)
14859+ if (ip == (u8 *)regs->ip + cs_base)
14860 printk(KERN_CONT "<%02x> ", c);
14861 else
14862 printk(KERN_CONT "%02x ", c);
14863@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14864 {
14865 unsigned short ud2;
14866
14867+ ip = ktla_ktva(ip);
14868 if (ip < PAGE_OFFSET)
14869 return 0;
14870 if (probe_kernel_address((unsigned short *)ip, ud2))
14871@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14872
14873 return ud2 == 0x0b0f;
14874 }
14875+
14876+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14877+void pax_check_alloca(unsigned long size)
14878+{
14879+ unsigned long sp = (unsigned long)&sp, stack_left;
14880+
14881+ /* all kernel stacks are of the same size */
14882+ stack_left = sp & (THREAD_SIZE - 1);
14883+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14884+}
14885+EXPORT_SYMBOL(pax_check_alloca);
14886+#endif
14887diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14888index 17107bd..b2deecf 100644
14889--- a/arch/x86/kernel/dumpstack_64.c
14890+++ b/arch/x86/kernel/dumpstack_64.c
14891@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14892 unsigned long *irq_stack_end =
14893 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14894 unsigned used = 0;
14895- struct thread_info *tinfo;
14896 int graph = 0;
14897 unsigned long dummy;
14898+ void *stack_start;
14899
14900 if (!task)
14901 task = current;
14902@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14903 * current stack address. If the stacks consist of nested
14904 * exceptions
14905 */
14906- tinfo = task_thread_info(task);
14907 for (;;) {
14908 char *id;
14909 unsigned long *estack_end;
14910+
14911 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14912 &used, &id);
14913
14914@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14915 if (ops->stack(data, id) < 0)
14916 break;
14917
14918- bp = ops->walk_stack(tinfo, stack, bp, ops,
14919+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14920 data, estack_end, &graph);
14921 ops->stack(data, "<EOE>");
14922 /*
14923@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14924 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14925 if (ops->stack(data, "IRQ") < 0)
14926 break;
14927- bp = ops->walk_stack(tinfo, stack, bp,
14928+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14929 ops, data, irq_stack_end, &graph);
14930 /*
14931 * We link to the next stack (which would be
14932@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14933 /*
14934 * This handles the process stack:
14935 */
14936- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14937+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14938+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14939 put_cpu();
14940 }
14941 EXPORT_SYMBOL(dump_trace);
14942@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14943
14944 return ud2 == 0x0b0f;
14945 }
14946+
14947+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14948+void pax_check_alloca(unsigned long size)
14949+{
14950+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14951+ unsigned cpu, used;
14952+ char *id;
14953+
14954+ /* check the process stack first */
14955+ stack_start = (unsigned long)task_stack_page(current);
14956+ stack_end = stack_start + THREAD_SIZE;
14957+ if (likely(stack_start <= sp && sp < stack_end)) {
14958+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14959+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14960+ return;
14961+ }
14962+
14963+ cpu = get_cpu();
14964+
14965+ /* check the irq stacks */
14966+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14967+ stack_start = stack_end - IRQ_STACK_SIZE;
14968+ if (stack_start <= sp && sp < stack_end) {
14969+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14970+ put_cpu();
14971+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14972+ return;
14973+ }
14974+
14975+ /* check the exception stacks */
14976+ used = 0;
14977+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14978+ stack_start = stack_end - EXCEPTION_STKSZ;
14979+ if (stack_end && stack_start <= sp && sp < stack_end) {
14980+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14981+ put_cpu();
14982+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14983+ return;
14984+ }
14985+
14986+ put_cpu();
14987+
14988+ /* unknown stack */
14989+ BUG();
14990+}
14991+EXPORT_SYMBOL(pax_check_alloca);
14992+#endif
14993diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14994index 9b9f18b..9fcaa04 100644
14995--- a/arch/x86/kernel/early_printk.c
14996+++ b/arch/x86/kernel/early_printk.c
14997@@ -7,6 +7,7 @@
14998 #include <linux/pci_regs.h>
14999 #include <linux/pci_ids.h>
15000 #include <linux/errno.h>
15001+#include <linux/sched.h>
15002 #include <asm/io.h>
15003 #include <asm/processor.h>
15004 #include <asm/fcntl.h>
15005diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15006index 7b784f4..db6b628 100644
15007--- a/arch/x86/kernel/entry_32.S
15008+++ b/arch/x86/kernel/entry_32.S
15009@@ -179,13 +179,146 @@
15010 /*CFI_REL_OFFSET gs, PT_GS*/
15011 .endm
15012 .macro SET_KERNEL_GS reg
15013+
15014+#ifdef CONFIG_CC_STACKPROTECTOR
15015 movl $(__KERNEL_STACK_CANARY), \reg
15016+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15017+ movl $(__USER_DS), \reg
15018+#else
15019+ xorl \reg, \reg
15020+#endif
15021+
15022 movl \reg, %gs
15023 .endm
15024
15025 #endif /* CONFIG_X86_32_LAZY_GS */
15026
15027-.macro SAVE_ALL
15028+.macro pax_enter_kernel
15029+#ifdef CONFIG_PAX_KERNEXEC
15030+ call pax_enter_kernel
15031+#endif
15032+.endm
15033+
15034+.macro pax_exit_kernel
15035+#ifdef CONFIG_PAX_KERNEXEC
15036+ call pax_exit_kernel
15037+#endif
15038+.endm
15039+
15040+#ifdef CONFIG_PAX_KERNEXEC
15041+ENTRY(pax_enter_kernel)
15042+#ifdef CONFIG_PARAVIRT
15043+ pushl %eax
15044+ pushl %ecx
15045+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15046+ mov %eax, %esi
15047+#else
15048+ mov %cr0, %esi
15049+#endif
15050+ bts $16, %esi
15051+ jnc 1f
15052+ mov %cs, %esi
15053+ cmp $__KERNEL_CS, %esi
15054+ jz 3f
15055+ ljmp $__KERNEL_CS, $3f
15056+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15057+2:
15058+#ifdef CONFIG_PARAVIRT
15059+ mov %esi, %eax
15060+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15061+#else
15062+ mov %esi, %cr0
15063+#endif
15064+3:
15065+#ifdef CONFIG_PARAVIRT
15066+ popl %ecx
15067+ popl %eax
15068+#endif
15069+ ret
15070+ENDPROC(pax_enter_kernel)
15071+
15072+ENTRY(pax_exit_kernel)
15073+#ifdef CONFIG_PARAVIRT
15074+ pushl %eax
15075+ pushl %ecx
15076+#endif
15077+ mov %cs, %esi
15078+ cmp $__KERNEXEC_KERNEL_CS, %esi
15079+ jnz 2f
15080+#ifdef CONFIG_PARAVIRT
15081+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15082+ mov %eax, %esi
15083+#else
15084+ mov %cr0, %esi
15085+#endif
15086+ btr $16, %esi
15087+ ljmp $__KERNEL_CS, $1f
15088+1:
15089+#ifdef CONFIG_PARAVIRT
15090+ mov %esi, %eax
15091+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15092+#else
15093+ mov %esi, %cr0
15094+#endif
15095+2:
15096+#ifdef CONFIG_PARAVIRT
15097+ popl %ecx
15098+ popl %eax
15099+#endif
15100+ ret
15101+ENDPROC(pax_exit_kernel)
15102+#endif
15103+
15104+.macro pax_erase_kstack
15105+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15106+ call pax_erase_kstack
15107+#endif
15108+.endm
15109+
15110+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15111+/*
15112+ * ebp: thread_info
15113+ * ecx, edx: can be clobbered
15114+ */
15115+ENTRY(pax_erase_kstack)
15116+ pushl %edi
15117+ pushl %eax
15118+
15119+ mov TI_lowest_stack(%ebp), %edi
15120+ mov $-0xBEEF, %eax
15121+ std
15122+
15123+1: mov %edi, %ecx
15124+ and $THREAD_SIZE_asm - 1, %ecx
15125+ shr $2, %ecx
15126+ repne scasl
15127+ jecxz 2f
15128+
15129+ cmp $2*16, %ecx
15130+ jc 2f
15131+
15132+ mov $2*16, %ecx
15133+ repe scasl
15134+ jecxz 2f
15135+ jne 1b
15136+
15137+2: cld
15138+ mov %esp, %ecx
15139+ sub %edi, %ecx
15140+ shr $2, %ecx
15141+ rep stosl
15142+
15143+ mov TI_task_thread_sp0(%ebp), %edi
15144+ sub $128, %edi
15145+ mov %edi, TI_lowest_stack(%ebp)
15146+
15147+ popl %eax
15148+ popl %edi
15149+ ret
15150+ENDPROC(pax_erase_kstack)
15151+#endif
15152+
15153+.macro __SAVE_ALL _DS
15154 cld
15155 PUSH_GS
15156 pushl_cfi %fs
15157@@ -208,7 +341,7 @@
15158 CFI_REL_OFFSET ecx, 0
15159 pushl_cfi %ebx
15160 CFI_REL_OFFSET ebx, 0
15161- movl $(__USER_DS), %edx
15162+ movl $\_DS, %edx
15163 movl %edx, %ds
15164 movl %edx, %es
15165 movl $(__KERNEL_PERCPU), %edx
15166@@ -216,6 +349,15 @@
15167 SET_KERNEL_GS %edx
15168 .endm
15169
15170+.macro SAVE_ALL
15171+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15172+ __SAVE_ALL __KERNEL_DS
15173+ pax_enter_kernel
15174+#else
15175+ __SAVE_ALL __USER_DS
15176+#endif
15177+.endm
15178+
15179 .macro RESTORE_INT_REGS
15180 popl_cfi %ebx
15181 CFI_RESTORE ebx
15182@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15183 popfl_cfi
15184 jmp syscall_exit
15185 CFI_ENDPROC
15186-END(ret_from_fork)
15187+ENDPROC(ret_from_fork)
15188
15189 /*
15190 * Interrupt exit functions should be protected against kprobes
15191@@ -335,7 +477,15 @@ resume_userspace_sig:
15192 andl $SEGMENT_RPL_MASK, %eax
15193 #endif
15194 cmpl $USER_RPL, %eax
15195+
15196+#ifdef CONFIG_PAX_KERNEXEC
15197+ jae resume_userspace
15198+
15199+ pax_exit_kernel
15200+ jmp resume_kernel
15201+#else
15202 jb resume_kernel # not returning to v8086 or userspace
15203+#endif
15204
15205 ENTRY(resume_userspace)
15206 LOCKDEP_SYS_EXIT
15207@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15208 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15209 # int/exception return?
15210 jne work_pending
15211- jmp restore_all
15212-END(ret_from_exception)
15213+ jmp restore_all_pax
15214+ENDPROC(ret_from_exception)
15215
15216 #ifdef CONFIG_PREEMPT
15217 ENTRY(resume_kernel)
15218@@ -363,7 +513,7 @@ need_resched:
15219 jz restore_all
15220 call preempt_schedule_irq
15221 jmp need_resched
15222-END(resume_kernel)
15223+ENDPROC(resume_kernel)
15224 #endif
15225 CFI_ENDPROC
15226 /*
15227@@ -397,23 +547,34 @@ sysenter_past_esp:
15228 /*CFI_REL_OFFSET cs, 0*/
15229 /*
15230 * Push current_thread_info()->sysenter_return to the stack.
15231- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15232- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15233 */
15234- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15235+ pushl_cfi $0
15236 CFI_REL_OFFSET eip, 0
15237
15238 pushl_cfi %eax
15239 SAVE_ALL
15240+ GET_THREAD_INFO(%ebp)
15241+ movl TI_sysenter_return(%ebp),%ebp
15242+ movl %ebp,PT_EIP(%esp)
15243 ENABLE_INTERRUPTS(CLBR_NONE)
15244
15245 /*
15246 * Load the potential sixth argument from user stack.
15247 * Careful about security.
15248 */
15249+ movl PT_OLDESP(%esp),%ebp
15250+
15251+#ifdef CONFIG_PAX_MEMORY_UDEREF
15252+ mov PT_OLDSS(%esp),%ds
15253+1: movl %ds:(%ebp),%ebp
15254+ push %ss
15255+ pop %ds
15256+#else
15257 cmpl $__PAGE_OFFSET-3,%ebp
15258 jae syscall_fault
15259 1: movl (%ebp),%ebp
15260+#endif
15261+
15262 movl %ebp,PT_EBP(%esp)
15263 .section __ex_table,"a"
15264 .align 4
15265@@ -436,12 +597,24 @@ sysenter_do_call:
15266 testl $_TIF_ALLWORK_MASK, %ecx
15267 jne sysexit_audit
15268 sysenter_exit:
15269+
15270+#ifdef CONFIG_PAX_RANDKSTACK
15271+ pushl_cfi %eax
15272+ movl %esp, %eax
15273+ call pax_randomize_kstack
15274+ popl_cfi %eax
15275+#endif
15276+
15277+ pax_erase_kstack
15278+
15279 /* if something modifies registers it must also disable sysexit */
15280 movl PT_EIP(%esp), %edx
15281 movl PT_OLDESP(%esp), %ecx
15282 xorl %ebp,%ebp
15283 TRACE_IRQS_ON
15284 1: mov PT_FS(%esp), %fs
15285+2: mov PT_DS(%esp), %ds
15286+3: mov PT_ES(%esp), %es
15287 PTGS_TO_GS
15288 ENABLE_INTERRUPTS_SYSEXIT
15289
15290@@ -458,6 +631,9 @@ sysenter_audit:
15291 movl %eax,%edx /* 2nd arg: syscall number */
15292 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15293 call __audit_syscall_entry
15294+
15295+ pax_erase_kstack
15296+
15297 pushl_cfi %ebx
15298 movl PT_EAX(%esp),%eax /* reload syscall number */
15299 jmp sysenter_do_call
15300@@ -483,11 +659,17 @@ sysexit_audit:
15301
15302 CFI_ENDPROC
15303 .pushsection .fixup,"ax"
15304-2: movl $0,PT_FS(%esp)
15305+4: movl $0,PT_FS(%esp)
15306+ jmp 1b
15307+5: movl $0,PT_DS(%esp)
15308+ jmp 1b
15309+6: movl $0,PT_ES(%esp)
15310 jmp 1b
15311 .section __ex_table,"a"
15312 .align 4
15313- .long 1b,2b
15314+ .long 1b,4b
15315+ .long 2b,5b
15316+ .long 3b,6b
15317 .popsection
15318 PTGS_TO_GS_EX
15319 ENDPROC(ia32_sysenter_target)
15320@@ -520,6 +702,15 @@ syscall_exit:
15321 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15322 jne syscall_exit_work
15323
15324+restore_all_pax:
15325+
15326+#ifdef CONFIG_PAX_RANDKSTACK
15327+ movl %esp, %eax
15328+ call pax_randomize_kstack
15329+#endif
15330+
15331+ pax_erase_kstack
15332+
15333 restore_all:
15334 TRACE_IRQS_IRET
15335 restore_all_notrace:
15336@@ -579,14 +770,34 @@ ldt_ss:
15337 * compensating for the offset by changing to the ESPFIX segment with
15338 * a base address that matches for the difference.
15339 */
15340-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15341+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15342 mov %esp, %edx /* load kernel esp */
15343 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15344 mov %dx, %ax /* eax: new kernel esp */
15345 sub %eax, %edx /* offset (low word is 0) */
15346+#ifdef CONFIG_SMP
15347+ movl PER_CPU_VAR(cpu_number), %ebx
15348+ shll $PAGE_SHIFT_asm, %ebx
15349+ addl $cpu_gdt_table, %ebx
15350+#else
15351+ movl $cpu_gdt_table, %ebx
15352+#endif
15353 shr $16, %edx
15354- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15355- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15356+
15357+#ifdef CONFIG_PAX_KERNEXEC
15358+ mov %cr0, %esi
15359+ btr $16, %esi
15360+ mov %esi, %cr0
15361+#endif
15362+
15363+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15364+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15365+
15366+#ifdef CONFIG_PAX_KERNEXEC
15367+ bts $16, %esi
15368+ mov %esi, %cr0
15369+#endif
15370+
15371 pushl_cfi $__ESPFIX_SS
15372 pushl_cfi %eax /* new kernel esp */
15373 /* Disable interrupts, but do not irqtrace this section: we
15374@@ -615,38 +826,30 @@ work_resched:
15375 movl TI_flags(%ebp), %ecx
15376 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15377 # than syscall tracing?
15378- jz restore_all
15379+ jz restore_all_pax
15380 testb $_TIF_NEED_RESCHED, %cl
15381 jnz work_resched
15382
15383 work_notifysig: # deal with pending signals and
15384 # notify-resume requests
15385+ movl %esp, %eax
15386 #ifdef CONFIG_VM86
15387 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15388- movl %esp, %eax
15389- jne work_notifysig_v86 # returning to kernel-space or
15390+ jz 1f # returning to kernel-space or
15391 # vm86-space
15392- TRACE_IRQS_ON
15393- ENABLE_INTERRUPTS(CLBR_NONE)
15394- xorl %edx, %edx
15395- call do_notify_resume
15396- jmp resume_userspace_sig
15397
15398- ALIGN
15399-work_notifysig_v86:
15400 pushl_cfi %ecx # save ti_flags for do_notify_resume
15401 call save_v86_state # %eax contains pt_regs pointer
15402 popl_cfi %ecx
15403 movl %eax, %esp
15404-#else
15405- movl %esp, %eax
15406+1:
15407 #endif
15408 TRACE_IRQS_ON
15409 ENABLE_INTERRUPTS(CLBR_NONE)
15410 xorl %edx, %edx
15411 call do_notify_resume
15412 jmp resume_userspace_sig
15413-END(work_pending)
15414+ENDPROC(work_pending)
15415
15416 # perform syscall exit tracing
15417 ALIGN
15418@@ -654,11 +857,14 @@ syscall_trace_entry:
15419 movl $-ENOSYS,PT_EAX(%esp)
15420 movl %esp, %eax
15421 call syscall_trace_enter
15422+
15423+ pax_erase_kstack
15424+
15425 /* What it returned is what we'll actually use. */
15426 cmpl $(NR_syscalls), %eax
15427 jnae syscall_call
15428 jmp syscall_exit
15429-END(syscall_trace_entry)
15430+ENDPROC(syscall_trace_entry)
15431
15432 # perform syscall exit tracing
15433 ALIGN
15434@@ -671,20 +877,24 @@ syscall_exit_work:
15435 movl %esp, %eax
15436 call syscall_trace_leave
15437 jmp resume_userspace
15438-END(syscall_exit_work)
15439+ENDPROC(syscall_exit_work)
15440 CFI_ENDPROC
15441
15442 RING0_INT_FRAME # can't unwind into user space anyway
15443 syscall_fault:
15444+#ifdef CONFIG_PAX_MEMORY_UDEREF
15445+ push %ss
15446+ pop %ds
15447+#endif
15448 GET_THREAD_INFO(%ebp)
15449 movl $-EFAULT,PT_EAX(%esp)
15450 jmp resume_userspace
15451-END(syscall_fault)
15452+ENDPROC(syscall_fault)
15453
15454 syscall_badsys:
15455 movl $-ENOSYS,PT_EAX(%esp)
15456 jmp resume_userspace
15457-END(syscall_badsys)
15458+ENDPROC(syscall_badsys)
15459 CFI_ENDPROC
15460 /*
15461 * End of kprobes section
15462@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15463 CFI_ENDPROC
15464 ENDPROC(ptregs_clone)
15465
15466+ ALIGN;
15467+ENTRY(kernel_execve)
15468+ CFI_STARTPROC
15469+ pushl_cfi %ebp
15470+ sub $PT_OLDSS+4,%esp
15471+ pushl_cfi %edi
15472+ pushl_cfi %ecx
15473+ pushl_cfi %eax
15474+ lea 3*4(%esp),%edi
15475+ mov $PT_OLDSS/4+1,%ecx
15476+ xorl %eax,%eax
15477+ rep stosl
15478+ popl_cfi %eax
15479+ popl_cfi %ecx
15480+ popl_cfi %edi
15481+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15482+ pushl_cfi %esp
15483+ call sys_execve
15484+ add $4,%esp
15485+ CFI_ADJUST_CFA_OFFSET -4
15486+ GET_THREAD_INFO(%ebp)
15487+ test %eax,%eax
15488+ jz syscall_exit
15489+ add $PT_OLDSS+4,%esp
15490+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15491+ popl_cfi %ebp
15492+ ret
15493+ CFI_ENDPROC
15494+ENDPROC(kernel_execve)
15495+
15496 .macro FIXUP_ESPFIX_STACK
15497 /*
15498 * Switch back for ESPFIX stack to the normal zerobased stack
15499@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15500 * normal stack and adjusts ESP with the matching offset.
15501 */
15502 /* fixup the stack */
15503- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15504- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15505+#ifdef CONFIG_SMP
15506+ movl PER_CPU_VAR(cpu_number), %ebx
15507+ shll $PAGE_SHIFT_asm, %ebx
15508+ addl $cpu_gdt_table, %ebx
15509+#else
15510+ movl $cpu_gdt_table, %ebx
15511+#endif
15512+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15513+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15514 shl $16, %eax
15515 addl %esp, %eax /* the adjusted stack pointer */
15516 pushl_cfi $__KERNEL_DS
15517@@ -819,7 +1066,7 @@ vector=vector+1
15518 .endr
15519 2: jmp common_interrupt
15520 .endr
15521-END(irq_entries_start)
15522+ENDPROC(irq_entries_start)
15523
15524 .previous
15525 END(interrupt)
15526@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15527 pushl_cfi $do_coprocessor_error
15528 jmp error_code
15529 CFI_ENDPROC
15530-END(coprocessor_error)
15531+ENDPROC(coprocessor_error)
15532
15533 ENTRY(simd_coprocessor_error)
15534 RING0_INT_FRAME
15535@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15536 #endif
15537 jmp error_code
15538 CFI_ENDPROC
15539-END(simd_coprocessor_error)
15540+ENDPROC(simd_coprocessor_error)
15541
15542 ENTRY(device_not_available)
15543 RING0_INT_FRAME
15544@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15545 pushl_cfi $do_device_not_available
15546 jmp error_code
15547 CFI_ENDPROC
15548-END(device_not_available)
15549+ENDPROC(device_not_available)
15550
15551 #ifdef CONFIG_PARAVIRT
15552 ENTRY(native_iret)
15553@@ -905,12 +1152,12 @@ ENTRY(native_iret)
15554 .align 4
15555 .long native_iret, iret_exc
15556 .previous
15557-END(native_iret)
15558+ENDPROC(native_iret)
15559
15560 ENTRY(native_irq_enable_sysexit)
15561 sti
15562 sysexit
15563-END(native_irq_enable_sysexit)
15564+ENDPROC(native_irq_enable_sysexit)
15565 #endif
15566
15567 ENTRY(overflow)
15568@@ -919,7 +1166,7 @@ ENTRY(overflow)
15569 pushl_cfi $do_overflow
15570 jmp error_code
15571 CFI_ENDPROC
15572-END(overflow)
15573+ENDPROC(overflow)
15574
15575 ENTRY(bounds)
15576 RING0_INT_FRAME
15577@@ -927,7 +1174,7 @@ ENTRY(bounds)
15578 pushl_cfi $do_bounds
15579 jmp error_code
15580 CFI_ENDPROC
15581-END(bounds)
15582+ENDPROC(bounds)
15583
15584 ENTRY(invalid_op)
15585 RING0_INT_FRAME
15586@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15587 pushl_cfi $do_invalid_op
15588 jmp error_code
15589 CFI_ENDPROC
15590-END(invalid_op)
15591+ENDPROC(invalid_op)
15592
15593 ENTRY(coprocessor_segment_overrun)
15594 RING0_INT_FRAME
15595@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15596 pushl_cfi $do_coprocessor_segment_overrun
15597 jmp error_code
15598 CFI_ENDPROC
15599-END(coprocessor_segment_overrun)
15600+ENDPROC(coprocessor_segment_overrun)
15601
15602 ENTRY(invalid_TSS)
15603 RING0_EC_FRAME
15604 pushl_cfi $do_invalid_TSS
15605 jmp error_code
15606 CFI_ENDPROC
15607-END(invalid_TSS)
15608+ENDPROC(invalid_TSS)
15609
15610 ENTRY(segment_not_present)
15611 RING0_EC_FRAME
15612 pushl_cfi $do_segment_not_present
15613 jmp error_code
15614 CFI_ENDPROC
15615-END(segment_not_present)
15616+ENDPROC(segment_not_present)
15617
15618 ENTRY(stack_segment)
15619 RING0_EC_FRAME
15620 pushl_cfi $do_stack_segment
15621 jmp error_code
15622 CFI_ENDPROC
15623-END(stack_segment)
15624+ENDPROC(stack_segment)
15625
15626 ENTRY(alignment_check)
15627 RING0_EC_FRAME
15628 pushl_cfi $do_alignment_check
15629 jmp error_code
15630 CFI_ENDPROC
15631-END(alignment_check)
15632+ENDPROC(alignment_check)
15633
15634 ENTRY(divide_error)
15635 RING0_INT_FRAME
15636@@ -979,7 +1226,7 @@ ENTRY(divide_error)
15637 pushl_cfi $do_divide_error
15638 jmp error_code
15639 CFI_ENDPROC
15640-END(divide_error)
15641+ENDPROC(divide_error)
15642
15643 #ifdef CONFIG_X86_MCE
15644 ENTRY(machine_check)
15645@@ -988,7 +1235,7 @@ ENTRY(machine_check)
15646 pushl_cfi machine_check_vector
15647 jmp error_code
15648 CFI_ENDPROC
15649-END(machine_check)
15650+ENDPROC(machine_check)
15651 #endif
15652
15653 ENTRY(spurious_interrupt_bug)
15654@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15655 pushl_cfi $do_spurious_interrupt_bug
15656 jmp error_code
15657 CFI_ENDPROC
15658-END(spurious_interrupt_bug)
15659+ENDPROC(spurious_interrupt_bug)
15660 /*
15661 * End of kprobes section
15662 */
15663@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15664
15665 ENTRY(mcount)
15666 ret
15667-END(mcount)
15668+ENDPROC(mcount)
15669
15670 ENTRY(ftrace_caller)
15671 cmpl $0, function_trace_stop
15672@@ -1141,7 +1388,7 @@ ftrace_graph_call:
15673 .globl ftrace_stub
15674 ftrace_stub:
15675 ret
15676-END(ftrace_caller)
15677+ENDPROC(ftrace_caller)
15678
15679 #else /* ! CONFIG_DYNAMIC_FTRACE */
15680
15681@@ -1177,7 +1424,7 @@ trace:
15682 popl %ecx
15683 popl %eax
15684 jmp ftrace_stub
15685-END(mcount)
15686+ENDPROC(mcount)
15687 #endif /* CONFIG_DYNAMIC_FTRACE */
15688 #endif /* CONFIG_FUNCTION_TRACER */
15689
15690@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15691 popl %ecx
15692 popl %eax
15693 ret
15694-END(ftrace_graph_caller)
15695+ENDPROC(ftrace_graph_caller)
15696
15697 .globl return_to_handler
15698 return_to_handler:
15699@@ -1253,15 +1500,18 @@ error_code:
15700 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15701 REG_TO_PTGS %ecx
15702 SET_KERNEL_GS %ecx
15703- movl $(__USER_DS), %ecx
15704+ movl $(__KERNEL_DS), %ecx
15705 movl %ecx, %ds
15706 movl %ecx, %es
15707+
15708+ pax_enter_kernel
15709+
15710 TRACE_IRQS_OFF
15711 movl %esp,%eax # pt_regs pointer
15712 call *%edi
15713 jmp ret_from_exception
15714 CFI_ENDPROC
15715-END(page_fault)
15716+ENDPROC(page_fault)
15717
15718 /*
15719 * Debug traps and NMI can happen at the one SYSENTER instruction
15720@@ -1303,7 +1553,7 @@ debug_stack_correct:
15721 call do_debug
15722 jmp ret_from_exception
15723 CFI_ENDPROC
15724-END(debug)
15725+ENDPROC(debug)
15726
15727 /*
15728 * NMI is doubly nasty. It can happen _while_ we're handling
15729@@ -1340,6 +1590,9 @@ nmi_stack_correct:
15730 xorl %edx,%edx # zero error code
15731 movl %esp,%eax # pt_regs pointer
15732 call do_nmi
15733+
15734+ pax_exit_kernel
15735+
15736 jmp restore_all_notrace
15737 CFI_ENDPROC
15738
15739@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15740 FIXUP_ESPFIX_STACK # %eax == %esp
15741 xorl %edx,%edx # zero error code
15742 call do_nmi
15743+
15744+ pax_exit_kernel
15745+
15746 RESTORE_REGS
15747 lss 12+4(%esp), %esp # back to espfix stack
15748 CFI_ADJUST_CFA_OFFSET -24
15749 jmp irq_return
15750 CFI_ENDPROC
15751-END(nmi)
15752+ENDPROC(nmi)
15753
15754 ENTRY(int3)
15755 RING0_INT_FRAME
15756@@ -1393,14 +1649,14 @@ ENTRY(int3)
15757 call do_int3
15758 jmp ret_from_exception
15759 CFI_ENDPROC
15760-END(int3)
15761+ENDPROC(int3)
15762
15763 ENTRY(general_protection)
15764 RING0_EC_FRAME
15765 pushl_cfi $do_general_protection
15766 jmp error_code
15767 CFI_ENDPROC
15768-END(general_protection)
15769+ENDPROC(general_protection)
15770
15771 #ifdef CONFIG_KVM_GUEST
15772 ENTRY(async_page_fault)
15773@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15774 pushl_cfi $do_async_page_fault
15775 jmp error_code
15776 CFI_ENDPROC
15777-END(async_page_fault)
15778+ENDPROC(async_page_fault)
15779 #endif
15780
15781 /*
15782diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15783index 1333d98..b340ca2 100644
15784--- a/arch/x86/kernel/entry_64.S
15785+++ b/arch/x86/kernel/entry_64.S
15786@@ -56,6 +56,8 @@
15787 #include <asm/ftrace.h>
15788 #include <asm/percpu.h>
15789 #include <linux/err.h>
15790+#include <asm/pgtable.h>
15791+#include <asm/alternative-asm.h>
15792
15793 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15794 #include <linux/elf-em.h>
15795@@ -69,8 +71,9 @@
15796 #ifdef CONFIG_FUNCTION_TRACER
15797 #ifdef CONFIG_DYNAMIC_FTRACE
15798 ENTRY(mcount)
15799+ pax_force_retaddr
15800 retq
15801-END(mcount)
15802+ENDPROC(mcount)
15803
15804 ENTRY(ftrace_caller)
15805 cmpl $0, function_trace_stop
15806@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15807 #endif
15808
15809 GLOBAL(ftrace_stub)
15810+ pax_force_retaddr
15811 retq
15812-END(ftrace_caller)
15813+ENDPROC(ftrace_caller)
15814
15815 #else /* ! CONFIG_DYNAMIC_FTRACE */
15816 ENTRY(mcount)
15817@@ -113,6 +117,7 @@ ENTRY(mcount)
15818 #endif
15819
15820 GLOBAL(ftrace_stub)
15821+ pax_force_retaddr
15822 retq
15823
15824 trace:
15825@@ -122,12 +127,13 @@ trace:
15826 movq 8(%rbp), %rsi
15827 subq $MCOUNT_INSN_SIZE, %rdi
15828
15829+ pax_force_fptr ftrace_trace_function
15830 call *ftrace_trace_function
15831
15832 MCOUNT_RESTORE_FRAME
15833
15834 jmp ftrace_stub
15835-END(mcount)
15836+ENDPROC(mcount)
15837 #endif /* CONFIG_DYNAMIC_FTRACE */
15838 #endif /* CONFIG_FUNCTION_TRACER */
15839
15840@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15841
15842 MCOUNT_RESTORE_FRAME
15843
15844+ pax_force_retaddr
15845 retq
15846-END(ftrace_graph_caller)
15847+ENDPROC(ftrace_graph_caller)
15848
15849 GLOBAL(return_to_handler)
15850 subq $24, %rsp
15851@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15852 movq 8(%rsp), %rdx
15853 movq (%rsp), %rax
15854 addq $24, %rsp
15855+ pax_force_fptr %rdi
15856 jmp *%rdi
15857 #endif
15858
15859@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15860 ENDPROC(native_usergs_sysret64)
15861 #endif /* CONFIG_PARAVIRT */
15862
15863+ .macro ljmpq sel, off
15864+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15865+ .byte 0x48; ljmp *1234f(%rip)
15866+ .pushsection .rodata
15867+ .align 16
15868+ 1234: .quad \off; .word \sel
15869+ .popsection
15870+#else
15871+ pushq $\sel
15872+ pushq $\off
15873+ lretq
15874+#endif
15875+ .endm
15876+
15877+ .macro pax_enter_kernel
15878+ pax_set_fptr_mask
15879+#ifdef CONFIG_PAX_KERNEXEC
15880+ call pax_enter_kernel
15881+#endif
15882+ .endm
15883+
15884+ .macro pax_exit_kernel
15885+#ifdef CONFIG_PAX_KERNEXEC
15886+ call pax_exit_kernel
15887+#endif
15888+ .endm
15889+
15890+#ifdef CONFIG_PAX_KERNEXEC
15891+ENTRY(pax_enter_kernel)
15892+ pushq %rdi
15893+
15894+#ifdef CONFIG_PARAVIRT
15895+ PV_SAVE_REGS(CLBR_RDI)
15896+#endif
15897+
15898+ GET_CR0_INTO_RDI
15899+ bts $16,%rdi
15900+ jnc 3f
15901+ mov %cs,%edi
15902+ cmp $__KERNEL_CS,%edi
15903+ jnz 2f
15904+1:
15905+
15906+#ifdef CONFIG_PARAVIRT
15907+ PV_RESTORE_REGS(CLBR_RDI)
15908+#endif
15909+
15910+ popq %rdi
15911+ pax_force_retaddr
15912+ retq
15913+
15914+2: ljmpq __KERNEL_CS,1f
15915+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15916+4: SET_RDI_INTO_CR0
15917+ jmp 1b
15918+ENDPROC(pax_enter_kernel)
15919+
15920+ENTRY(pax_exit_kernel)
15921+ pushq %rdi
15922+
15923+#ifdef CONFIG_PARAVIRT
15924+ PV_SAVE_REGS(CLBR_RDI)
15925+#endif
15926+
15927+ mov %cs,%rdi
15928+ cmp $__KERNEXEC_KERNEL_CS,%edi
15929+ jz 2f
15930+1:
15931+
15932+#ifdef CONFIG_PARAVIRT
15933+ PV_RESTORE_REGS(CLBR_RDI);
15934+#endif
15935+
15936+ popq %rdi
15937+ pax_force_retaddr
15938+ retq
15939+
15940+2: GET_CR0_INTO_RDI
15941+ btr $16,%rdi
15942+ ljmpq __KERNEL_CS,3f
15943+3: SET_RDI_INTO_CR0
15944+ jmp 1b
15945+#ifdef CONFIG_PARAVIRT
15946+ PV_RESTORE_REGS(CLBR_RDI);
15947+#endif
15948+
15949+ popq %rdi
15950+ pax_force_retaddr
15951+ retq
15952+ENDPROC(pax_exit_kernel)
15953+#endif
15954+
15955+ .macro pax_enter_kernel_user
15956+ pax_set_fptr_mask
15957+#ifdef CONFIG_PAX_MEMORY_UDEREF
15958+ call pax_enter_kernel_user
15959+#endif
15960+ .endm
15961+
15962+ .macro pax_exit_kernel_user
15963+#ifdef CONFIG_PAX_MEMORY_UDEREF
15964+ call pax_exit_kernel_user
15965+#endif
15966+#ifdef CONFIG_PAX_RANDKSTACK
15967+ pushq %rax
15968+ call pax_randomize_kstack
15969+ popq %rax
15970+#endif
15971+ .endm
15972+
15973+#ifdef CONFIG_PAX_MEMORY_UDEREF
15974+ENTRY(pax_enter_kernel_user)
15975+ pushq %rdi
15976+ pushq %rbx
15977+
15978+#ifdef CONFIG_PARAVIRT
15979+ PV_SAVE_REGS(CLBR_RDI)
15980+#endif
15981+
15982+ GET_CR3_INTO_RDI
15983+ mov %rdi,%rbx
15984+ add $__START_KERNEL_map,%rbx
15985+ sub phys_base(%rip),%rbx
15986+
15987+#ifdef CONFIG_PARAVIRT
15988+ pushq %rdi
15989+ cmpl $0, pv_info+PARAVIRT_enabled
15990+ jz 1f
15991+ i = 0
15992+ .rept USER_PGD_PTRS
15993+ mov i*8(%rbx),%rsi
15994+ mov $0,%sil
15995+ lea i*8(%rbx),%rdi
15996+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15997+ i = i + 1
15998+ .endr
15999+ jmp 2f
16000+1:
16001+#endif
16002+
16003+ i = 0
16004+ .rept USER_PGD_PTRS
16005+ movb $0,i*8(%rbx)
16006+ i = i + 1
16007+ .endr
16008+
16009+#ifdef CONFIG_PARAVIRT
16010+2: popq %rdi
16011+#endif
16012+ SET_RDI_INTO_CR3
16013+
16014+#ifdef CONFIG_PAX_KERNEXEC
16015+ GET_CR0_INTO_RDI
16016+ bts $16,%rdi
16017+ SET_RDI_INTO_CR0
16018+#endif
16019+
16020+#ifdef CONFIG_PARAVIRT
16021+ PV_RESTORE_REGS(CLBR_RDI)
16022+#endif
16023+
16024+ popq %rbx
16025+ popq %rdi
16026+ pax_force_retaddr
16027+ retq
16028+ENDPROC(pax_enter_kernel_user)
16029+
16030+ENTRY(pax_exit_kernel_user)
16031+ push %rdi
16032+
16033+#ifdef CONFIG_PARAVIRT
16034+ pushq %rbx
16035+ PV_SAVE_REGS(CLBR_RDI)
16036+#endif
16037+
16038+#ifdef CONFIG_PAX_KERNEXEC
16039+ GET_CR0_INTO_RDI
16040+ btr $16,%rdi
16041+ SET_RDI_INTO_CR0
16042+#endif
16043+
16044+ GET_CR3_INTO_RDI
16045+ add $__START_KERNEL_map,%rdi
16046+ sub phys_base(%rip),%rdi
16047+
16048+#ifdef CONFIG_PARAVIRT
16049+ cmpl $0, pv_info+PARAVIRT_enabled
16050+ jz 1f
16051+ mov %rdi,%rbx
16052+ i = 0
16053+ .rept USER_PGD_PTRS
16054+ mov i*8(%rbx),%rsi
16055+ mov $0x67,%sil
16056+ lea i*8(%rbx),%rdi
16057+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16058+ i = i + 1
16059+ .endr
16060+ jmp 2f
16061+1:
16062+#endif
16063+
16064+ i = 0
16065+ .rept USER_PGD_PTRS
16066+ movb $0x67,i*8(%rdi)
16067+ i = i + 1
16068+ .endr
16069+
16070+#ifdef CONFIG_PARAVIRT
16071+2: PV_RESTORE_REGS(CLBR_RDI)
16072+ popq %rbx
16073+#endif
16074+
16075+ popq %rdi
16076+ pax_force_retaddr
16077+ retq
16078+ENDPROC(pax_exit_kernel_user)
16079+#endif
16080+
16081+.macro pax_erase_kstack
16082+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16083+ call pax_erase_kstack
16084+#endif
16085+.endm
16086+
16087+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16088+/*
16089+ * r11: thread_info
16090+ * rcx, rdx: can be clobbered
16091+ */
16092+ENTRY(pax_erase_kstack)
16093+ pushq %rdi
16094+ pushq %rax
16095+ pushq %r11
16096+
16097+ GET_THREAD_INFO(%r11)
16098+ mov TI_lowest_stack(%r11), %rdi
16099+ mov $-0xBEEF, %rax
16100+ std
16101+
16102+1: mov %edi, %ecx
16103+ and $THREAD_SIZE_asm - 1, %ecx
16104+ shr $3, %ecx
16105+ repne scasq
16106+ jecxz 2f
16107+
16108+ cmp $2*8, %ecx
16109+ jc 2f
16110+
16111+ mov $2*8, %ecx
16112+ repe scasq
16113+ jecxz 2f
16114+ jne 1b
16115+
16116+2: cld
16117+ mov %esp, %ecx
16118+ sub %edi, %ecx
16119+
16120+ cmp $THREAD_SIZE_asm, %rcx
16121+ jb 3f
16122+ ud2
16123+3:
16124+
16125+ shr $3, %ecx
16126+ rep stosq
16127+
16128+ mov TI_task_thread_sp0(%r11), %rdi
16129+ sub $256, %rdi
16130+ mov %rdi, TI_lowest_stack(%r11)
16131+
16132+ popq %r11
16133+ popq %rax
16134+ popq %rdi
16135+ pax_force_retaddr
16136+ ret
16137+ENDPROC(pax_erase_kstack)
16138+#endif
16139
16140 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16141 #ifdef CONFIG_TRACE_IRQFLAGS
16142@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16143 .endm
16144
16145 .macro UNFAKE_STACK_FRAME
16146- addq $8*6, %rsp
16147- CFI_ADJUST_CFA_OFFSET -(6*8)
16148+ addq $8*6 + ARG_SKIP, %rsp
16149+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16150 .endm
16151
16152 /*
16153@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16154 movq %rsp, %rsi
16155
16156 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16157- testl $3, CS(%rdi)
16158+ testb $3, CS(%rdi)
16159 je 1f
16160 SWAPGS
16161 /*
16162@@ -356,9 +640,10 @@ ENTRY(save_rest)
16163 movq_cfi r15, R15+16
16164 movq %r11, 8(%rsp) /* return address */
16165 FIXUP_TOP_OF_STACK %r11, 16
16166+ pax_force_retaddr
16167 ret
16168 CFI_ENDPROC
16169-END(save_rest)
16170+ENDPROC(save_rest)
16171
16172 /* save complete stack frame */
16173 .pushsection .kprobes.text, "ax"
16174@@ -387,9 +672,10 @@ ENTRY(save_paranoid)
16175 js 1f /* negative -> in kernel */
16176 SWAPGS
16177 xorl %ebx,%ebx
16178-1: ret
16179+1: pax_force_retaddr_bts
16180+ ret
16181 CFI_ENDPROC
16182-END(save_paranoid)
16183+ENDPROC(save_paranoid)
16184 .popsection
16185
16186 /*
16187@@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
16188
16189 RESTORE_REST
16190
16191- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16192+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16193 jz retint_restore_args
16194
16195 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16196@@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
16197 jmp ret_from_sys_call # go to the SYSRET fastpath
16198
16199 CFI_ENDPROC
16200-END(ret_from_fork)
16201+ENDPROC(ret_from_fork)
16202
16203 /*
16204 * System call entry. Up to 6 arguments in registers are supported.
16205@@ -457,7 +743,7 @@ END(ret_from_fork)
16206 ENTRY(system_call)
16207 CFI_STARTPROC simple
16208 CFI_SIGNAL_FRAME
16209- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16210+ CFI_DEF_CFA rsp,0
16211 CFI_REGISTER rip,rcx
16212 /*CFI_REGISTER rflags,r11*/
16213 SWAPGS_UNSAFE_STACK
16214@@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
16215
16216 movq %rsp,PER_CPU_VAR(old_rsp)
16217 movq PER_CPU_VAR(kernel_stack),%rsp
16218+ SAVE_ARGS 8*6,0
16219+ pax_enter_kernel_user
16220 /*
16221 * No need to follow this irqs off/on section - it's straight
16222 * and short:
16223 */
16224 ENABLE_INTERRUPTS(CLBR_NONE)
16225- SAVE_ARGS 8,0
16226 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16227 movq %rcx,RIP-ARGOFFSET(%rsp)
16228 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16229- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16230+ GET_THREAD_INFO(%rcx)
16231+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16232 jnz tracesys
16233 system_call_fastpath:
16234 cmpq $__NR_syscall_max,%rax
16235 ja badsys
16236- movq %r10,%rcx
16237+ movq R10-ARGOFFSET(%rsp),%rcx
16238 call *sys_call_table(,%rax,8) # XXX: rip relative
16239 movq %rax,RAX-ARGOFFSET(%rsp)
16240 /*
16241@@ -498,10 +786,13 @@ sysret_check:
16242 LOCKDEP_SYS_EXIT
16243 DISABLE_INTERRUPTS(CLBR_NONE)
16244 TRACE_IRQS_OFF
16245- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16246+ GET_THREAD_INFO(%rcx)
16247+ movl TI_flags(%rcx),%edx
16248 andl %edi,%edx
16249 jnz sysret_careful
16250 CFI_REMEMBER_STATE
16251+ pax_exit_kernel_user
16252+ pax_erase_kstack
16253 /*
16254 * sysretq will re-enable interrupts:
16255 */
16256@@ -553,14 +844,18 @@ badsys:
16257 * jump back to the normal fast path.
16258 */
16259 auditsys:
16260- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16261+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16262 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16263 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16264 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16265 movq %rax,%rsi /* 2nd arg: syscall number */
16266 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16267 call __audit_syscall_entry
16268+
16269+ pax_erase_kstack
16270+
16271 LOAD_ARGS 0 /* reload call-clobbered registers */
16272+ pax_set_fptr_mask
16273 jmp system_call_fastpath
16274
16275 /*
16276@@ -581,7 +876,7 @@ sysret_audit:
16277 /* Do syscall tracing */
16278 tracesys:
16279 #ifdef CONFIG_AUDITSYSCALL
16280- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16281+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16282 jz auditsys
16283 #endif
16284 SAVE_REST
16285@@ -589,16 +884,20 @@ tracesys:
16286 FIXUP_TOP_OF_STACK %rdi
16287 movq %rsp,%rdi
16288 call syscall_trace_enter
16289+
16290+ pax_erase_kstack
16291+
16292 /*
16293 * Reload arg registers from stack in case ptrace changed them.
16294 * We don't reload %rax because syscall_trace_enter() returned
16295 * the value it wants us to use in the table lookup.
16296 */
16297 LOAD_ARGS ARGOFFSET, 1
16298+ pax_set_fptr_mask
16299 RESTORE_REST
16300 cmpq $__NR_syscall_max,%rax
16301 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16302- movq %r10,%rcx /* fixup for C */
16303+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16304 call *sys_call_table(,%rax,8)
16305 movq %rax,RAX-ARGOFFSET(%rsp)
16306 /* Use IRET because user could have changed frame */
16307@@ -619,6 +918,7 @@ GLOBAL(int_with_check)
16308 andl %edi,%edx
16309 jnz int_careful
16310 andl $~TS_COMPAT,TI_status(%rcx)
16311+ pax_erase_kstack
16312 jmp retint_swapgs
16313
16314 /* Either reschedule or signal or syscall exit tracking needed. */
16315@@ -665,7 +965,7 @@ int_restore_rest:
16316 TRACE_IRQS_OFF
16317 jmp int_with_check
16318 CFI_ENDPROC
16319-END(system_call)
16320+ENDPROC(system_call)
16321
16322 /*
16323 * Certain special system calls that need to save a complete full stack frame.
16324@@ -681,7 +981,7 @@ ENTRY(\label)
16325 call \func
16326 jmp ptregscall_common
16327 CFI_ENDPROC
16328-END(\label)
16329+ENDPROC(\label)
16330 .endm
16331
16332 PTREGSCALL stub_clone, sys_clone, %r8
16333@@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
16334 movq_cfi_restore R12+8, r12
16335 movq_cfi_restore RBP+8, rbp
16336 movq_cfi_restore RBX+8, rbx
16337+ pax_force_retaddr
16338 ret $REST_SKIP /* pop extended registers */
16339 CFI_ENDPROC
16340-END(ptregscall_common)
16341+ENDPROC(ptregscall_common)
16342
16343 ENTRY(stub_execve)
16344 CFI_STARTPROC
16345@@ -716,7 +1017,7 @@ ENTRY(stub_execve)
16346 RESTORE_REST
16347 jmp int_ret_from_sys_call
16348 CFI_ENDPROC
16349-END(stub_execve)
16350+ENDPROC(stub_execve)
16351
16352 /*
16353 * sigreturn is special because it needs to restore all registers on return.
16354@@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
16355 RESTORE_REST
16356 jmp int_ret_from_sys_call
16357 CFI_ENDPROC
16358-END(stub_rt_sigreturn)
16359+ENDPROC(stub_rt_sigreturn)
16360
16361 /*
16362 * Build the entry stubs and pointer table with some assembler magic.
16363@@ -769,7 +1070,7 @@ vector=vector+1
16364 2: jmp common_interrupt
16365 .endr
16366 CFI_ENDPROC
16367-END(irq_entries_start)
16368+ENDPROC(irq_entries_start)
16369
16370 .previous
16371 END(interrupt)
16372@@ -789,6 +1090,16 @@ END(interrupt)
16373 subq $ORIG_RAX-RBP, %rsp
16374 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16375 SAVE_ARGS_IRQ
16376+#ifdef CONFIG_PAX_MEMORY_UDEREF
16377+ testb $3, CS(%rdi)
16378+ jnz 1f
16379+ pax_enter_kernel
16380+ jmp 2f
16381+1: pax_enter_kernel_user
16382+2:
16383+#else
16384+ pax_enter_kernel
16385+#endif
16386 call \func
16387 .endm
16388
16389@@ -820,7 +1131,7 @@ ret_from_intr:
16390
16391 exit_intr:
16392 GET_THREAD_INFO(%rcx)
16393- testl $3,CS-ARGOFFSET(%rsp)
16394+ testb $3,CS-ARGOFFSET(%rsp)
16395 je retint_kernel
16396
16397 /* Interrupt came from user space */
16398@@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
16399 * The iretq could re-enable interrupts:
16400 */
16401 DISABLE_INTERRUPTS(CLBR_ANY)
16402+ pax_exit_kernel_user
16403 TRACE_IRQS_IRETQ
16404 SWAPGS
16405 jmp restore_args
16406
16407 retint_restore_args: /* return to kernel space */
16408 DISABLE_INTERRUPTS(CLBR_ANY)
16409+ pax_exit_kernel
16410+ pax_force_retaddr RIP-ARGOFFSET
16411 /*
16412 * The iretq could re-enable interrupts:
16413 */
16414@@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
16415 #endif
16416
16417 CFI_ENDPROC
16418-END(common_interrupt)
16419+ENDPROC(common_interrupt)
16420 /*
16421 * End of kprobes section
16422 */
16423@@ -953,7 +1267,7 @@ ENTRY(\sym)
16424 interrupt \do_sym
16425 jmp ret_from_intr
16426 CFI_ENDPROC
16427-END(\sym)
16428+ENDPROC(\sym)
16429 .endm
16430
16431 #ifdef CONFIG_SMP
16432@@ -1026,12 +1340,22 @@ ENTRY(\sym)
16433 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16434 call error_entry
16435 DEFAULT_FRAME 0
16436+#ifdef CONFIG_PAX_MEMORY_UDEREF
16437+ testb $3, CS(%rsp)
16438+ jnz 1f
16439+ pax_enter_kernel
16440+ jmp 2f
16441+1: pax_enter_kernel_user
16442+2:
16443+#else
16444+ pax_enter_kernel
16445+#endif
16446 movq %rsp,%rdi /* pt_regs pointer */
16447 xorl %esi,%esi /* no error code */
16448 call \do_sym
16449 jmp error_exit /* %ebx: no swapgs flag */
16450 CFI_ENDPROC
16451-END(\sym)
16452+ENDPROC(\sym)
16453 .endm
16454
16455 .macro paranoidzeroentry sym do_sym
16456@@ -1043,15 +1367,25 @@ ENTRY(\sym)
16457 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16458 call save_paranoid
16459 TRACE_IRQS_OFF
16460+#ifdef CONFIG_PAX_MEMORY_UDEREF
16461+ testb $3, CS(%rsp)
16462+ jnz 1f
16463+ pax_enter_kernel
16464+ jmp 2f
16465+1: pax_enter_kernel_user
16466+2:
16467+#else
16468+ pax_enter_kernel
16469+#endif
16470 movq %rsp,%rdi /* pt_regs pointer */
16471 xorl %esi,%esi /* no error code */
16472 call \do_sym
16473 jmp paranoid_exit /* %ebx: no swapgs flag */
16474 CFI_ENDPROC
16475-END(\sym)
16476+ENDPROC(\sym)
16477 .endm
16478
16479-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16480+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16481 .macro paranoidzeroentry_ist sym do_sym ist
16482 ENTRY(\sym)
16483 INTR_FRAME
16484@@ -1061,14 +1395,30 @@ ENTRY(\sym)
16485 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16486 call save_paranoid
16487 TRACE_IRQS_OFF
16488+#ifdef CONFIG_PAX_MEMORY_UDEREF
16489+ testb $3, CS(%rsp)
16490+ jnz 1f
16491+ pax_enter_kernel
16492+ jmp 2f
16493+1: pax_enter_kernel_user
16494+2:
16495+#else
16496+ pax_enter_kernel
16497+#endif
16498 movq %rsp,%rdi /* pt_regs pointer */
16499 xorl %esi,%esi /* no error code */
16500+#ifdef CONFIG_SMP
16501+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16502+ lea init_tss(%r12), %r12
16503+#else
16504+ lea init_tss(%rip), %r12
16505+#endif
16506 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16507 call \do_sym
16508 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16509 jmp paranoid_exit /* %ebx: no swapgs flag */
16510 CFI_ENDPROC
16511-END(\sym)
16512+ENDPROC(\sym)
16513 .endm
16514
16515 .macro errorentry sym do_sym
16516@@ -1079,13 +1429,23 @@ ENTRY(\sym)
16517 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16518 call error_entry
16519 DEFAULT_FRAME 0
16520+#ifdef CONFIG_PAX_MEMORY_UDEREF
16521+ testb $3, CS(%rsp)
16522+ jnz 1f
16523+ pax_enter_kernel
16524+ jmp 2f
16525+1: pax_enter_kernel_user
16526+2:
16527+#else
16528+ pax_enter_kernel
16529+#endif
16530 movq %rsp,%rdi /* pt_regs pointer */
16531 movq ORIG_RAX(%rsp),%rsi /* get error code */
16532 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16533 call \do_sym
16534 jmp error_exit /* %ebx: no swapgs flag */
16535 CFI_ENDPROC
16536-END(\sym)
16537+ENDPROC(\sym)
16538 .endm
16539
16540 /* error code is on the stack already */
16541@@ -1098,13 +1458,23 @@ ENTRY(\sym)
16542 call save_paranoid
16543 DEFAULT_FRAME 0
16544 TRACE_IRQS_OFF
16545+#ifdef CONFIG_PAX_MEMORY_UDEREF
16546+ testb $3, CS(%rsp)
16547+ jnz 1f
16548+ pax_enter_kernel
16549+ jmp 2f
16550+1: pax_enter_kernel_user
16551+2:
16552+#else
16553+ pax_enter_kernel
16554+#endif
16555 movq %rsp,%rdi /* pt_regs pointer */
16556 movq ORIG_RAX(%rsp),%rsi /* get error code */
16557 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16558 call \do_sym
16559 jmp paranoid_exit /* %ebx: no swapgs flag */
16560 CFI_ENDPROC
16561-END(\sym)
16562+ENDPROC(\sym)
16563 .endm
16564
16565 zeroentry divide_error do_divide_error
16566@@ -1134,9 +1504,10 @@ gs_change:
16567 2: mfence /* workaround */
16568 SWAPGS
16569 popfq_cfi
16570+ pax_force_retaddr
16571 ret
16572 CFI_ENDPROC
16573-END(native_load_gs_index)
16574+ENDPROC(native_load_gs_index)
16575
16576 .section __ex_table,"a"
16577 .align 8
16578@@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
16579 * Here we are in the child and the registers are set as they were
16580 * at kernel_thread() invocation in the parent.
16581 */
16582+ pax_force_fptr %rsi
16583 call *%rsi
16584 # exit
16585 mov %eax, %edi
16586 call do_exit
16587 ud2 # padding for call trace
16588 CFI_ENDPROC
16589-END(kernel_thread_helper)
16590+ENDPROC(kernel_thread_helper)
16591
16592 /*
16593 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16594@@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
16595 RESTORE_REST
16596 testq %rax,%rax
16597 je int_ret_from_sys_call
16598- RESTORE_ARGS
16599 UNFAKE_STACK_FRAME
16600+ pax_force_retaddr
16601 ret
16602 CFI_ENDPROC
16603-END(kernel_execve)
16604+ENDPROC(kernel_execve)
16605
16606 /* Call softirq on interrupt stack. Interrupts are off. */
16607 ENTRY(call_softirq)
16608@@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
16609 CFI_DEF_CFA_REGISTER rsp
16610 CFI_ADJUST_CFA_OFFSET -8
16611 decl PER_CPU_VAR(irq_count)
16612+ pax_force_retaddr
16613 ret
16614 CFI_ENDPROC
16615-END(call_softirq)
16616+ENDPROC(call_softirq)
16617
16618 #ifdef CONFIG_XEN
16619 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16620@@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16621 decl PER_CPU_VAR(irq_count)
16622 jmp error_exit
16623 CFI_ENDPROC
16624-END(xen_do_hypervisor_callback)
16625+ENDPROC(xen_do_hypervisor_callback)
16626
16627 /*
16628 * Hypervisor uses this for application faults while it executes.
16629@@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
16630 SAVE_ALL
16631 jmp error_exit
16632 CFI_ENDPROC
16633-END(xen_failsafe_callback)
16634+ENDPROC(xen_failsafe_callback)
16635
16636 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16637 xen_hvm_callback_vector xen_evtchn_do_upcall
16638@@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
16639 TRACE_IRQS_OFF
16640 testl %ebx,%ebx /* swapgs needed? */
16641 jnz paranoid_restore
16642- testl $3,CS(%rsp)
16643+ testb $3,CS(%rsp)
16644 jnz paranoid_userspace
16645+#ifdef CONFIG_PAX_MEMORY_UDEREF
16646+ pax_exit_kernel
16647+ TRACE_IRQS_IRETQ 0
16648+ SWAPGS_UNSAFE_STACK
16649+ RESTORE_ALL 8
16650+ pax_force_retaddr_bts
16651+ jmp irq_return
16652+#endif
16653 paranoid_swapgs:
16654+#ifdef CONFIG_PAX_MEMORY_UDEREF
16655+ pax_exit_kernel_user
16656+#else
16657+ pax_exit_kernel
16658+#endif
16659 TRACE_IRQS_IRETQ 0
16660 SWAPGS_UNSAFE_STACK
16661 RESTORE_ALL 8
16662 jmp irq_return
16663 paranoid_restore:
16664+ pax_exit_kernel
16665 TRACE_IRQS_IRETQ 0
16666 RESTORE_ALL 8
16667+ pax_force_retaddr_bts
16668 jmp irq_return
16669 paranoid_userspace:
16670 GET_THREAD_INFO(%rcx)
16671@@ -1399,7 +1787,7 @@ paranoid_schedule:
16672 TRACE_IRQS_OFF
16673 jmp paranoid_userspace
16674 CFI_ENDPROC
16675-END(paranoid_exit)
16676+ENDPROC(paranoid_exit)
16677
16678 /*
16679 * Exception entry point. This expects an error code/orig_rax on the stack.
16680@@ -1426,12 +1814,13 @@ ENTRY(error_entry)
16681 movq_cfi r14, R14+8
16682 movq_cfi r15, R15+8
16683 xorl %ebx,%ebx
16684- testl $3,CS+8(%rsp)
16685+ testb $3,CS+8(%rsp)
16686 je error_kernelspace
16687 error_swapgs:
16688 SWAPGS
16689 error_sti:
16690 TRACE_IRQS_OFF
16691+ pax_force_retaddr_bts
16692 ret
16693
16694 /*
16695@@ -1458,7 +1847,7 @@ bstep_iret:
16696 movq %rcx,RIP+8(%rsp)
16697 jmp error_swapgs
16698 CFI_ENDPROC
16699-END(error_entry)
16700+ENDPROC(error_entry)
16701
16702
16703 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16704@@ -1478,7 +1867,7 @@ ENTRY(error_exit)
16705 jnz retint_careful
16706 jmp retint_swapgs
16707 CFI_ENDPROC
16708-END(error_exit)
16709+ENDPROC(error_exit)
16710
16711 /*
16712 * Test if a given stack is an NMI stack or not.
16713@@ -1535,9 +1924,11 @@ ENTRY(nmi)
16714 * If %cs was not the kernel segment, then the NMI triggered in user
16715 * space, which means it is definitely not nested.
16716 */
16717+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16718+ je 1f
16719 cmpl $__KERNEL_CS, 16(%rsp)
16720 jne first_nmi
16721-
16722+1:
16723 /*
16724 * Check the special variable on the stack to see if NMIs are
16725 * executing.
16726@@ -1659,6 +2050,16 @@ restart_nmi:
16727 */
16728 call save_paranoid
16729 DEFAULT_FRAME 0
16730+#ifdef CONFIG_PAX_MEMORY_UDEREF
16731+ testb $3, CS(%rsp)
16732+ jnz 1f
16733+ pax_enter_kernel
16734+ jmp 2f
16735+1: pax_enter_kernel_user
16736+2:
16737+#else
16738+ pax_enter_kernel
16739+#endif
16740 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16741 movq %rsp,%rdi
16742 movq $-1,%rsi
16743@@ -1666,14 +2067,25 @@ restart_nmi:
16744 testl %ebx,%ebx /* swapgs needed? */
16745 jnz nmi_restore
16746 nmi_swapgs:
16747+#ifdef CONFIG_PAX_MEMORY_UDEREF
16748+ pax_exit_kernel_user
16749+#else
16750+ pax_exit_kernel
16751+#endif
16752 SWAPGS_UNSAFE_STACK
16753+ RESTORE_ALL 8
16754+ /* Clear the NMI executing stack variable */
16755+ movq $0, 10*8(%rsp)
16756+ jmp irq_return
16757 nmi_restore:
16758+ pax_exit_kernel
16759 RESTORE_ALL 8
16760+ pax_force_retaddr_bts
16761 /* Clear the NMI executing stack variable */
16762 movq $0, 10*8(%rsp)
16763 jmp irq_return
16764 CFI_ENDPROC
16765-END(nmi)
16766+ENDPROC(nmi)
16767
16768 /*
16769 * If an NMI hit an iret because of an exception or breakpoint,
16770@@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16771 mov $-ENOSYS,%eax
16772 sysret
16773 CFI_ENDPROC
16774-END(ignore_sysret)
16775+ENDPROC(ignore_sysret)
16776
16777 /*
16778 * End of kprobes section
16779diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16780index c9a281f..ce2f317 100644
16781--- a/arch/x86/kernel/ftrace.c
16782+++ b/arch/x86/kernel/ftrace.c
16783@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16784 static const void *mod_code_newcode; /* holds the text to write to the IP */
16785
16786 static unsigned nmi_wait_count;
16787-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16788+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16789
16790 int ftrace_arch_read_dyn_info(char *buf, int size)
16791 {
16792@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16793
16794 r = snprintf(buf, size, "%u %u",
16795 nmi_wait_count,
16796- atomic_read(&nmi_update_count));
16797+ atomic_read_unchecked(&nmi_update_count));
16798 return r;
16799 }
16800
16801@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16802
16803 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16804 smp_rmb();
16805+ pax_open_kernel();
16806 ftrace_mod_code();
16807- atomic_inc(&nmi_update_count);
16808+ pax_close_kernel();
16809+ atomic_inc_unchecked(&nmi_update_count);
16810 }
16811 /* Must have previous changes seen before executions */
16812 smp_mb();
16813@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16814 {
16815 unsigned char replaced[MCOUNT_INSN_SIZE];
16816
16817+ ip = ktla_ktva(ip);
16818+
16819 /*
16820 * Note: Due to modules and __init, code can
16821 * disappear and change, we need to protect against faulting
16822@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16823 unsigned char old[MCOUNT_INSN_SIZE], *new;
16824 int ret;
16825
16826- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16827+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16828 new = ftrace_call_replace(ip, (unsigned long)func);
16829 ret = ftrace_modify_code(ip, old, new);
16830
16831@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16832 {
16833 unsigned char code[MCOUNT_INSN_SIZE];
16834
16835+ ip = ktla_ktva(ip);
16836+
16837 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16838 return -EFAULT;
16839
16840diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16841index 51ff186..9e77418 100644
16842--- a/arch/x86/kernel/head32.c
16843+++ b/arch/x86/kernel/head32.c
16844@@ -19,6 +19,7 @@
16845 #include <asm/io_apic.h>
16846 #include <asm/bios_ebda.h>
16847 #include <asm/tlbflush.h>
16848+#include <asm/boot.h>
16849
16850 static void __init i386_default_early_setup(void)
16851 {
16852@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16853
16854 void __init i386_start_kernel(void)
16855 {
16856- memblock_reserve(__pa_symbol(&_text),
16857- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16858+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16859
16860 #ifdef CONFIG_BLK_DEV_INITRD
16861 /* Reserve INITRD */
16862diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16863index ce0be7c..c41476e 100644
16864--- a/arch/x86/kernel/head_32.S
16865+++ b/arch/x86/kernel/head_32.S
16866@@ -25,6 +25,12 @@
16867 /* Physical address */
16868 #define pa(X) ((X) - __PAGE_OFFSET)
16869
16870+#ifdef CONFIG_PAX_KERNEXEC
16871+#define ta(X) (X)
16872+#else
16873+#define ta(X) ((X) - __PAGE_OFFSET)
16874+#endif
16875+
16876 /*
16877 * References to members of the new_cpu_data structure.
16878 */
16879@@ -54,11 +60,7 @@
16880 * and small than max_low_pfn, otherwise will waste some page table entries
16881 */
16882
16883-#if PTRS_PER_PMD > 1
16884-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16885-#else
16886-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16887-#endif
16888+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16889
16890 /* Number of possible pages in the lowmem region */
16891 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16892@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16893 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16894
16895 /*
16896+ * Real beginning of normal "text" segment
16897+ */
16898+ENTRY(stext)
16899+ENTRY(_stext)
16900+
16901+/*
16902 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16903 * %esi points to the real-mode code as a 32-bit pointer.
16904 * CS and DS must be 4 GB flat segments, but we don't depend on
16905@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16906 * can.
16907 */
16908 __HEAD
16909+
16910+#ifdef CONFIG_PAX_KERNEXEC
16911+ jmp startup_32
16912+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16913+.fill PAGE_SIZE-5,1,0xcc
16914+#endif
16915+
16916 ENTRY(startup_32)
16917 movl pa(stack_start),%ecx
16918
16919@@ -105,6 +120,57 @@ ENTRY(startup_32)
16920 2:
16921 leal -__PAGE_OFFSET(%ecx),%esp
16922
16923+#ifdef CONFIG_SMP
16924+ movl $pa(cpu_gdt_table),%edi
16925+ movl $__per_cpu_load,%eax
16926+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16927+ rorl $16,%eax
16928+ movb %al,__KERNEL_PERCPU + 4(%edi)
16929+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16930+ movl $__per_cpu_end - 1,%eax
16931+ subl $__per_cpu_start,%eax
16932+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16933+#endif
16934+
16935+#ifdef CONFIG_PAX_MEMORY_UDEREF
16936+ movl $NR_CPUS,%ecx
16937+ movl $pa(cpu_gdt_table),%edi
16938+1:
16939+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16940+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16941+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16942+ addl $PAGE_SIZE_asm,%edi
16943+ loop 1b
16944+#endif
16945+
16946+#ifdef CONFIG_PAX_KERNEXEC
16947+ movl $pa(boot_gdt),%edi
16948+ movl $__LOAD_PHYSICAL_ADDR,%eax
16949+ movw %ax,__BOOT_CS + 2(%edi)
16950+ rorl $16,%eax
16951+ movb %al,__BOOT_CS + 4(%edi)
16952+ movb %ah,__BOOT_CS + 7(%edi)
16953+ rorl $16,%eax
16954+
16955+ ljmp $(__BOOT_CS),$1f
16956+1:
16957+
16958+ movl $NR_CPUS,%ecx
16959+ movl $pa(cpu_gdt_table),%edi
16960+ addl $__PAGE_OFFSET,%eax
16961+1:
16962+ movw %ax,__KERNEL_CS + 2(%edi)
16963+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16964+ rorl $16,%eax
16965+ movb %al,__KERNEL_CS + 4(%edi)
16966+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16967+ movb %ah,__KERNEL_CS + 7(%edi)
16968+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16969+ rorl $16,%eax
16970+ addl $PAGE_SIZE_asm,%edi
16971+ loop 1b
16972+#endif
16973+
16974 /*
16975 * Clear BSS first so that there are no surprises...
16976 */
16977@@ -195,8 +261,11 @@ ENTRY(startup_32)
16978 movl %eax, pa(max_pfn_mapped)
16979
16980 /* Do early initialization of the fixmap area */
16981- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16982- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16983+#ifdef CONFIG_COMPAT_VDSO
16984+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16985+#else
16986+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16987+#endif
16988 #else /* Not PAE */
16989
16990 page_pde_offset = (__PAGE_OFFSET >> 20);
16991@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16992 movl %eax, pa(max_pfn_mapped)
16993
16994 /* Do early initialization of the fixmap area */
16995- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16996- movl %eax,pa(initial_page_table+0xffc)
16997+#ifdef CONFIG_COMPAT_VDSO
16998+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16999+#else
17000+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
17001+#endif
17002 #endif
17003
17004 #ifdef CONFIG_PARAVIRT
17005@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17006 cmpl $num_subarch_entries, %eax
17007 jae bad_subarch
17008
17009- movl pa(subarch_entries)(,%eax,4), %eax
17010- subl $__PAGE_OFFSET, %eax
17011- jmp *%eax
17012+ jmp *pa(subarch_entries)(,%eax,4)
17013
17014 bad_subarch:
17015 WEAK(lguest_entry)
17016@@ -255,10 +325,10 @@ WEAK(xen_entry)
17017 __INITDATA
17018
17019 subarch_entries:
17020- .long default_entry /* normal x86/PC */
17021- .long lguest_entry /* lguest hypervisor */
17022- .long xen_entry /* Xen hypervisor */
17023- .long default_entry /* Moorestown MID */
17024+ .long ta(default_entry) /* normal x86/PC */
17025+ .long ta(lguest_entry) /* lguest hypervisor */
17026+ .long ta(xen_entry) /* Xen hypervisor */
17027+ .long ta(default_entry) /* Moorestown MID */
17028 num_subarch_entries = (. - subarch_entries) / 4
17029 .previous
17030 #else
17031@@ -312,6 +382,7 @@ default_entry:
17032 orl %edx,%eax
17033 movl %eax,%cr4
17034
17035+#ifdef CONFIG_X86_PAE
17036 testb $X86_CR4_PAE, %al # check if PAE is enabled
17037 jz 6f
17038
17039@@ -340,6 +411,9 @@ default_entry:
17040 /* Make changes effective */
17041 wrmsr
17042
17043+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17044+#endif
17045+
17046 6:
17047
17048 /*
17049@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
17050 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17051 movl %eax,%ss # after changing gdt.
17052
17053- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17054+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17055 movl %eax,%ds
17056 movl %eax,%es
17057
17058@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
17059 */
17060 cmpb $0,ready
17061 jne 1f
17062- movl $gdt_page,%eax
17063+ movl $cpu_gdt_table,%eax
17064 movl $stack_canary,%ecx
17065+#ifdef CONFIG_SMP
17066+ addl $__per_cpu_load,%ecx
17067+#endif
17068 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17069 shrl $16, %ecx
17070 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17071 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17072 1:
17073-#endif
17074 movl $(__KERNEL_STACK_CANARY),%eax
17075+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17076+ movl $(__USER_DS),%eax
17077+#else
17078+ xorl %eax,%eax
17079+#endif
17080 movl %eax,%gs
17081
17082 xorl %eax,%eax # Clear LDT
17083@@ -558,22 +639,22 @@ early_page_fault:
17084 jmp early_fault
17085
17086 early_fault:
17087- cld
17088 #ifdef CONFIG_PRINTK
17089+ cmpl $1,%ss:early_recursion_flag
17090+ je hlt_loop
17091+ incl %ss:early_recursion_flag
17092+ cld
17093 pusha
17094 movl $(__KERNEL_DS),%eax
17095 movl %eax,%ds
17096 movl %eax,%es
17097- cmpl $2,early_recursion_flag
17098- je hlt_loop
17099- incl early_recursion_flag
17100 movl %cr2,%eax
17101 pushl %eax
17102 pushl %edx /* trapno */
17103 pushl $fault_msg
17104 call printk
17105+; call dump_stack
17106 #endif
17107- call dump_stack
17108 hlt_loop:
17109 hlt
17110 jmp hlt_loop
17111@@ -581,8 +662,11 @@ hlt_loop:
17112 /* This is the default interrupt "handler" :-) */
17113 ALIGN
17114 ignore_int:
17115- cld
17116 #ifdef CONFIG_PRINTK
17117+ cmpl $2,%ss:early_recursion_flag
17118+ je hlt_loop
17119+ incl %ss:early_recursion_flag
17120+ cld
17121 pushl %eax
17122 pushl %ecx
17123 pushl %edx
17124@@ -591,9 +675,6 @@ ignore_int:
17125 movl $(__KERNEL_DS),%eax
17126 movl %eax,%ds
17127 movl %eax,%es
17128- cmpl $2,early_recursion_flag
17129- je hlt_loop
17130- incl early_recursion_flag
17131 pushl 16(%esp)
17132 pushl 24(%esp)
17133 pushl 32(%esp)
17134@@ -622,29 +703,43 @@ ENTRY(initial_code)
17135 /*
17136 * BSS section
17137 */
17138-__PAGE_ALIGNED_BSS
17139- .align PAGE_SIZE
17140 #ifdef CONFIG_X86_PAE
17141+.section .initial_pg_pmd,"a",@progbits
17142 initial_pg_pmd:
17143 .fill 1024*KPMDS,4,0
17144 #else
17145+.section .initial_page_table,"a",@progbits
17146 ENTRY(initial_page_table)
17147 .fill 1024,4,0
17148 #endif
17149+.section .initial_pg_fixmap,"a",@progbits
17150 initial_pg_fixmap:
17151 .fill 1024,4,0
17152+.section .empty_zero_page,"a",@progbits
17153 ENTRY(empty_zero_page)
17154 .fill 4096,1,0
17155+.section .swapper_pg_dir,"a",@progbits
17156 ENTRY(swapper_pg_dir)
17157+#ifdef CONFIG_X86_PAE
17158+ .fill 4,8,0
17159+#else
17160 .fill 1024,4,0
17161+#endif
17162+
17163+/*
17164+ * The IDT has to be page-aligned to simplify the Pentium
17165+ * F0 0F bug workaround.. We have a special link segment
17166+ * for this.
17167+ */
17168+.section .idt,"a",@progbits
17169+ENTRY(idt_table)
17170+ .fill 256,8,0
17171
17172 /*
17173 * This starts the data section.
17174 */
17175 #ifdef CONFIG_X86_PAE
17176-__PAGE_ALIGNED_DATA
17177- /* Page-aligned for the benefit of paravirt? */
17178- .align PAGE_SIZE
17179+.section .initial_page_table,"a",@progbits
17180 ENTRY(initial_page_table)
17181 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17182 # if KPMDS == 3
17183@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17184 # error "Kernel PMDs should be 1, 2 or 3"
17185 # endif
17186 .align PAGE_SIZE /* needs to be page-sized too */
17187+
17188+#ifdef CONFIG_PAX_PER_CPU_PGD
17189+ENTRY(cpu_pgd)
17190+ .rept NR_CPUS
17191+ .fill 4,8,0
17192+ .endr
17193+#endif
17194+
17195 #endif
17196
17197 .data
17198 .balign 4
17199 ENTRY(stack_start)
17200- .long init_thread_union+THREAD_SIZE
17201+ .long init_thread_union+THREAD_SIZE-8
17202
17203+ready: .byte 0
17204+
17205+.section .rodata,"a",@progbits
17206 early_recursion_flag:
17207 .long 0
17208
17209-ready: .byte 0
17210-
17211 int_msg:
17212 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17213
17214@@ -707,7 +811,7 @@ fault_msg:
17215 .word 0 # 32 bit align gdt_desc.address
17216 boot_gdt_descr:
17217 .word __BOOT_DS+7
17218- .long boot_gdt - __PAGE_OFFSET
17219+ .long pa(boot_gdt)
17220
17221 .word 0 # 32-bit align idt_desc.address
17222 idt_descr:
17223@@ -718,7 +822,7 @@ idt_descr:
17224 .word 0 # 32 bit align gdt_desc.address
17225 ENTRY(early_gdt_descr)
17226 .word GDT_ENTRIES*8-1
17227- .long gdt_page /* Overwritten for secondary CPUs */
17228+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17229
17230 /*
17231 * The boot_gdt must mirror the equivalent in setup.S and is
17232@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17233 .align L1_CACHE_BYTES
17234 ENTRY(boot_gdt)
17235 .fill GDT_ENTRY_BOOT_CS,8,0
17236- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17237- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17238+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17239+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17240+
17241+ .align PAGE_SIZE_asm
17242+ENTRY(cpu_gdt_table)
17243+ .rept NR_CPUS
17244+ .quad 0x0000000000000000 /* NULL descriptor */
17245+ .quad 0x0000000000000000 /* 0x0b reserved */
17246+ .quad 0x0000000000000000 /* 0x13 reserved */
17247+ .quad 0x0000000000000000 /* 0x1b reserved */
17248+
17249+#ifdef CONFIG_PAX_KERNEXEC
17250+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17251+#else
17252+ .quad 0x0000000000000000 /* 0x20 unused */
17253+#endif
17254+
17255+ .quad 0x0000000000000000 /* 0x28 unused */
17256+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17257+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17258+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17259+ .quad 0x0000000000000000 /* 0x4b reserved */
17260+ .quad 0x0000000000000000 /* 0x53 reserved */
17261+ .quad 0x0000000000000000 /* 0x5b reserved */
17262+
17263+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17264+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17265+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17266+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17267+
17268+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17269+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17270+
17271+ /*
17272+ * Segments used for calling PnP BIOS have byte granularity.
17273+ * The code segments and data segments have fixed 64k limits,
17274+ * the transfer segment sizes are set at run time.
17275+ */
17276+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17277+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17278+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17279+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17280+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17281+
17282+ /*
17283+ * The APM segments have byte granularity and their bases
17284+ * are set at run time. All have 64k limits.
17285+ */
17286+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17287+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17288+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17289+
17290+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17291+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17292+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17293+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17294+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17295+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17296+
17297+ /* Be sure this is zeroed to avoid false validations in Xen */
17298+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17299+ .endr
17300diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17301index 40f4eb3..6d24d9d 100644
17302--- a/arch/x86/kernel/head_64.S
17303+++ b/arch/x86/kernel/head_64.S
17304@@ -19,6 +19,8 @@
17305 #include <asm/cache.h>
17306 #include <asm/processor-flags.h>
17307 #include <asm/percpu.h>
17308+#include <asm/cpufeature.h>
17309+#include <asm/alternative-asm.h>
17310
17311 #ifdef CONFIG_PARAVIRT
17312 #include <asm/asm-offsets.h>
17313@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17314 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17315 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17316 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17317+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17318+L3_VMALLOC_START = pud_index(VMALLOC_START)
17319+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17320+L3_VMALLOC_END = pud_index(VMALLOC_END)
17321+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17322+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17323
17324 .text
17325 __HEAD
17326@@ -85,35 +93,23 @@ startup_64:
17327 */
17328 addq %rbp, init_level4_pgt + 0(%rip)
17329 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17330+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17331+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17332+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17333 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17334
17335 addq %rbp, level3_ident_pgt + 0(%rip)
17336+#ifndef CONFIG_XEN
17337+ addq %rbp, level3_ident_pgt + 8(%rip)
17338+#endif
17339
17340- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17341- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17342+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17343+
17344+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17345+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17346
17347 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17348-
17349- /* Add an Identity mapping if I am above 1G */
17350- leaq _text(%rip), %rdi
17351- andq $PMD_PAGE_MASK, %rdi
17352-
17353- movq %rdi, %rax
17354- shrq $PUD_SHIFT, %rax
17355- andq $(PTRS_PER_PUD - 1), %rax
17356- jz ident_complete
17357-
17358- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17359- leaq level3_ident_pgt(%rip), %rbx
17360- movq %rdx, 0(%rbx, %rax, 8)
17361-
17362- movq %rdi, %rax
17363- shrq $PMD_SHIFT, %rax
17364- andq $(PTRS_PER_PMD - 1), %rax
17365- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17366- leaq level2_spare_pgt(%rip), %rbx
17367- movq %rdx, 0(%rbx, %rax, 8)
17368-ident_complete:
17369+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17370
17371 /*
17372 * Fixup the kernel text+data virtual addresses. Note that
17373@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17374 * after the boot processor executes this code.
17375 */
17376
17377- /* Enable PAE mode and PGE */
17378- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17379+ /* Enable PAE mode and PSE/PGE */
17380+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17381 movq %rax, %cr4
17382
17383 /* Setup early boot stage 4 level pagetables. */
17384@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17385 movl $MSR_EFER, %ecx
17386 rdmsr
17387 btsl $_EFER_SCE, %eax /* Enable System Call */
17388- btl $20,%edi /* No Execute supported? */
17389+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17390 jnc 1f
17391 btsl $_EFER_NX, %eax
17392+ leaq init_level4_pgt(%rip), %rdi
17393+#ifndef CONFIG_EFI
17394+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17395+#endif
17396+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17397+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17398+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17399+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17400 1: wrmsr /* Make changes effective */
17401
17402 /* Setup cr0 */
17403@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17404 * jump. In addition we need to ensure %cs is set so we make this
17405 * a far return.
17406 */
17407+ pax_set_fptr_mask
17408 movq initial_code(%rip),%rax
17409 pushq $0 # fake return address to stop unwinder
17410 pushq $__KERNEL_CS # set correct cs
17411@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17412 bad_address:
17413 jmp bad_address
17414
17415- .section ".init.text","ax"
17416+ __INIT
17417 #ifdef CONFIG_EARLY_PRINTK
17418 .globl early_idt_handlers
17419 early_idt_handlers:
17420@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17421 #endif /* EARLY_PRINTK */
17422 1: hlt
17423 jmp 1b
17424+ .previous
17425
17426 #ifdef CONFIG_EARLY_PRINTK
17427+ __INITDATA
17428 early_recursion_flag:
17429 .long 0
17430+ .previous
17431
17432+ .section .rodata,"a",@progbits
17433 early_idt_msg:
17434 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17435 early_idt_ripmsg:
17436 .asciz "RIP %s\n"
17437+ .previous
17438 #endif /* CONFIG_EARLY_PRINTK */
17439- .previous
17440
17441+ .section .rodata,"a",@progbits
17442 #define NEXT_PAGE(name) \
17443 .balign PAGE_SIZE; \
17444 ENTRY(name)
17445@@ -338,7 +348,6 @@ ENTRY(name)
17446 i = i + 1 ; \
17447 .endr
17448
17449- .data
17450 /*
17451 * This default setting generates an ident mapping at address 0x100000
17452 * and a mapping for the kernel that precisely maps virtual address
17453@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17454 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17455 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17456 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17457+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17458+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17459+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17460+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17461+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17462+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17463 .org init_level4_pgt + L4_START_KERNEL*8, 0
17464 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17465 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17466
17467+#ifdef CONFIG_PAX_PER_CPU_PGD
17468+NEXT_PAGE(cpu_pgd)
17469+ .rept NR_CPUS
17470+ .fill 512,8,0
17471+ .endr
17472+#endif
17473+
17474 NEXT_PAGE(level3_ident_pgt)
17475 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17476+#ifdef CONFIG_XEN
17477 .fill 511,8,0
17478+#else
17479+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17480+ .fill 510,8,0
17481+#endif
17482+
17483+NEXT_PAGE(level3_vmalloc_start_pgt)
17484+ .fill 512,8,0
17485+
17486+NEXT_PAGE(level3_vmalloc_end_pgt)
17487+ .fill 512,8,0
17488+
17489+NEXT_PAGE(level3_vmemmap_pgt)
17490+ .fill L3_VMEMMAP_START,8,0
17491+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17492
17493 NEXT_PAGE(level3_kernel_pgt)
17494 .fill L3_START_KERNEL,8,0
17495@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17496 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17497 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17498
17499+NEXT_PAGE(level2_vmemmap_pgt)
17500+ .fill 512,8,0
17501+
17502 NEXT_PAGE(level2_fixmap_pgt)
17503- .fill 506,8,0
17504- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17505- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17506- .fill 5,8,0
17507+ .fill 507,8,0
17508+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17509+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17510+ .fill 4,8,0
17511
17512-NEXT_PAGE(level1_fixmap_pgt)
17513+NEXT_PAGE(level1_vsyscall_pgt)
17514 .fill 512,8,0
17515
17516-NEXT_PAGE(level2_ident_pgt)
17517- /* Since I easily can, map the first 1G.
17518+ /* Since I easily can, map the first 2G.
17519 * Don't set NX because code runs from these pages.
17520 */
17521- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17522+NEXT_PAGE(level2_ident_pgt)
17523+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17524
17525 NEXT_PAGE(level2_kernel_pgt)
17526 /*
17527@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17528 * If you want to increase this then increase MODULES_VADDR
17529 * too.)
17530 */
17531- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17532- KERNEL_IMAGE_SIZE/PMD_SIZE)
17533-
17534-NEXT_PAGE(level2_spare_pgt)
17535- .fill 512, 8, 0
17536+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17537
17538 #undef PMDS
17539 #undef NEXT_PAGE
17540
17541- .data
17542+ .align PAGE_SIZE
17543+ENTRY(cpu_gdt_table)
17544+ .rept NR_CPUS
17545+ .quad 0x0000000000000000 /* NULL descriptor */
17546+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17547+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17548+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17549+ .quad 0x00cffb000000ffff /* __USER32_CS */
17550+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17551+ .quad 0x00affb000000ffff /* __USER_CS */
17552+
17553+#ifdef CONFIG_PAX_KERNEXEC
17554+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17555+#else
17556+ .quad 0x0 /* unused */
17557+#endif
17558+
17559+ .quad 0,0 /* TSS */
17560+ .quad 0,0 /* LDT */
17561+ .quad 0,0,0 /* three TLS descriptors */
17562+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17563+ /* asm/segment.h:GDT_ENTRIES must match this */
17564+
17565+ /* zero the remaining page */
17566+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17567+ .endr
17568+
17569 .align 16
17570 .globl early_gdt_descr
17571 early_gdt_descr:
17572 .word GDT_ENTRIES*8-1
17573 early_gdt_descr_base:
17574- .quad INIT_PER_CPU_VAR(gdt_page)
17575+ .quad cpu_gdt_table
17576
17577 ENTRY(phys_base)
17578 /* This must match the first entry in level2_kernel_pgt */
17579 .quad 0x0000000000000000
17580
17581 #include "../../x86/xen/xen-head.S"
17582-
17583- .section .bss, "aw", @nobits
17584+
17585+ .section .rodata,"a",@progbits
17586 .align L1_CACHE_BYTES
17587 ENTRY(idt_table)
17588- .skip IDT_ENTRIES * 16
17589+ .fill 512,8,0
17590
17591 .align L1_CACHE_BYTES
17592 ENTRY(nmi_idt_table)
17593- .skip IDT_ENTRIES * 16
17594+ .fill 512,8,0
17595
17596 __PAGE_ALIGNED_BSS
17597 .align PAGE_SIZE
17598diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17599index 9c3bd4a..e1d9b35 100644
17600--- a/arch/x86/kernel/i386_ksyms_32.c
17601+++ b/arch/x86/kernel/i386_ksyms_32.c
17602@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17603 EXPORT_SYMBOL(cmpxchg8b_emu);
17604 #endif
17605
17606+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17607+
17608 /* Networking helper routines. */
17609 EXPORT_SYMBOL(csum_partial_copy_generic);
17610+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17611+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17612
17613 EXPORT_SYMBOL(__get_user_1);
17614 EXPORT_SYMBOL(__get_user_2);
17615@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17616
17617 EXPORT_SYMBOL(csum_partial);
17618 EXPORT_SYMBOL(empty_zero_page);
17619+
17620+#ifdef CONFIG_PAX_KERNEXEC
17621+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17622+#endif
17623diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17624index f239f30..aab2a58 100644
17625--- a/arch/x86/kernel/i387.c
17626+++ b/arch/x86/kernel/i387.c
17627@@ -189,6 +189,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
17628
17629 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17630 unsigned int pos, unsigned int count,
17631+ void *kbuf, void __user *ubuf) __size_overflow(4);
17632+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17633+ unsigned int pos, unsigned int count,
17634 void *kbuf, void __user *ubuf)
17635 {
17636 int ret;
17637@@ -208,6 +211,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17638
17639 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17640 unsigned int pos, unsigned int count,
17641+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17642+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17643+ unsigned int pos, unsigned int count,
17644 const void *kbuf, const void __user *ubuf)
17645 {
17646 int ret;
17647@@ -241,6 +247,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17648
17649 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17650 unsigned int pos, unsigned int count,
17651+ void *kbuf, void __user *ubuf) __size_overflow(4);
17652+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17653+ unsigned int pos, unsigned int count,
17654 void *kbuf, void __user *ubuf)
17655 {
17656 int ret;
17657@@ -270,6 +279,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17658
17659 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17660 unsigned int pos, unsigned int count,
17661+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17662+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17663+ unsigned int pos, unsigned int count,
17664 const void *kbuf, const void __user *ubuf)
17665 {
17666 int ret;
17667@@ -440,6 +452,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
17668
17669 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17670 unsigned int pos, unsigned int count,
17671+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
17672+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17673+ unsigned int pos, unsigned int count,
17674 void *kbuf, void __user *ubuf)
17675 {
17676 struct user_i387_ia32_struct env;
17677@@ -472,6 +487,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17678
17679 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17680 unsigned int pos, unsigned int count,
17681+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
17682+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17683+ unsigned int pos, unsigned int count,
17684 const void *kbuf, const void __user *ubuf)
17685 {
17686 struct user_i387_ia32_struct env;
17687@@ -620,6 +638,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
17688 }
17689
17690 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17691+ unsigned int size) __size_overflow(2);
17692+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17693 unsigned int size)
17694 {
17695 struct task_struct *tsk = current;
17696diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17697index 6104852..6114160 100644
17698--- a/arch/x86/kernel/i8259.c
17699+++ b/arch/x86/kernel/i8259.c
17700@@ -210,7 +210,7 @@ spurious_8259A_irq:
17701 "spurious 8259A interrupt: IRQ%d.\n", irq);
17702 spurious_irq_mask |= irqmask;
17703 }
17704- atomic_inc(&irq_err_count);
17705+ atomic_inc_unchecked(&irq_err_count);
17706 /*
17707 * Theoretically we do not have to handle this IRQ,
17708 * but in Linux this does not cause problems and is
17709diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17710index 43e9ccf..44ccf6f 100644
17711--- a/arch/x86/kernel/init_task.c
17712+++ b/arch/x86/kernel/init_task.c
17713@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17714 * way process stacks are handled. This is done by having a special
17715 * "init_task" linker map entry..
17716 */
17717-union thread_union init_thread_union __init_task_data =
17718- { INIT_THREAD_INFO(init_task) };
17719+union thread_union init_thread_union __init_task_data;
17720
17721 /*
17722 * Initial task structure.
17723@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17724 * section. Since TSS's are completely CPU-local, we want them
17725 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17726 */
17727-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17728-
17729+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17730+EXPORT_SYMBOL(init_tss);
17731diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17732index 8c96897..be66bfa 100644
17733--- a/arch/x86/kernel/ioport.c
17734+++ b/arch/x86/kernel/ioport.c
17735@@ -6,6 +6,7 @@
17736 #include <linux/sched.h>
17737 #include <linux/kernel.h>
17738 #include <linux/capability.h>
17739+#include <linux/security.h>
17740 #include <linux/errno.h>
17741 #include <linux/types.h>
17742 #include <linux/ioport.h>
17743@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17744
17745 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17746 return -EINVAL;
17747+#ifdef CONFIG_GRKERNSEC_IO
17748+ if (turn_on && grsec_disable_privio) {
17749+ gr_handle_ioperm();
17750+ return -EPERM;
17751+ }
17752+#endif
17753 if (turn_on && !capable(CAP_SYS_RAWIO))
17754 return -EPERM;
17755
17756@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17757 * because the ->io_bitmap_max value must match the bitmap
17758 * contents:
17759 */
17760- tss = &per_cpu(init_tss, get_cpu());
17761+ tss = init_tss + get_cpu();
17762
17763 if (turn_on)
17764 bitmap_clear(t->io_bitmap_ptr, from, num);
17765@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17766 return -EINVAL;
17767 /* Trying to gain more privileges? */
17768 if (level > old) {
17769+#ifdef CONFIG_GRKERNSEC_IO
17770+ if (grsec_disable_privio) {
17771+ gr_handle_iopl();
17772+ return -EPERM;
17773+ }
17774+#endif
17775 if (!capable(CAP_SYS_RAWIO))
17776 return -EPERM;
17777 }
17778diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17779index 7943e0c..dd32c5c 100644
17780--- a/arch/x86/kernel/irq.c
17781+++ b/arch/x86/kernel/irq.c
17782@@ -18,7 +18,7 @@
17783 #include <asm/mce.h>
17784 #include <asm/hw_irq.h>
17785
17786-atomic_t irq_err_count;
17787+atomic_unchecked_t irq_err_count;
17788
17789 /* Function pointer for generic interrupt vector handling */
17790 void (*x86_platform_ipi_callback)(void) = NULL;
17791@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17792 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17793 seq_printf(p, " Machine check polls\n");
17794 #endif
17795- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17796+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17797 #if defined(CONFIG_X86_IO_APIC)
17798- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17799+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17800 #endif
17801 return 0;
17802 }
17803@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17804
17805 u64 arch_irq_stat(void)
17806 {
17807- u64 sum = atomic_read(&irq_err_count);
17808+ u64 sum = atomic_read_unchecked(&irq_err_count);
17809
17810 #ifdef CONFIG_X86_IO_APIC
17811- sum += atomic_read(&irq_mis_count);
17812+ sum += atomic_read_unchecked(&irq_mis_count);
17813 #endif
17814 return sum;
17815 }
17816diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17817index 40fc861..9b8739b 100644
17818--- a/arch/x86/kernel/irq_32.c
17819+++ b/arch/x86/kernel/irq_32.c
17820@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17821 __asm__ __volatile__("andl %%esp,%0" :
17822 "=r" (sp) : "0" (THREAD_SIZE - 1));
17823
17824- return sp < (sizeof(struct thread_info) + STACK_WARN);
17825+ return sp < STACK_WARN;
17826 }
17827
17828 static void print_stack_overflow(void)
17829@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17830 * per-CPU IRQ handling contexts (thread information and stack)
17831 */
17832 union irq_ctx {
17833- struct thread_info tinfo;
17834- u32 stack[THREAD_SIZE/sizeof(u32)];
17835+ unsigned long previous_esp;
17836+ u32 stack[THREAD_SIZE/sizeof(u32)];
17837 } __attribute__((aligned(THREAD_SIZE)));
17838
17839 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17840@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17841 static inline int
17842 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17843 {
17844- union irq_ctx *curctx, *irqctx;
17845+ union irq_ctx *irqctx;
17846 u32 *isp, arg1, arg2;
17847
17848- curctx = (union irq_ctx *) current_thread_info();
17849 irqctx = __this_cpu_read(hardirq_ctx);
17850
17851 /*
17852@@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17853 * handler) we can't do that and just have to keep using the
17854 * current stack (which is the irq stack already after all)
17855 */
17856- if (unlikely(curctx == irqctx))
17857+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17858 return 0;
17859
17860 /* build the stack frame on the IRQ stack */
17861- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17862- irqctx->tinfo.task = curctx->tinfo.task;
17863- irqctx->tinfo.previous_esp = current_stack_pointer;
17864+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17865+ irqctx->previous_esp = current_stack_pointer;
17866
17867- /*
17868- * Copy the softirq bits in preempt_count so that the
17869- * softirq checks work in the hardirq context.
17870- */
17871- irqctx->tinfo.preempt_count =
17872- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17873- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17874+#ifdef CONFIG_PAX_MEMORY_UDEREF
17875+ __set_fs(MAKE_MM_SEG(0));
17876+#endif
17877
17878 if (unlikely(overflow))
17879 call_on_stack(print_stack_overflow, isp);
17880@@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17881 : "0" (irq), "1" (desc), "2" (isp),
17882 "D" (desc->handle_irq)
17883 : "memory", "cc", "ecx");
17884+
17885+#ifdef CONFIG_PAX_MEMORY_UDEREF
17886+ __set_fs(current_thread_info()->addr_limit);
17887+#endif
17888+
17889 return 1;
17890 }
17891
17892@@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17893 */
17894 void __cpuinit irq_ctx_init(int cpu)
17895 {
17896- union irq_ctx *irqctx;
17897-
17898 if (per_cpu(hardirq_ctx, cpu))
17899 return;
17900
17901- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17902- THREAD_FLAGS,
17903- THREAD_ORDER));
17904- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17905- irqctx->tinfo.cpu = cpu;
17906- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17907- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17908-
17909- per_cpu(hardirq_ctx, cpu) = irqctx;
17910-
17911- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17912- THREAD_FLAGS,
17913- THREAD_ORDER));
17914- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17915- irqctx->tinfo.cpu = cpu;
17916- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17917-
17918- per_cpu(softirq_ctx, cpu) = irqctx;
17919+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17920+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17921
17922 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17923 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17924@@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17925 asmlinkage void do_softirq(void)
17926 {
17927 unsigned long flags;
17928- struct thread_info *curctx;
17929 union irq_ctx *irqctx;
17930 u32 *isp;
17931
17932@@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17933 local_irq_save(flags);
17934
17935 if (local_softirq_pending()) {
17936- curctx = current_thread_info();
17937 irqctx = __this_cpu_read(softirq_ctx);
17938- irqctx->tinfo.task = curctx->task;
17939- irqctx->tinfo.previous_esp = current_stack_pointer;
17940+ irqctx->previous_esp = current_stack_pointer;
17941
17942 /* build the stack frame on the softirq stack */
17943- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17944+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17945+
17946+#ifdef CONFIG_PAX_MEMORY_UDEREF
17947+ __set_fs(MAKE_MM_SEG(0));
17948+#endif
17949
17950 call_on_stack(__do_softirq, isp);
17951+
17952+#ifdef CONFIG_PAX_MEMORY_UDEREF
17953+ __set_fs(current_thread_info()->addr_limit);
17954+#endif
17955+
17956 /*
17957 * Shouldn't happen, we returned above if in_interrupt():
17958 */
17959diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17960index d04d3ec..ea4b374 100644
17961--- a/arch/x86/kernel/irq_64.c
17962+++ b/arch/x86/kernel/irq_64.c
17963@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17964 u64 estack_top, estack_bottom;
17965 u64 curbase = (u64)task_stack_page(current);
17966
17967- if (user_mode_vm(regs))
17968+ if (user_mode(regs))
17969 return;
17970
17971 if (regs->sp >= curbase + sizeof(struct thread_info) +
17972diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17973index 90fcf62..e682cdd 100644
17974--- a/arch/x86/kernel/kdebugfs.c
17975+++ b/arch/x86/kernel/kdebugfs.c
17976@@ -28,6 +28,8 @@ struct setup_data_node {
17977 };
17978
17979 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17980+ size_t count, loff_t *ppos) __size_overflow(3);
17981+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17982 size_t count, loff_t *ppos)
17983 {
17984 struct setup_data_node *node = file->private_data;
17985diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17986index 2f45c4c..d95504f 100644
17987--- a/arch/x86/kernel/kgdb.c
17988+++ b/arch/x86/kernel/kgdb.c
17989@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17990 #ifdef CONFIG_X86_32
17991 switch (regno) {
17992 case GDB_SS:
17993- if (!user_mode_vm(regs))
17994+ if (!user_mode(regs))
17995 *(unsigned long *)mem = __KERNEL_DS;
17996 break;
17997 case GDB_SP:
17998- if (!user_mode_vm(regs))
17999+ if (!user_mode(regs))
18000 *(unsigned long *)mem = kernel_stack_pointer(regs);
18001 break;
18002 case GDB_GS:
18003@@ -475,12 +475,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18004 case 'k':
18005 /* clear the trace bit */
18006 linux_regs->flags &= ~X86_EFLAGS_TF;
18007- atomic_set(&kgdb_cpu_doing_single_step, -1);
18008+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18009
18010 /* set the trace bit if we're stepping */
18011 if (remcomInBuffer[0] == 's') {
18012 linux_regs->flags |= X86_EFLAGS_TF;
18013- atomic_set(&kgdb_cpu_doing_single_step,
18014+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18015 raw_smp_processor_id());
18016 }
18017
18018@@ -545,7 +545,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18019
18020 switch (cmd) {
18021 case DIE_DEBUG:
18022- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
18023+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
18024 if (user_mode(regs))
18025 return single_step_cont(regs, args);
18026 break;
18027diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18028index 7da647d..56fe348 100644
18029--- a/arch/x86/kernel/kprobes.c
18030+++ b/arch/x86/kernel/kprobes.c
18031@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
18032 } __attribute__((packed)) *insn;
18033
18034 insn = (struct __arch_relative_insn *)from;
18035+
18036+ pax_open_kernel();
18037 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
18038 insn->op = op;
18039+ pax_close_kernel();
18040 }
18041
18042 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
18043@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18044 kprobe_opcode_t opcode;
18045 kprobe_opcode_t *orig_opcodes = opcodes;
18046
18047- if (search_exception_tables((unsigned long)opcodes))
18048+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18049 return 0; /* Page fault may occur on this address. */
18050
18051 retry:
18052@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
18053 }
18054 }
18055 insn_get_length(&insn);
18056+ pax_open_kernel();
18057 memcpy(dest, insn.kaddr, insn.length);
18058+ pax_close_kernel();
18059
18060 #ifdef CONFIG_X86_64
18061 if (insn_rip_relative(&insn)) {
18062@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
18063 (u8 *) dest;
18064 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
18065 disp = (u8 *) dest + insn_offset_displacement(&insn);
18066+ pax_open_kernel();
18067 *(s32 *) disp = (s32) newdisp;
18068+ pax_close_kernel();
18069 }
18070 #endif
18071 return insn.length;
18072@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
18073 */
18074 __copy_instruction(p->ainsn.insn, p->addr, 0);
18075
18076- if (can_boost(p->addr))
18077+ if (can_boost(ktla_ktva(p->addr)))
18078 p->ainsn.boostable = 0;
18079 else
18080 p->ainsn.boostable = -1;
18081
18082- p->opcode = *p->addr;
18083+ p->opcode = *(ktla_ktva(p->addr));
18084 }
18085
18086 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18087@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18088 * nor set current_kprobe, because it doesn't use single
18089 * stepping.
18090 */
18091- regs->ip = (unsigned long)p->ainsn.insn;
18092+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18093 preempt_enable_no_resched();
18094 return;
18095 }
18096@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18097 if (p->opcode == BREAKPOINT_INSTRUCTION)
18098 regs->ip = (unsigned long)p->addr;
18099 else
18100- regs->ip = (unsigned long)p->ainsn.insn;
18101+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18102 }
18103
18104 /*
18105@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18106 setup_singlestep(p, regs, kcb, 0);
18107 return 1;
18108 }
18109- } else if (*addr != BREAKPOINT_INSTRUCTION) {
18110+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18111 /*
18112 * The breakpoint instruction was removed right
18113 * after we hit it. Another cpu has removed
18114@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18115 " movq %rax, 152(%rsp)\n"
18116 RESTORE_REGS_STRING
18117 " popfq\n"
18118+#ifdef KERNEXEC_PLUGIN
18119+ " btsq $63,(%rsp)\n"
18120+#endif
18121 #else
18122 " pushf\n"
18123 SAVE_REGS_STRING
18124@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18125 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18126 {
18127 unsigned long *tos = stack_addr(regs);
18128- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18129+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18130 unsigned long orig_ip = (unsigned long)p->addr;
18131 kprobe_opcode_t *insn = p->ainsn.insn;
18132
18133@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18134 struct die_args *args = data;
18135 int ret = NOTIFY_DONE;
18136
18137- if (args->regs && user_mode_vm(args->regs))
18138+ if (args->regs && user_mode(args->regs))
18139 return ret;
18140
18141 switch (val) {
18142@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18143 * Verify if the address gap is in 2GB range, because this uses
18144 * a relative jump.
18145 */
18146- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
18147+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
18148 if (abs(rel) > 0x7fffffff)
18149 return -ERANGE;
18150
18151@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18152 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
18153
18154 /* Set probe function call */
18155- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
18156+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
18157
18158 /* Set returning jmp instruction at the tail of out-of-line buffer */
18159 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
18160- (u8 *)op->kp.addr + op->optinsn.size);
18161+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
18162
18163 flush_icache_range((unsigned long) buf,
18164 (unsigned long) buf + TMPL_END_IDX +
18165@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
18166 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
18167
18168 /* Backup instructions which will be replaced by jump address */
18169- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
18170+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
18171 RELATIVE_ADDR_SIZE);
18172
18173 insn_buf[0] = RELATIVEJUMP_OPCODE;
18174diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18175index ea69726..a305f16 100644
18176--- a/arch/x86/kernel/ldt.c
18177+++ b/arch/x86/kernel/ldt.c
18178@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18179 if (reload) {
18180 #ifdef CONFIG_SMP
18181 preempt_disable();
18182- load_LDT(pc);
18183+ load_LDT_nolock(pc);
18184 if (!cpumask_equal(mm_cpumask(current->mm),
18185 cpumask_of(smp_processor_id())))
18186 smp_call_function(flush_ldt, current->mm, 1);
18187 preempt_enable();
18188 #else
18189- load_LDT(pc);
18190+ load_LDT_nolock(pc);
18191 #endif
18192 }
18193 if (oldsize) {
18194@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18195 return err;
18196
18197 for (i = 0; i < old->size; i++)
18198- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18199+ write_ldt_entry(new->ldt, i, old->ldt + i);
18200 return 0;
18201 }
18202
18203@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18204 retval = copy_ldt(&mm->context, &old_mm->context);
18205 mutex_unlock(&old_mm->context.lock);
18206 }
18207+
18208+ if (tsk == current) {
18209+ mm->context.vdso = 0;
18210+
18211+#ifdef CONFIG_X86_32
18212+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18213+ mm->context.user_cs_base = 0UL;
18214+ mm->context.user_cs_limit = ~0UL;
18215+
18216+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18217+ cpus_clear(mm->context.cpu_user_cs_mask);
18218+#endif
18219+
18220+#endif
18221+#endif
18222+
18223+ }
18224+
18225 return retval;
18226 }
18227
18228@@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm)
18229 }
18230 }
18231
18232+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
18233 static int read_ldt(void __user *ptr, unsigned long bytecount)
18234 {
18235 int err;
18236@@ -175,6 +194,7 @@ error_return:
18237 return err;
18238 }
18239
18240+static int read_default_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
18241 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
18242 {
18243 /* CHECKME: Can we use _one_ random number ? */
18244@@ -230,6 +250,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18245 }
18246 }
18247
18248+#ifdef CONFIG_PAX_SEGMEXEC
18249+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18250+ error = -EINVAL;
18251+ goto out_unlock;
18252+ }
18253+#endif
18254+
18255 fill_ldt(&ldt, &ldt_info);
18256 if (oldmode)
18257 ldt.avl = 0;
18258diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18259index a3fa43b..8966f4c 100644
18260--- a/arch/x86/kernel/machine_kexec_32.c
18261+++ b/arch/x86/kernel/machine_kexec_32.c
18262@@ -27,7 +27,7 @@
18263 #include <asm/cacheflush.h>
18264 #include <asm/debugreg.h>
18265
18266-static void set_idt(void *newidt, __u16 limit)
18267+static void set_idt(struct desc_struct *newidt, __u16 limit)
18268 {
18269 struct desc_ptr curidt;
18270
18271@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
18272 }
18273
18274
18275-static void set_gdt(void *newgdt, __u16 limit)
18276+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18277 {
18278 struct desc_ptr curgdt;
18279
18280@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18281 }
18282
18283 control_page = page_address(image->control_code_page);
18284- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18285+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18286
18287 relocate_kernel_ptr = control_page;
18288 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18289diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18290index 3ca42d0..79d24cd 100644
18291--- a/arch/x86/kernel/microcode_intel.c
18292+++ b/arch/x86/kernel/microcode_intel.c
18293@@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18294 return ret;
18295 }
18296
18297+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
18298 static int get_ucode_user(void *to, const void *from, size_t n)
18299 {
18300- return copy_from_user(to, from, n);
18301+ return copy_from_user(to, (const void __force_user *)from, n);
18302 }
18303
18304 static enum ucode_state
18305 request_microcode_user(int cpu, const void __user *buf, size_t size)
18306 {
18307- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18308+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18309 }
18310
18311 static void microcode_fini_cpu(int cpu)
18312diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18313index 925179f..1f0d561 100644
18314--- a/arch/x86/kernel/module.c
18315+++ b/arch/x86/kernel/module.c
18316@@ -36,15 +36,61 @@
18317 #define DEBUGP(fmt...)
18318 #endif
18319
18320-void *module_alloc(unsigned long size)
18321+static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
18322+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18323 {
18324- if (PAGE_ALIGN(size) > MODULES_LEN)
18325+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18326 return NULL;
18327 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18328- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18329+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18330 -1, __builtin_return_address(0));
18331 }
18332
18333+void *module_alloc(unsigned long size)
18334+{
18335+
18336+#ifdef CONFIG_PAX_KERNEXEC
18337+ return __module_alloc(size, PAGE_KERNEL);
18338+#else
18339+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18340+#endif
18341+
18342+}
18343+
18344+#ifdef CONFIG_PAX_KERNEXEC
18345+#ifdef CONFIG_X86_32
18346+void *module_alloc_exec(unsigned long size)
18347+{
18348+ struct vm_struct *area;
18349+
18350+ if (size == 0)
18351+ return NULL;
18352+
18353+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18354+ return area ? area->addr : NULL;
18355+}
18356+EXPORT_SYMBOL(module_alloc_exec);
18357+
18358+void module_free_exec(struct module *mod, void *module_region)
18359+{
18360+ vunmap(module_region);
18361+}
18362+EXPORT_SYMBOL(module_free_exec);
18363+#else
18364+void module_free_exec(struct module *mod, void *module_region)
18365+{
18366+ module_free(mod, module_region);
18367+}
18368+EXPORT_SYMBOL(module_free_exec);
18369+
18370+void *module_alloc_exec(unsigned long size)
18371+{
18372+ return __module_alloc(size, PAGE_KERNEL_RX);
18373+}
18374+EXPORT_SYMBOL(module_alloc_exec);
18375+#endif
18376+#endif
18377+
18378 #ifdef CONFIG_X86_32
18379 int apply_relocate(Elf32_Shdr *sechdrs,
18380 const char *strtab,
18381@@ -55,14 +101,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18382 unsigned int i;
18383 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18384 Elf32_Sym *sym;
18385- uint32_t *location;
18386+ uint32_t *plocation, location;
18387
18388 DEBUGP("Applying relocate section %u to %u\n", relsec,
18389 sechdrs[relsec].sh_info);
18390 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18391 /* This is where to make the change */
18392- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18393- + rel[i].r_offset;
18394+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18395+ location = (uint32_t)plocation;
18396+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18397+ plocation = ktla_ktva((void *)plocation);
18398 /* This is the symbol it is referring to. Note that all
18399 undefined symbols have been resolved. */
18400 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18401@@ -71,11 +119,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18402 switch (ELF32_R_TYPE(rel[i].r_info)) {
18403 case R_386_32:
18404 /* We add the value into the location given */
18405- *location += sym->st_value;
18406+ pax_open_kernel();
18407+ *plocation += sym->st_value;
18408+ pax_close_kernel();
18409 break;
18410 case R_386_PC32:
18411 /* Add the value, subtract its postition */
18412- *location += sym->st_value - (uint32_t)location;
18413+ pax_open_kernel();
18414+ *plocation += sym->st_value - location;
18415+ pax_close_kernel();
18416 break;
18417 default:
18418 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18419@@ -120,21 +172,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18420 case R_X86_64_NONE:
18421 break;
18422 case R_X86_64_64:
18423+ pax_open_kernel();
18424 *(u64 *)loc = val;
18425+ pax_close_kernel();
18426 break;
18427 case R_X86_64_32:
18428+ pax_open_kernel();
18429 *(u32 *)loc = val;
18430+ pax_close_kernel();
18431 if (val != *(u32 *)loc)
18432 goto overflow;
18433 break;
18434 case R_X86_64_32S:
18435+ pax_open_kernel();
18436 *(s32 *)loc = val;
18437+ pax_close_kernel();
18438 if ((s64)val != *(s32 *)loc)
18439 goto overflow;
18440 break;
18441 case R_X86_64_PC32:
18442 val -= (u64)loc;
18443+ pax_open_kernel();
18444 *(u32 *)loc = val;
18445+ pax_close_kernel();
18446+
18447 #if 0
18448 if ((s64)val != *(s32 *)loc)
18449 goto overflow;
18450diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18451index 47acaf3..ec48ab6 100644
18452--- a/arch/x86/kernel/nmi.c
18453+++ b/arch/x86/kernel/nmi.c
18454@@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
18455 dotraplinkage notrace __kprobes void
18456 do_nmi(struct pt_regs *regs, long error_code)
18457 {
18458+
18459+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18460+ if (!user_mode(regs)) {
18461+ unsigned long cs = regs->cs & 0xFFFF;
18462+ unsigned long ip = ktva_ktla(regs->ip);
18463+
18464+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18465+ regs->ip = ip;
18466+ }
18467+#endif
18468+
18469 nmi_nesting_preprocess(regs);
18470
18471 nmi_enter();
18472diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18473index 676b8c7..870ba04 100644
18474--- a/arch/x86/kernel/paravirt-spinlocks.c
18475+++ b/arch/x86/kernel/paravirt-spinlocks.c
18476@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18477 arch_spin_lock(lock);
18478 }
18479
18480-struct pv_lock_ops pv_lock_ops = {
18481+struct pv_lock_ops pv_lock_ops __read_only = {
18482 #ifdef CONFIG_SMP
18483 .spin_is_locked = __ticket_spin_is_locked,
18484 .spin_is_contended = __ticket_spin_is_contended,
18485diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18486index d90272e..6bb013b 100644
18487--- a/arch/x86/kernel/paravirt.c
18488+++ b/arch/x86/kernel/paravirt.c
18489@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18490 {
18491 return x;
18492 }
18493+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18494+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18495+#endif
18496
18497 void __init default_banner(void)
18498 {
18499@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18500 if (opfunc == NULL)
18501 /* If there's no function, patch it with a ud2a (BUG) */
18502 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18503- else if (opfunc == _paravirt_nop)
18504+ else if (opfunc == (void *)_paravirt_nop)
18505 /* If the operation is a nop, then nop the callsite */
18506 ret = paravirt_patch_nop();
18507
18508 /* identity functions just return their single argument */
18509- else if (opfunc == _paravirt_ident_32)
18510+ else if (opfunc == (void *)_paravirt_ident_32)
18511 ret = paravirt_patch_ident_32(insnbuf, len);
18512- else if (opfunc == _paravirt_ident_64)
18513+ else if (opfunc == (void *)_paravirt_ident_64)
18514 ret = paravirt_patch_ident_64(insnbuf, len);
18515+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18516+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18517+ ret = paravirt_patch_ident_64(insnbuf, len);
18518+#endif
18519
18520 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18521 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18522@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18523 if (insn_len > len || start == NULL)
18524 insn_len = len;
18525 else
18526- memcpy(insnbuf, start, insn_len);
18527+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18528
18529 return insn_len;
18530 }
18531@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
18532 preempt_enable();
18533 }
18534
18535-struct pv_info pv_info = {
18536+struct pv_info pv_info __read_only = {
18537 .name = "bare hardware",
18538 .paravirt_enabled = 0,
18539 .kernel_rpl = 0,
18540@@ -313,16 +320,16 @@ struct pv_info pv_info = {
18541 #endif
18542 };
18543
18544-struct pv_init_ops pv_init_ops = {
18545+struct pv_init_ops pv_init_ops __read_only = {
18546 .patch = native_patch,
18547 };
18548
18549-struct pv_time_ops pv_time_ops = {
18550+struct pv_time_ops pv_time_ops __read_only = {
18551 .sched_clock = native_sched_clock,
18552 .steal_clock = native_steal_clock,
18553 };
18554
18555-struct pv_irq_ops pv_irq_ops = {
18556+struct pv_irq_ops pv_irq_ops __read_only = {
18557 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18558 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18559 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18560@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
18561 #endif
18562 };
18563
18564-struct pv_cpu_ops pv_cpu_ops = {
18565+struct pv_cpu_ops pv_cpu_ops __read_only = {
18566 .cpuid = native_cpuid,
18567 .get_debugreg = native_get_debugreg,
18568 .set_debugreg = native_set_debugreg,
18569@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18570 .end_context_switch = paravirt_nop,
18571 };
18572
18573-struct pv_apic_ops pv_apic_ops = {
18574+struct pv_apic_ops pv_apic_ops __read_only = {
18575 #ifdef CONFIG_X86_LOCAL_APIC
18576 .startup_ipi_hook = paravirt_nop,
18577 #endif
18578 };
18579
18580-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18581+#ifdef CONFIG_X86_32
18582+#ifdef CONFIG_X86_PAE
18583+/* 64-bit pagetable entries */
18584+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18585+#else
18586 /* 32-bit pagetable entries */
18587 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18588+#endif
18589 #else
18590 /* 64-bit pagetable entries */
18591 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18592 #endif
18593
18594-struct pv_mmu_ops pv_mmu_ops = {
18595+struct pv_mmu_ops pv_mmu_ops __read_only = {
18596
18597 .read_cr2 = native_read_cr2,
18598 .write_cr2 = native_write_cr2,
18599@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18600 .make_pud = PTE_IDENT,
18601
18602 .set_pgd = native_set_pgd,
18603+ .set_pgd_batched = native_set_pgd_batched,
18604 #endif
18605 #endif /* PAGETABLE_LEVELS >= 3 */
18606
18607@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18608 },
18609
18610 .set_fixmap = native_set_fixmap,
18611+
18612+#ifdef CONFIG_PAX_KERNEXEC
18613+ .pax_open_kernel = native_pax_open_kernel,
18614+ .pax_close_kernel = native_pax_close_kernel,
18615+#endif
18616+
18617 };
18618
18619 EXPORT_SYMBOL_GPL(pv_time_ops);
18620diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18621index 35ccf75..7a15747 100644
18622--- a/arch/x86/kernel/pci-iommu_table.c
18623+++ b/arch/x86/kernel/pci-iommu_table.c
18624@@ -2,7 +2,7 @@
18625 #include <asm/iommu_table.h>
18626 #include <linux/string.h>
18627 #include <linux/kallsyms.h>
18628-
18629+#include <linux/sched.h>
18630
18631 #define DEBUG 1
18632
18633diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18634index 15763af..da59ada 100644
18635--- a/arch/x86/kernel/process.c
18636+++ b/arch/x86/kernel/process.c
18637@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18638
18639 void free_thread_info(struct thread_info *ti)
18640 {
18641- free_thread_xstate(ti->task);
18642 free_pages((unsigned long)ti, THREAD_ORDER);
18643 }
18644
18645+static struct kmem_cache *task_struct_cachep;
18646+
18647 void arch_task_cache_init(void)
18648 {
18649- task_xstate_cachep =
18650- kmem_cache_create("task_xstate", xstate_size,
18651+ /* create a slab on which task_structs can be allocated */
18652+ task_struct_cachep =
18653+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18654+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18655+
18656+ task_xstate_cachep =
18657+ kmem_cache_create("task_xstate", xstate_size,
18658 __alignof__(union thread_xstate),
18659- SLAB_PANIC | SLAB_NOTRACK, NULL);
18660+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18661+}
18662+
18663+struct task_struct *alloc_task_struct_node(int node)
18664+{
18665+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18666+}
18667+
18668+void free_task_struct(struct task_struct *task)
18669+{
18670+ free_thread_xstate(task);
18671+ kmem_cache_free(task_struct_cachep, task);
18672 }
18673
18674 /*
18675@@ -70,7 +87,7 @@ void exit_thread(void)
18676 unsigned long *bp = t->io_bitmap_ptr;
18677
18678 if (bp) {
18679- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18680+ struct tss_struct *tss = init_tss + get_cpu();
18681
18682 t->io_bitmap_ptr = NULL;
18683 clear_thread_flag(TIF_IO_BITMAP);
18684@@ -106,7 +123,7 @@ void show_regs_common(void)
18685
18686 printk(KERN_CONT "\n");
18687 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18688- current->pid, current->comm, print_tainted(),
18689+ task_pid_nr(current), current->comm, print_tainted(),
18690 init_utsname()->release,
18691 (int)strcspn(init_utsname()->version, " "),
18692 init_utsname()->version);
18693@@ -120,6 +137,9 @@ void flush_thread(void)
18694 {
18695 struct task_struct *tsk = current;
18696
18697+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18698+ loadsegment(gs, 0);
18699+#endif
18700 flush_ptrace_hw_breakpoint(tsk);
18701 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18702 /*
18703@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18704 regs.di = (unsigned long) arg;
18705
18706 #ifdef CONFIG_X86_32
18707- regs.ds = __USER_DS;
18708- regs.es = __USER_DS;
18709+ regs.ds = __KERNEL_DS;
18710+ regs.es = __KERNEL_DS;
18711 regs.fs = __KERNEL_PERCPU;
18712- regs.gs = __KERNEL_STACK_CANARY;
18713+ savesegment(gs, regs.gs);
18714 #else
18715 regs.ss = __KERNEL_DS;
18716 #endif
18717@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
18718
18719 return ret;
18720 }
18721-void stop_this_cpu(void *dummy)
18722+__noreturn void stop_this_cpu(void *dummy)
18723 {
18724 local_irq_disable();
18725 /*
18726@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18727 }
18728 early_param("idle", idle_setup);
18729
18730-unsigned long arch_align_stack(unsigned long sp)
18731+#ifdef CONFIG_PAX_RANDKSTACK
18732+void pax_randomize_kstack(struct pt_regs *regs)
18733 {
18734- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18735- sp -= get_random_int() % 8192;
18736- return sp & ~0xf;
18737-}
18738+ struct thread_struct *thread = &current->thread;
18739+ unsigned long time;
18740
18741-unsigned long arch_randomize_brk(struct mm_struct *mm)
18742-{
18743- unsigned long range_end = mm->brk + 0x02000000;
18744- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18745-}
18746+ if (!randomize_va_space)
18747+ return;
18748+
18749+ if (v8086_mode(regs))
18750+ return;
18751
18752+ rdtscl(time);
18753+
18754+ /* P4 seems to return a 0 LSB, ignore it */
18755+#ifdef CONFIG_MPENTIUM4
18756+ time &= 0x3EUL;
18757+ time <<= 2;
18758+#elif defined(CONFIG_X86_64)
18759+ time &= 0xFUL;
18760+ time <<= 4;
18761+#else
18762+ time &= 0x1FUL;
18763+ time <<= 3;
18764+#endif
18765+
18766+ thread->sp0 ^= time;
18767+ load_sp0(init_tss + smp_processor_id(), thread);
18768+
18769+#ifdef CONFIG_X86_64
18770+ percpu_write(kernel_stack, thread->sp0);
18771+#endif
18772+}
18773+#endif
18774diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18775index c08d1ff..6ae1c81 100644
18776--- a/arch/x86/kernel/process_32.c
18777+++ b/arch/x86/kernel/process_32.c
18778@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18779 unsigned long thread_saved_pc(struct task_struct *tsk)
18780 {
18781 return ((unsigned long *)tsk->thread.sp)[3];
18782+//XXX return tsk->thread.eip;
18783 }
18784
18785 #ifndef CONFIG_SMP
18786@@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18787 unsigned long sp;
18788 unsigned short ss, gs;
18789
18790- if (user_mode_vm(regs)) {
18791+ if (user_mode(regs)) {
18792 sp = regs->sp;
18793 ss = regs->ss & 0xffff;
18794- gs = get_user_gs(regs);
18795 } else {
18796 sp = kernel_stack_pointer(regs);
18797 savesegment(ss, ss);
18798- savesegment(gs, gs);
18799 }
18800+ gs = get_user_gs(regs);
18801
18802 show_regs_common();
18803
18804@@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18805 struct task_struct *tsk;
18806 int err;
18807
18808- childregs = task_pt_regs(p);
18809+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18810 *childregs = *regs;
18811 childregs->ax = 0;
18812 childregs->sp = sp;
18813
18814 p->thread.sp = (unsigned long) childregs;
18815 p->thread.sp0 = (unsigned long) (childregs+1);
18816+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18817
18818 p->thread.ip = (unsigned long) ret_from_fork;
18819
18820@@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18821 struct thread_struct *prev = &prev_p->thread,
18822 *next = &next_p->thread;
18823 int cpu = smp_processor_id();
18824- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18825+ struct tss_struct *tss = init_tss + cpu;
18826 fpu_switch_t fpu;
18827
18828 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18829@@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18830 */
18831 lazy_save_gs(prev->gs);
18832
18833+#ifdef CONFIG_PAX_MEMORY_UDEREF
18834+ __set_fs(task_thread_info(next_p)->addr_limit);
18835+#endif
18836+
18837 /*
18838 * Load the per-thread Thread-Local Storage descriptor.
18839 */
18840@@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18841 */
18842 arch_end_context_switch(next_p);
18843
18844+ percpu_write(current_task, next_p);
18845+ percpu_write(current_tinfo, &next_p->tinfo);
18846+
18847 /*
18848 * Restore %gs if needed (which is common)
18849 */
18850@@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18851
18852 switch_fpu_finish(next_p, fpu);
18853
18854- percpu_write(current_task, next_p);
18855-
18856 return prev_p;
18857 }
18858
18859@@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18860 } while (count++ < 16);
18861 return 0;
18862 }
18863-
18864diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18865index cfa5c90..4facd28 100644
18866--- a/arch/x86/kernel/process_64.c
18867+++ b/arch/x86/kernel/process_64.c
18868@@ -89,7 +89,7 @@ static void __exit_idle(void)
18869 void exit_idle(void)
18870 {
18871 /* idle loop has pid 0 */
18872- if (current->pid)
18873+ if (task_pid_nr(current))
18874 return;
18875 __exit_idle();
18876 }
18877@@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18878 struct pt_regs *childregs;
18879 struct task_struct *me = current;
18880
18881- childregs = ((struct pt_regs *)
18882- (THREAD_SIZE + task_stack_page(p))) - 1;
18883+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18884 *childregs = *regs;
18885
18886 childregs->ax = 0;
18887@@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18888 p->thread.sp = (unsigned long) childregs;
18889 p->thread.sp0 = (unsigned long) (childregs+1);
18890 p->thread.usersp = me->thread.usersp;
18891+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18892
18893 set_tsk_thread_flag(p, TIF_FORK);
18894
18895@@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18896 struct thread_struct *prev = &prev_p->thread;
18897 struct thread_struct *next = &next_p->thread;
18898 int cpu = smp_processor_id();
18899- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18900+ struct tss_struct *tss = init_tss + cpu;
18901 unsigned fsindex, gsindex;
18902 fpu_switch_t fpu;
18903
18904@@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18905 prev->usersp = percpu_read(old_rsp);
18906 percpu_write(old_rsp, next->usersp);
18907 percpu_write(current_task, next_p);
18908+ percpu_write(current_tinfo, &next_p->tinfo);
18909
18910- percpu_write(kernel_stack,
18911- (unsigned long)task_stack_page(next_p) +
18912- THREAD_SIZE - KERNEL_STACK_OFFSET);
18913+ percpu_write(kernel_stack, next->sp0);
18914
18915 /*
18916 * Now maybe reload the debug registers and handle I/O bitmaps
18917@@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18918 if (!p || p == current || p->state == TASK_RUNNING)
18919 return 0;
18920 stack = (unsigned long)task_stack_page(p);
18921- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18922+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18923 return 0;
18924 fp = *(u64 *)(p->thread.sp);
18925 do {
18926- if (fp < (unsigned long)stack ||
18927- fp >= (unsigned long)stack+THREAD_SIZE)
18928+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18929 return 0;
18930 ip = *(u64 *)(fp+8);
18931 if (!in_sched_functions(ip))
18932diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18933index 5026738..574f70a 100644
18934--- a/arch/x86/kernel/ptrace.c
18935+++ b/arch/x86/kernel/ptrace.c
18936@@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
18937 static int ioperm_get(struct task_struct *target,
18938 const struct user_regset *regset,
18939 unsigned int pos, unsigned int count,
18940+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18941+static int ioperm_get(struct task_struct *target,
18942+ const struct user_regset *regset,
18943+ unsigned int pos, unsigned int count,
18944 void *kbuf, void __user *ubuf)
18945 {
18946 if (!target->thread.io_bitmap_ptr)
18947@@ -823,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
18948 unsigned long addr, unsigned long data)
18949 {
18950 int ret;
18951- unsigned long __user *datap = (unsigned long __user *)data;
18952+ unsigned long __user *datap = (__force unsigned long __user *)data;
18953
18954 switch (request) {
18955 /* read the word at location addr in the USER area. */
18956@@ -908,14 +912,14 @@ long arch_ptrace(struct task_struct *child, long request,
18957 if ((int) addr < 0)
18958 return -EIO;
18959 ret = do_get_thread_area(child, addr,
18960- (struct user_desc __user *)data);
18961+ (__force struct user_desc __user *) data);
18962 break;
18963
18964 case PTRACE_SET_THREAD_AREA:
18965 if ((int) addr < 0)
18966 return -EIO;
18967 ret = do_set_thread_area(child, addr,
18968- (struct user_desc __user *)data, 0);
18969+ (__force struct user_desc __user *) data, 0);
18970 break;
18971 #endif
18972
18973@@ -1332,7 +1336,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18974 memset(info, 0, sizeof(*info));
18975 info->si_signo = SIGTRAP;
18976 info->si_code = si_code;
18977- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18978+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18979 }
18980
18981 void user_single_step_siginfo(struct task_struct *tsk,
18982@@ -1361,6 +1365,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18983 # define IS_IA32 0
18984 #endif
18985
18986+#ifdef CONFIG_GRKERNSEC_SETXID
18987+extern void gr_delayed_cred_worker(void);
18988+#endif
18989+
18990 /*
18991 * We must return the syscall number to actually look up in the table.
18992 * This can be -1L to skip running any syscall at all.
18993@@ -1369,6 +1377,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18994 {
18995 long ret = 0;
18996
18997+#ifdef CONFIG_GRKERNSEC_SETXID
18998+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18999+ gr_delayed_cred_worker();
19000+#endif
19001+
19002 /*
19003 * If we stepped into a sysenter/syscall insn, it trapped in
19004 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
19005@@ -1412,6 +1425,11 @@ void syscall_trace_leave(struct pt_regs *regs)
19006 {
19007 bool step;
19008
19009+#ifdef CONFIG_GRKERNSEC_SETXID
19010+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
19011+ gr_delayed_cred_worker();
19012+#endif
19013+
19014 audit_syscall_exit(regs);
19015
19016 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
19017diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
19018index 42eb330..139955c 100644
19019--- a/arch/x86/kernel/pvclock.c
19020+++ b/arch/x86/kernel/pvclock.c
19021@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
19022 return pv_tsc_khz;
19023 }
19024
19025-static atomic64_t last_value = ATOMIC64_INIT(0);
19026+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
19027
19028 void pvclock_resume(void)
19029 {
19030- atomic64_set(&last_value, 0);
19031+ atomic64_set_unchecked(&last_value, 0);
19032 }
19033
19034 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
19035@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
19036 * updating at the same time, and one of them could be slightly behind,
19037 * making the assumption that last_value always go forward fail to hold.
19038 */
19039- last = atomic64_read(&last_value);
19040+ last = atomic64_read_unchecked(&last_value);
19041 do {
19042 if (ret < last)
19043 return last;
19044- last = atomic64_cmpxchg(&last_value, last, ret);
19045+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
19046 } while (unlikely(last != ret));
19047
19048 return ret;
19049diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19050index d840e69..98e9581 100644
19051--- a/arch/x86/kernel/reboot.c
19052+++ b/arch/x86/kernel/reboot.c
19053@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
19054 EXPORT_SYMBOL(pm_power_off);
19055
19056 static const struct desc_ptr no_idt = {};
19057-static int reboot_mode;
19058+static unsigned short reboot_mode;
19059 enum reboot_type reboot_type = BOOT_ACPI;
19060 int reboot_force;
19061
19062@@ -335,13 +335,17 @@ core_initcall(reboot_init);
19063 extern const unsigned char machine_real_restart_asm[];
19064 extern const u64 machine_real_restart_gdt[3];
19065
19066-void machine_real_restart(unsigned int type)
19067+__noreturn void machine_real_restart(unsigned int type)
19068 {
19069 void *restart_va;
19070 unsigned long restart_pa;
19071- void (*restart_lowmem)(unsigned int);
19072+ void (* __noreturn restart_lowmem)(unsigned int);
19073 u64 *lowmem_gdt;
19074
19075+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19076+ struct desc_struct *gdt;
19077+#endif
19078+
19079 local_irq_disable();
19080
19081 /* Write zero to CMOS register number 0x0f, which the BIOS POST
19082@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
19083 boot)". This seems like a fairly standard thing that gets set by
19084 REBOOT.COM programs, and the previous reset routine did this
19085 too. */
19086- *((unsigned short *)0x472) = reboot_mode;
19087+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19088
19089 /* Patch the GDT in the low memory trampoline */
19090 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
19091
19092 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
19093 restart_pa = virt_to_phys(restart_va);
19094- restart_lowmem = (void (*)(unsigned int))restart_pa;
19095+ restart_lowmem = (void *)restart_pa;
19096
19097 /* GDT[0]: GDT self-pointer */
19098 lowmem_gdt[0] =
19099@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
19100 GDT_ENTRY(0x009b, restart_pa, 0xffff);
19101
19102 /* Jump to the identity-mapped low memory code */
19103+
19104+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19105+ gdt = get_cpu_gdt_table(smp_processor_id());
19106+ pax_open_kernel();
19107+#ifdef CONFIG_PAX_MEMORY_UDEREF
19108+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
19109+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
19110+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
19111+#endif
19112+#ifdef CONFIG_PAX_KERNEXEC
19113+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
19114+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
19115+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
19116+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
19117+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
19118+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
19119+#endif
19120+ pax_close_kernel();
19121+#endif
19122+
19123+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19124+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
19125+ unreachable();
19126+#else
19127 restart_lowmem(type);
19128+#endif
19129+
19130 }
19131 #ifdef CONFIG_APM_MODULE
19132 EXPORT_SYMBOL(machine_real_restart);
19133@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19134 * try to force a triple fault and then cycle between hitting the keyboard
19135 * controller and doing that
19136 */
19137-static void native_machine_emergency_restart(void)
19138+__noreturn static void native_machine_emergency_restart(void)
19139 {
19140 int i;
19141 int attempt = 0;
19142@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
19143 #endif
19144 }
19145
19146-static void __machine_emergency_restart(int emergency)
19147+static __noreturn void __machine_emergency_restart(int emergency)
19148 {
19149 reboot_emergency = emergency;
19150 machine_ops.emergency_restart();
19151 }
19152
19153-static void native_machine_restart(char *__unused)
19154+static __noreturn void native_machine_restart(char *__unused)
19155 {
19156 printk("machine restart\n");
19157
19158@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
19159 __machine_emergency_restart(0);
19160 }
19161
19162-static void native_machine_halt(void)
19163+static __noreturn void native_machine_halt(void)
19164 {
19165 /* stop other cpus and apics */
19166 machine_shutdown();
19167@@ -706,7 +736,7 @@ static void native_machine_halt(void)
19168 stop_this_cpu(NULL);
19169 }
19170
19171-static void native_machine_power_off(void)
19172+__noreturn static void native_machine_power_off(void)
19173 {
19174 if (pm_power_off) {
19175 if (!reboot_force)
19176@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
19177 }
19178 /* a fallback in case there is no PM info available */
19179 tboot_shutdown(TB_SHUTDOWN_HALT);
19180+ unreachable();
19181 }
19182
19183 struct machine_ops machine_ops = {
19184diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19185index 7a6f3b3..bed145d7 100644
19186--- a/arch/x86/kernel/relocate_kernel_64.S
19187+++ b/arch/x86/kernel/relocate_kernel_64.S
19188@@ -11,6 +11,7 @@
19189 #include <asm/kexec.h>
19190 #include <asm/processor-flags.h>
19191 #include <asm/pgtable_types.h>
19192+#include <asm/alternative-asm.h>
19193
19194 /*
19195 * Must be relocatable PIC code callable as a C function
19196@@ -160,13 +161,14 @@ identity_mapped:
19197 xorq %rbp, %rbp
19198 xorq %r8, %r8
19199 xorq %r9, %r9
19200- xorq %r10, %r9
19201+ xorq %r10, %r10
19202 xorq %r11, %r11
19203 xorq %r12, %r12
19204 xorq %r13, %r13
19205 xorq %r14, %r14
19206 xorq %r15, %r15
19207
19208+ pax_force_retaddr 0, 1
19209 ret
19210
19211 1:
19212diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19213index d7d5099..28555d0 100644
19214--- a/arch/x86/kernel/setup.c
19215+++ b/arch/x86/kernel/setup.c
19216@@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
19217
19218 switch (data->type) {
19219 case SETUP_E820_EXT:
19220- parse_e820_ext(data);
19221+ parse_e820_ext((struct setup_data __force_kernel *)data);
19222 break;
19223 case SETUP_DTB:
19224 add_dtb(pa_data);
19225@@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
19226 * area (640->1Mb) as ram even though it is not.
19227 * take them out.
19228 */
19229- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19230+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19231 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19232 }
19233
19234@@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
19235
19236 if (!boot_params.hdr.root_flags)
19237 root_mountflags &= ~MS_RDONLY;
19238- init_mm.start_code = (unsigned long) _text;
19239- init_mm.end_code = (unsigned long) _etext;
19240+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19241+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19242 init_mm.end_data = (unsigned long) _edata;
19243 init_mm.brk = _brk_end;
19244
19245- code_resource.start = virt_to_phys(_text);
19246- code_resource.end = virt_to_phys(_etext)-1;
19247- data_resource.start = virt_to_phys(_etext);
19248+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19249+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19250+ data_resource.start = virt_to_phys(_sdata);
19251 data_resource.end = virt_to_phys(_edata)-1;
19252 bss_resource.start = virt_to_phys(&__bss_start);
19253 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19254diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19255index 5a98aa2..848d2be 100644
19256--- a/arch/x86/kernel/setup_percpu.c
19257+++ b/arch/x86/kernel/setup_percpu.c
19258@@ -21,19 +21,17 @@
19259 #include <asm/cpu.h>
19260 #include <asm/stackprotector.h>
19261
19262-DEFINE_PER_CPU(int, cpu_number);
19263+#ifdef CONFIG_SMP
19264+DEFINE_PER_CPU(unsigned int, cpu_number);
19265 EXPORT_PER_CPU_SYMBOL(cpu_number);
19266+#endif
19267
19268-#ifdef CONFIG_X86_64
19269 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19270-#else
19271-#define BOOT_PERCPU_OFFSET 0
19272-#endif
19273
19274 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19275 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19276
19277-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19278+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19279 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19280 };
19281 EXPORT_SYMBOL(__per_cpu_offset);
19282@@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void)
19283 * Pointer to the allocated area on success, NULL on failure.
19284 */
19285 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19286+ unsigned long align) __size_overflow(2);
19287+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19288 unsigned long align)
19289 {
19290 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
19291@@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
19292 /*
19293 * Helpers for first chunk memory allocation
19294 */
19295+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
19296+
19297 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
19298 {
19299 return pcpu_alloc_bootmem(cpu, size, align);
19300@@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19301 {
19302 #ifdef CONFIG_X86_32
19303 struct desc_struct gdt;
19304+ unsigned long base = per_cpu_offset(cpu);
19305
19306- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19307- 0x2 | DESCTYPE_S, 0x8);
19308- gdt.s = 1;
19309+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19310+ 0x83 | DESCTYPE_S, 0xC);
19311 write_gdt_entry(get_cpu_gdt_table(cpu),
19312 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19313 #endif
19314@@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void)
19315 /* alrighty, percpu areas up and running */
19316 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19317 for_each_possible_cpu(cpu) {
19318+#ifdef CONFIG_CC_STACKPROTECTOR
19319+#ifdef CONFIG_X86_32
19320+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19321+#endif
19322+#endif
19323 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19324 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19325 per_cpu(cpu_number, cpu) = cpu;
19326@@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void)
19327 */
19328 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19329 #endif
19330+#ifdef CONFIG_CC_STACKPROTECTOR
19331+#ifdef CONFIG_X86_32
19332+ if (!cpu)
19333+ per_cpu(stack_canary.canary, cpu) = canary;
19334+#endif
19335+#endif
19336 /*
19337 * Up to this point, the boot CPU has been using .init.data
19338 * area. Reload any changed state for the boot CPU.
19339diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19340index 46a01bd..2e88e6d 100644
19341--- a/arch/x86/kernel/signal.c
19342+++ b/arch/x86/kernel/signal.c
19343@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
19344 * Align the stack pointer according to the i386 ABI,
19345 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19346 */
19347- sp = ((sp + 4) & -16ul) - 4;
19348+ sp = ((sp - 12) & -16ul) - 4;
19349 #else /* !CONFIG_X86_32 */
19350 sp = round_down(sp, 16) - 8;
19351 #endif
19352@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19353 * Return an always-bogus address instead so we will die with SIGSEGV.
19354 */
19355 if (onsigstack && !likely(on_sig_stack(sp)))
19356- return (void __user *)-1L;
19357+ return (__force void __user *)-1L;
19358
19359 /* save i387 state */
19360 if (used_math() && save_i387_xstate(*fpstate) < 0)
19361- return (void __user *)-1L;
19362+ return (__force void __user *)-1L;
19363
19364 return (void __user *)sp;
19365 }
19366@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19367 }
19368
19369 if (current->mm->context.vdso)
19370- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19371+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19372 else
19373- restorer = &frame->retcode;
19374+ restorer = (void __user *)&frame->retcode;
19375 if (ka->sa.sa_flags & SA_RESTORER)
19376 restorer = ka->sa.sa_restorer;
19377
19378@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19379 * reasons and because gdb uses it as a signature to notice
19380 * signal handler stack frames.
19381 */
19382- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19383+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19384
19385 if (err)
19386 return -EFAULT;
19387@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19388 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19389
19390 /* Set up to return from userspace. */
19391- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19392+ if (current->mm->context.vdso)
19393+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19394+ else
19395+ restorer = (void __user *)&frame->retcode;
19396 if (ka->sa.sa_flags & SA_RESTORER)
19397 restorer = ka->sa.sa_restorer;
19398 put_user_ex(restorer, &frame->pretcode);
19399@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19400 * reasons and because gdb uses it as a signature to notice
19401 * signal handler stack frames.
19402 */
19403- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19404+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19405 } put_user_catch(err);
19406
19407 if (err)
19408@@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
19409 * X86_32: vm86 regs switched out by assembly code before reaching
19410 * here, so testing against kernel CS suffices.
19411 */
19412- if (!user_mode(regs))
19413+ if (!user_mode_novm(regs))
19414 return;
19415
19416 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19417diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19418index 66d250c..f1b10bd 100644
19419--- a/arch/x86/kernel/smpboot.c
19420+++ b/arch/x86/kernel/smpboot.c
19421@@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19422 set_idle_for_cpu(cpu, c_idle.idle);
19423 do_rest:
19424 per_cpu(current_task, cpu) = c_idle.idle;
19425+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19426 #ifdef CONFIG_X86_32
19427 /* Stack for startup_32 can be just as for start_secondary onwards */
19428 irq_ctx_init(cpu);
19429 #else
19430 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19431 initial_gs = per_cpu_offset(cpu);
19432- per_cpu(kernel_stack, cpu) =
19433- (unsigned long)task_stack_page(c_idle.idle) -
19434- KERNEL_STACK_OFFSET + THREAD_SIZE;
19435+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19436 #endif
19437+
19438+ pax_open_kernel();
19439 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19440+ pax_close_kernel();
19441+
19442 initial_code = (unsigned long)start_secondary;
19443 stack_start = c_idle.idle->thread.sp;
19444
19445@@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19446
19447 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19448
19449+#ifdef CONFIG_PAX_PER_CPU_PGD
19450+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19451+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19452+ KERNEL_PGD_PTRS);
19453+#endif
19454+
19455 err = do_boot_cpu(apicid, cpu);
19456 if (err) {
19457 pr_debug("do_boot_cpu failed %d\n", err);
19458diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19459index c346d11..d43b163 100644
19460--- a/arch/x86/kernel/step.c
19461+++ b/arch/x86/kernel/step.c
19462@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19463 struct desc_struct *desc;
19464 unsigned long base;
19465
19466- seg &= ~7UL;
19467+ seg >>= 3;
19468
19469 mutex_lock(&child->mm->context.lock);
19470- if (unlikely((seg >> 3) >= child->mm->context.size))
19471+ if (unlikely(seg >= child->mm->context.size))
19472 addr = -1L; /* bogus selector, access would fault */
19473 else {
19474 desc = child->mm->context.ldt + seg;
19475@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19476 addr += base;
19477 }
19478 mutex_unlock(&child->mm->context.lock);
19479- }
19480+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19481+ addr = ktla_ktva(addr);
19482
19483 return addr;
19484 }
19485@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19486 unsigned char opcode[15];
19487 unsigned long addr = convert_ip_to_linear(child, regs);
19488
19489+ if (addr == -EINVAL)
19490+ return 0;
19491+
19492 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19493 for (i = 0; i < copied; i++) {
19494 switch (opcode[i]) {
19495diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19496index 0b0cb5f..db6b9ed 100644
19497--- a/arch/x86/kernel/sys_i386_32.c
19498+++ b/arch/x86/kernel/sys_i386_32.c
19499@@ -24,17 +24,224 @@
19500
19501 #include <asm/syscalls.h>
19502
19503-/*
19504- * Do a system call from kernel instead of calling sys_execve so we
19505- * end up with proper pt_regs.
19506- */
19507-int kernel_execve(const char *filename,
19508- const char *const argv[],
19509- const char *const envp[])
19510+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19511 {
19512- long __res;
19513- asm volatile ("int $0x80"
19514- : "=a" (__res)
19515- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19516- return __res;
19517+ unsigned long pax_task_size = TASK_SIZE;
19518+
19519+#ifdef CONFIG_PAX_SEGMEXEC
19520+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19521+ pax_task_size = SEGMEXEC_TASK_SIZE;
19522+#endif
19523+
19524+ if (len > pax_task_size || addr > pax_task_size - len)
19525+ return -EINVAL;
19526+
19527+ return 0;
19528+}
19529+
19530+unsigned long
19531+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19532+ unsigned long len, unsigned long pgoff, unsigned long flags)
19533+{
19534+ struct mm_struct *mm = current->mm;
19535+ struct vm_area_struct *vma;
19536+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19537+
19538+#ifdef CONFIG_PAX_SEGMEXEC
19539+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19540+ pax_task_size = SEGMEXEC_TASK_SIZE;
19541+#endif
19542+
19543+ pax_task_size -= PAGE_SIZE;
19544+
19545+ if (len > pax_task_size)
19546+ return -ENOMEM;
19547+
19548+ if (flags & MAP_FIXED)
19549+ return addr;
19550+
19551+#ifdef CONFIG_PAX_RANDMMAP
19552+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19553+#endif
19554+
19555+ if (addr) {
19556+ addr = PAGE_ALIGN(addr);
19557+ if (pax_task_size - len >= addr) {
19558+ vma = find_vma(mm, addr);
19559+ if (check_heap_stack_gap(vma, addr, len))
19560+ return addr;
19561+ }
19562+ }
19563+ if (len > mm->cached_hole_size) {
19564+ start_addr = addr = mm->free_area_cache;
19565+ } else {
19566+ start_addr = addr = mm->mmap_base;
19567+ mm->cached_hole_size = 0;
19568+ }
19569+
19570+#ifdef CONFIG_PAX_PAGEEXEC
19571+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19572+ start_addr = 0x00110000UL;
19573+
19574+#ifdef CONFIG_PAX_RANDMMAP
19575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19576+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19577+#endif
19578+
19579+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19580+ start_addr = addr = mm->mmap_base;
19581+ else
19582+ addr = start_addr;
19583+ }
19584+#endif
19585+
19586+full_search:
19587+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19588+ /* At this point: (!vma || addr < vma->vm_end). */
19589+ if (pax_task_size - len < addr) {
19590+ /*
19591+ * Start a new search - just in case we missed
19592+ * some holes.
19593+ */
19594+ if (start_addr != mm->mmap_base) {
19595+ start_addr = addr = mm->mmap_base;
19596+ mm->cached_hole_size = 0;
19597+ goto full_search;
19598+ }
19599+ return -ENOMEM;
19600+ }
19601+ if (check_heap_stack_gap(vma, addr, len))
19602+ break;
19603+ if (addr + mm->cached_hole_size < vma->vm_start)
19604+ mm->cached_hole_size = vma->vm_start - addr;
19605+ addr = vma->vm_end;
19606+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19607+ start_addr = addr = mm->mmap_base;
19608+ mm->cached_hole_size = 0;
19609+ goto full_search;
19610+ }
19611+ }
19612+
19613+ /*
19614+ * Remember the place where we stopped the search:
19615+ */
19616+ mm->free_area_cache = addr + len;
19617+ return addr;
19618+}
19619+
19620+unsigned long
19621+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19622+ const unsigned long len, const unsigned long pgoff,
19623+ const unsigned long flags)
19624+{
19625+ struct vm_area_struct *vma;
19626+ struct mm_struct *mm = current->mm;
19627+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19628+
19629+#ifdef CONFIG_PAX_SEGMEXEC
19630+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19631+ pax_task_size = SEGMEXEC_TASK_SIZE;
19632+#endif
19633+
19634+ pax_task_size -= PAGE_SIZE;
19635+
19636+ /* requested length too big for entire address space */
19637+ if (len > pax_task_size)
19638+ return -ENOMEM;
19639+
19640+ if (flags & MAP_FIXED)
19641+ return addr;
19642+
19643+#ifdef CONFIG_PAX_PAGEEXEC
19644+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19645+ goto bottomup;
19646+#endif
19647+
19648+#ifdef CONFIG_PAX_RANDMMAP
19649+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19650+#endif
19651+
19652+ /* requesting a specific address */
19653+ if (addr) {
19654+ addr = PAGE_ALIGN(addr);
19655+ if (pax_task_size - len >= addr) {
19656+ vma = find_vma(mm, addr);
19657+ if (check_heap_stack_gap(vma, addr, len))
19658+ return addr;
19659+ }
19660+ }
19661+
19662+ /* check if free_area_cache is useful for us */
19663+ if (len <= mm->cached_hole_size) {
19664+ mm->cached_hole_size = 0;
19665+ mm->free_area_cache = mm->mmap_base;
19666+ }
19667+
19668+ /* either no address requested or can't fit in requested address hole */
19669+ addr = mm->free_area_cache;
19670+
19671+ /* make sure it can fit in the remaining address space */
19672+ if (addr > len) {
19673+ vma = find_vma(mm, addr-len);
19674+ if (check_heap_stack_gap(vma, addr - len, len))
19675+ /* remember the address as a hint for next time */
19676+ return (mm->free_area_cache = addr-len);
19677+ }
19678+
19679+ if (mm->mmap_base < len)
19680+ goto bottomup;
19681+
19682+ addr = mm->mmap_base-len;
19683+
19684+ do {
19685+ /*
19686+ * Lookup failure means no vma is above this address,
19687+ * else if new region fits below vma->vm_start,
19688+ * return with success:
19689+ */
19690+ vma = find_vma(mm, addr);
19691+ if (check_heap_stack_gap(vma, addr, len))
19692+ /* remember the address as a hint for next time */
19693+ return (mm->free_area_cache = addr);
19694+
19695+ /* remember the largest hole we saw so far */
19696+ if (addr + mm->cached_hole_size < vma->vm_start)
19697+ mm->cached_hole_size = vma->vm_start - addr;
19698+
19699+ /* try just below the current vma->vm_start */
19700+ addr = skip_heap_stack_gap(vma, len);
19701+ } while (!IS_ERR_VALUE(addr));
19702+
19703+bottomup:
19704+ /*
19705+ * A failed mmap() very likely causes application failure,
19706+ * so fall back to the bottom-up function here. This scenario
19707+ * can happen with large stack limits and large mmap()
19708+ * allocations.
19709+ */
19710+
19711+#ifdef CONFIG_PAX_SEGMEXEC
19712+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19713+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19714+ else
19715+#endif
19716+
19717+ mm->mmap_base = TASK_UNMAPPED_BASE;
19718+
19719+#ifdef CONFIG_PAX_RANDMMAP
19720+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19721+ mm->mmap_base += mm->delta_mmap;
19722+#endif
19723+
19724+ mm->free_area_cache = mm->mmap_base;
19725+ mm->cached_hole_size = ~0UL;
19726+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19727+ /*
19728+ * Restore the topdown base:
19729+ */
19730+ mm->mmap_base = base;
19731+ mm->free_area_cache = base;
19732+ mm->cached_hole_size = ~0UL;
19733+
19734+ return addr;
19735 }
19736diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19737index 0514890..3dbebce 100644
19738--- a/arch/x86/kernel/sys_x86_64.c
19739+++ b/arch/x86/kernel/sys_x86_64.c
19740@@ -95,8 +95,8 @@ out:
19741 return error;
19742 }
19743
19744-static void find_start_end(unsigned long flags, unsigned long *begin,
19745- unsigned long *end)
19746+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19747+ unsigned long *begin, unsigned long *end)
19748 {
19749 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19750 unsigned long new_begin;
19751@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19752 *begin = new_begin;
19753 }
19754 } else {
19755- *begin = TASK_UNMAPPED_BASE;
19756+ *begin = mm->mmap_base;
19757 *end = TASK_SIZE;
19758 }
19759 }
19760@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19761 if (flags & MAP_FIXED)
19762 return addr;
19763
19764- find_start_end(flags, &begin, &end);
19765+ find_start_end(mm, flags, &begin, &end);
19766
19767 if (len > end)
19768 return -ENOMEM;
19769
19770+#ifdef CONFIG_PAX_RANDMMAP
19771+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19772+#endif
19773+
19774 if (addr) {
19775 addr = PAGE_ALIGN(addr);
19776 vma = find_vma(mm, addr);
19777- if (end - len >= addr &&
19778- (!vma || addr + len <= vma->vm_start))
19779+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19780 return addr;
19781 }
19782 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19783@@ -172,7 +175,7 @@ full_search:
19784 }
19785 return -ENOMEM;
19786 }
19787- if (!vma || addr + len <= vma->vm_start) {
19788+ if (check_heap_stack_gap(vma, addr, len)) {
19789 /*
19790 * Remember the place where we stopped the search:
19791 */
19792@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19793 {
19794 struct vm_area_struct *vma;
19795 struct mm_struct *mm = current->mm;
19796- unsigned long addr = addr0;
19797+ unsigned long base = mm->mmap_base, addr = addr0;
19798
19799 /* requested length too big for entire address space */
19800 if (len > TASK_SIZE)
19801@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19802 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19803 goto bottomup;
19804
19805+#ifdef CONFIG_PAX_RANDMMAP
19806+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19807+#endif
19808+
19809 /* requesting a specific address */
19810 if (addr) {
19811 addr = PAGE_ALIGN(addr);
19812- vma = find_vma(mm, addr);
19813- if (TASK_SIZE - len >= addr &&
19814- (!vma || addr + len <= vma->vm_start))
19815- return addr;
19816+ if (TASK_SIZE - len >= addr) {
19817+ vma = find_vma(mm, addr);
19818+ if (check_heap_stack_gap(vma, addr, len))
19819+ return addr;
19820+ }
19821 }
19822
19823 /* check if free_area_cache is useful for us */
19824@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19825 ALIGN_TOPDOWN);
19826
19827 vma = find_vma(mm, tmp_addr);
19828- if (!vma || tmp_addr + len <= vma->vm_start)
19829+ if (check_heap_stack_gap(vma, tmp_addr, len))
19830 /* remember the address as a hint for next time */
19831 return mm->free_area_cache = tmp_addr;
19832 }
19833@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19834 * return with success:
19835 */
19836 vma = find_vma(mm, addr);
19837- if (!vma || addr+len <= vma->vm_start)
19838+ if (check_heap_stack_gap(vma, addr, len))
19839 /* remember the address as a hint for next time */
19840 return mm->free_area_cache = addr;
19841
19842@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19843 mm->cached_hole_size = vma->vm_start - addr;
19844
19845 /* try just below the current vma->vm_start */
19846- addr = vma->vm_start-len;
19847- } while (len < vma->vm_start);
19848+ addr = skip_heap_stack_gap(vma, len);
19849+ } while (!IS_ERR_VALUE(addr));
19850
19851 bottomup:
19852 /*
19853@@ -270,13 +278,21 @@ bottomup:
19854 * can happen with large stack limits and large mmap()
19855 * allocations.
19856 */
19857+ mm->mmap_base = TASK_UNMAPPED_BASE;
19858+
19859+#ifdef CONFIG_PAX_RANDMMAP
19860+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19861+ mm->mmap_base += mm->delta_mmap;
19862+#endif
19863+
19864+ mm->free_area_cache = mm->mmap_base;
19865 mm->cached_hole_size = ~0UL;
19866- mm->free_area_cache = TASK_UNMAPPED_BASE;
19867 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868 /*
19869 * Restore the topdown base:
19870 */
19871- mm->free_area_cache = mm->mmap_base;
19872+ mm->mmap_base = base;
19873+ mm->free_area_cache = base;
19874 mm->cached_hole_size = ~0UL;
19875
19876 return addr;
19877diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19878index e2410e2..4fe3fbc 100644
19879--- a/arch/x86/kernel/tboot.c
19880+++ b/arch/x86/kernel/tboot.c
19881@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19882
19883 void tboot_shutdown(u32 shutdown_type)
19884 {
19885- void (*shutdown)(void);
19886+ void (* __noreturn shutdown)(void);
19887
19888 if (!tboot_enabled())
19889 return;
19890@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19891
19892 switch_to_tboot_pt();
19893
19894- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19895+ shutdown = (void *)tboot->shutdown_entry;
19896 shutdown();
19897
19898 /* should not reach here */
19899@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19900 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19901 }
19902
19903-static atomic_t ap_wfs_count;
19904+static atomic_unchecked_t ap_wfs_count;
19905
19906 static int tboot_wait_for_aps(int num_aps)
19907 {
19908@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19909 {
19910 switch (action) {
19911 case CPU_DYING:
19912- atomic_inc(&ap_wfs_count);
19913+ atomic_inc_unchecked(&ap_wfs_count);
19914 if (num_online_cpus() == 1)
19915- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19916+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19917 return NOTIFY_BAD;
19918 break;
19919 }
19920@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
19921
19922 tboot_create_trampoline();
19923
19924- atomic_set(&ap_wfs_count, 0);
19925+ atomic_set_unchecked(&ap_wfs_count, 0);
19926 register_hotcpu_notifier(&tboot_cpu_notifier);
19927 return 0;
19928 }
19929diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19930index dd5fbf4..b7f2232 100644
19931--- a/arch/x86/kernel/time.c
19932+++ b/arch/x86/kernel/time.c
19933@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19934 {
19935 unsigned long pc = instruction_pointer(regs);
19936
19937- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19938+ if (!user_mode(regs) && in_lock_functions(pc)) {
19939 #ifdef CONFIG_FRAME_POINTER
19940- return *(unsigned long *)(regs->bp + sizeof(long));
19941+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19942 #else
19943 unsigned long *sp =
19944 (unsigned long *)kernel_stack_pointer(regs);
19945@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19946 * or above a saved flags. Eflags has bits 22-31 zero,
19947 * kernel addresses don't.
19948 */
19949+
19950+#ifdef CONFIG_PAX_KERNEXEC
19951+ return ktla_ktva(sp[0]);
19952+#else
19953 if (sp[0] >> 22)
19954 return sp[0];
19955 if (sp[1] >> 22)
19956 return sp[1];
19957 #endif
19958+
19959+#endif
19960 }
19961 return pc;
19962 }
19963diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19964index bcfec2d..8f88b4a 100644
19965--- a/arch/x86/kernel/tls.c
19966+++ b/arch/x86/kernel/tls.c
19967@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19968 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19969 return -EINVAL;
19970
19971+#ifdef CONFIG_PAX_SEGMEXEC
19972+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19973+ return -EINVAL;
19974+#endif
19975+
19976 set_tls_desc(p, idx, &info, 1);
19977
19978 return 0;
19979diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
19980index 2f083a2..7d3fecc 100644
19981--- a/arch/x86/kernel/tls.h
19982+++ b/arch/x86/kernel/tls.h
19983@@ -16,6 +16,6 @@
19984
19985 extern user_regset_active_fn regset_tls_active;
19986 extern user_regset_get_fn regset_tls_get;
19987-extern user_regset_set_fn regset_tls_set;
19988+extern user_regset_set_fn regset_tls_set __size_overflow(4);
19989
19990 #endif /* _ARCH_X86_KERNEL_TLS_H */
19991diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19992index 451c0a7..e57f551 100644
19993--- a/arch/x86/kernel/trampoline_32.S
19994+++ b/arch/x86/kernel/trampoline_32.S
19995@@ -32,6 +32,12 @@
19996 #include <asm/segment.h>
19997 #include <asm/page_types.h>
19998
19999+#ifdef CONFIG_PAX_KERNEXEC
20000+#define ta(X) (X)
20001+#else
20002+#define ta(X) ((X) - __PAGE_OFFSET)
20003+#endif
20004+
20005 #ifdef CONFIG_SMP
20006
20007 .section ".x86_trampoline","a"
20008@@ -62,7 +68,7 @@ r_base = .
20009 inc %ax # protected mode (PE) bit
20010 lmsw %ax # into protected mode
20011 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20012- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20013+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
20014
20015 # These need to be in the same 64K segment as the above;
20016 # hence we don't use the boot_gdt_descr defined in head.S
20017diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20018index 09ff517..df19fbff 100644
20019--- a/arch/x86/kernel/trampoline_64.S
20020+++ b/arch/x86/kernel/trampoline_64.S
20021@@ -90,7 +90,7 @@ startup_32:
20022 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20023 movl %eax, %ds
20024
20025- movl $X86_CR4_PAE, %eax
20026+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20027 movl %eax, %cr4 # Enable PAE mode
20028
20029 # Setup trampoline 4 level pagetables
20030@@ -138,7 +138,7 @@ tidt:
20031 # so the kernel can live anywhere
20032 .balign 4
20033 tgdt:
20034- .short tgdt_end - tgdt # gdt limit
20035+ .short tgdt_end - tgdt - 1 # gdt limit
20036 .long tgdt - r_base
20037 .short 0
20038 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20039diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20040index 4bbe04d..41d0943 100644
20041--- a/arch/x86/kernel/traps.c
20042+++ b/arch/x86/kernel/traps.c
20043@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
20044
20045 /* Do we ignore FPU interrupts ? */
20046 char ignore_fpu_irq;
20047-
20048-/*
20049- * The IDT has to be page-aligned to simplify the Pentium
20050- * F0 0F bug workaround.
20051- */
20052-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20053 #endif
20054
20055 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20056@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20057 }
20058
20059 static void __kprobes
20060-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20061+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20062 long error_code, siginfo_t *info)
20063 {
20064 struct task_struct *tsk = current;
20065
20066 #ifdef CONFIG_X86_32
20067- if (regs->flags & X86_VM_MASK) {
20068+ if (v8086_mode(regs)) {
20069 /*
20070 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20071 * On nmi (interrupt 2), do_trap should not be called.
20072@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20073 }
20074 #endif
20075
20076- if (!user_mode(regs))
20077+ if (!user_mode_novm(regs))
20078 goto kernel_trap;
20079
20080 #ifdef CONFIG_X86_32
20081@@ -148,7 +142,7 @@ trap_signal:
20082 printk_ratelimit()) {
20083 printk(KERN_INFO
20084 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20085- tsk->comm, tsk->pid, str,
20086+ tsk->comm, task_pid_nr(tsk), str,
20087 regs->ip, regs->sp, error_code);
20088 print_vma_addr(" in ", regs->ip);
20089 printk("\n");
20090@@ -165,8 +159,20 @@ kernel_trap:
20091 if (!fixup_exception(regs)) {
20092 tsk->thread.error_code = error_code;
20093 tsk->thread.trap_no = trapnr;
20094+
20095+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20096+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20097+ str = "PAX: suspicious stack segment fault";
20098+#endif
20099+
20100 die(str, regs, error_code);
20101 }
20102+
20103+#ifdef CONFIG_PAX_REFCOUNT
20104+ if (trapnr == 4)
20105+ pax_report_refcount_overflow(regs);
20106+#endif
20107+
20108 return;
20109
20110 #ifdef CONFIG_X86_32
20111@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20112 conditional_sti(regs);
20113
20114 #ifdef CONFIG_X86_32
20115- if (regs->flags & X86_VM_MASK)
20116+ if (v8086_mode(regs))
20117 goto gp_in_vm86;
20118 #endif
20119
20120 tsk = current;
20121- if (!user_mode(regs))
20122+ if (!user_mode_novm(regs))
20123 goto gp_in_kernel;
20124
20125+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20126+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20127+ struct mm_struct *mm = tsk->mm;
20128+ unsigned long limit;
20129+
20130+ down_write(&mm->mmap_sem);
20131+ limit = mm->context.user_cs_limit;
20132+ if (limit < TASK_SIZE) {
20133+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20134+ up_write(&mm->mmap_sem);
20135+ return;
20136+ }
20137+ up_write(&mm->mmap_sem);
20138+ }
20139+#endif
20140+
20141 tsk->thread.error_code = error_code;
20142 tsk->thread.trap_no = 13;
20143
20144@@ -295,6 +317,13 @@ gp_in_kernel:
20145 if (notify_die(DIE_GPF, "general protection fault", regs,
20146 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20147 return;
20148+
20149+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20150+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20151+ die("PAX: suspicious general protection fault", regs, error_code);
20152+ else
20153+#endif
20154+
20155 die("general protection fault", regs, error_code);
20156 }
20157
20158@@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20159 /* It's safe to allow irq's after DR6 has been saved */
20160 preempt_conditional_sti(regs);
20161
20162- if (regs->flags & X86_VM_MASK) {
20163+ if (v8086_mode(regs)) {
20164 handle_vm86_trap((struct kernel_vm86_regs *) regs,
20165 error_code, 1);
20166 preempt_conditional_cli(regs);
20167@@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20168 * We already checked v86 mode above, so we can check for kernel mode
20169 * by just checking the CPL of CS.
20170 */
20171- if ((dr6 & DR_STEP) && !user_mode(regs)) {
20172+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
20173 tsk->thread.debugreg6 &= ~DR_STEP;
20174 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
20175 regs->flags &= ~X86_EFLAGS_TF;
20176@@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
20177 return;
20178 conditional_sti(regs);
20179
20180- if (!user_mode_vm(regs))
20181+ if (!user_mode(regs))
20182 {
20183 if (!fixup_exception(regs)) {
20184 task->thread.error_code = error_code;
20185diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20186index b9242ba..50c5edd 100644
20187--- a/arch/x86/kernel/verify_cpu.S
20188+++ b/arch/x86/kernel/verify_cpu.S
20189@@ -20,6 +20,7 @@
20190 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20191 * arch/x86/kernel/trampoline_64.S: secondary processor verification
20192 * arch/x86/kernel/head_32.S: processor startup
20193+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20194 *
20195 * verify_cpu, returns the status of longmode and SSE in register %eax.
20196 * 0: Success 1: Failure
20197diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20198index 328cb37..f37fee1 100644
20199--- a/arch/x86/kernel/vm86_32.c
20200+++ b/arch/x86/kernel/vm86_32.c
20201@@ -41,6 +41,7 @@
20202 #include <linux/ptrace.h>
20203 #include <linux/audit.h>
20204 #include <linux/stddef.h>
20205+#include <linux/grsecurity.h>
20206
20207 #include <asm/uaccess.h>
20208 #include <asm/io.h>
20209@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
20210 /* convert vm86_regs to kernel_vm86_regs */
20211 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
20212 const struct vm86_regs __user *user,
20213+ unsigned extra) __size_overflow(3);
20214+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
20215+ const struct vm86_regs __user *user,
20216 unsigned extra)
20217 {
20218 int ret = 0;
20219@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20220 do_exit(SIGSEGV);
20221 }
20222
20223- tss = &per_cpu(init_tss, get_cpu());
20224+ tss = init_tss + get_cpu();
20225 current->thread.sp0 = current->thread.saved_sp0;
20226 current->thread.sysenter_cs = __KERNEL_CS;
20227 load_sp0(tss, &current->thread);
20228@@ -210,6 +214,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
20229 struct task_struct *tsk;
20230 int tmp, ret = -EPERM;
20231
20232+#ifdef CONFIG_GRKERNSEC_VM86
20233+ if (!capable(CAP_SYS_RAWIO)) {
20234+ gr_handle_vm86();
20235+ goto out;
20236+ }
20237+#endif
20238+
20239 tsk = current;
20240 if (tsk->thread.saved_sp0)
20241 goto out;
20242@@ -240,6 +251,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20243 int tmp, ret;
20244 struct vm86plus_struct __user *v86;
20245
20246+#ifdef CONFIG_GRKERNSEC_VM86
20247+ if (!capable(CAP_SYS_RAWIO)) {
20248+ gr_handle_vm86();
20249+ ret = -EPERM;
20250+ goto out;
20251+ }
20252+#endif
20253+
20254 tsk = current;
20255 switch (cmd) {
20256 case VM86_REQUEST_IRQ:
20257@@ -326,7 +345,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20258 tsk->thread.saved_fs = info->regs32->fs;
20259 tsk->thread.saved_gs = get_user_gs(info->regs32);
20260
20261- tss = &per_cpu(init_tss, get_cpu());
20262+ tss = init_tss + get_cpu();
20263 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20264 if (cpu_has_sep)
20265 tsk->thread.sysenter_cs = 0;
20266@@ -533,7 +552,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20267 goto cannot_handle;
20268 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20269 goto cannot_handle;
20270- intr_ptr = (unsigned long __user *) (i << 2);
20271+ intr_ptr = (__force unsigned long __user *) (i << 2);
20272 if (get_user(segoffs, intr_ptr))
20273 goto cannot_handle;
20274 if ((segoffs >> 16) == BIOSSEG)
20275diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20276index 0f703f1..9e15f64 100644
20277--- a/arch/x86/kernel/vmlinux.lds.S
20278+++ b/arch/x86/kernel/vmlinux.lds.S
20279@@ -26,6 +26,13 @@
20280 #include <asm/page_types.h>
20281 #include <asm/cache.h>
20282 #include <asm/boot.h>
20283+#include <asm/segment.h>
20284+
20285+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20286+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20287+#else
20288+#define __KERNEL_TEXT_OFFSET 0
20289+#endif
20290
20291 #undef i386 /* in case the preprocessor is a 32bit one */
20292
20293@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20294
20295 PHDRS {
20296 text PT_LOAD FLAGS(5); /* R_E */
20297+#ifdef CONFIG_X86_32
20298+ module PT_LOAD FLAGS(5); /* R_E */
20299+#endif
20300+#ifdef CONFIG_XEN
20301+ rodata PT_LOAD FLAGS(5); /* R_E */
20302+#else
20303+ rodata PT_LOAD FLAGS(4); /* R__ */
20304+#endif
20305 data PT_LOAD FLAGS(6); /* RW_ */
20306-#ifdef CONFIG_X86_64
20307+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20308 #ifdef CONFIG_SMP
20309 percpu PT_LOAD FLAGS(6); /* RW_ */
20310 #endif
20311+ text.init PT_LOAD FLAGS(5); /* R_E */
20312+ text.exit PT_LOAD FLAGS(5); /* R_E */
20313 init PT_LOAD FLAGS(7); /* RWE */
20314-#endif
20315 note PT_NOTE FLAGS(0); /* ___ */
20316 }
20317
20318 SECTIONS
20319 {
20320 #ifdef CONFIG_X86_32
20321- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20322- phys_startup_32 = startup_32 - LOAD_OFFSET;
20323+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20324 #else
20325- . = __START_KERNEL;
20326- phys_startup_64 = startup_64 - LOAD_OFFSET;
20327+ . = __START_KERNEL;
20328 #endif
20329
20330 /* Text and read-only data */
20331- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20332- _text = .;
20333+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20334 /* bootstrapping code */
20335+#ifdef CONFIG_X86_32
20336+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20337+#else
20338+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20339+#endif
20340+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20341+ _text = .;
20342 HEAD_TEXT
20343 #ifdef CONFIG_X86_32
20344 . = ALIGN(PAGE_SIZE);
20345@@ -108,13 +128,47 @@ SECTIONS
20346 IRQENTRY_TEXT
20347 *(.fixup)
20348 *(.gnu.warning)
20349- /* End of text section */
20350- _etext = .;
20351 } :text = 0x9090
20352
20353- NOTES :text :note
20354+ . += __KERNEL_TEXT_OFFSET;
20355
20356- EXCEPTION_TABLE(16) :text = 0x9090
20357+#ifdef CONFIG_X86_32
20358+ . = ALIGN(PAGE_SIZE);
20359+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20360+
20361+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20362+ MODULES_EXEC_VADDR = .;
20363+ BYTE(0)
20364+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20365+ . = ALIGN(HPAGE_SIZE);
20366+ MODULES_EXEC_END = . - 1;
20367+#endif
20368+
20369+ } :module
20370+#endif
20371+
20372+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20373+ /* End of text section */
20374+ _etext = . - __KERNEL_TEXT_OFFSET;
20375+ }
20376+
20377+#ifdef CONFIG_X86_32
20378+ . = ALIGN(PAGE_SIZE);
20379+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20380+ *(.idt)
20381+ . = ALIGN(PAGE_SIZE);
20382+ *(.empty_zero_page)
20383+ *(.initial_pg_fixmap)
20384+ *(.initial_pg_pmd)
20385+ *(.initial_page_table)
20386+ *(.swapper_pg_dir)
20387+ } :rodata
20388+#endif
20389+
20390+ . = ALIGN(PAGE_SIZE);
20391+ NOTES :rodata :note
20392+
20393+ EXCEPTION_TABLE(16) :rodata
20394
20395 #if defined(CONFIG_DEBUG_RODATA)
20396 /* .text should occupy whole number of pages */
20397@@ -126,16 +180,20 @@ SECTIONS
20398
20399 /* Data */
20400 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20401+
20402+#ifdef CONFIG_PAX_KERNEXEC
20403+ . = ALIGN(HPAGE_SIZE);
20404+#else
20405+ . = ALIGN(PAGE_SIZE);
20406+#endif
20407+
20408 /* Start of data section */
20409 _sdata = .;
20410
20411 /* init_task */
20412 INIT_TASK_DATA(THREAD_SIZE)
20413
20414-#ifdef CONFIG_X86_32
20415- /* 32 bit has nosave before _edata */
20416 NOSAVE_DATA
20417-#endif
20418
20419 PAGE_ALIGNED_DATA(PAGE_SIZE)
20420
20421@@ -176,12 +234,19 @@ SECTIONS
20422 #endif /* CONFIG_X86_64 */
20423
20424 /* Init code and data - will be freed after init */
20425- . = ALIGN(PAGE_SIZE);
20426 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20427+ BYTE(0)
20428+
20429+#ifdef CONFIG_PAX_KERNEXEC
20430+ . = ALIGN(HPAGE_SIZE);
20431+#else
20432+ . = ALIGN(PAGE_SIZE);
20433+#endif
20434+
20435 __init_begin = .; /* paired with __init_end */
20436- }
20437+ } :init.begin
20438
20439-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20440+#ifdef CONFIG_SMP
20441 /*
20442 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20443 * output PHDR, so the next output section - .init.text - should
20444@@ -190,12 +255,27 @@ SECTIONS
20445 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20446 #endif
20447
20448- INIT_TEXT_SECTION(PAGE_SIZE)
20449-#ifdef CONFIG_X86_64
20450- :init
20451-#endif
20452+ . = ALIGN(PAGE_SIZE);
20453+ init_begin = .;
20454+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20455+ VMLINUX_SYMBOL(_sinittext) = .;
20456+ INIT_TEXT
20457+ VMLINUX_SYMBOL(_einittext) = .;
20458+ . = ALIGN(PAGE_SIZE);
20459+ } :text.init
20460
20461- INIT_DATA_SECTION(16)
20462+ /*
20463+ * .exit.text is discard at runtime, not link time, to deal with
20464+ * references from .altinstructions and .eh_frame
20465+ */
20466+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20467+ EXIT_TEXT
20468+ . = ALIGN(16);
20469+ } :text.exit
20470+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20471+
20472+ . = ALIGN(PAGE_SIZE);
20473+ INIT_DATA_SECTION(16) :init
20474
20475 /*
20476 * Code and data for a variety of lowlevel trampolines, to be
20477@@ -269,19 +349,12 @@ SECTIONS
20478 }
20479
20480 . = ALIGN(8);
20481- /*
20482- * .exit.text is discard at runtime, not link time, to deal with
20483- * references from .altinstructions and .eh_frame
20484- */
20485- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20486- EXIT_TEXT
20487- }
20488
20489 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20490 EXIT_DATA
20491 }
20492
20493-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20494+#ifndef CONFIG_SMP
20495 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20496 #endif
20497
20498@@ -300,16 +373,10 @@ SECTIONS
20499 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20500 __smp_locks = .;
20501 *(.smp_locks)
20502- . = ALIGN(PAGE_SIZE);
20503 __smp_locks_end = .;
20504+ . = ALIGN(PAGE_SIZE);
20505 }
20506
20507-#ifdef CONFIG_X86_64
20508- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20509- NOSAVE_DATA
20510- }
20511-#endif
20512-
20513 /* BSS */
20514 . = ALIGN(PAGE_SIZE);
20515 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20516@@ -325,6 +392,7 @@ SECTIONS
20517 __brk_base = .;
20518 . += 64 * 1024; /* 64k alignment slop space */
20519 *(.brk_reservation) /* areas brk users have reserved */
20520+ . = ALIGN(HPAGE_SIZE);
20521 __brk_limit = .;
20522 }
20523
20524@@ -351,13 +419,12 @@ SECTIONS
20525 * for the boot processor.
20526 */
20527 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20528-INIT_PER_CPU(gdt_page);
20529 INIT_PER_CPU(irq_stack_union);
20530
20531 /*
20532 * Build-time check on the image size:
20533 */
20534-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20535+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20536 "kernel image bigger than KERNEL_IMAGE_SIZE");
20537
20538 #ifdef CONFIG_SMP
20539diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20540index b07ba93..a212969 100644
20541--- a/arch/x86/kernel/vsyscall_64.c
20542+++ b/arch/x86/kernel/vsyscall_64.c
20543@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
20544 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
20545 };
20546
20547-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20548+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20549
20550 static int __init vsyscall_setup(char *str)
20551 {
20552 if (str) {
20553 if (!strcmp("emulate", str))
20554 vsyscall_mode = EMULATE;
20555- else if (!strcmp("native", str))
20556- vsyscall_mode = NATIVE;
20557 else if (!strcmp("none", str))
20558 vsyscall_mode = NONE;
20559 else
20560@@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20561
20562 tsk = current;
20563 if (seccomp_mode(&tsk->seccomp))
20564- do_exit(SIGKILL);
20565+ do_group_exit(SIGKILL);
20566
20567 /*
20568 * With a real vsyscall, page faults cause SIGSEGV. We want to
20569@@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20570 return true;
20571
20572 sigsegv:
20573- force_sig(SIGSEGV, current);
20574- return true;
20575+ do_group_exit(SIGKILL);
20576 }
20577
20578 /*
20579@@ -333,10 +330,7 @@ void __init map_vsyscall(void)
20580 extern char __vvar_page;
20581 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20582
20583- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20584- vsyscall_mode == NATIVE
20585- ? PAGE_KERNEL_VSYSCALL
20586- : PAGE_KERNEL_VVAR);
20587+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20588 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20589 (unsigned long)VSYSCALL_START);
20590
20591diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20592index 9796c2f..f686fbf 100644
20593--- a/arch/x86/kernel/x8664_ksyms_64.c
20594+++ b/arch/x86/kernel/x8664_ksyms_64.c
20595@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20596 EXPORT_SYMBOL(copy_user_generic_string);
20597 EXPORT_SYMBOL(copy_user_generic_unrolled);
20598 EXPORT_SYMBOL(__copy_user_nocache);
20599-EXPORT_SYMBOL(_copy_from_user);
20600-EXPORT_SYMBOL(_copy_to_user);
20601
20602 EXPORT_SYMBOL(copy_page);
20603 EXPORT_SYMBOL(clear_page);
20604diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20605index 7110911..e8cdee5 100644
20606--- a/arch/x86/kernel/xsave.c
20607+++ b/arch/x86/kernel/xsave.c
20608@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20609 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20610 return -EINVAL;
20611
20612- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20613+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20614 fx_sw_user->extended_size -
20615 FP_XSTATE_MAGIC2_SIZE));
20616 if (err)
20617@@ -266,7 +266,7 @@ fx_only:
20618 * the other extended state.
20619 */
20620 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20621- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20622+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20623 }
20624
20625 /*
20626@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
20627 if (use_xsave())
20628 err = restore_user_xstate(buf);
20629 else
20630- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20631+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20632 buf);
20633 if (unlikely(err)) {
20634 /*
20635diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20636index 89b02bf..0f6511d 100644
20637--- a/arch/x86/kvm/cpuid.c
20638+++ b/arch/x86/kvm/cpuid.c
20639@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20640 struct kvm_cpuid2 *cpuid,
20641 struct kvm_cpuid_entry2 __user *entries)
20642 {
20643- int r;
20644+ int r, i;
20645
20646 r = -E2BIG;
20647 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20648 goto out;
20649 r = -EFAULT;
20650- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20651- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20652+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20653 goto out;
20654+ for (i = 0; i < cpuid->nent; ++i) {
20655+ struct kvm_cpuid_entry2 cpuid_entry;
20656+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20657+ goto out;
20658+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20659+ }
20660 vcpu->arch.cpuid_nent = cpuid->nent;
20661 kvm_apic_set_version(vcpu);
20662 kvm_x86_ops->cpuid_update(vcpu);
20663@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20664 struct kvm_cpuid2 *cpuid,
20665 struct kvm_cpuid_entry2 __user *entries)
20666 {
20667- int r;
20668+ int r, i;
20669
20670 r = -E2BIG;
20671 if (cpuid->nent < vcpu->arch.cpuid_nent)
20672 goto out;
20673 r = -EFAULT;
20674- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20675- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20676+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20677 goto out;
20678+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20679+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20680+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20681+ goto out;
20682+ }
20683 return 0;
20684
20685 out:
20686diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20687index 0982507..7f6d72f 100644
20688--- a/arch/x86/kvm/emulate.c
20689+++ b/arch/x86/kvm/emulate.c
20690@@ -250,6 +250,7 @@ struct gprefix {
20691
20692 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20693 do { \
20694+ unsigned long _tmp; \
20695 __asm__ __volatile__ ( \
20696 _PRE_EFLAGS("0", "4", "2") \
20697 _op _suffix " %"_x"3,%1; " \
20698@@ -264,8 +265,6 @@ struct gprefix {
20699 /* Raw emulation: instruction has two explicit operands. */
20700 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20701 do { \
20702- unsigned long _tmp; \
20703- \
20704 switch ((ctxt)->dst.bytes) { \
20705 case 2: \
20706 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20707@@ -281,7 +280,6 @@ struct gprefix {
20708
20709 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20710 do { \
20711- unsigned long _tmp; \
20712 switch ((ctxt)->dst.bytes) { \
20713 case 1: \
20714 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20715diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20716index cfdc6e0..ab92e84 100644
20717--- a/arch/x86/kvm/lapic.c
20718+++ b/arch/x86/kvm/lapic.c
20719@@ -54,7 +54,7 @@
20720 #define APIC_BUS_CYCLE_NS 1
20721
20722 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20723-#define apic_debug(fmt, arg...)
20724+#define apic_debug(fmt, arg...) do {} while (0)
20725
20726 #define APIC_LVT_NUM 6
20727 /* 14 is the version for Xeon and Pentium 8.4.8*/
20728diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20729index 1561028..0ed7f14 100644
20730--- a/arch/x86/kvm/paging_tmpl.h
20731+++ b/arch/x86/kvm/paging_tmpl.h
20732@@ -197,7 +197,7 @@ retry_walk:
20733 if (unlikely(kvm_is_error_hva(host_addr)))
20734 goto error;
20735
20736- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20737+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20738 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20739 goto error;
20740
20741diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20742index e385214..f8df033 100644
20743--- a/arch/x86/kvm/svm.c
20744+++ b/arch/x86/kvm/svm.c
20745@@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20746 int cpu = raw_smp_processor_id();
20747
20748 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20749+
20750+ pax_open_kernel();
20751 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20752+ pax_close_kernel();
20753+
20754 load_TR_desc();
20755 }
20756
20757@@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20758 #endif
20759 #endif
20760
20761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20762+ __set_fs(current_thread_info()->addr_limit);
20763+#endif
20764+
20765 reload_tss(vcpu);
20766
20767 local_irq_disable();
20768diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20769index a7a6f60..04b745a 100644
20770--- a/arch/x86/kvm/vmx.c
20771+++ b/arch/x86/kvm/vmx.c
20772@@ -1306,7 +1306,11 @@ static void reload_tss(void)
20773 struct desc_struct *descs;
20774
20775 descs = (void *)gdt->address;
20776+
20777+ pax_open_kernel();
20778 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20779+ pax_close_kernel();
20780+
20781 load_TR_desc();
20782 }
20783
20784@@ -2637,8 +2641,11 @@ static __init int hardware_setup(void)
20785 if (!cpu_has_vmx_flexpriority())
20786 flexpriority_enabled = 0;
20787
20788- if (!cpu_has_vmx_tpr_shadow())
20789- kvm_x86_ops->update_cr8_intercept = NULL;
20790+ if (!cpu_has_vmx_tpr_shadow()) {
20791+ pax_open_kernel();
20792+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20793+ pax_close_kernel();
20794+ }
20795
20796 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20797 kvm_disable_largepages();
20798@@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void)
20799 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20800
20801 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20802- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20803+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20804
20805 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20806 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20807@@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20808 "jmp .Lkvm_vmx_return \n\t"
20809 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20810 ".Lkvm_vmx_return: "
20811+
20812+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20813+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20814+ ".Lkvm_vmx_return2: "
20815+#endif
20816+
20817 /* Save guest registers, load host registers, keep flags */
20818 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20819 "pop %0 \n\t"
20820@@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20821 #endif
20822 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20823 [wordsize]"i"(sizeof(ulong))
20824+
20825+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20826+ ,[cs]"i"(__KERNEL_CS)
20827+#endif
20828+
20829 : "cc", "memory"
20830 , R"ax", R"bx", R"di", R"si"
20831 #ifdef CONFIG_X86_64
20832@@ -6268,7 +6286,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20833 }
20834 }
20835
20836- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20837+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20838+
20839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20840+ loadsegment(fs, __KERNEL_PERCPU);
20841+#endif
20842+
20843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20844+ __set_fs(current_thread_info()->addr_limit);
20845+#endif
20846+
20847 vmx->loaded_vmcs->launched = 1;
20848
20849 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20850diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20851index 8d1c6c6..6e6d611 100644
20852--- a/arch/x86/kvm/x86.c
20853+++ b/arch/x86/kvm/x86.c
20854@@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
20855 return kvm_set_msr(vcpu, index, *data);
20856 }
20857
20858+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
20859 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
20860 {
20861 int version;
20862@@ -1307,12 +1308,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20863 return 0;
20864 }
20865
20866+static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) __size_overflow(2);
20867 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20868 {
20869 struct kvm *kvm = vcpu->kvm;
20870 int lm = is_long_mode(vcpu);
20871- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20872- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20873+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20874+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20875 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20876 : kvm->arch.xen_hvm_config.blob_size_32;
20877 u32 page_num = data & ~PAGE_MASK;
20878@@ -2145,6 +2147,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20879 if (n < msr_list.nmsrs)
20880 goto out;
20881 r = -EFAULT;
20882+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20883+ goto out;
20884 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20885 num_msrs_to_save * sizeof(u32)))
20886 goto out;
20887@@ -2266,7 +2270,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20888 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20889 struct kvm_interrupt *irq)
20890 {
20891- if (irq->irq < 0 || irq->irq >= 256)
20892+ if (irq->irq >= 256)
20893 return -EINVAL;
20894 if (irqchip_in_kernel(vcpu->kvm))
20895 return -ENXIO;
20896@@ -3499,6 +3503,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
20897
20898 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20899 struct kvm_vcpu *vcpu, u32 access,
20900+ struct x86_exception *exception) __size_overflow(1,3);
20901+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20902+ struct kvm_vcpu *vcpu, u32 access,
20903 struct x86_exception *exception)
20904 {
20905 void *data = val;
20906@@ -3530,6 +3537,9 @@ out:
20907 /* used for instruction fetching */
20908 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20909 gva_t addr, void *val, unsigned int bytes,
20910+ struct x86_exception *exception) __size_overflow(2,4);
20911+static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20912+ gva_t addr, void *val, unsigned int bytes,
20913 struct x86_exception *exception)
20914 {
20915 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20916@@ -3554,6 +3564,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
20917
20918 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20919 gva_t addr, void *val, unsigned int bytes,
20920+ struct x86_exception *exception) __size_overflow(2,4);
20921+static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20922+ gva_t addr, void *val, unsigned int bytes,
20923 struct x86_exception *exception)
20924 {
20925 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20926@@ -3667,12 +3680,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
20927 }
20928
20929 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20930+ void *val, int bytes) __size_overflow(2);
20931+static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20932 void *val, int bytes)
20933 {
20934 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
20935 }
20936
20937 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20938+ void *val, int bytes) __size_overflow(2);
20939+static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20940 void *val, int bytes)
20941 {
20942 return emulator_write_phys(vcpu, gpa, val, bytes);
20943@@ -3823,6 +3840,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20944 const void *old,
20945 const void *new,
20946 unsigned int bytes,
20947+ struct x86_exception *exception) __size_overflow(5);
20948+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20949+ unsigned long addr,
20950+ const void *old,
20951+ const void *new,
20952+ unsigned int bytes,
20953 struct x86_exception *exception)
20954 {
20955 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20956@@ -4782,7 +4805,7 @@ static void kvm_set_mmio_spte_mask(void)
20957 kvm_mmu_set_mmio_spte_mask(mask);
20958 }
20959
20960-int kvm_arch_init(void *opaque)
20961+int kvm_arch_init(const void *opaque)
20962 {
20963 int r;
20964 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20965diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
20966index cb80c29..aeee86c 100644
20967--- a/arch/x86/kvm/x86.h
20968+++ b/arch/x86/kvm/x86.h
20969@@ -116,11 +116,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
20970
20971 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
20972 gva_t addr, void *val, unsigned int bytes,
20973- struct x86_exception *exception);
20974+ struct x86_exception *exception) __size_overflow(2,4);
20975
20976 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20977 gva_t addr, void *val, unsigned int bytes,
20978- struct x86_exception *exception);
20979+ struct x86_exception *exception) __size_overflow(2,4);
20980
20981 extern u64 host_xcr0;
20982
20983diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20984index 642d880..44e0f3f 100644
20985--- a/arch/x86/lguest/boot.c
20986+++ b/arch/x86/lguest/boot.c
20987@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20988 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20989 * Launcher to reboot us.
20990 */
20991-static void lguest_restart(char *reason)
20992+static __noreturn void lguest_restart(char *reason)
20993 {
20994 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20995+ BUG();
20996 }
20997
20998 /*G:050
20999diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21000index 042f682..c92afb6 100644
21001--- a/arch/x86/lib/atomic64_32.c
21002+++ b/arch/x86/lib/atomic64_32.c
21003@@ -8,18 +8,30 @@
21004
21005 long long atomic64_read_cx8(long long, const atomic64_t *v);
21006 EXPORT_SYMBOL(atomic64_read_cx8);
21007+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
21008+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
21009 long long atomic64_set_cx8(long long, const atomic64_t *v);
21010 EXPORT_SYMBOL(atomic64_set_cx8);
21011+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
21012+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
21013 long long atomic64_xchg_cx8(long long, unsigned high);
21014 EXPORT_SYMBOL(atomic64_xchg_cx8);
21015 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
21016 EXPORT_SYMBOL(atomic64_add_return_cx8);
21017+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21018+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
21019 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
21020 EXPORT_SYMBOL(atomic64_sub_return_cx8);
21021+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21022+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
21023 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
21024 EXPORT_SYMBOL(atomic64_inc_return_cx8);
21025+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21026+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
21027 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
21028 EXPORT_SYMBOL(atomic64_dec_return_cx8);
21029+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
21030+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
21031 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
21032 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
21033 int atomic64_inc_not_zero_cx8(atomic64_t *v);
21034@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
21035 #ifndef CONFIG_X86_CMPXCHG64
21036 long long atomic64_read_386(long long, const atomic64_t *v);
21037 EXPORT_SYMBOL(atomic64_read_386);
21038+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
21039+EXPORT_SYMBOL(atomic64_read_unchecked_386);
21040 long long atomic64_set_386(long long, const atomic64_t *v);
21041 EXPORT_SYMBOL(atomic64_set_386);
21042+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
21043+EXPORT_SYMBOL(atomic64_set_unchecked_386);
21044 long long atomic64_xchg_386(long long, unsigned high);
21045 EXPORT_SYMBOL(atomic64_xchg_386);
21046 long long atomic64_add_return_386(long long a, atomic64_t *v);
21047 EXPORT_SYMBOL(atomic64_add_return_386);
21048+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21049+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
21050 long long atomic64_sub_return_386(long long a, atomic64_t *v);
21051 EXPORT_SYMBOL(atomic64_sub_return_386);
21052+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21053+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
21054 long long atomic64_inc_return_386(long long a, atomic64_t *v);
21055 EXPORT_SYMBOL(atomic64_inc_return_386);
21056+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21057+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
21058 long long atomic64_dec_return_386(long long a, atomic64_t *v);
21059 EXPORT_SYMBOL(atomic64_dec_return_386);
21060+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
21061+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
21062 long long atomic64_add_386(long long a, atomic64_t *v);
21063 EXPORT_SYMBOL(atomic64_add_386);
21064+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
21065+EXPORT_SYMBOL(atomic64_add_unchecked_386);
21066 long long atomic64_sub_386(long long a, atomic64_t *v);
21067 EXPORT_SYMBOL(atomic64_sub_386);
21068+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
21069+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
21070 long long atomic64_inc_386(long long a, atomic64_t *v);
21071 EXPORT_SYMBOL(atomic64_inc_386);
21072+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
21073+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
21074 long long atomic64_dec_386(long long a, atomic64_t *v);
21075 EXPORT_SYMBOL(atomic64_dec_386);
21076+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
21077+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
21078 long long atomic64_dec_if_positive_386(atomic64_t *v);
21079 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
21080 int atomic64_inc_not_zero_386(atomic64_t *v);
21081diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
21082index e8e7e0d..56fd1b0 100644
21083--- a/arch/x86/lib/atomic64_386_32.S
21084+++ b/arch/x86/lib/atomic64_386_32.S
21085@@ -48,6 +48,10 @@ BEGIN(read)
21086 movl (v), %eax
21087 movl 4(v), %edx
21088 RET_ENDP
21089+BEGIN(read_unchecked)
21090+ movl (v), %eax
21091+ movl 4(v), %edx
21092+RET_ENDP
21093 #undef v
21094
21095 #define v %esi
21096@@ -55,6 +59,10 @@ BEGIN(set)
21097 movl %ebx, (v)
21098 movl %ecx, 4(v)
21099 RET_ENDP
21100+BEGIN(set_unchecked)
21101+ movl %ebx, (v)
21102+ movl %ecx, 4(v)
21103+RET_ENDP
21104 #undef v
21105
21106 #define v %esi
21107@@ -70,6 +78,20 @@ RET_ENDP
21108 BEGIN(add)
21109 addl %eax, (v)
21110 adcl %edx, 4(v)
21111+
21112+#ifdef CONFIG_PAX_REFCOUNT
21113+ jno 0f
21114+ subl %eax, (v)
21115+ sbbl %edx, 4(v)
21116+ int $4
21117+0:
21118+ _ASM_EXTABLE(0b, 0b)
21119+#endif
21120+
21121+RET_ENDP
21122+BEGIN(add_unchecked)
21123+ addl %eax, (v)
21124+ adcl %edx, 4(v)
21125 RET_ENDP
21126 #undef v
21127
21128@@ -77,6 +99,24 @@ RET_ENDP
21129 BEGIN(add_return)
21130 addl (v), %eax
21131 adcl 4(v), %edx
21132+
21133+#ifdef CONFIG_PAX_REFCOUNT
21134+ into
21135+1234:
21136+ _ASM_EXTABLE(1234b, 2f)
21137+#endif
21138+
21139+ movl %eax, (v)
21140+ movl %edx, 4(v)
21141+
21142+#ifdef CONFIG_PAX_REFCOUNT
21143+2:
21144+#endif
21145+
21146+RET_ENDP
21147+BEGIN(add_return_unchecked)
21148+ addl (v), %eax
21149+ adcl 4(v), %edx
21150 movl %eax, (v)
21151 movl %edx, 4(v)
21152 RET_ENDP
21153@@ -86,6 +126,20 @@ RET_ENDP
21154 BEGIN(sub)
21155 subl %eax, (v)
21156 sbbl %edx, 4(v)
21157+
21158+#ifdef CONFIG_PAX_REFCOUNT
21159+ jno 0f
21160+ addl %eax, (v)
21161+ adcl %edx, 4(v)
21162+ int $4
21163+0:
21164+ _ASM_EXTABLE(0b, 0b)
21165+#endif
21166+
21167+RET_ENDP
21168+BEGIN(sub_unchecked)
21169+ subl %eax, (v)
21170+ sbbl %edx, 4(v)
21171 RET_ENDP
21172 #undef v
21173
21174@@ -96,6 +150,27 @@ BEGIN(sub_return)
21175 sbbl $0, %edx
21176 addl (v), %eax
21177 adcl 4(v), %edx
21178+
21179+#ifdef CONFIG_PAX_REFCOUNT
21180+ into
21181+1234:
21182+ _ASM_EXTABLE(1234b, 2f)
21183+#endif
21184+
21185+ movl %eax, (v)
21186+ movl %edx, 4(v)
21187+
21188+#ifdef CONFIG_PAX_REFCOUNT
21189+2:
21190+#endif
21191+
21192+RET_ENDP
21193+BEGIN(sub_return_unchecked)
21194+ negl %edx
21195+ negl %eax
21196+ sbbl $0, %edx
21197+ addl (v), %eax
21198+ adcl 4(v), %edx
21199 movl %eax, (v)
21200 movl %edx, 4(v)
21201 RET_ENDP
21202@@ -105,6 +180,20 @@ RET_ENDP
21203 BEGIN(inc)
21204 addl $1, (v)
21205 adcl $0, 4(v)
21206+
21207+#ifdef CONFIG_PAX_REFCOUNT
21208+ jno 0f
21209+ subl $1, (v)
21210+ sbbl $0, 4(v)
21211+ int $4
21212+0:
21213+ _ASM_EXTABLE(0b, 0b)
21214+#endif
21215+
21216+RET_ENDP
21217+BEGIN(inc_unchecked)
21218+ addl $1, (v)
21219+ adcl $0, 4(v)
21220 RET_ENDP
21221 #undef v
21222
21223@@ -114,6 +203,26 @@ BEGIN(inc_return)
21224 movl 4(v), %edx
21225 addl $1, %eax
21226 adcl $0, %edx
21227+
21228+#ifdef CONFIG_PAX_REFCOUNT
21229+ into
21230+1234:
21231+ _ASM_EXTABLE(1234b, 2f)
21232+#endif
21233+
21234+ movl %eax, (v)
21235+ movl %edx, 4(v)
21236+
21237+#ifdef CONFIG_PAX_REFCOUNT
21238+2:
21239+#endif
21240+
21241+RET_ENDP
21242+BEGIN(inc_return_unchecked)
21243+ movl (v), %eax
21244+ movl 4(v), %edx
21245+ addl $1, %eax
21246+ adcl $0, %edx
21247 movl %eax, (v)
21248 movl %edx, 4(v)
21249 RET_ENDP
21250@@ -123,6 +232,20 @@ RET_ENDP
21251 BEGIN(dec)
21252 subl $1, (v)
21253 sbbl $0, 4(v)
21254+
21255+#ifdef CONFIG_PAX_REFCOUNT
21256+ jno 0f
21257+ addl $1, (v)
21258+ adcl $0, 4(v)
21259+ int $4
21260+0:
21261+ _ASM_EXTABLE(0b, 0b)
21262+#endif
21263+
21264+RET_ENDP
21265+BEGIN(dec_unchecked)
21266+ subl $1, (v)
21267+ sbbl $0, 4(v)
21268 RET_ENDP
21269 #undef v
21270
21271@@ -132,6 +255,26 @@ BEGIN(dec_return)
21272 movl 4(v), %edx
21273 subl $1, %eax
21274 sbbl $0, %edx
21275+
21276+#ifdef CONFIG_PAX_REFCOUNT
21277+ into
21278+1234:
21279+ _ASM_EXTABLE(1234b, 2f)
21280+#endif
21281+
21282+ movl %eax, (v)
21283+ movl %edx, 4(v)
21284+
21285+#ifdef CONFIG_PAX_REFCOUNT
21286+2:
21287+#endif
21288+
21289+RET_ENDP
21290+BEGIN(dec_return_unchecked)
21291+ movl (v), %eax
21292+ movl 4(v), %edx
21293+ subl $1, %eax
21294+ sbbl $0, %edx
21295 movl %eax, (v)
21296 movl %edx, 4(v)
21297 RET_ENDP
21298@@ -143,6 +286,13 @@ BEGIN(add_unless)
21299 adcl %edx, %edi
21300 addl (v), %eax
21301 adcl 4(v), %edx
21302+
21303+#ifdef CONFIG_PAX_REFCOUNT
21304+ into
21305+1234:
21306+ _ASM_EXTABLE(1234b, 2f)
21307+#endif
21308+
21309 cmpl %eax, %esi
21310 je 3f
21311 1:
21312@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
21313 1:
21314 addl $1, %eax
21315 adcl $0, %edx
21316+
21317+#ifdef CONFIG_PAX_REFCOUNT
21318+ into
21319+1234:
21320+ _ASM_EXTABLE(1234b, 2f)
21321+#endif
21322+
21323 movl %eax, (v)
21324 movl %edx, 4(v)
21325 movl $1, %eax
21326@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
21327 movl 4(v), %edx
21328 subl $1, %eax
21329 sbbl $0, %edx
21330+
21331+#ifdef CONFIG_PAX_REFCOUNT
21332+ into
21333+1234:
21334+ _ASM_EXTABLE(1234b, 1f)
21335+#endif
21336+
21337 js 1f
21338 movl %eax, (v)
21339 movl %edx, 4(v)
21340diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
21341index 391a083..3a2cf39 100644
21342--- a/arch/x86/lib/atomic64_cx8_32.S
21343+++ b/arch/x86/lib/atomic64_cx8_32.S
21344@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
21345 CFI_STARTPROC
21346
21347 read64 %ecx
21348+ pax_force_retaddr
21349 ret
21350 CFI_ENDPROC
21351 ENDPROC(atomic64_read_cx8)
21352
21353+ENTRY(atomic64_read_unchecked_cx8)
21354+ CFI_STARTPROC
21355+
21356+ read64 %ecx
21357+ pax_force_retaddr
21358+ ret
21359+ CFI_ENDPROC
21360+ENDPROC(atomic64_read_unchecked_cx8)
21361+
21362 ENTRY(atomic64_set_cx8)
21363 CFI_STARTPROC
21364
21365@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
21366 cmpxchg8b (%esi)
21367 jne 1b
21368
21369+ pax_force_retaddr
21370 ret
21371 CFI_ENDPROC
21372 ENDPROC(atomic64_set_cx8)
21373
21374+ENTRY(atomic64_set_unchecked_cx8)
21375+ CFI_STARTPROC
21376+
21377+1:
21378+/* we don't need LOCK_PREFIX since aligned 64-bit writes
21379+ * are atomic on 586 and newer */
21380+ cmpxchg8b (%esi)
21381+ jne 1b
21382+
21383+ pax_force_retaddr
21384+ ret
21385+ CFI_ENDPROC
21386+ENDPROC(atomic64_set_unchecked_cx8)
21387+
21388 ENTRY(atomic64_xchg_cx8)
21389 CFI_STARTPROC
21390
21391@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
21392 cmpxchg8b (%esi)
21393 jne 1b
21394
21395+ pax_force_retaddr
21396 ret
21397 CFI_ENDPROC
21398 ENDPROC(atomic64_xchg_cx8)
21399
21400-.macro addsub_return func ins insc
21401-ENTRY(atomic64_\func\()_return_cx8)
21402+.macro addsub_return func ins insc unchecked=""
21403+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21404 CFI_STARTPROC
21405 SAVE ebp
21406 SAVE ebx
21407@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21408 movl %edx, %ecx
21409 \ins\()l %esi, %ebx
21410 \insc\()l %edi, %ecx
21411+
21412+.ifb \unchecked
21413+#ifdef CONFIG_PAX_REFCOUNT
21414+ into
21415+2:
21416+ _ASM_EXTABLE(2b, 3f)
21417+#endif
21418+.endif
21419+
21420 LOCK_PREFIX
21421 cmpxchg8b (%ebp)
21422 jne 1b
21423-
21424-10:
21425 movl %ebx, %eax
21426 movl %ecx, %edx
21427+
21428+.ifb \unchecked
21429+#ifdef CONFIG_PAX_REFCOUNT
21430+3:
21431+#endif
21432+.endif
21433+
21434 RESTORE edi
21435 RESTORE esi
21436 RESTORE ebx
21437 RESTORE ebp
21438+ pax_force_retaddr
21439 ret
21440 CFI_ENDPROC
21441-ENDPROC(atomic64_\func\()_return_cx8)
21442+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21443 .endm
21444
21445 addsub_return add add adc
21446 addsub_return sub sub sbb
21447+addsub_return add add adc _unchecked
21448+addsub_return sub sub sbb _unchecked
21449
21450-.macro incdec_return func ins insc
21451-ENTRY(atomic64_\func\()_return_cx8)
21452+.macro incdec_return func ins insc unchecked=""
21453+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21454 CFI_STARTPROC
21455 SAVE ebx
21456
21457@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21458 movl %edx, %ecx
21459 \ins\()l $1, %ebx
21460 \insc\()l $0, %ecx
21461+
21462+.ifb \unchecked
21463+#ifdef CONFIG_PAX_REFCOUNT
21464+ into
21465+2:
21466+ _ASM_EXTABLE(2b, 3f)
21467+#endif
21468+.endif
21469+
21470 LOCK_PREFIX
21471 cmpxchg8b (%esi)
21472 jne 1b
21473
21474-10:
21475 movl %ebx, %eax
21476 movl %ecx, %edx
21477+
21478+.ifb \unchecked
21479+#ifdef CONFIG_PAX_REFCOUNT
21480+3:
21481+#endif
21482+.endif
21483+
21484 RESTORE ebx
21485+ pax_force_retaddr
21486 ret
21487 CFI_ENDPROC
21488-ENDPROC(atomic64_\func\()_return_cx8)
21489+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21490 .endm
21491
21492 incdec_return inc add adc
21493 incdec_return dec sub sbb
21494+incdec_return inc add adc _unchecked
21495+incdec_return dec sub sbb _unchecked
21496
21497 ENTRY(atomic64_dec_if_positive_cx8)
21498 CFI_STARTPROC
21499@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21500 movl %edx, %ecx
21501 subl $1, %ebx
21502 sbb $0, %ecx
21503+
21504+#ifdef CONFIG_PAX_REFCOUNT
21505+ into
21506+1234:
21507+ _ASM_EXTABLE(1234b, 2f)
21508+#endif
21509+
21510 js 2f
21511 LOCK_PREFIX
21512 cmpxchg8b (%esi)
21513@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21514 movl %ebx, %eax
21515 movl %ecx, %edx
21516 RESTORE ebx
21517+ pax_force_retaddr
21518 ret
21519 CFI_ENDPROC
21520 ENDPROC(atomic64_dec_if_positive_cx8)
21521@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
21522 movl %edx, %ecx
21523 addl %esi, %ebx
21524 adcl %edi, %ecx
21525+
21526+#ifdef CONFIG_PAX_REFCOUNT
21527+ into
21528+1234:
21529+ _ASM_EXTABLE(1234b, 3f)
21530+#endif
21531+
21532 LOCK_PREFIX
21533 cmpxchg8b (%ebp)
21534 jne 1b
21535@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
21536 CFI_ADJUST_CFA_OFFSET -8
21537 RESTORE ebx
21538 RESTORE ebp
21539+ pax_force_retaddr
21540 ret
21541 4:
21542 cmpl %edx, 4(%esp)
21543@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21544 movl %edx, %ecx
21545 addl $1, %ebx
21546 adcl $0, %ecx
21547+
21548+#ifdef CONFIG_PAX_REFCOUNT
21549+ into
21550+1234:
21551+ _ASM_EXTABLE(1234b, 3f)
21552+#endif
21553+
21554 LOCK_PREFIX
21555 cmpxchg8b (%esi)
21556 jne 1b
21557@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21558 movl $1, %eax
21559 3:
21560 RESTORE ebx
21561+ pax_force_retaddr
21562 ret
21563 4:
21564 testl %edx, %edx
21565diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21566index 78d16a5..fbcf666 100644
21567--- a/arch/x86/lib/checksum_32.S
21568+++ b/arch/x86/lib/checksum_32.S
21569@@ -28,7 +28,8 @@
21570 #include <linux/linkage.h>
21571 #include <asm/dwarf2.h>
21572 #include <asm/errno.h>
21573-
21574+#include <asm/segment.h>
21575+
21576 /*
21577 * computes a partial checksum, e.g. for TCP/UDP fragments
21578 */
21579@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21580
21581 #define ARGBASE 16
21582 #define FP 12
21583-
21584-ENTRY(csum_partial_copy_generic)
21585+
21586+ENTRY(csum_partial_copy_generic_to_user)
21587 CFI_STARTPROC
21588+
21589+#ifdef CONFIG_PAX_MEMORY_UDEREF
21590+ pushl_cfi %gs
21591+ popl_cfi %es
21592+ jmp csum_partial_copy_generic
21593+#endif
21594+
21595+ENTRY(csum_partial_copy_generic_from_user)
21596+
21597+#ifdef CONFIG_PAX_MEMORY_UDEREF
21598+ pushl_cfi %gs
21599+ popl_cfi %ds
21600+#endif
21601+
21602+ENTRY(csum_partial_copy_generic)
21603 subl $4,%esp
21604 CFI_ADJUST_CFA_OFFSET 4
21605 pushl_cfi %edi
21606@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21607 jmp 4f
21608 SRC(1: movw (%esi), %bx )
21609 addl $2, %esi
21610-DST( movw %bx, (%edi) )
21611+DST( movw %bx, %es:(%edi) )
21612 addl $2, %edi
21613 addw %bx, %ax
21614 adcl $0, %eax
21615@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21616 SRC(1: movl (%esi), %ebx )
21617 SRC( movl 4(%esi), %edx )
21618 adcl %ebx, %eax
21619-DST( movl %ebx, (%edi) )
21620+DST( movl %ebx, %es:(%edi) )
21621 adcl %edx, %eax
21622-DST( movl %edx, 4(%edi) )
21623+DST( movl %edx, %es:4(%edi) )
21624
21625 SRC( movl 8(%esi), %ebx )
21626 SRC( movl 12(%esi), %edx )
21627 adcl %ebx, %eax
21628-DST( movl %ebx, 8(%edi) )
21629+DST( movl %ebx, %es:8(%edi) )
21630 adcl %edx, %eax
21631-DST( movl %edx, 12(%edi) )
21632+DST( movl %edx, %es:12(%edi) )
21633
21634 SRC( movl 16(%esi), %ebx )
21635 SRC( movl 20(%esi), %edx )
21636 adcl %ebx, %eax
21637-DST( movl %ebx, 16(%edi) )
21638+DST( movl %ebx, %es:16(%edi) )
21639 adcl %edx, %eax
21640-DST( movl %edx, 20(%edi) )
21641+DST( movl %edx, %es:20(%edi) )
21642
21643 SRC( movl 24(%esi), %ebx )
21644 SRC( movl 28(%esi), %edx )
21645 adcl %ebx, %eax
21646-DST( movl %ebx, 24(%edi) )
21647+DST( movl %ebx, %es:24(%edi) )
21648 adcl %edx, %eax
21649-DST( movl %edx, 28(%edi) )
21650+DST( movl %edx, %es:28(%edi) )
21651
21652 lea 32(%esi), %esi
21653 lea 32(%edi), %edi
21654@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21655 shrl $2, %edx # This clears CF
21656 SRC(3: movl (%esi), %ebx )
21657 adcl %ebx, %eax
21658-DST( movl %ebx, (%edi) )
21659+DST( movl %ebx, %es:(%edi) )
21660 lea 4(%esi), %esi
21661 lea 4(%edi), %edi
21662 dec %edx
21663@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21664 jb 5f
21665 SRC( movw (%esi), %cx )
21666 leal 2(%esi), %esi
21667-DST( movw %cx, (%edi) )
21668+DST( movw %cx, %es:(%edi) )
21669 leal 2(%edi), %edi
21670 je 6f
21671 shll $16,%ecx
21672 SRC(5: movb (%esi), %cl )
21673-DST( movb %cl, (%edi) )
21674+DST( movb %cl, %es:(%edi) )
21675 6: addl %ecx, %eax
21676 adcl $0, %eax
21677 7:
21678@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21679
21680 6001:
21681 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21682- movl $-EFAULT, (%ebx)
21683+ movl $-EFAULT, %ss:(%ebx)
21684
21685 # zero the complete destination - computing the rest
21686 # is too much work
21687@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21688
21689 6002:
21690 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21691- movl $-EFAULT,(%ebx)
21692+ movl $-EFAULT,%ss:(%ebx)
21693 jmp 5000b
21694
21695 .previous
21696
21697+ pushl_cfi %ss
21698+ popl_cfi %ds
21699+ pushl_cfi %ss
21700+ popl_cfi %es
21701 popl_cfi %ebx
21702 CFI_RESTORE ebx
21703 popl_cfi %esi
21704@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21705 popl_cfi %ecx # equivalent to addl $4,%esp
21706 ret
21707 CFI_ENDPROC
21708-ENDPROC(csum_partial_copy_generic)
21709+ENDPROC(csum_partial_copy_generic_to_user)
21710
21711 #else
21712
21713 /* Version for PentiumII/PPro */
21714
21715 #define ROUND1(x) \
21716+ nop; nop; nop; \
21717 SRC(movl x(%esi), %ebx ) ; \
21718 addl %ebx, %eax ; \
21719- DST(movl %ebx, x(%edi) ) ;
21720+ DST(movl %ebx, %es:x(%edi)) ;
21721
21722 #define ROUND(x) \
21723+ nop; nop; nop; \
21724 SRC(movl x(%esi), %ebx ) ; \
21725 adcl %ebx, %eax ; \
21726- DST(movl %ebx, x(%edi) ) ;
21727+ DST(movl %ebx, %es:x(%edi)) ;
21728
21729 #define ARGBASE 12
21730-
21731-ENTRY(csum_partial_copy_generic)
21732+
21733+ENTRY(csum_partial_copy_generic_to_user)
21734 CFI_STARTPROC
21735+
21736+#ifdef CONFIG_PAX_MEMORY_UDEREF
21737+ pushl_cfi %gs
21738+ popl_cfi %es
21739+ jmp csum_partial_copy_generic
21740+#endif
21741+
21742+ENTRY(csum_partial_copy_generic_from_user)
21743+
21744+#ifdef CONFIG_PAX_MEMORY_UDEREF
21745+ pushl_cfi %gs
21746+ popl_cfi %ds
21747+#endif
21748+
21749+ENTRY(csum_partial_copy_generic)
21750 pushl_cfi %ebx
21751 CFI_REL_OFFSET ebx, 0
21752 pushl_cfi %edi
21753@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21754 subl %ebx, %edi
21755 lea -1(%esi),%edx
21756 andl $-32,%edx
21757- lea 3f(%ebx,%ebx), %ebx
21758+ lea 3f(%ebx,%ebx,2), %ebx
21759 testl %esi, %esi
21760 jmp *%ebx
21761 1: addl $64,%esi
21762@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21763 jb 5f
21764 SRC( movw (%esi), %dx )
21765 leal 2(%esi), %esi
21766-DST( movw %dx, (%edi) )
21767+DST( movw %dx, %es:(%edi) )
21768 leal 2(%edi), %edi
21769 je 6f
21770 shll $16,%edx
21771 5:
21772 SRC( movb (%esi), %dl )
21773-DST( movb %dl, (%edi) )
21774+DST( movb %dl, %es:(%edi) )
21775 6: addl %edx, %eax
21776 adcl $0, %eax
21777 7:
21778 .section .fixup, "ax"
21779 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21780- movl $-EFAULT, (%ebx)
21781+ movl $-EFAULT, %ss:(%ebx)
21782 # zero the complete destination (computing the rest is too much work)
21783 movl ARGBASE+8(%esp),%edi # dst
21784 movl ARGBASE+12(%esp),%ecx # len
21785@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21786 rep; stosb
21787 jmp 7b
21788 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21789- movl $-EFAULT, (%ebx)
21790+ movl $-EFAULT, %ss:(%ebx)
21791 jmp 7b
21792 .previous
21793
21794+#ifdef CONFIG_PAX_MEMORY_UDEREF
21795+ pushl_cfi %ss
21796+ popl_cfi %ds
21797+ pushl_cfi %ss
21798+ popl_cfi %es
21799+#endif
21800+
21801 popl_cfi %esi
21802 CFI_RESTORE esi
21803 popl_cfi %edi
21804@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21805 CFI_RESTORE ebx
21806 ret
21807 CFI_ENDPROC
21808-ENDPROC(csum_partial_copy_generic)
21809+ENDPROC(csum_partial_copy_generic_to_user)
21810
21811 #undef ROUND
21812 #undef ROUND1
21813diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21814index f2145cf..cea889d 100644
21815--- a/arch/x86/lib/clear_page_64.S
21816+++ b/arch/x86/lib/clear_page_64.S
21817@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21818 movl $4096/8,%ecx
21819 xorl %eax,%eax
21820 rep stosq
21821+ pax_force_retaddr
21822 ret
21823 CFI_ENDPROC
21824 ENDPROC(clear_page_c)
21825@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21826 movl $4096,%ecx
21827 xorl %eax,%eax
21828 rep stosb
21829+ pax_force_retaddr
21830 ret
21831 CFI_ENDPROC
21832 ENDPROC(clear_page_c_e)
21833@@ -43,6 +45,7 @@ ENTRY(clear_page)
21834 leaq 64(%rdi),%rdi
21835 jnz .Lloop
21836 nop
21837+ pax_force_retaddr
21838 ret
21839 CFI_ENDPROC
21840 .Lclear_page_end:
21841@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21842
21843 #include <asm/cpufeature.h>
21844
21845- .section .altinstr_replacement,"ax"
21846+ .section .altinstr_replacement,"a"
21847 1: .byte 0xeb /* jmp <disp8> */
21848 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21849 2: .byte 0xeb /* jmp <disp8> */
21850diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21851index 1e572c5..2a162cd 100644
21852--- a/arch/x86/lib/cmpxchg16b_emu.S
21853+++ b/arch/x86/lib/cmpxchg16b_emu.S
21854@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21855
21856 popf
21857 mov $1, %al
21858+ pax_force_retaddr
21859 ret
21860
21861 not_same:
21862 popf
21863 xor %al,%al
21864+ pax_force_retaddr
21865 ret
21866
21867 CFI_ENDPROC
21868diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21869index 01c805b..dccb07f 100644
21870--- a/arch/x86/lib/copy_page_64.S
21871+++ b/arch/x86/lib/copy_page_64.S
21872@@ -9,6 +9,7 @@ copy_page_c:
21873 CFI_STARTPROC
21874 movl $4096/8,%ecx
21875 rep movsq
21876+ pax_force_retaddr
21877 ret
21878 CFI_ENDPROC
21879 ENDPROC(copy_page_c)
21880@@ -39,7 +40,7 @@ ENTRY(copy_page)
21881 movq 16 (%rsi), %rdx
21882 movq 24 (%rsi), %r8
21883 movq 32 (%rsi), %r9
21884- movq 40 (%rsi), %r10
21885+ movq 40 (%rsi), %r13
21886 movq 48 (%rsi), %r11
21887 movq 56 (%rsi), %r12
21888
21889@@ -50,7 +51,7 @@ ENTRY(copy_page)
21890 movq %rdx, 16 (%rdi)
21891 movq %r8, 24 (%rdi)
21892 movq %r9, 32 (%rdi)
21893- movq %r10, 40 (%rdi)
21894+ movq %r13, 40 (%rdi)
21895 movq %r11, 48 (%rdi)
21896 movq %r12, 56 (%rdi)
21897
21898@@ -69,7 +70,7 @@ ENTRY(copy_page)
21899 movq 16 (%rsi), %rdx
21900 movq 24 (%rsi), %r8
21901 movq 32 (%rsi), %r9
21902- movq 40 (%rsi), %r10
21903+ movq 40 (%rsi), %r13
21904 movq 48 (%rsi), %r11
21905 movq 56 (%rsi), %r12
21906
21907@@ -78,7 +79,7 @@ ENTRY(copy_page)
21908 movq %rdx, 16 (%rdi)
21909 movq %r8, 24 (%rdi)
21910 movq %r9, 32 (%rdi)
21911- movq %r10, 40 (%rdi)
21912+ movq %r13, 40 (%rdi)
21913 movq %r11, 48 (%rdi)
21914 movq %r12, 56 (%rdi)
21915
21916@@ -95,6 +96,7 @@ ENTRY(copy_page)
21917 CFI_RESTORE r13
21918 addq $3*8,%rsp
21919 CFI_ADJUST_CFA_OFFSET -3*8
21920+ pax_force_retaddr
21921 ret
21922 .Lcopy_page_end:
21923 CFI_ENDPROC
21924@@ -105,7 +107,7 @@ ENDPROC(copy_page)
21925
21926 #include <asm/cpufeature.h>
21927
21928- .section .altinstr_replacement,"ax"
21929+ .section .altinstr_replacement,"a"
21930 1: .byte 0xeb /* jmp <disp8> */
21931 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21932 2:
21933diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21934index 0248402..821c786 100644
21935--- a/arch/x86/lib/copy_user_64.S
21936+++ b/arch/x86/lib/copy_user_64.S
21937@@ -16,6 +16,7 @@
21938 #include <asm/thread_info.h>
21939 #include <asm/cpufeature.h>
21940 #include <asm/alternative-asm.h>
21941+#include <asm/pgtable.h>
21942
21943 /*
21944 * By placing feature2 after feature1 in altinstructions section, we logically
21945@@ -29,7 +30,7 @@
21946 .byte 0xe9 /* 32bit jump */
21947 .long \orig-1f /* by default jump to orig */
21948 1:
21949- .section .altinstr_replacement,"ax"
21950+ .section .altinstr_replacement,"a"
21951 2: .byte 0xe9 /* near jump with 32bit immediate */
21952 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21953 3: .byte 0xe9 /* near jump with 32bit immediate */
21954@@ -71,47 +72,20 @@
21955 #endif
21956 .endm
21957
21958-/* Standard copy_to_user with segment limit checking */
21959-ENTRY(_copy_to_user)
21960- CFI_STARTPROC
21961- GET_THREAD_INFO(%rax)
21962- movq %rdi,%rcx
21963- addq %rdx,%rcx
21964- jc bad_to_user
21965- cmpq TI_addr_limit(%rax),%rcx
21966- ja bad_to_user
21967- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21968- copy_user_generic_unrolled,copy_user_generic_string, \
21969- copy_user_enhanced_fast_string
21970- CFI_ENDPROC
21971-ENDPROC(_copy_to_user)
21972-
21973-/* Standard copy_from_user with segment limit checking */
21974-ENTRY(_copy_from_user)
21975- CFI_STARTPROC
21976- GET_THREAD_INFO(%rax)
21977- movq %rsi,%rcx
21978- addq %rdx,%rcx
21979- jc bad_from_user
21980- cmpq TI_addr_limit(%rax),%rcx
21981- ja bad_from_user
21982- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21983- copy_user_generic_unrolled,copy_user_generic_string, \
21984- copy_user_enhanced_fast_string
21985- CFI_ENDPROC
21986-ENDPROC(_copy_from_user)
21987-
21988 .section .fixup,"ax"
21989 /* must zero dest */
21990 ENTRY(bad_from_user)
21991 bad_from_user:
21992 CFI_STARTPROC
21993+ testl %edx,%edx
21994+ js bad_to_user
21995 movl %edx,%ecx
21996 xorl %eax,%eax
21997 rep
21998 stosb
21999 bad_to_user:
22000 movl %edx,%eax
22001+ pax_force_retaddr
22002 ret
22003 CFI_ENDPROC
22004 ENDPROC(bad_from_user)
22005@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22006 jz 17f
22007 1: movq (%rsi),%r8
22008 2: movq 1*8(%rsi),%r9
22009-3: movq 2*8(%rsi),%r10
22010+3: movq 2*8(%rsi),%rax
22011 4: movq 3*8(%rsi),%r11
22012 5: movq %r8,(%rdi)
22013 6: movq %r9,1*8(%rdi)
22014-7: movq %r10,2*8(%rdi)
22015+7: movq %rax,2*8(%rdi)
22016 8: movq %r11,3*8(%rdi)
22017 9: movq 4*8(%rsi),%r8
22018 10: movq 5*8(%rsi),%r9
22019-11: movq 6*8(%rsi),%r10
22020+11: movq 6*8(%rsi),%rax
22021 12: movq 7*8(%rsi),%r11
22022 13: movq %r8,4*8(%rdi)
22023 14: movq %r9,5*8(%rdi)
22024-15: movq %r10,6*8(%rdi)
22025+15: movq %rax,6*8(%rdi)
22026 16: movq %r11,7*8(%rdi)
22027 leaq 64(%rsi),%rsi
22028 leaq 64(%rdi),%rdi
22029@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22030 decl %ecx
22031 jnz 21b
22032 23: xor %eax,%eax
22033+ pax_force_retaddr
22034 ret
22035
22036 .section .fixup,"ax"
22037@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
22038 3: rep
22039 movsb
22040 4: xorl %eax,%eax
22041+ pax_force_retaddr
22042 ret
22043
22044 .section .fixup,"ax"
22045@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
22046 1: rep
22047 movsb
22048 2: xorl %eax,%eax
22049+ pax_force_retaddr
22050 ret
22051
22052 .section .fixup,"ax"
22053diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22054index cb0c112..e3a6895 100644
22055--- a/arch/x86/lib/copy_user_nocache_64.S
22056+++ b/arch/x86/lib/copy_user_nocache_64.S
22057@@ -8,12 +8,14 @@
22058
22059 #include <linux/linkage.h>
22060 #include <asm/dwarf2.h>
22061+#include <asm/alternative-asm.h>
22062
22063 #define FIX_ALIGNMENT 1
22064
22065 #include <asm/current.h>
22066 #include <asm/asm-offsets.h>
22067 #include <asm/thread_info.h>
22068+#include <asm/pgtable.h>
22069
22070 .macro ALIGN_DESTINATION
22071 #ifdef FIX_ALIGNMENT
22072@@ -50,6 +52,15 @@
22073 */
22074 ENTRY(__copy_user_nocache)
22075 CFI_STARTPROC
22076+
22077+#ifdef CONFIG_PAX_MEMORY_UDEREF
22078+ mov $PAX_USER_SHADOW_BASE,%rcx
22079+ cmp %rcx,%rsi
22080+ jae 1f
22081+ add %rcx,%rsi
22082+1:
22083+#endif
22084+
22085 cmpl $8,%edx
22086 jb 20f /* less then 8 bytes, go to byte copy loop */
22087 ALIGN_DESTINATION
22088@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22089 jz 17f
22090 1: movq (%rsi),%r8
22091 2: movq 1*8(%rsi),%r9
22092-3: movq 2*8(%rsi),%r10
22093+3: movq 2*8(%rsi),%rax
22094 4: movq 3*8(%rsi),%r11
22095 5: movnti %r8,(%rdi)
22096 6: movnti %r9,1*8(%rdi)
22097-7: movnti %r10,2*8(%rdi)
22098+7: movnti %rax,2*8(%rdi)
22099 8: movnti %r11,3*8(%rdi)
22100 9: movq 4*8(%rsi),%r8
22101 10: movq 5*8(%rsi),%r9
22102-11: movq 6*8(%rsi),%r10
22103+11: movq 6*8(%rsi),%rax
22104 12: movq 7*8(%rsi),%r11
22105 13: movnti %r8,4*8(%rdi)
22106 14: movnti %r9,5*8(%rdi)
22107-15: movnti %r10,6*8(%rdi)
22108+15: movnti %rax,6*8(%rdi)
22109 16: movnti %r11,7*8(%rdi)
22110 leaq 64(%rsi),%rsi
22111 leaq 64(%rdi),%rdi
22112@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22113 jnz 21b
22114 23: xorl %eax,%eax
22115 sfence
22116+ pax_force_retaddr
22117 ret
22118
22119 .section .fixup,"ax"
22120diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22121index fb903b7..c92b7f7 100644
22122--- a/arch/x86/lib/csum-copy_64.S
22123+++ b/arch/x86/lib/csum-copy_64.S
22124@@ -8,6 +8,7 @@
22125 #include <linux/linkage.h>
22126 #include <asm/dwarf2.h>
22127 #include <asm/errno.h>
22128+#include <asm/alternative-asm.h>
22129
22130 /*
22131 * Checksum copy with exception handling.
22132@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22133 CFI_RESTORE rbp
22134 addq $7*8, %rsp
22135 CFI_ADJUST_CFA_OFFSET -7*8
22136+ pax_force_retaddr 0, 1
22137 ret
22138 CFI_RESTORE_STATE
22139
22140diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22141index 459b58a..9570bc7 100644
22142--- a/arch/x86/lib/csum-wrappers_64.c
22143+++ b/arch/x86/lib/csum-wrappers_64.c
22144@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22145 len -= 2;
22146 }
22147 }
22148- isum = csum_partial_copy_generic((__force const void *)src,
22149+
22150+#ifdef CONFIG_PAX_MEMORY_UDEREF
22151+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22152+ src += PAX_USER_SHADOW_BASE;
22153+#endif
22154+
22155+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22156 dst, len, isum, errp, NULL);
22157 if (unlikely(*errp))
22158 goto out_err;
22159@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22160 }
22161
22162 *errp = 0;
22163- return csum_partial_copy_generic(src, (void __force *)dst,
22164+
22165+#ifdef CONFIG_PAX_MEMORY_UDEREF
22166+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22167+ dst += PAX_USER_SHADOW_BASE;
22168+#endif
22169+
22170+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22171 len, isum, NULL, errp);
22172 }
22173 EXPORT_SYMBOL(csum_partial_copy_to_user);
22174diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22175index 51f1504..ddac4c1 100644
22176--- a/arch/x86/lib/getuser.S
22177+++ b/arch/x86/lib/getuser.S
22178@@ -33,15 +33,38 @@
22179 #include <asm/asm-offsets.h>
22180 #include <asm/thread_info.h>
22181 #include <asm/asm.h>
22182+#include <asm/segment.h>
22183+#include <asm/pgtable.h>
22184+#include <asm/alternative-asm.h>
22185+
22186+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22187+#define __copyuser_seg gs;
22188+#else
22189+#define __copyuser_seg
22190+#endif
22191
22192 .text
22193 ENTRY(__get_user_1)
22194 CFI_STARTPROC
22195+
22196+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22197 GET_THREAD_INFO(%_ASM_DX)
22198 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22199 jae bad_get_user
22200-1: movzb (%_ASM_AX),%edx
22201+
22202+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22203+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22204+ cmp %_ASM_DX,%_ASM_AX
22205+ jae 1234f
22206+ add %_ASM_DX,%_ASM_AX
22207+1234:
22208+#endif
22209+
22210+#endif
22211+
22212+1: __copyuser_seg movzb (%_ASM_AX),%edx
22213 xor %eax,%eax
22214+ pax_force_retaddr
22215 ret
22216 CFI_ENDPROC
22217 ENDPROC(__get_user_1)
22218@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22219 ENTRY(__get_user_2)
22220 CFI_STARTPROC
22221 add $1,%_ASM_AX
22222+
22223+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22224 jc bad_get_user
22225 GET_THREAD_INFO(%_ASM_DX)
22226 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22227 jae bad_get_user
22228-2: movzwl -1(%_ASM_AX),%edx
22229+
22230+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22231+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22232+ cmp %_ASM_DX,%_ASM_AX
22233+ jae 1234f
22234+ add %_ASM_DX,%_ASM_AX
22235+1234:
22236+#endif
22237+
22238+#endif
22239+
22240+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22241 xor %eax,%eax
22242+ pax_force_retaddr
22243 ret
22244 CFI_ENDPROC
22245 ENDPROC(__get_user_2)
22246@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22247 ENTRY(__get_user_4)
22248 CFI_STARTPROC
22249 add $3,%_ASM_AX
22250+
22251+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22252 jc bad_get_user
22253 GET_THREAD_INFO(%_ASM_DX)
22254 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22255 jae bad_get_user
22256-3: mov -3(%_ASM_AX),%edx
22257+
22258+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22259+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22260+ cmp %_ASM_DX,%_ASM_AX
22261+ jae 1234f
22262+ add %_ASM_DX,%_ASM_AX
22263+1234:
22264+#endif
22265+
22266+#endif
22267+
22268+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22269 xor %eax,%eax
22270+ pax_force_retaddr
22271 ret
22272 CFI_ENDPROC
22273 ENDPROC(__get_user_4)
22274@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22275 GET_THREAD_INFO(%_ASM_DX)
22276 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22277 jae bad_get_user
22278+
22279+#ifdef CONFIG_PAX_MEMORY_UDEREF
22280+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22281+ cmp %_ASM_DX,%_ASM_AX
22282+ jae 1234f
22283+ add %_ASM_DX,%_ASM_AX
22284+1234:
22285+#endif
22286+
22287 4: movq -7(%_ASM_AX),%_ASM_DX
22288 xor %eax,%eax
22289+ pax_force_retaddr
22290 ret
22291 CFI_ENDPROC
22292 ENDPROC(__get_user_8)
22293@@ -91,6 +152,7 @@ bad_get_user:
22294 CFI_STARTPROC
22295 xor %edx,%edx
22296 mov $(-EFAULT),%_ASM_AX
22297+ pax_force_retaddr
22298 ret
22299 CFI_ENDPROC
22300 END(bad_get_user)
22301diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
22302index 5a1f9f3..ba9f577 100644
22303--- a/arch/x86/lib/insn.c
22304+++ b/arch/x86/lib/insn.c
22305@@ -21,6 +21,11 @@
22306 #include <linux/string.h>
22307 #include <asm/inat.h>
22308 #include <asm/insn.h>
22309+#ifdef __KERNEL__
22310+#include <asm/pgtable_types.h>
22311+#else
22312+#define ktla_ktva(addr) addr
22313+#endif
22314
22315 /* Verify next sizeof(t) bytes can be on the same instruction */
22316 #define validate_next(t, insn, n) \
22317@@ -49,8 +54,8 @@
22318 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
22319 {
22320 memset(insn, 0, sizeof(*insn));
22321- insn->kaddr = kaddr;
22322- insn->next_byte = kaddr;
22323+ insn->kaddr = ktla_ktva(kaddr);
22324+ insn->next_byte = ktla_ktva(kaddr);
22325 insn->x86_64 = x86_64 ? 1 : 0;
22326 insn->opnd_bytes = 4;
22327 if (x86_64)
22328diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22329index 05a95e7..326f2fa 100644
22330--- a/arch/x86/lib/iomap_copy_64.S
22331+++ b/arch/x86/lib/iomap_copy_64.S
22332@@ -17,6 +17,7 @@
22333
22334 #include <linux/linkage.h>
22335 #include <asm/dwarf2.h>
22336+#include <asm/alternative-asm.h>
22337
22338 /*
22339 * override generic version in lib/iomap_copy.c
22340@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22341 CFI_STARTPROC
22342 movl %edx,%ecx
22343 rep movsd
22344+ pax_force_retaddr
22345 ret
22346 CFI_ENDPROC
22347 ENDPROC(__iowrite32_copy)
22348diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22349index efbf2a0..8893637 100644
22350--- a/arch/x86/lib/memcpy_64.S
22351+++ b/arch/x86/lib/memcpy_64.S
22352@@ -34,6 +34,7 @@
22353 rep movsq
22354 movl %edx, %ecx
22355 rep movsb
22356+ pax_force_retaddr
22357 ret
22358 .Lmemcpy_e:
22359 .previous
22360@@ -51,6 +52,7 @@
22361
22362 movl %edx, %ecx
22363 rep movsb
22364+ pax_force_retaddr
22365 ret
22366 .Lmemcpy_e_e:
22367 .previous
22368@@ -81,13 +83,13 @@ ENTRY(memcpy)
22369 */
22370 movq 0*8(%rsi), %r8
22371 movq 1*8(%rsi), %r9
22372- movq 2*8(%rsi), %r10
22373+ movq 2*8(%rsi), %rcx
22374 movq 3*8(%rsi), %r11
22375 leaq 4*8(%rsi), %rsi
22376
22377 movq %r8, 0*8(%rdi)
22378 movq %r9, 1*8(%rdi)
22379- movq %r10, 2*8(%rdi)
22380+ movq %rcx, 2*8(%rdi)
22381 movq %r11, 3*8(%rdi)
22382 leaq 4*8(%rdi), %rdi
22383 jae .Lcopy_forward_loop
22384@@ -110,12 +112,12 @@ ENTRY(memcpy)
22385 subq $0x20, %rdx
22386 movq -1*8(%rsi), %r8
22387 movq -2*8(%rsi), %r9
22388- movq -3*8(%rsi), %r10
22389+ movq -3*8(%rsi), %rcx
22390 movq -4*8(%rsi), %r11
22391 leaq -4*8(%rsi), %rsi
22392 movq %r8, -1*8(%rdi)
22393 movq %r9, -2*8(%rdi)
22394- movq %r10, -3*8(%rdi)
22395+ movq %rcx, -3*8(%rdi)
22396 movq %r11, -4*8(%rdi)
22397 leaq -4*8(%rdi), %rdi
22398 jae .Lcopy_backward_loop
22399@@ -135,12 +137,13 @@ ENTRY(memcpy)
22400 */
22401 movq 0*8(%rsi), %r8
22402 movq 1*8(%rsi), %r9
22403- movq -2*8(%rsi, %rdx), %r10
22404+ movq -2*8(%rsi, %rdx), %rcx
22405 movq -1*8(%rsi, %rdx), %r11
22406 movq %r8, 0*8(%rdi)
22407 movq %r9, 1*8(%rdi)
22408- movq %r10, -2*8(%rdi, %rdx)
22409+ movq %rcx, -2*8(%rdi, %rdx)
22410 movq %r11, -1*8(%rdi, %rdx)
22411+ pax_force_retaddr
22412 retq
22413 .p2align 4
22414 .Lless_16bytes:
22415@@ -153,6 +156,7 @@ ENTRY(memcpy)
22416 movq -1*8(%rsi, %rdx), %r9
22417 movq %r8, 0*8(%rdi)
22418 movq %r9, -1*8(%rdi, %rdx)
22419+ pax_force_retaddr
22420 retq
22421 .p2align 4
22422 .Lless_8bytes:
22423@@ -166,6 +170,7 @@ ENTRY(memcpy)
22424 movl -4(%rsi, %rdx), %r8d
22425 movl %ecx, (%rdi)
22426 movl %r8d, -4(%rdi, %rdx)
22427+ pax_force_retaddr
22428 retq
22429 .p2align 4
22430 .Lless_3bytes:
22431@@ -183,6 +188,7 @@ ENTRY(memcpy)
22432 jnz .Lloop_1
22433
22434 .Lend:
22435+ pax_force_retaddr
22436 retq
22437 CFI_ENDPROC
22438 ENDPROC(memcpy)
22439diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22440index ee16461..c39c199 100644
22441--- a/arch/x86/lib/memmove_64.S
22442+++ b/arch/x86/lib/memmove_64.S
22443@@ -61,13 +61,13 @@ ENTRY(memmove)
22444 5:
22445 sub $0x20, %rdx
22446 movq 0*8(%rsi), %r11
22447- movq 1*8(%rsi), %r10
22448+ movq 1*8(%rsi), %rcx
22449 movq 2*8(%rsi), %r9
22450 movq 3*8(%rsi), %r8
22451 leaq 4*8(%rsi), %rsi
22452
22453 movq %r11, 0*8(%rdi)
22454- movq %r10, 1*8(%rdi)
22455+ movq %rcx, 1*8(%rdi)
22456 movq %r9, 2*8(%rdi)
22457 movq %r8, 3*8(%rdi)
22458 leaq 4*8(%rdi), %rdi
22459@@ -81,10 +81,10 @@ ENTRY(memmove)
22460 4:
22461 movq %rdx, %rcx
22462 movq -8(%rsi, %rdx), %r11
22463- lea -8(%rdi, %rdx), %r10
22464+ lea -8(%rdi, %rdx), %r9
22465 shrq $3, %rcx
22466 rep movsq
22467- movq %r11, (%r10)
22468+ movq %r11, (%r9)
22469 jmp 13f
22470 .Lmemmove_end_forward:
22471
22472@@ -95,14 +95,14 @@ ENTRY(memmove)
22473 7:
22474 movq %rdx, %rcx
22475 movq (%rsi), %r11
22476- movq %rdi, %r10
22477+ movq %rdi, %r9
22478 leaq -8(%rsi, %rdx), %rsi
22479 leaq -8(%rdi, %rdx), %rdi
22480 shrq $3, %rcx
22481 std
22482 rep movsq
22483 cld
22484- movq %r11, (%r10)
22485+ movq %r11, (%r9)
22486 jmp 13f
22487
22488 /*
22489@@ -127,13 +127,13 @@ ENTRY(memmove)
22490 8:
22491 subq $0x20, %rdx
22492 movq -1*8(%rsi), %r11
22493- movq -2*8(%rsi), %r10
22494+ movq -2*8(%rsi), %rcx
22495 movq -3*8(%rsi), %r9
22496 movq -4*8(%rsi), %r8
22497 leaq -4*8(%rsi), %rsi
22498
22499 movq %r11, -1*8(%rdi)
22500- movq %r10, -2*8(%rdi)
22501+ movq %rcx, -2*8(%rdi)
22502 movq %r9, -3*8(%rdi)
22503 movq %r8, -4*8(%rdi)
22504 leaq -4*8(%rdi), %rdi
22505@@ -151,11 +151,11 @@ ENTRY(memmove)
22506 * Move data from 16 bytes to 31 bytes.
22507 */
22508 movq 0*8(%rsi), %r11
22509- movq 1*8(%rsi), %r10
22510+ movq 1*8(%rsi), %rcx
22511 movq -2*8(%rsi, %rdx), %r9
22512 movq -1*8(%rsi, %rdx), %r8
22513 movq %r11, 0*8(%rdi)
22514- movq %r10, 1*8(%rdi)
22515+ movq %rcx, 1*8(%rdi)
22516 movq %r9, -2*8(%rdi, %rdx)
22517 movq %r8, -1*8(%rdi, %rdx)
22518 jmp 13f
22519@@ -167,9 +167,9 @@ ENTRY(memmove)
22520 * Move data from 8 bytes to 15 bytes.
22521 */
22522 movq 0*8(%rsi), %r11
22523- movq -1*8(%rsi, %rdx), %r10
22524+ movq -1*8(%rsi, %rdx), %r9
22525 movq %r11, 0*8(%rdi)
22526- movq %r10, -1*8(%rdi, %rdx)
22527+ movq %r9, -1*8(%rdi, %rdx)
22528 jmp 13f
22529 10:
22530 cmpq $4, %rdx
22531@@ -178,9 +178,9 @@ ENTRY(memmove)
22532 * Move data from 4 bytes to 7 bytes.
22533 */
22534 movl (%rsi), %r11d
22535- movl -4(%rsi, %rdx), %r10d
22536+ movl -4(%rsi, %rdx), %r9d
22537 movl %r11d, (%rdi)
22538- movl %r10d, -4(%rdi, %rdx)
22539+ movl %r9d, -4(%rdi, %rdx)
22540 jmp 13f
22541 11:
22542 cmp $2, %rdx
22543@@ -189,9 +189,9 @@ ENTRY(memmove)
22544 * Move data from 2 bytes to 3 bytes.
22545 */
22546 movw (%rsi), %r11w
22547- movw -2(%rsi, %rdx), %r10w
22548+ movw -2(%rsi, %rdx), %r9w
22549 movw %r11w, (%rdi)
22550- movw %r10w, -2(%rdi, %rdx)
22551+ movw %r9w, -2(%rdi, %rdx)
22552 jmp 13f
22553 12:
22554 cmp $1, %rdx
22555@@ -202,6 +202,7 @@ ENTRY(memmove)
22556 movb (%rsi), %r11b
22557 movb %r11b, (%rdi)
22558 13:
22559+ pax_force_retaddr
22560 retq
22561 CFI_ENDPROC
22562
22563@@ -210,6 +211,7 @@ ENTRY(memmove)
22564 /* Forward moving data. */
22565 movq %rdx, %rcx
22566 rep movsb
22567+ pax_force_retaddr
22568 retq
22569 .Lmemmove_end_forward_efs:
22570 .previous
22571diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22572index 79bd454..dff325a 100644
22573--- a/arch/x86/lib/memset_64.S
22574+++ b/arch/x86/lib/memset_64.S
22575@@ -31,6 +31,7 @@
22576 movl %r8d,%ecx
22577 rep stosb
22578 movq %r9,%rax
22579+ pax_force_retaddr
22580 ret
22581 .Lmemset_e:
22582 .previous
22583@@ -53,6 +54,7 @@
22584 movl %edx,%ecx
22585 rep stosb
22586 movq %r9,%rax
22587+ pax_force_retaddr
22588 ret
22589 .Lmemset_e_e:
22590 .previous
22591@@ -60,13 +62,13 @@
22592 ENTRY(memset)
22593 ENTRY(__memset)
22594 CFI_STARTPROC
22595- movq %rdi,%r10
22596 movq %rdx,%r11
22597
22598 /* expand byte value */
22599 movzbl %sil,%ecx
22600 movabs $0x0101010101010101,%rax
22601 mul %rcx /* with rax, clobbers rdx */
22602+ movq %rdi,%rdx
22603
22604 /* align dst */
22605 movl %edi,%r9d
22606@@ -120,7 +122,8 @@ ENTRY(__memset)
22607 jnz .Lloop_1
22608
22609 .Lende:
22610- movq %r10,%rax
22611+ movq %rdx,%rax
22612+ pax_force_retaddr
22613 ret
22614
22615 CFI_RESTORE_STATE
22616diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22617index c9f2d9b..e7fd2c0 100644
22618--- a/arch/x86/lib/mmx_32.c
22619+++ b/arch/x86/lib/mmx_32.c
22620@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22621 {
22622 void *p;
22623 int i;
22624+ unsigned long cr0;
22625
22626 if (unlikely(in_interrupt()))
22627 return __memcpy(to, from, len);
22628@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22629 kernel_fpu_begin();
22630
22631 __asm__ __volatile__ (
22632- "1: prefetch (%0)\n" /* This set is 28 bytes */
22633- " prefetch 64(%0)\n"
22634- " prefetch 128(%0)\n"
22635- " prefetch 192(%0)\n"
22636- " prefetch 256(%0)\n"
22637+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22638+ " prefetch 64(%1)\n"
22639+ " prefetch 128(%1)\n"
22640+ " prefetch 192(%1)\n"
22641+ " prefetch 256(%1)\n"
22642 "2: \n"
22643 ".section .fixup, \"ax\"\n"
22644- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22645+ "3: \n"
22646+
22647+#ifdef CONFIG_PAX_KERNEXEC
22648+ " movl %%cr0, %0\n"
22649+ " movl %0, %%eax\n"
22650+ " andl $0xFFFEFFFF, %%eax\n"
22651+ " movl %%eax, %%cr0\n"
22652+#endif
22653+
22654+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22655+
22656+#ifdef CONFIG_PAX_KERNEXEC
22657+ " movl %0, %%cr0\n"
22658+#endif
22659+
22660 " jmp 2b\n"
22661 ".previous\n"
22662 _ASM_EXTABLE(1b, 3b)
22663- : : "r" (from));
22664+ : "=&r" (cr0) : "r" (from) : "ax");
22665
22666 for ( ; i > 5; i--) {
22667 __asm__ __volatile__ (
22668- "1: prefetch 320(%0)\n"
22669- "2: movq (%0), %%mm0\n"
22670- " movq 8(%0), %%mm1\n"
22671- " movq 16(%0), %%mm2\n"
22672- " movq 24(%0), %%mm3\n"
22673- " movq %%mm0, (%1)\n"
22674- " movq %%mm1, 8(%1)\n"
22675- " movq %%mm2, 16(%1)\n"
22676- " movq %%mm3, 24(%1)\n"
22677- " movq 32(%0), %%mm0\n"
22678- " movq 40(%0), %%mm1\n"
22679- " movq 48(%0), %%mm2\n"
22680- " movq 56(%0), %%mm3\n"
22681- " movq %%mm0, 32(%1)\n"
22682- " movq %%mm1, 40(%1)\n"
22683- " movq %%mm2, 48(%1)\n"
22684- " movq %%mm3, 56(%1)\n"
22685+ "1: prefetch 320(%1)\n"
22686+ "2: movq (%1), %%mm0\n"
22687+ " movq 8(%1), %%mm1\n"
22688+ " movq 16(%1), %%mm2\n"
22689+ " movq 24(%1), %%mm3\n"
22690+ " movq %%mm0, (%2)\n"
22691+ " movq %%mm1, 8(%2)\n"
22692+ " movq %%mm2, 16(%2)\n"
22693+ " movq %%mm3, 24(%2)\n"
22694+ " movq 32(%1), %%mm0\n"
22695+ " movq 40(%1), %%mm1\n"
22696+ " movq 48(%1), %%mm2\n"
22697+ " movq 56(%1), %%mm3\n"
22698+ " movq %%mm0, 32(%2)\n"
22699+ " movq %%mm1, 40(%2)\n"
22700+ " movq %%mm2, 48(%2)\n"
22701+ " movq %%mm3, 56(%2)\n"
22702 ".section .fixup, \"ax\"\n"
22703- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22704+ "3:\n"
22705+
22706+#ifdef CONFIG_PAX_KERNEXEC
22707+ " movl %%cr0, %0\n"
22708+ " movl %0, %%eax\n"
22709+ " andl $0xFFFEFFFF, %%eax\n"
22710+ " movl %%eax, %%cr0\n"
22711+#endif
22712+
22713+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22714+
22715+#ifdef CONFIG_PAX_KERNEXEC
22716+ " movl %0, %%cr0\n"
22717+#endif
22718+
22719 " jmp 2b\n"
22720 ".previous\n"
22721 _ASM_EXTABLE(1b, 3b)
22722- : : "r" (from), "r" (to) : "memory");
22723+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22724
22725 from += 64;
22726 to += 64;
22727@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22728 static void fast_copy_page(void *to, void *from)
22729 {
22730 int i;
22731+ unsigned long cr0;
22732
22733 kernel_fpu_begin();
22734
22735@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22736 * but that is for later. -AV
22737 */
22738 __asm__ __volatile__(
22739- "1: prefetch (%0)\n"
22740- " prefetch 64(%0)\n"
22741- " prefetch 128(%0)\n"
22742- " prefetch 192(%0)\n"
22743- " prefetch 256(%0)\n"
22744+ "1: prefetch (%1)\n"
22745+ " prefetch 64(%1)\n"
22746+ " prefetch 128(%1)\n"
22747+ " prefetch 192(%1)\n"
22748+ " prefetch 256(%1)\n"
22749 "2: \n"
22750 ".section .fixup, \"ax\"\n"
22751- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22752+ "3: \n"
22753+
22754+#ifdef CONFIG_PAX_KERNEXEC
22755+ " movl %%cr0, %0\n"
22756+ " movl %0, %%eax\n"
22757+ " andl $0xFFFEFFFF, %%eax\n"
22758+ " movl %%eax, %%cr0\n"
22759+#endif
22760+
22761+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22762+
22763+#ifdef CONFIG_PAX_KERNEXEC
22764+ " movl %0, %%cr0\n"
22765+#endif
22766+
22767 " jmp 2b\n"
22768 ".previous\n"
22769- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22770+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22771
22772 for (i = 0; i < (4096-320)/64; i++) {
22773 __asm__ __volatile__ (
22774- "1: prefetch 320(%0)\n"
22775- "2: movq (%0), %%mm0\n"
22776- " movntq %%mm0, (%1)\n"
22777- " movq 8(%0), %%mm1\n"
22778- " movntq %%mm1, 8(%1)\n"
22779- " movq 16(%0), %%mm2\n"
22780- " movntq %%mm2, 16(%1)\n"
22781- " movq 24(%0), %%mm3\n"
22782- " movntq %%mm3, 24(%1)\n"
22783- " movq 32(%0), %%mm4\n"
22784- " movntq %%mm4, 32(%1)\n"
22785- " movq 40(%0), %%mm5\n"
22786- " movntq %%mm5, 40(%1)\n"
22787- " movq 48(%0), %%mm6\n"
22788- " movntq %%mm6, 48(%1)\n"
22789- " movq 56(%0), %%mm7\n"
22790- " movntq %%mm7, 56(%1)\n"
22791+ "1: prefetch 320(%1)\n"
22792+ "2: movq (%1), %%mm0\n"
22793+ " movntq %%mm0, (%2)\n"
22794+ " movq 8(%1), %%mm1\n"
22795+ " movntq %%mm1, 8(%2)\n"
22796+ " movq 16(%1), %%mm2\n"
22797+ " movntq %%mm2, 16(%2)\n"
22798+ " movq 24(%1), %%mm3\n"
22799+ " movntq %%mm3, 24(%2)\n"
22800+ " movq 32(%1), %%mm4\n"
22801+ " movntq %%mm4, 32(%2)\n"
22802+ " movq 40(%1), %%mm5\n"
22803+ " movntq %%mm5, 40(%2)\n"
22804+ " movq 48(%1), %%mm6\n"
22805+ " movntq %%mm6, 48(%2)\n"
22806+ " movq 56(%1), %%mm7\n"
22807+ " movntq %%mm7, 56(%2)\n"
22808 ".section .fixup, \"ax\"\n"
22809- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22810+ "3:\n"
22811+
22812+#ifdef CONFIG_PAX_KERNEXEC
22813+ " movl %%cr0, %0\n"
22814+ " movl %0, %%eax\n"
22815+ " andl $0xFFFEFFFF, %%eax\n"
22816+ " movl %%eax, %%cr0\n"
22817+#endif
22818+
22819+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22820+
22821+#ifdef CONFIG_PAX_KERNEXEC
22822+ " movl %0, %%cr0\n"
22823+#endif
22824+
22825 " jmp 2b\n"
22826 ".previous\n"
22827- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22828+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22829
22830 from += 64;
22831 to += 64;
22832@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22833 static void fast_copy_page(void *to, void *from)
22834 {
22835 int i;
22836+ unsigned long cr0;
22837
22838 kernel_fpu_begin();
22839
22840 __asm__ __volatile__ (
22841- "1: prefetch (%0)\n"
22842- " prefetch 64(%0)\n"
22843- " prefetch 128(%0)\n"
22844- " prefetch 192(%0)\n"
22845- " prefetch 256(%0)\n"
22846+ "1: prefetch (%1)\n"
22847+ " prefetch 64(%1)\n"
22848+ " prefetch 128(%1)\n"
22849+ " prefetch 192(%1)\n"
22850+ " prefetch 256(%1)\n"
22851 "2: \n"
22852 ".section .fixup, \"ax\"\n"
22853- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22854+ "3: \n"
22855+
22856+#ifdef CONFIG_PAX_KERNEXEC
22857+ " movl %%cr0, %0\n"
22858+ " movl %0, %%eax\n"
22859+ " andl $0xFFFEFFFF, %%eax\n"
22860+ " movl %%eax, %%cr0\n"
22861+#endif
22862+
22863+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22864+
22865+#ifdef CONFIG_PAX_KERNEXEC
22866+ " movl %0, %%cr0\n"
22867+#endif
22868+
22869 " jmp 2b\n"
22870 ".previous\n"
22871- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22872+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22873
22874 for (i = 0; i < 4096/64; i++) {
22875 __asm__ __volatile__ (
22876- "1: prefetch 320(%0)\n"
22877- "2: movq (%0), %%mm0\n"
22878- " movq 8(%0), %%mm1\n"
22879- " movq 16(%0), %%mm2\n"
22880- " movq 24(%0), %%mm3\n"
22881- " movq %%mm0, (%1)\n"
22882- " movq %%mm1, 8(%1)\n"
22883- " movq %%mm2, 16(%1)\n"
22884- " movq %%mm3, 24(%1)\n"
22885- " movq 32(%0), %%mm0\n"
22886- " movq 40(%0), %%mm1\n"
22887- " movq 48(%0), %%mm2\n"
22888- " movq 56(%0), %%mm3\n"
22889- " movq %%mm0, 32(%1)\n"
22890- " movq %%mm1, 40(%1)\n"
22891- " movq %%mm2, 48(%1)\n"
22892- " movq %%mm3, 56(%1)\n"
22893+ "1: prefetch 320(%1)\n"
22894+ "2: movq (%1), %%mm0\n"
22895+ " movq 8(%1), %%mm1\n"
22896+ " movq 16(%1), %%mm2\n"
22897+ " movq 24(%1), %%mm3\n"
22898+ " movq %%mm0, (%2)\n"
22899+ " movq %%mm1, 8(%2)\n"
22900+ " movq %%mm2, 16(%2)\n"
22901+ " movq %%mm3, 24(%2)\n"
22902+ " movq 32(%1), %%mm0\n"
22903+ " movq 40(%1), %%mm1\n"
22904+ " movq 48(%1), %%mm2\n"
22905+ " movq 56(%1), %%mm3\n"
22906+ " movq %%mm0, 32(%2)\n"
22907+ " movq %%mm1, 40(%2)\n"
22908+ " movq %%mm2, 48(%2)\n"
22909+ " movq %%mm3, 56(%2)\n"
22910 ".section .fixup, \"ax\"\n"
22911- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22912+ "3:\n"
22913+
22914+#ifdef CONFIG_PAX_KERNEXEC
22915+ " movl %%cr0, %0\n"
22916+ " movl %0, %%eax\n"
22917+ " andl $0xFFFEFFFF, %%eax\n"
22918+ " movl %%eax, %%cr0\n"
22919+#endif
22920+
22921+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22922+
22923+#ifdef CONFIG_PAX_KERNEXEC
22924+ " movl %0, %%cr0\n"
22925+#endif
22926+
22927 " jmp 2b\n"
22928 ".previous\n"
22929 _ASM_EXTABLE(1b, 3b)
22930- : : "r" (from), "r" (to) : "memory");
22931+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22932
22933 from += 64;
22934 to += 64;
22935diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22936index 69fa106..adda88b 100644
22937--- a/arch/x86/lib/msr-reg.S
22938+++ b/arch/x86/lib/msr-reg.S
22939@@ -3,6 +3,7 @@
22940 #include <asm/dwarf2.h>
22941 #include <asm/asm.h>
22942 #include <asm/msr.h>
22943+#include <asm/alternative-asm.h>
22944
22945 #ifdef CONFIG_X86_64
22946 /*
22947@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22948 CFI_STARTPROC
22949 pushq_cfi %rbx
22950 pushq_cfi %rbp
22951- movq %rdi, %r10 /* Save pointer */
22952+ movq %rdi, %r9 /* Save pointer */
22953 xorl %r11d, %r11d /* Return value */
22954 movl (%rdi), %eax
22955 movl 4(%rdi), %ecx
22956@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22957 movl 28(%rdi), %edi
22958 CFI_REMEMBER_STATE
22959 1: \op
22960-2: movl %eax, (%r10)
22961+2: movl %eax, (%r9)
22962 movl %r11d, %eax /* Return value */
22963- movl %ecx, 4(%r10)
22964- movl %edx, 8(%r10)
22965- movl %ebx, 12(%r10)
22966- movl %ebp, 20(%r10)
22967- movl %esi, 24(%r10)
22968- movl %edi, 28(%r10)
22969+ movl %ecx, 4(%r9)
22970+ movl %edx, 8(%r9)
22971+ movl %ebx, 12(%r9)
22972+ movl %ebp, 20(%r9)
22973+ movl %esi, 24(%r9)
22974+ movl %edi, 28(%r9)
22975 popq_cfi %rbp
22976 popq_cfi %rbx
22977+ pax_force_retaddr
22978 ret
22979 3:
22980 CFI_RESTORE_STATE
22981diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22982index 36b0d15..d381858 100644
22983--- a/arch/x86/lib/putuser.S
22984+++ b/arch/x86/lib/putuser.S
22985@@ -15,7 +15,9 @@
22986 #include <asm/thread_info.h>
22987 #include <asm/errno.h>
22988 #include <asm/asm.h>
22989-
22990+#include <asm/segment.h>
22991+#include <asm/pgtable.h>
22992+#include <asm/alternative-asm.h>
22993
22994 /*
22995 * __put_user_X
22996@@ -29,52 +31,119 @@
22997 * as they get called from within inline assembly.
22998 */
22999
23000-#define ENTER CFI_STARTPROC ; \
23001- GET_THREAD_INFO(%_ASM_BX)
23002-#define EXIT ret ; \
23003+#define ENTER CFI_STARTPROC
23004+#define EXIT pax_force_retaddr; ret ; \
23005 CFI_ENDPROC
23006
23007+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23008+#define _DEST %_ASM_CX,%_ASM_BX
23009+#else
23010+#define _DEST %_ASM_CX
23011+#endif
23012+
23013+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23014+#define __copyuser_seg gs;
23015+#else
23016+#define __copyuser_seg
23017+#endif
23018+
23019 .text
23020 ENTRY(__put_user_1)
23021 ENTER
23022+
23023+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23024+ GET_THREAD_INFO(%_ASM_BX)
23025 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23026 jae bad_put_user
23027-1: movb %al,(%_ASM_CX)
23028+
23029+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23030+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23031+ cmp %_ASM_BX,%_ASM_CX
23032+ jb 1234f
23033+ xor %ebx,%ebx
23034+1234:
23035+#endif
23036+
23037+#endif
23038+
23039+1: __copyuser_seg movb %al,(_DEST)
23040 xor %eax,%eax
23041 EXIT
23042 ENDPROC(__put_user_1)
23043
23044 ENTRY(__put_user_2)
23045 ENTER
23046+
23047+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23048+ GET_THREAD_INFO(%_ASM_BX)
23049 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23050 sub $1,%_ASM_BX
23051 cmp %_ASM_BX,%_ASM_CX
23052 jae bad_put_user
23053-2: movw %ax,(%_ASM_CX)
23054+
23055+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23056+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23057+ cmp %_ASM_BX,%_ASM_CX
23058+ jb 1234f
23059+ xor %ebx,%ebx
23060+1234:
23061+#endif
23062+
23063+#endif
23064+
23065+2: __copyuser_seg movw %ax,(_DEST)
23066 xor %eax,%eax
23067 EXIT
23068 ENDPROC(__put_user_2)
23069
23070 ENTRY(__put_user_4)
23071 ENTER
23072+
23073+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23074+ GET_THREAD_INFO(%_ASM_BX)
23075 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23076 sub $3,%_ASM_BX
23077 cmp %_ASM_BX,%_ASM_CX
23078 jae bad_put_user
23079-3: movl %eax,(%_ASM_CX)
23080+
23081+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23082+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23083+ cmp %_ASM_BX,%_ASM_CX
23084+ jb 1234f
23085+ xor %ebx,%ebx
23086+1234:
23087+#endif
23088+
23089+#endif
23090+
23091+3: __copyuser_seg movl %eax,(_DEST)
23092 xor %eax,%eax
23093 EXIT
23094 ENDPROC(__put_user_4)
23095
23096 ENTRY(__put_user_8)
23097 ENTER
23098+
23099+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23100+ GET_THREAD_INFO(%_ASM_BX)
23101 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23102 sub $7,%_ASM_BX
23103 cmp %_ASM_BX,%_ASM_CX
23104 jae bad_put_user
23105-4: mov %_ASM_AX,(%_ASM_CX)
23106+
23107+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23108+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23109+ cmp %_ASM_BX,%_ASM_CX
23110+ jb 1234f
23111+ xor %ebx,%ebx
23112+1234:
23113+#endif
23114+
23115+#endif
23116+
23117+4: __copyuser_seg mov %_ASM_AX,(_DEST)
23118 #ifdef CONFIG_X86_32
23119-5: movl %edx,4(%_ASM_CX)
23120+5: __copyuser_seg movl %edx,4(_DEST)
23121 #endif
23122 xor %eax,%eax
23123 EXIT
23124diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
23125index 1cad221..de671ee 100644
23126--- a/arch/x86/lib/rwlock.S
23127+++ b/arch/x86/lib/rwlock.S
23128@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
23129 FRAME
23130 0: LOCK_PREFIX
23131 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23132+
23133+#ifdef CONFIG_PAX_REFCOUNT
23134+ jno 1234f
23135+ LOCK_PREFIX
23136+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23137+ int $4
23138+1234:
23139+ _ASM_EXTABLE(1234b, 1234b)
23140+#endif
23141+
23142 1: rep; nop
23143 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
23144 jne 1b
23145 LOCK_PREFIX
23146 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23147+
23148+#ifdef CONFIG_PAX_REFCOUNT
23149+ jno 1234f
23150+ LOCK_PREFIX
23151+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23152+ int $4
23153+1234:
23154+ _ASM_EXTABLE(1234b, 1234b)
23155+#endif
23156+
23157 jnz 0b
23158 ENDFRAME
23159+ pax_force_retaddr
23160 ret
23161 CFI_ENDPROC
23162 END(__write_lock_failed)
23163@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
23164 FRAME
23165 0: LOCK_PREFIX
23166 READ_LOCK_SIZE(inc) (%__lock_ptr)
23167+
23168+#ifdef CONFIG_PAX_REFCOUNT
23169+ jno 1234f
23170+ LOCK_PREFIX
23171+ READ_LOCK_SIZE(dec) (%__lock_ptr)
23172+ int $4
23173+1234:
23174+ _ASM_EXTABLE(1234b, 1234b)
23175+#endif
23176+
23177 1: rep; nop
23178 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
23179 js 1b
23180 LOCK_PREFIX
23181 READ_LOCK_SIZE(dec) (%__lock_ptr)
23182+
23183+#ifdef CONFIG_PAX_REFCOUNT
23184+ jno 1234f
23185+ LOCK_PREFIX
23186+ READ_LOCK_SIZE(inc) (%__lock_ptr)
23187+ int $4
23188+1234:
23189+ _ASM_EXTABLE(1234b, 1234b)
23190+#endif
23191+
23192 js 0b
23193 ENDFRAME
23194+ pax_force_retaddr
23195 ret
23196 CFI_ENDPROC
23197 END(__read_lock_failed)
23198diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
23199index 5dff5f0..cadebf4 100644
23200--- a/arch/x86/lib/rwsem.S
23201+++ b/arch/x86/lib/rwsem.S
23202@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
23203 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23204 CFI_RESTORE __ASM_REG(dx)
23205 restore_common_regs
23206+ pax_force_retaddr
23207 ret
23208 CFI_ENDPROC
23209 ENDPROC(call_rwsem_down_read_failed)
23210@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
23211 movq %rax,%rdi
23212 call rwsem_down_write_failed
23213 restore_common_regs
23214+ pax_force_retaddr
23215 ret
23216 CFI_ENDPROC
23217 ENDPROC(call_rwsem_down_write_failed)
23218@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
23219 movq %rax,%rdi
23220 call rwsem_wake
23221 restore_common_regs
23222-1: ret
23223+1: pax_force_retaddr
23224+ ret
23225 CFI_ENDPROC
23226 ENDPROC(call_rwsem_wake)
23227
23228@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
23229 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23230 CFI_RESTORE __ASM_REG(dx)
23231 restore_common_regs
23232+ pax_force_retaddr
23233 ret
23234 CFI_ENDPROC
23235 ENDPROC(call_rwsem_downgrade_wake)
23236diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23237index a63efd6..ccecad8 100644
23238--- a/arch/x86/lib/thunk_64.S
23239+++ b/arch/x86/lib/thunk_64.S
23240@@ -8,6 +8,7 @@
23241 #include <linux/linkage.h>
23242 #include <asm/dwarf2.h>
23243 #include <asm/calling.h>
23244+#include <asm/alternative-asm.h>
23245
23246 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23247 .macro THUNK name, func, put_ret_addr_in_rdi=0
23248@@ -41,5 +42,6 @@
23249 SAVE_ARGS
23250 restore:
23251 RESTORE_ARGS
23252+ pax_force_retaddr
23253 ret
23254 CFI_ENDPROC
23255diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23256index e218d5d..a99a1eb 100644
23257--- a/arch/x86/lib/usercopy_32.c
23258+++ b/arch/x86/lib/usercopy_32.c
23259@@ -43,7 +43,7 @@ do { \
23260 __asm__ __volatile__( \
23261 " testl %1,%1\n" \
23262 " jz 2f\n" \
23263- "0: lodsb\n" \
23264+ "0: "__copyuser_seg"lodsb\n" \
23265 " stosb\n" \
23266 " testb %%al,%%al\n" \
23267 " jz 1f\n" \
23268@@ -128,10 +128,12 @@ do { \
23269 int __d0; \
23270 might_fault(); \
23271 __asm__ __volatile__( \
23272+ __COPYUSER_SET_ES \
23273 "0: rep; stosl\n" \
23274 " movl %2,%0\n" \
23275 "1: rep; stosb\n" \
23276 "2:\n" \
23277+ __COPYUSER_RESTORE_ES \
23278 ".section .fixup,\"ax\"\n" \
23279 "3: lea 0(%2,%0,4),%0\n" \
23280 " jmp 2b\n" \
23281@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23282 might_fault();
23283
23284 __asm__ __volatile__(
23285+ __COPYUSER_SET_ES
23286 " testl %0, %0\n"
23287 " jz 3f\n"
23288 " andl %0,%%ecx\n"
23289@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23290 " subl %%ecx,%0\n"
23291 " addl %0,%%eax\n"
23292 "1:\n"
23293+ __COPYUSER_RESTORE_ES
23294 ".section .fixup,\"ax\"\n"
23295 "2: xorl %%eax,%%eax\n"
23296 " jmp 1b\n"
23297@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23298
23299 #ifdef CONFIG_X86_INTEL_USERCOPY
23300 static unsigned long
23301-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23302+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23303 {
23304 int d0, d1;
23305 __asm__ __volatile__(
23306@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23307 " .align 2,0x90\n"
23308 "3: movl 0(%4), %%eax\n"
23309 "4: movl 4(%4), %%edx\n"
23310- "5: movl %%eax, 0(%3)\n"
23311- "6: movl %%edx, 4(%3)\n"
23312+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23313+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23314 "7: movl 8(%4), %%eax\n"
23315 "8: movl 12(%4),%%edx\n"
23316- "9: movl %%eax, 8(%3)\n"
23317- "10: movl %%edx, 12(%3)\n"
23318+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23319+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23320 "11: movl 16(%4), %%eax\n"
23321 "12: movl 20(%4), %%edx\n"
23322- "13: movl %%eax, 16(%3)\n"
23323- "14: movl %%edx, 20(%3)\n"
23324+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23325+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23326 "15: movl 24(%4), %%eax\n"
23327 "16: movl 28(%4), %%edx\n"
23328- "17: movl %%eax, 24(%3)\n"
23329- "18: movl %%edx, 28(%3)\n"
23330+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23331+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23332 "19: movl 32(%4), %%eax\n"
23333 "20: movl 36(%4), %%edx\n"
23334- "21: movl %%eax, 32(%3)\n"
23335- "22: movl %%edx, 36(%3)\n"
23336+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23337+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23338 "23: movl 40(%4), %%eax\n"
23339 "24: movl 44(%4), %%edx\n"
23340- "25: movl %%eax, 40(%3)\n"
23341- "26: movl %%edx, 44(%3)\n"
23342+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23343+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23344 "27: movl 48(%4), %%eax\n"
23345 "28: movl 52(%4), %%edx\n"
23346- "29: movl %%eax, 48(%3)\n"
23347- "30: movl %%edx, 52(%3)\n"
23348+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23349+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23350 "31: movl 56(%4), %%eax\n"
23351 "32: movl 60(%4), %%edx\n"
23352- "33: movl %%eax, 56(%3)\n"
23353- "34: movl %%edx, 60(%3)\n"
23354+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23355+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23356 " addl $-64, %0\n"
23357 " addl $64, %4\n"
23358 " addl $64, %3\n"
23359@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23360 " shrl $2, %0\n"
23361 " andl $3, %%eax\n"
23362 " cld\n"
23363+ __COPYUSER_SET_ES
23364 "99: rep; movsl\n"
23365 "36: movl %%eax, %0\n"
23366 "37: rep; movsb\n"
23367 "100:\n"
23368+ __COPYUSER_RESTORE_ES
23369 ".section .fixup,\"ax\"\n"
23370 "101: lea 0(%%eax,%0,4),%0\n"
23371 " jmp 100b\n"
23372@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23373 }
23374
23375 static unsigned long
23376+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23377+{
23378+ int d0, d1;
23379+ __asm__ __volatile__(
23380+ " .align 2,0x90\n"
23381+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23382+ " cmpl $67, %0\n"
23383+ " jbe 3f\n"
23384+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23385+ " .align 2,0x90\n"
23386+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23387+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23388+ "5: movl %%eax, 0(%3)\n"
23389+ "6: movl %%edx, 4(%3)\n"
23390+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23391+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23392+ "9: movl %%eax, 8(%3)\n"
23393+ "10: movl %%edx, 12(%3)\n"
23394+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23395+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23396+ "13: movl %%eax, 16(%3)\n"
23397+ "14: movl %%edx, 20(%3)\n"
23398+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23399+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23400+ "17: movl %%eax, 24(%3)\n"
23401+ "18: movl %%edx, 28(%3)\n"
23402+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23403+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23404+ "21: movl %%eax, 32(%3)\n"
23405+ "22: movl %%edx, 36(%3)\n"
23406+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23407+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23408+ "25: movl %%eax, 40(%3)\n"
23409+ "26: movl %%edx, 44(%3)\n"
23410+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23411+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23412+ "29: movl %%eax, 48(%3)\n"
23413+ "30: movl %%edx, 52(%3)\n"
23414+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23415+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23416+ "33: movl %%eax, 56(%3)\n"
23417+ "34: movl %%edx, 60(%3)\n"
23418+ " addl $-64, %0\n"
23419+ " addl $64, %4\n"
23420+ " addl $64, %3\n"
23421+ " cmpl $63, %0\n"
23422+ " ja 1b\n"
23423+ "35: movl %0, %%eax\n"
23424+ " shrl $2, %0\n"
23425+ " andl $3, %%eax\n"
23426+ " cld\n"
23427+ "99: rep; "__copyuser_seg" movsl\n"
23428+ "36: movl %%eax, %0\n"
23429+ "37: rep; "__copyuser_seg" movsb\n"
23430+ "100:\n"
23431+ ".section .fixup,\"ax\"\n"
23432+ "101: lea 0(%%eax,%0,4),%0\n"
23433+ " jmp 100b\n"
23434+ ".previous\n"
23435+ ".section __ex_table,\"a\"\n"
23436+ " .align 4\n"
23437+ " .long 1b,100b\n"
23438+ " .long 2b,100b\n"
23439+ " .long 3b,100b\n"
23440+ " .long 4b,100b\n"
23441+ " .long 5b,100b\n"
23442+ " .long 6b,100b\n"
23443+ " .long 7b,100b\n"
23444+ " .long 8b,100b\n"
23445+ " .long 9b,100b\n"
23446+ " .long 10b,100b\n"
23447+ " .long 11b,100b\n"
23448+ " .long 12b,100b\n"
23449+ " .long 13b,100b\n"
23450+ " .long 14b,100b\n"
23451+ " .long 15b,100b\n"
23452+ " .long 16b,100b\n"
23453+ " .long 17b,100b\n"
23454+ " .long 18b,100b\n"
23455+ " .long 19b,100b\n"
23456+ " .long 20b,100b\n"
23457+ " .long 21b,100b\n"
23458+ " .long 22b,100b\n"
23459+ " .long 23b,100b\n"
23460+ " .long 24b,100b\n"
23461+ " .long 25b,100b\n"
23462+ " .long 26b,100b\n"
23463+ " .long 27b,100b\n"
23464+ " .long 28b,100b\n"
23465+ " .long 29b,100b\n"
23466+ " .long 30b,100b\n"
23467+ " .long 31b,100b\n"
23468+ " .long 32b,100b\n"
23469+ " .long 33b,100b\n"
23470+ " .long 34b,100b\n"
23471+ " .long 35b,100b\n"
23472+ " .long 36b,100b\n"
23473+ " .long 37b,100b\n"
23474+ " .long 99b,101b\n"
23475+ ".previous"
23476+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23477+ : "1"(to), "2"(from), "0"(size)
23478+ : "eax", "edx", "memory");
23479+ return size;
23480+}
23481+
23482+static unsigned long
23483+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23484+static unsigned long
23485 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23486 {
23487 int d0, d1;
23488 __asm__ __volatile__(
23489 " .align 2,0x90\n"
23490- "0: movl 32(%4), %%eax\n"
23491+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23492 " cmpl $67, %0\n"
23493 " jbe 2f\n"
23494- "1: movl 64(%4), %%eax\n"
23495+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23496 " .align 2,0x90\n"
23497- "2: movl 0(%4), %%eax\n"
23498- "21: movl 4(%4), %%edx\n"
23499+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23500+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23501 " movl %%eax, 0(%3)\n"
23502 " movl %%edx, 4(%3)\n"
23503- "3: movl 8(%4), %%eax\n"
23504- "31: movl 12(%4),%%edx\n"
23505+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23506+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23507 " movl %%eax, 8(%3)\n"
23508 " movl %%edx, 12(%3)\n"
23509- "4: movl 16(%4), %%eax\n"
23510- "41: movl 20(%4), %%edx\n"
23511+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23512+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23513 " movl %%eax, 16(%3)\n"
23514 " movl %%edx, 20(%3)\n"
23515- "10: movl 24(%4), %%eax\n"
23516- "51: movl 28(%4), %%edx\n"
23517+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23518+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23519 " movl %%eax, 24(%3)\n"
23520 " movl %%edx, 28(%3)\n"
23521- "11: movl 32(%4), %%eax\n"
23522- "61: movl 36(%4), %%edx\n"
23523+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23524+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23525 " movl %%eax, 32(%3)\n"
23526 " movl %%edx, 36(%3)\n"
23527- "12: movl 40(%4), %%eax\n"
23528- "71: movl 44(%4), %%edx\n"
23529+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23530+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23531 " movl %%eax, 40(%3)\n"
23532 " movl %%edx, 44(%3)\n"
23533- "13: movl 48(%4), %%eax\n"
23534- "81: movl 52(%4), %%edx\n"
23535+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23536+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23537 " movl %%eax, 48(%3)\n"
23538 " movl %%edx, 52(%3)\n"
23539- "14: movl 56(%4), %%eax\n"
23540- "91: movl 60(%4), %%edx\n"
23541+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23542+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23543 " movl %%eax, 56(%3)\n"
23544 " movl %%edx, 60(%3)\n"
23545 " addl $-64, %0\n"
23546@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23547 " shrl $2, %0\n"
23548 " andl $3, %%eax\n"
23549 " cld\n"
23550- "6: rep; movsl\n"
23551+ "6: rep; "__copyuser_seg" movsl\n"
23552 " movl %%eax,%0\n"
23553- "7: rep; movsb\n"
23554+ "7: rep; "__copyuser_seg" movsb\n"
23555 "8:\n"
23556 ".section .fixup,\"ax\"\n"
23557 "9: lea 0(%%eax,%0,4),%0\n"
23558@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23559 */
23560
23561 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23562+ const void __user *from, unsigned long size) __size_overflow(3);
23563+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23564 const void __user *from, unsigned long size)
23565 {
23566 int d0, d1;
23567
23568 __asm__ __volatile__(
23569 " .align 2,0x90\n"
23570- "0: movl 32(%4), %%eax\n"
23571+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23572 " cmpl $67, %0\n"
23573 " jbe 2f\n"
23574- "1: movl 64(%4), %%eax\n"
23575+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23576 " .align 2,0x90\n"
23577- "2: movl 0(%4), %%eax\n"
23578- "21: movl 4(%4), %%edx\n"
23579+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23580+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23581 " movnti %%eax, 0(%3)\n"
23582 " movnti %%edx, 4(%3)\n"
23583- "3: movl 8(%4), %%eax\n"
23584- "31: movl 12(%4),%%edx\n"
23585+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23586+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23587 " movnti %%eax, 8(%3)\n"
23588 " movnti %%edx, 12(%3)\n"
23589- "4: movl 16(%4), %%eax\n"
23590- "41: movl 20(%4), %%edx\n"
23591+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23592+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23593 " movnti %%eax, 16(%3)\n"
23594 " movnti %%edx, 20(%3)\n"
23595- "10: movl 24(%4), %%eax\n"
23596- "51: movl 28(%4), %%edx\n"
23597+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23598+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23599 " movnti %%eax, 24(%3)\n"
23600 " movnti %%edx, 28(%3)\n"
23601- "11: movl 32(%4), %%eax\n"
23602- "61: movl 36(%4), %%edx\n"
23603+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23604+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23605 " movnti %%eax, 32(%3)\n"
23606 " movnti %%edx, 36(%3)\n"
23607- "12: movl 40(%4), %%eax\n"
23608- "71: movl 44(%4), %%edx\n"
23609+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23610+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23611 " movnti %%eax, 40(%3)\n"
23612 " movnti %%edx, 44(%3)\n"
23613- "13: movl 48(%4), %%eax\n"
23614- "81: movl 52(%4), %%edx\n"
23615+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23616+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23617 " movnti %%eax, 48(%3)\n"
23618 " movnti %%edx, 52(%3)\n"
23619- "14: movl 56(%4), %%eax\n"
23620- "91: movl 60(%4), %%edx\n"
23621+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23622+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23623 " movnti %%eax, 56(%3)\n"
23624 " movnti %%edx, 60(%3)\n"
23625 " addl $-64, %0\n"
23626@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23627 " shrl $2, %0\n"
23628 " andl $3, %%eax\n"
23629 " cld\n"
23630- "6: rep; movsl\n"
23631+ "6: rep; "__copyuser_seg" movsl\n"
23632 " movl %%eax,%0\n"
23633- "7: rep; movsb\n"
23634+ "7: rep; "__copyuser_seg" movsb\n"
23635 "8:\n"
23636 ".section .fixup,\"ax\"\n"
23637 "9: lea 0(%%eax,%0,4),%0\n"
23638@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23639 }
23640
23641 static unsigned long __copy_user_intel_nocache(void *to,
23642+ const void __user *from, unsigned long size) __size_overflow(3);
23643+static unsigned long __copy_user_intel_nocache(void *to,
23644 const void __user *from, unsigned long size)
23645 {
23646 int d0, d1;
23647
23648 __asm__ __volatile__(
23649 " .align 2,0x90\n"
23650- "0: movl 32(%4), %%eax\n"
23651+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23652 " cmpl $67, %0\n"
23653 " jbe 2f\n"
23654- "1: movl 64(%4), %%eax\n"
23655+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23656 " .align 2,0x90\n"
23657- "2: movl 0(%4), %%eax\n"
23658- "21: movl 4(%4), %%edx\n"
23659+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23660+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23661 " movnti %%eax, 0(%3)\n"
23662 " movnti %%edx, 4(%3)\n"
23663- "3: movl 8(%4), %%eax\n"
23664- "31: movl 12(%4),%%edx\n"
23665+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23666+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23667 " movnti %%eax, 8(%3)\n"
23668 " movnti %%edx, 12(%3)\n"
23669- "4: movl 16(%4), %%eax\n"
23670- "41: movl 20(%4), %%edx\n"
23671+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23672+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23673 " movnti %%eax, 16(%3)\n"
23674 " movnti %%edx, 20(%3)\n"
23675- "10: movl 24(%4), %%eax\n"
23676- "51: movl 28(%4), %%edx\n"
23677+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23678+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23679 " movnti %%eax, 24(%3)\n"
23680 " movnti %%edx, 28(%3)\n"
23681- "11: movl 32(%4), %%eax\n"
23682- "61: movl 36(%4), %%edx\n"
23683+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23684+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23685 " movnti %%eax, 32(%3)\n"
23686 " movnti %%edx, 36(%3)\n"
23687- "12: movl 40(%4), %%eax\n"
23688- "71: movl 44(%4), %%edx\n"
23689+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23690+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23691 " movnti %%eax, 40(%3)\n"
23692 " movnti %%edx, 44(%3)\n"
23693- "13: movl 48(%4), %%eax\n"
23694- "81: movl 52(%4), %%edx\n"
23695+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23696+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23697 " movnti %%eax, 48(%3)\n"
23698 " movnti %%edx, 52(%3)\n"
23699- "14: movl 56(%4), %%eax\n"
23700- "91: movl 60(%4), %%edx\n"
23701+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23702+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23703 " movnti %%eax, 56(%3)\n"
23704 " movnti %%edx, 60(%3)\n"
23705 " addl $-64, %0\n"
23706@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23707 " shrl $2, %0\n"
23708 " andl $3, %%eax\n"
23709 " cld\n"
23710- "6: rep; movsl\n"
23711+ "6: rep; "__copyuser_seg" movsl\n"
23712 " movl %%eax,%0\n"
23713- "7: rep; movsb\n"
23714+ "7: rep; "__copyuser_seg" movsb\n"
23715 "8:\n"
23716 ".section .fixup,\"ax\"\n"
23717 "9: lea 0(%%eax,%0,4),%0\n"
23718@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23719 */
23720 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23721 unsigned long size);
23722-unsigned long __copy_user_intel(void __user *to, const void *from,
23723+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23724+ unsigned long size);
23725+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23726 unsigned long size);
23727 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23728 const void __user *from, unsigned long size);
23729 #endif /* CONFIG_X86_INTEL_USERCOPY */
23730
23731 /* Generic arbitrary sized copy. */
23732-#define __copy_user(to, from, size) \
23733+#define __copy_user(to, from, size, prefix, set, restore) \
23734 do { \
23735 int __d0, __d1, __d2; \
23736 __asm__ __volatile__( \
23737+ set \
23738 " cmp $7,%0\n" \
23739 " jbe 1f\n" \
23740 " movl %1,%0\n" \
23741 " negl %0\n" \
23742 " andl $7,%0\n" \
23743 " subl %0,%3\n" \
23744- "4: rep; movsb\n" \
23745+ "4: rep; "prefix"movsb\n" \
23746 " movl %3,%0\n" \
23747 " shrl $2,%0\n" \
23748 " andl $3,%3\n" \
23749 " .align 2,0x90\n" \
23750- "0: rep; movsl\n" \
23751+ "0: rep; "prefix"movsl\n" \
23752 " movl %3,%0\n" \
23753- "1: rep; movsb\n" \
23754+ "1: rep; "prefix"movsb\n" \
23755 "2:\n" \
23756+ restore \
23757 ".section .fixup,\"ax\"\n" \
23758 "5: addl %3,%0\n" \
23759 " jmp 2b\n" \
23760@@ -682,14 +805,14 @@ do { \
23761 " negl %0\n" \
23762 " andl $7,%0\n" \
23763 " subl %0,%3\n" \
23764- "4: rep; movsb\n" \
23765+ "4: rep; "__copyuser_seg"movsb\n" \
23766 " movl %3,%0\n" \
23767 " shrl $2,%0\n" \
23768 " andl $3,%3\n" \
23769 " .align 2,0x90\n" \
23770- "0: rep; movsl\n" \
23771+ "0: rep; "__copyuser_seg"movsl\n" \
23772 " movl %3,%0\n" \
23773- "1: rep; movsb\n" \
23774+ "1: rep; "__copyuser_seg"movsb\n" \
23775 "2:\n" \
23776 ".section .fixup,\"ax\"\n" \
23777 "5: addl %3,%0\n" \
23778@@ -775,9 +898,9 @@ survive:
23779 }
23780 #endif
23781 if (movsl_is_ok(to, from, n))
23782- __copy_user(to, from, n);
23783+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23784 else
23785- n = __copy_user_intel(to, from, n);
23786+ n = __generic_copy_to_user_intel(to, from, n);
23787 return n;
23788 }
23789 EXPORT_SYMBOL(__copy_to_user_ll);
23790@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23791 unsigned long n)
23792 {
23793 if (movsl_is_ok(to, from, n))
23794- __copy_user(to, from, n);
23795+ __copy_user(to, from, n, __copyuser_seg, "", "");
23796 else
23797- n = __copy_user_intel((void __user *)to,
23798- (const void *)from, n);
23799+ n = __generic_copy_from_user_intel(to, from, n);
23800 return n;
23801 }
23802 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23803@@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23804 if (n > 64 && cpu_has_xmm2)
23805 n = __copy_user_intel_nocache(to, from, n);
23806 else
23807- __copy_user(to, from, n);
23808+ __copy_user(to, from, n, __copyuser_seg, "", "");
23809 #else
23810- __copy_user(to, from, n);
23811+ __copy_user(to, from, n, __copyuser_seg, "", "");
23812 #endif
23813 return n;
23814 }
23815 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23816
23817-/**
23818- * copy_to_user: - Copy a block of data into user space.
23819- * @to: Destination address, in user space.
23820- * @from: Source address, in kernel space.
23821- * @n: Number of bytes to copy.
23822- *
23823- * Context: User context only. This function may sleep.
23824- *
23825- * Copy data from kernel space to user space.
23826- *
23827- * Returns number of bytes that could not be copied.
23828- * On success, this will be zero.
23829- */
23830-unsigned long
23831-copy_to_user(void __user *to, const void *from, unsigned long n)
23832-{
23833- if (access_ok(VERIFY_WRITE, to, n))
23834- n = __copy_to_user(to, from, n);
23835- return n;
23836-}
23837-EXPORT_SYMBOL(copy_to_user);
23838-
23839-/**
23840- * copy_from_user: - Copy a block of data from user space.
23841- * @to: Destination address, in kernel space.
23842- * @from: Source address, in user space.
23843- * @n: Number of bytes to copy.
23844- *
23845- * Context: User context only. This function may sleep.
23846- *
23847- * Copy data from user space to kernel space.
23848- *
23849- * Returns number of bytes that could not be copied.
23850- * On success, this will be zero.
23851- *
23852- * If some data could not be copied, this function will pad the copied
23853- * data to the requested size using zero bytes.
23854- */
23855-unsigned long
23856-_copy_from_user(void *to, const void __user *from, unsigned long n)
23857-{
23858- if (access_ok(VERIFY_READ, from, n))
23859- n = __copy_from_user(to, from, n);
23860- else
23861- memset(to, 0, n);
23862- return n;
23863-}
23864-EXPORT_SYMBOL(_copy_from_user);
23865-
23866 void copy_from_user_overflow(void)
23867 {
23868 WARN(1, "Buffer overflow detected!\n");
23869 }
23870 EXPORT_SYMBOL(copy_from_user_overflow);
23871+
23872+void copy_to_user_overflow(void)
23873+{
23874+ WARN(1, "Buffer overflow detected!\n");
23875+}
23876+EXPORT_SYMBOL(copy_to_user_overflow);
23877+
23878+#ifdef CONFIG_PAX_MEMORY_UDEREF
23879+void __set_fs(mm_segment_t x)
23880+{
23881+ switch (x.seg) {
23882+ case 0:
23883+ loadsegment(gs, 0);
23884+ break;
23885+ case TASK_SIZE_MAX:
23886+ loadsegment(gs, __USER_DS);
23887+ break;
23888+ case -1UL:
23889+ loadsegment(gs, __KERNEL_DS);
23890+ break;
23891+ default:
23892+ BUG();
23893+ }
23894+ return;
23895+}
23896+EXPORT_SYMBOL(__set_fs);
23897+
23898+void set_fs(mm_segment_t x)
23899+{
23900+ current_thread_info()->addr_limit = x;
23901+ __set_fs(x);
23902+}
23903+EXPORT_SYMBOL(set_fs);
23904+#endif
23905diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23906index b7c2849..8633ad8 100644
23907--- a/arch/x86/lib/usercopy_64.c
23908+++ b/arch/x86/lib/usercopy_64.c
23909@@ -42,6 +42,12 @@ long
23910 __strncpy_from_user(char *dst, const char __user *src, long count)
23911 {
23912 long res;
23913+
23914+#ifdef CONFIG_PAX_MEMORY_UDEREF
23915+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23916+ src += PAX_USER_SHADOW_BASE;
23917+#endif
23918+
23919 __do_strncpy_from_user(dst, src, count, res);
23920 return res;
23921 }
23922@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23923 {
23924 long __d0;
23925 might_fault();
23926+
23927+#ifdef CONFIG_PAX_MEMORY_UDEREF
23928+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23929+ addr += PAX_USER_SHADOW_BASE;
23930+#endif
23931+
23932 /* no memory constraint because it doesn't change any memory gcc knows
23933 about */
23934 asm volatile(
23935@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23936 }
23937 EXPORT_SYMBOL(strlen_user);
23938
23939-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23940+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23941 {
23942- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23943- return copy_user_generic((__force void *)to, (__force void *)from, len);
23944- }
23945- return len;
23946+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23947+
23948+#ifdef CONFIG_PAX_MEMORY_UDEREF
23949+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23950+ to += PAX_USER_SHADOW_BASE;
23951+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23952+ from += PAX_USER_SHADOW_BASE;
23953+#endif
23954+
23955+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23956+ }
23957+ return len;
23958 }
23959 EXPORT_SYMBOL(copy_in_user);
23960
23961@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23962 * it is not necessary to optimize tail handling.
23963 */
23964 unsigned long
23965-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23966+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23967 {
23968 char c;
23969 unsigned zero_len;
23970diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23971index 1fb85db..8b3540b 100644
23972--- a/arch/x86/mm/extable.c
23973+++ b/arch/x86/mm/extable.c
23974@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23975 const struct exception_table_entry *fixup;
23976
23977 #ifdef CONFIG_PNPBIOS
23978- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23979+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23980 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23981 extern u32 pnp_bios_is_utter_crap;
23982 pnp_bios_is_utter_crap = 1;
23983diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23984index f0b4caf..d92fd42 100644
23985--- a/arch/x86/mm/fault.c
23986+++ b/arch/x86/mm/fault.c
23987@@ -13,11 +13,18 @@
23988 #include <linux/perf_event.h> /* perf_sw_event */
23989 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23990 #include <linux/prefetch.h> /* prefetchw */
23991+#include <linux/unistd.h>
23992+#include <linux/compiler.h>
23993
23994 #include <asm/traps.h> /* dotraplinkage, ... */
23995 #include <asm/pgalloc.h> /* pgd_*(), ... */
23996 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23997 #include <asm/fixmap.h> /* VSYSCALL_START */
23998+#include <asm/tlbflush.h>
23999+
24000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24001+#include <asm/stacktrace.h>
24002+#endif
24003
24004 /*
24005 * Page fault error code bits:
24006@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
24007 int ret = 0;
24008
24009 /* kprobe_running() needs smp_processor_id() */
24010- if (kprobes_built_in() && !user_mode_vm(regs)) {
24011+ if (kprobes_built_in() && !user_mode(regs)) {
24012 preempt_disable();
24013 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24014 ret = 1;
24015@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24016 return !instr_lo || (instr_lo>>1) == 1;
24017 case 0x00:
24018 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24019- if (probe_kernel_address(instr, opcode))
24020+ if (user_mode(regs)) {
24021+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24022+ return 0;
24023+ } else if (probe_kernel_address(instr, opcode))
24024 return 0;
24025
24026 *prefetch = (instr_lo == 0xF) &&
24027@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24028 while (instr < max_instr) {
24029 unsigned char opcode;
24030
24031- if (probe_kernel_address(instr, opcode))
24032+ if (user_mode(regs)) {
24033+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24034+ break;
24035+ } else if (probe_kernel_address(instr, opcode))
24036 break;
24037
24038 instr++;
24039@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24040 force_sig_info(si_signo, &info, tsk);
24041 }
24042
24043+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24044+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24045+#endif
24046+
24047+#ifdef CONFIG_PAX_EMUTRAMP
24048+static int pax_handle_fetch_fault(struct pt_regs *regs);
24049+#endif
24050+
24051+#ifdef CONFIG_PAX_PAGEEXEC
24052+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24053+{
24054+ pgd_t *pgd;
24055+ pud_t *pud;
24056+ pmd_t *pmd;
24057+
24058+ pgd = pgd_offset(mm, address);
24059+ if (!pgd_present(*pgd))
24060+ return NULL;
24061+ pud = pud_offset(pgd, address);
24062+ if (!pud_present(*pud))
24063+ return NULL;
24064+ pmd = pmd_offset(pud, address);
24065+ if (!pmd_present(*pmd))
24066+ return NULL;
24067+ return pmd;
24068+}
24069+#endif
24070+
24071 DEFINE_SPINLOCK(pgd_lock);
24072 LIST_HEAD(pgd_list);
24073
24074@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
24075 for (address = VMALLOC_START & PMD_MASK;
24076 address >= TASK_SIZE && address < FIXADDR_TOP;
24077 address += PMD_SIZE) {
24078+
24079+#ifdef CONFIG_PAX_PER_CPU_PGD
24080+ unsigned long cpu;
24081+#else
24082 struct page *page;
24083+#endif
24084
24085 spin_lock(&pgd_lock);
24086+
24087+#ifdef CONFIG_PAX_PER_CPU_PGD
24088+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24089+ pgd_t *pgd = get_cpu_pgd(cpu);
24090+ pmd_t *ret;
24091+#else
24092 list_for_each_entry(page, &pgd_list, lru) {
24093+ pgd_t *pgd = page_address(page);
24094 spinlock_t *pgt_lock;
24095 pmd_t *ret;
24096
24097@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
24098 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24099
24100 spin_lock(pgt_lock);
24101- ret = vmalloc_sync_one(page_address(page), address);
24102+#endif
24103+
24104+ ret = vmalloc_sync_one(pgd, address);
24105+
24106+#ifndef CONFIG_PAX_PER_CPU_PGD
24107 spin_unlock(pgt_lock);
24108+#endif
24109
24110 if (!ret)
24111 break;
24112@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
24113 * an interrupt in the middle of a task switch..
24114 */
24115 pgd_paddr = read_cr3();
24116+
24117+#ifdef CONFIG_PAX_PER_CPU_PGD
24118+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24119+#endif
24120+
24121 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24122 if (!pmd_k)
24123 return -1;
24124@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
24125 * happen within a race in page table update. In the later
24126 * case just flush:
24127 */
24128+
24129+#ifdef CONFIG_PAX_PER_CPU_PGD
24130+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24131+ pgd = pgd_offset_cpu(smp_processor_id(), address);
24132+#else
24133 pgd = pgd_offset(current->active_mm, address);
24134+#endif
24135+
24136 pgd_ref = pgd_offset_k(address);
24137 if (pgd_none(*pgd_ref))
24138 return -1;
24139@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24140 static int is_errata100(struct pt_regs *regs, unsigned long address)
24141 {
24142 #ifdef CONFIG_X86_64
24143- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24144+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24145 return 1;
24146 #endif
24147 return 0;
24148@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24149 }
24150
24151 static const char nx_warning[] = KERN_CRIT
24152-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24153+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24154
24155 static void
24156 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24157@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24158 if (!oops_may_print())
24159 return;
24160
24161- if (error_code & PF_INSTR) {
24162+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
24163 unsigned int level;
24164
24165 pte_t *pte = lookup_address(address, &level);
24166
24167 if (pte && pte_present(*pte) && !pte_exec(*pte))
24168- printk(nx_warning, current_uid());
24169+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24170 }
24171
24172+#ifdef CONFIG_PAX_KERNEXEC
24173+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24174+ if (current->signal->curr_ip)
24175+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24176+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24177+ else
24178+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24179+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24180+ }
24181+#endif
24182+
24183 printk(KERN_ALERT "BUG: unable to handle kernel ");
24184 if (address < PAGE_SIZE)
24185 printk(KERN_CONT "NULL pointer dereference");
24186@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24187 }
24188 #endif
24189
24190+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24191+ if (pax_is_fetch_fault(regs, error_code, address)) {
24192+
24193+#ifdef CONFIG_PAX_EMUTRAMP
24194+ switch (pax_handle_fetch_fault(regs)) {
24195+ case 2:
24196+ return;
24197+ }
24198+#endif
24199+
24200+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24201+ do_group_exit(SIGKILL);
24202+ }
24203+#endif
24204+
24205 if (unlikely(show_unhandled_signals))
24206 show_signal_msg(regs, error_code, address, tsk);
24207
24208@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24209 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
24210 printk(KERN_ERR
24211 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24212- tsk->comm, tsk->pid, address);
24213+ tsk->comm, task_pid_nr(tsk), address);
24214 code = BUS_MCEERR_AR;
24215 }
24216 #endif
24217@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24218 return 1;
24219 }
24220
24221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24222+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24223+{
24224+ pte_t *pte;
24225+ pmd_t *pmd;
24226+ spinlock_t *ptl;
24227+ unsigned char pte_mask;
24228+
24229+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24230+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24231+ return 0;
24232+
24233+ /* PaX: it's our fault, let's handle it if we can */
24234+
24235+ /* PaX: take a look at read faults before acquiring any locks */
24236+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24237+ /* instruction fetch attempt from a protected page in user mode */
24238+ up_read(&mm->mmap_sem);
24239+
24240+#ifdef CONFIG_PAX_EMUTRAMP
24241+ switch (pax_handle_fetch_fault(regs)) {
24242+ case 2:
24243+ return 1;
24244+ }
24245+#endif
24246+
24247+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24248+ do_group_exit(SIGKILL);
24249+ }
24250+
24251+ pmd = pax_get_pmd(mm, address);
24252+ if (unlikely(!pmd))
24253+ return 0;
24254+
24255+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24256+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24257+ pte_unmap_unlock(pte, ptl);
24258+ return 0;
24259+ }
24260+
24261+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24262+ /* write attempt to a protected page in user mode */
24263+ pte_unmap_unlock(pte, ptl);
24264+ return 0;
24265+ }
24266+
24267+#ifdef CONFIG_SMP
24268+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24269+#else
24270+ if (likely(address > get_limit(regs->cs)))
24271+#endif
24272+ {
24273+ set_pte(pte, pte_mkread(*pte));
24274+ __flush_tlb_one(address);
24275+ pte_unmap_unlock(pte, ptl);
24276+ up_read(&mm->mmap_sem);
24277+ return 1;
24278+ }
24279+
24280+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24281+
24282+ /*
24283+ * PaX: fill DTLB with user rights and retry
24284+ */
24285+ __asm__ __volatile__ (
24286+ "orb %2,(%1)\n"
24287+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24288+/*
24289+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24290+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24291+ * page fault when examined during a TLB load attempt. this is true not only
24292+ * for PTEs holding a non-present entry but also present entries that will
24293+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24294+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24295+ * for our target pages since their PTEs are simply not in the TLBs at all.
24296+
24297+ * the best thing in omitting it is that we gain around 15-20% speed in the
24298+ * fast path of the page fault handler and can get rid of tracing since we
24299+ * can no longer flush unintended entries.
24300+ */
24301+ "invlpg (%0)\n"
24302+#endif
24303+ __copyuser_seg"testb $0,(%0)\n"
24304+ "xorb %3,(%1)\n"
24305+ :
24306+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24307+ : "memory", "cc");
24308+ pte_unmap_unlock(pte, ptl);
24309+ up_read(&mm->mmap_sem);
24310+ return 1;
24311+}
24312+#endif
24313+
24314 /*
24315 * Handle a spurious fault caused by a stale TLB entry.
24316 *
24317@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
24318 static inline int
24319 access_error(unsigned long error_code, struct vm_area_struct *vma)
24320 {
24321+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24322+ return 1;
24323+
24324 if (error_code & PF_WRITE) {
24325 /* write, present and write, not present: */
24326 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24327@@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24328 {
24329 struct vm_area_struct *vma;
24330 struct task_struct *tsk;
24331- unsigned long address;
24332 struct mm_struct *mm;
24333 int fault;
24334 int write = error_code & PF_WRITE;
24335 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
24336 (write ? FAULT_FLAG_WRITE : 0);
24337
24338- tsk = current;
24339- mm = tsk->mm;
24340-
24341 /* Get the faulting address: */
24342- address = read_cr2();
24343+ unsigned long address = read_cr2();
24344+
24345+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24346+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24347+ if (!search_exception_tables(regs->ip)) {
24348+ bad_area_nosemaphore(regs, error_code, address);
24349+ return;
24350+ }
24351+ if (address < PAX_USER_SHADOW_BASE) {
24352+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24353+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
24354+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24355+ } else
24356+ address -= PAX_USER_SHADOW_BASE;
24357+ }
24358+#endif
24359+
24360+ tsk = current;
24361+ mm = tsk->mm;
24362
24363 /*
24364 * Detect and handle instructions that would cause a page fault for
24365@@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24366 * User-mode registers count as a user access even for any
24367 * potential system fault or CPU buglet:
24368 */
24369- if (user_mode_vm(regs)) {
24370+ if (user_mode(regs)) {
24371 local_irq_enable();
24372 error_code |= PF_USER;
24373 } else {
24374@@ -1132,6 +1338,11 @@ retry:
24375 might_sleep();
24376 }
24377
24378+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24379+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24380+ return;
24381+#endif
24382+
24383 vma = find_vma(mm, address);
24384 if (unlikely(!vma)) {
24385 bad_area(regs, error_code, address);
24386@@ -1143,18 +1354,24 @@ retry:
24387 bad_area(regs, error_code, address);
24388 return;
24389 }
24390- if (error_code & PF_USER) {
24391- /*
24392- * Accessing the stack below %sp is always a bug.
24393- * The large cushion allows instructions like enter
24394- * and pusha to work. ("enter $65535, $31" pushes
24395- * 32 pointers and then decrements %sp by 65535.)
24396- */
24397- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24398- bad_area(regs, error_code, address);
24399- return;
24400- }
24401+ /*
24402+ * Accessing the stack below %sp is always a bug.
24403+ * The large cushion allows instructions like enter
24404+ * and pusha to work. ("enter $65535, $31" pushes
24405+ * 32 pointers and then decrements %sp by 65535.)
24406+ */
24407+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24408+ bad_area(regs, error_code, address);
24409+ return;
24410 }
24411+
24412+#ifdef CONFIG_PAX_SEGMEXEC
24413+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24414+ bad_area(regs, error_code, address);
24415+ return;
24416+ }
24417+#endif
24418+
24419 if (unlikely(expand_stack(vma, address))) {
24420 bad_area(regs, error_code, address);
24421 return;
24422@@ -1209,3 +1426,292 @@ good_area:
24423
24424 up_read(&mm->mmap_sem);
24425 }
24426+
24427+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24428+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24429+{
24430+ struct mm_struct *mm = current->mm;
24431+ unsigned long ip = regs->ip;
24432+
24433+ if (v8086_mode(regs))
24434+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24435+
24436+#ifdef CONFIG_PAX_PAGEEXEC
24437+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24438+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24439+ return true;
24440+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24441+ return true;
24442+ return false;
24443+ }
24444+#endif
24445+
24446+#ifdef CONFIG_PAX_SEGMEXEC
24447+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24448+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24449+ return true;
24450+ return false;
24451+ }
24452+#endif
24453+
24454+ return false;
24455+}
24456+#endif
24457+
24458+#ifdef CONFIG_PAX_EMUTRAMP
24459+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24460+{
24461+ int err;
24462+
24463+ do { /* PaX: libffi trampoline emulation */
24464+ unsigned char mov, jmp;
24465+ unsigned int addr1, addr2;
24466+
24467+#ifdef CONFIG_X86_64
24468+ if ((regs->ip + 9) >> 32)
24469+ break;
24470+#endif
24471+
24472+ err = get_user(mov, (unsigned char __user *)regs->ip);
24473+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24474+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24475+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24476+
24477+ if (err)
24478+ break;
24479+
24480+ if (mov == 0xB8 && jmp == 0xE9) {
24481+ regs->ax = addr1;
24482+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24483+ return 2;
24484+ }
24485+ } while (0);
24486+
24487+ do { /* PaX: gcc trampoline emulation #1 */
24488+ unsigned char mov1, mov2;
24489+ unsigned short jmp;
24490+ unsigned int addr1, addr2;
24491+
24492+#ifdef CONFIG_X86_64
24493+ if ((regs->ip + 11) >> 32)
24494+ break;
24495+#endif
24496+
24497+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24498+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24499+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24500+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24501+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24502+
24503+ if (err)
24504+ break;
24505+
24506+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24507+ regs->cx = addr1;
24508+ regs->ax = addr2;
24509+ regs->ip = addr2;
24510+ return 2;
24511+ }
24512+ } while (0);
24513+
24514+ do { /* PaX: gcc trampoline emulation #2 */
24515+ unsigned char mov, jmp;
24516+ unsigned int addr1, addr2;
24517+
24518+#ifdef CONFIG_X86_64
24519+ if ((regs->ip + 9) >> 32)
24520+ break;
24521+#endif
24522+
24523+ err = get_user(mov, (unsigned char __user *)regs->ip);
24524+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24525+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24526+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24527+
24528+ if (err)
24529+ break;
24530+
24531+ if (mov == 0xB9 && jmp == 0xE9) {
24532+ regs->cx = addr1;
24533+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24534+ return 2;
24535+ }
24536+ } while (0);
24537+
24538+ return 1; /* PaX in action */
24539+}
24540+
24541+#ifdef CONFIG_X86_64
24542+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24543+{
24544+ int err;
24545+
24546+ do { /* PaX: libffi trampoline emulation */
24547+ unsigned short mov1, mov2, jmp1;
24548+ unsigned char stcclc, jmp2;
24549+ unsigned long addr1, addr2;
24550+
24551+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24552+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24553+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24554+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24555+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24556+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24557+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24558+
24559+ if (err)
24560+ break;
24561+
24562+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24563+ regs->r11 = addr1;
24564+ regs->r10 = addr2;
24565+ if (stcclc == 0xF8)
24566+ regs->flags &= ~X86_EFLAGS_CF;
24567+ else
24568+ regs->flags |= X86_EFLAGS_CF;
24569+ regs->ip = addr1;
24570+ return 2;
24571+ }
24572+ } while (0);
24573+
24574+ do { /* PaX: gcc trampoline emulation #1 */
24575+ unsigned short mov1, mov2, jmp1;
24576+ unsigned char jmp2;
24577+ unsigned int addr1;
24578+ unsigned long addr2;
24579+
24580+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24581+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24582+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24583+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24584+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24585+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24586+
24587+ if (err)
24588+ break;
24589+
24590+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24591+ regs->r11 = addr1;
24592+ regs->r10 = addr2;
24593+ regs->ip = addr1;
24594+ return 2;
24595+ }
24596+ } while (0);
24597+
24598+ do { /* PaX: gcc trampoline emulation #2 */
24599+ unsigned short mov1, mov2, jmp1;
24600+ unsigned char jmp2;
24601+ unsigned long addr1, addr2;
24602+
24603+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24604+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24605+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24606+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24607+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24608+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24609+
24610+ if (err)
24611+ break;
24612+
24613+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24614+ regs->r11 = addr1;
24615+ regs->r10 = addr2;
24616+ regs->ip = addr1;
24617+ return 2;
24618+ }
24619+ } while (0);
24620+
24621+ return 1; /* PaX in action */
24622+}
24623+#endif
24624+
24625+/*
24626+ * PaX: decide what to do with offenders (regs->ip = fault address)
24627+ *
24628+ * returns 1 when task should be killed
24629+ * 2 when gcc trampoline was detected
24630+ */
24631+static int pax_handle_fetch_fault(struct pt_regs *regs)
24632+{
24633+ if (v8086_mode(regs))
24634+ return 1;
24635+
24636+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24637+ return 1;
24638+
24639+#ifdef CONFIG_X86_32
24640+ return pax_handle_fetch_fault_32(regs);
24641+#else
24642+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24643+ return pax_handle_fetch_fault_32(regs);
24644+ else
24645+ return pax_handle_fetch_fault_64(regs);
24646+#endif
24647+}
24648+#endif
24649+
24650+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24651+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24652+{
24653+ long i;
24654+
24655+ printk(KERN_ERR "PAX: bytes at PC: ");
24656+ for (i = 0; i < 20; i++) {
24657+ unsigned char c;
24658+ if (get_user(c, (unsigned char __force_user *)pc+i))
24659+ printk(KERN_CONT "?? ");
24660+ else
24661+ printk(KERN_CONT "%02x ", c);
24662+ }
24663+ printk("\n");
24664+
24665+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24666+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24667+ unsigned long c;
24668+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24669+#ifdef CONFIG_X86_32
24670+ printk(KERN_CONT "???????? ");
24671+#else
24672+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24673+ printk(KERN_CONT "???????? ???????? ");
24674+ else
24675+ printk(KERN_CONT "???????????????? ");
24676+#endif
24677+ } else {
24678+#ifdef CONFIG_X86_64
24679+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24680+ printk(KERN_CONT "%08x ", (unsigned int)c);
24681+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24682+ } else
24683+#endif
24684+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24685+ }
24686+ }
24687+ printk("\n");
24688+}
24689+#endif
24690+
24691+/**
24692+ * probe_kernel_write(): safely attempt to write to a location
24693+ * @dst: address to write to
24694+ * @src: pointer to the data that shall be written
24695+ * @size: size of the data chunk
24696+ *
24697+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24698+ * happens, handle that and return -EFAULT.
24699+ */
24700+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24701+{
24702+ long ret;
24703+ mm_segment_t old_fs = get_fs();
24704+
24705+ set_fs(KERNEL_DS);
24706+ pagefault_disable();
24707+ pax_open_kernel();
24708+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24709+ pax_close_kernel();
24710+ pagefault_enable();
24711+ set_fs(old_fs);
24712+
24713+ return ret ? -EFAULT : 0;
24714+}
24715diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24716index dd74e46..7d26398 100644
24717--- a/arch/x86/mm/gup.c
24718+++ b/arch/x86/mm/gup.c
24719@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24720 addr = start;
24721 len = (unsigned long) nr_pages << PAGE_SHIFT;
24722 end = start + len;
24723- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24724+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24725 (void __user *)start, len)))
24726 return 0;
24727
24728diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24729index f4f29b1..5cac4fb 100644
24730--- a/arch/x86/mm/highmem_32.c
24731+++ b/arch/x86/mm/highmem_32.c
24732@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24733 idx = type + KM_TYPE_NR*smp_processor_id();
24734 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24735 BUG_ON(!pte_none(*(kmap_pte-idx)));
24736+
24737+ pax_open_kernel();
24738 set_pte(kmap_pte-idx, mk_pte(page, prot));
24739+ pax_close_kernel();
24740+
24741 arch_flush_lazy_mmu_mode();
24742
24743 return (void *)vaddr;
24744diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24745index 8ecbb4b..a269cab 100644
24746--- a/arch/x86/mm/hugetlbpage.c
24747+++ b/arch/x86/mm/hugetlbpage.c
24748@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24749 struct hstate *h = hstate_file(file);
24750 struct mm_struct *mm = current->mm;
24751 struct vm_area_struct *vma;
24752- unsigned long start_addr;
24753+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24754+
24755+#ifdef CONFIG_PAX_SEGMEXEC
24756+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24757+ pax_task_size = SEGMEXEC_TASK_SIZE;
24758+#endif
24759+
24760+ pax_task_size -= PAGE_SIZE;
24761
24762 if (len > mm->cached_hole_size) {
24763- start_addr = mm->free_area_cache;
24764+ start_addr = mm->free_area_cache;
24765 } else {
24766- start_addr = TASK_UNMAPPED_BASE;
24767- mm->cached_hole_size = 0;
24768+ start_addr = mm->mmap_base;
24769+ mm->cached_hole_size = 0;
24770 }
24771
24772 full_search:
24773@@ -280,26 +287,27 @@ full_search:
24774
24775 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24776 /* At this point: (!vma || addr < vma->vm_end). */
24777- if (TASK_SIZE - len < addr) {
24778+ if (pax_task_size - len < addr) {
24779 /*
24780 * Start a new search - just in case we missed
24781 * some holes.
24782 */
24783- if (start_addr != TASK_UNMAPPED_BASE) {
24784- start_addr = TASK_UNMAPPED_BASE;
24785+ if (start_addr != mm->mmap_base) {
24786+ start_addr = mm->mmap_base;
24787 mm->cached_hole_size = 0;
24788 goto full_search;
24789 }
24790 return -ENOMEM;
24791 }
24792- if (!vma || addr + len <= vma->vm_start) {
24793- mm->free_area_cache = addr + len;
24794- return addr;
24795- }
24796+ if (check_heap_stack_gap(vma, addr, len))
24797+ break;
24798 if (addr + mm->cached_hole_size < vma->vm_start)
24799 mm->cached_hole_size = vma->vm_start - addr;
24800 addr = ALIGN(vma->vm_end, huge_page_size(h));
24801 }
24802+
24803+ mm->free_area_cache = addr + len;
24804+ return addr;
24805 }
24806
24807 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24808@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24809 {
24810 struct hstate *h = hstate_file(file);
24811 struct mm_struct *mm = current->mm;
24812- struct vm_area_struct *vma, *prev_vma;
24813- unsigned long base = mm->mmap_base, addr = addr0;
24814+ struct vm_area_struct *vma;
24815+ unsigned long base = mm->mmap_base, addr;
24816 unsigned long largest_hole = mm->cached_hole_size;
24817- int first_time = 1;
24818
24819 /* don't allow allocations above current base */
24820 if (mm->free_area_cache > base)
24821@@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24822 largest_hole = 0;
24823 mm->free_area_cache = base;
24824 }
24825-try_again:
24826+
24827 /* make sure it can fit in the remaining address space */
24828 if (mm->free_area_cache < len)
24829 goto fail;
24830
24831 /* either no address requested or can't fit in requested address hole */
24832- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24833+ addr = (mm->free_area_cache - len);
24834 do {
24835+ addr &= huge_page_mask(h);
24836 /*
24837 * Lookup failure means no vma is above this address,
24838 * i.e. return with success:
24839@@ -341,46 +349,47 @@ try_again:
24840 * new region fits between prev_vma->vm_end and
24841 * vma->vm_start, use it:
24842 */
24843- prev_vma = vma->vm_prev;
24844- if (addr + len <= vma->vm_start &&
24845- (!prev_vma || (addr >= prev_vma->vm_end))) {
24846+ if (check_heap_stack_gap(vma, addr, len)) {
24847 /* remember the address as a hint for next time */
24848- mm->cached_hole_size = largest_hole;
24849- return (mm->free_area_cache = addr);
24850- } else {
24851- /* pull free_area_cache down to the first hole */
24852- if (mm->free_area_cache == vma->vm_end) {
24853- mm->free_area_cache = vma->vm_start;
24854- mm->cached_hole_size = largest_hole;
24855- }
24856+ mm->cached_hole_size = largest_hole;
24857+ return (mm->free_area_cache = addr);
24858+ }
24859+ /* pull free_area_cache down to the first hole */
24860+ if (mm->free_area_cache == vma->vm_end) {
24861+ mm->free_area_cache = vma->vm_start;
24862+ mm->cached_hole_size = largest_hole;
24863 }
24864
24865 /* remember the largest hole we saw so far */
24866 if (addr + largest_hole < vma->vm_start)
24867- largest_hole = vma->vm_start - addr;
24868+ largest_hole = vma->vm_start - addr;
24869
24870 /* try just below the current vma->vm_start */
24871- addr = (vma->vm_start - len) & huge_page_mask(h);
24872- } while (len <= vma->vm_start);
24873+ addr = skip_heap_stack_gap(vma, len);
24874+ } while (!IS_ERR_VALUE(addr));
24875
24876 fail:
24877 /*
24878- * if hint left us with no space for the requested
24879- * mapping then try again:
24880- */
24881- if (first_time) {
24882- mm->free_area_cache = base;
24883- largest_hole = 0;
24884- first_time = 0;
24885- goto try_again;
24886- }
24887- /*
24888 * A failed mmap() very likely causes application failure,
24889 * so fall back to the bottom-up function here. This scenario
24890 * can happen with large stack limits and large mmap()
24891 * allocations.
24892 */
24893- mm->free_area_cache = TASK_UNMAPPED_BASE;
24894+
24895+#ifdef CONFIG_PAX_SEGMEXEC
24896+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24897+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24898+ else
24899+#endif
24900+
24901+ mm->mmap_base = TASK_UNMAPPED_BASE;
24902+
24903+#ifdef CONFIG_PAX_RANDMMAP
24904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24905+ mm->mmap_base += mm->delta_mmap;
24906+#endif
24907+
24908+ mm->free_area_cache = mm->mmap_base;
24909 mm->cached_hole_size = ~0UL;
24910 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24911 len, pgoff, flags);
24912@@ -388,6 +397,7 @@ fail:
24913 /*
24914 * Restore the topdown base:
24915 */
24916+ mm->mmap_base = base;
24917 mm->free_area_cache = base;
24918 mm->cached_hole_size = ~0UL;
24919
24920@@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24921 struct hstate *h = hstate_file(file);
24922 struct mm_struct *mm = current->mm;
24923 struct vm_area_struct *vma;
24924+ unsigned long pax_task_size = TASK_SIZE;
24925
24926 if (len & ~huge_page_mask(h))
24927 return -EINVAL;
24928- if (len > TASK_SIZE)
24929+
24930+#ifdef CONFIG_PAX_SEGMEXEC
24931+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24932+ pax_task_size = SEGMEXEC_TASK_SIZE;
24933+#endif
24934+
24935+ pax_task_size -= PAGE_SIZE;
24936+
24937+ if (len > pax_task_size)
24938 return -ENOMEM;
24939
24940 if (flags & MAP_FIXED) {
24941@@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24942 if (addr) {
24943 addr = ALIGN(addr, huge_page_size(h));
24944 vma = find_vma(mm, addr);
24945- if (TASK_SIZE - len >= addr &&
24946- (!vma || addr + len <= vma->vm_start))
24947+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24948 return addr;
24949 }
24950 if (mm->get_unmapped_area == arch_get_unmapped_area)
24951diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24952index 6cabf65..77e9c1c 100644
24953--- a/arch/x86/mm/init.c
24954+++ b/arch/x86/mm/init.c
24955@@ -17,6 +17,7 @@
24956 #include <asm/tlb.h>
24957 #include <asm/proto.h>
24958 #include <asm/dma.h> /* for MAX_DMA_PFN */
24959+#include <asm/desc.h>
24960
24961 unsigned long __initdata pgt_buf_start;
24962 unsigned long __meminitdata pgt_buf_end;
24963@@ -33,7 +34,7 @@ int direct_gbpages
24964 static void __init find_early_table_space(unsigned long end, int use_pse,
24965 int use_gbpages)
24966 {
24967- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24968+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24969 phys_addr_t base;
24970
24971 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24972@@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24973 */
24974 int devmem_is_allowed(unsigned long pagenr)
24975 {
24976+#ifdef CONFIG_GRKERNSEC_KMEM
24977+ /* allow BDA */
24978+ if (!pagenr)
24979+ return 1;
24980+ /* allow EBDA */
24981+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24982+ return 1;
24983+#else
24984+ if (!pagenr)
24985+ return 1;
24986+#ifdef CONFIG_VM86
24987+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24988+ return 1;
24989+#endif
24990+#endif
24991+
24992+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24993+ return 1;
24994+#ifdef CONFIG_GRKERNSEC_KMEM
24995+ /* throw out everything else below 1MB */
24996 if (pagenr <= 256)
24997- return 1;
24998+ return 0;
24999+#endif
25000 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25001 return 0;
25002 if (!page_is_ram(pagenr))
25003@@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25004
25005 void free_initmem(void)
25006 {
25007+
25008+#ifdef CONFIG_PAX_KERNEXEC
25009+#ifdef CONFIG_X86_32
25010+ /* PaX: limit KERNEL_CS to actual size */
25011+ unsigned long addr, limit;
25012+ struct desc_struct d;
25013+ int cpu;
25014+
25015+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25016+ limit = (limit - 1UL) >> PAGE_SHIFT;
25017+
25018+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25019+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25020+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25021+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25022+ }
25023+
25024+ /* PaX: make KERNEL_CS read-only */
25025+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25026+ if (!paravirt_enabled())
25027+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25028+/*
25029+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25030+ pgd = pgd_offset_k(addr);
25031+ pud = pud_offset(pgd, addr);
25032+ pmd = pmd_offset(pud, addr);
25033+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25034+ }
25035+*/
25036+#ifdef CONFIG_X86_PAE
25037+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25038+/*
25039+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25040+ pgd = pgd_offset_k(addr);
25041+ pud = pud_offset(pgd, addr);
25042+ pmd = pmd_offset(pud, addr);
25043+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25044+ }
25045+*/
25046+#endif
25047+
25048+#ifdef CONFIG_MODULES
25049+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25050+#endif
25051+
25052+#else
25053+ pgd_t *pgd;
25054+ pud_t *pud;
25055+ pmd_t *pmd;
25056+ unsigned long addr, end;
25057+
25058+ /* PaX: make kernel code/rodata read-only, rest non-executable */
25059+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25060+ pgd = pgd_offset_k(addr);
25061+ pud = pud_offset(pgd, addr);
25062+ pmd = pmd_offset(pud, addr);
25063+ if (!pmd_present(*pmd))
25064+ continue;
25065+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25066+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25067+ else
25068+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25069+ }
25070+
25071+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25072+ end = addr + KERNEL_IMAGE_SIZE;
25073+ for (; addr < end; addr += PMD_SIZE) {
25074+ pgd = pgd_offset_k(addr);
25075+ pud = pud_offset(pgd, addr);
25076+ pmd = pmd_offset(pud, addr);
25077+ if (!pmd_present(*pmd))
25078+ continue;
25079+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25080+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25081+ }
25082+#endif
25083+
25084+ flush_tlb_all();
25085+#endif
25086+
25087 free_init_pages("unused kernel memory",
25088 (unsigned long)(&__init_begin),
25089 (unsigned long)(&__init_end));
25090diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25091index 8663f6c..829ae76 100644
25092--- a/arch/x86/mm/init_32.c
25093+++ b/arch/x86/mm/init_32.c
25094@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
25095 }
25096
25097 /*
25098- * Creates a middle page table and puts a pointer to it in the
25099- * given global directory entry. This only returns the gd entry
25100- * in non-PAE compilation mode, since the middle layer is folded.
25101- */
25102-static pmd_t * __init one_md_table_init(pgd_t *pgd)
25103-{
25104- pud_t *pud;
25105- pmd_t *pmd_table;
25106-
25107-#ifdef CONFIG_X86_PAE
25108- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25109- if (after_bootmem)
25110- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25111- else
25112- pmd_table = (pmd_t *)alloc_low_page();
25113- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25114- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25115- pud = pud_offset(pgd, 0);
25116- BUG_ON(pmd_table != pmd_offset(pud, 0));
25117-
25118- return pmd_table;
25119- }
25120-#endif
25121- pud = pud_offset(pgd, 0);
25122- pmd_table = pmd_offset(pud, 0);
25123-
25124- return pmd_table;
25125-}
25126-
25127-/*
25128 * Create a page table and place a pointer to it in a middle page
25129 * directory entry:
25130 */
25131@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25132 page_table = (pte_t *)alloc_low_page();
25133
25134 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25135+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25136+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25137+#else
25138 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25139+#endif
25140 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25141 }
25142
25143 return pte_offset_kernel(pmd, 0);
25144 }
25145
25146+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25147+{
25148+ pud_t *pud;
25149+ pmd_t *pmd_table;
25150+
25151+ pud = pud_offset(pgd, 0);
25152+ pmd_table = pmd_offset(pud, 0);
25153+
25154+ return pmd_table;
25155+}
25156+
25157 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25158 {
25159 int pgd_idx = pgd_index(vaddr);
25160@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25161 int pgd_idx, pmd_idx;
25162 unsigned long vaddr;
25163 pgd_t *pgd;
25164+ pud_t *pud;
25165 pmd_t *pmd;
25166 pte_t *pte = NULL;
25167
25168@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25169 pgd = pgd_base + pgd_idx;
25170
25171 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25172- pmd = one_md_table_init(pgd);
25173- pmd = pmd + pmd_index(vaddr);
25174+ pud = pud_offset(pgd, vaddr);
25175+ pmd = pmd_offset(pud, vaddr);
25176+
25177+#ifdef CONFIG_X86_PAE
25178+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25179+#endif
25180+
25181 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25182 pmd++, pmd_idx++) {
25183 pte = page_table_kmap_check(one_page_table_init(pmd),
25184@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25185 }
25186 }
25187
25188-static inline int is_kernel_text(unsigned long addr)
25189+static inline int is_kernel_text(unsigned long start, unsigned long end)
25190 {
25191- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
25192- return 1;
25193- return 0;
25194+ if ((start > ktla_ktva((unsigned long)_etext) ||
25195+ end <= ktla_ktva((unsigned long)_stext)) &&
25196+ (start > ktla_ktva((unsigned long)_einittext) ||
25197+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25198+
25199+#ifdef CONFIG_ACPI_SLEEP
25200+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25201+#endif
25202+
25203+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25204+ return 0;
25205+ return 1;
25206 }
25207
25208 /*
25209@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
25210 unsigned long last_map_addr = end;
25211 unsigned long start_pfn, end_pfn;
25212 pgd_t *pgd_base = swapper_pg_dir;
25213- int pgd_idx, pmd_idx, pte_ofs;
25214+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25215 unsigned long pfn;
25216 pgd_t *pgd;
25217+ pud_t *pud;
25218 pmd_t *pmd;
25219 pte_t *pte;
25220 unsigned pages_2m, pages_4k;
25221@@ -281,8 +282,13 @@ repeat:
25222 pfn = start_pfn;
25223 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25224 pgd = pgd_base + pgd_idx;
25225- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25226- pmd = one_md_table_init(pgd);
25227+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25228+ pud = pud_offset(pgd, 0);
25229+ pmd = pmd_offset(pud, 0);
25230+
25231+#ifdef CONFIG_X86_PAE
25232+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25233+#endif
25234
25235 if (pfn >= end_pfn)
25236 continue;
25237@@ -294,14 +300,13 @@ repeat:
25238 #endif
25239 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25240 pmd++, pmd_idx++) {
25241- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25242+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25243
25244 /*
25245 * Map with big pages if possible, otherwise
25246 * create normal page tables:
25247 */
25248 if (use_pse) {
25249- unsigned int addr2;
25250 pgprot_t prot = PAGE_KERNEL_LARGE;
25251 /*
25252 * first pass will use the same initial
25253@@ -311,11 +316,7 @@ repeat:
25254 __pgprot(PTE_IDENT_ATTR |
25255 _PAGE_PSE);
25256
25257- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25258- PAGE_OFFSET + PAGE_SIZE-1;
25259-
25260- if (is_kernel_text(addr) ||
25261- is_kernel_text(addr2))
25262+ if (is_kernel_text(address, address + PMD_SIZE))
25263 prot = PAGE_KERNEL_LARGE_EXEC;
25264
25265 pages_2m++;
25266@@ -332,7 +333,7 @@ repeat:
25267 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25268 pte += pte_ofs;
25269 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25270- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25271+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25272 pgprot_t prot = PAGE_KERNEL;
25273 /*
25274 * first pass will use the same initial
25275@@ -340,7 +341,7 @@ repeat:
25276 */
25277 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25278
25279- if (is_kernel_text(addr))
25280+ if (is_kernel_text(address, address + PAGE_SIZE))
25281 prot = PAGE_KERNEL_EXEC;
25282
25283 pages_4k++;
25284@@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25285
25286 pud = pud_offset(pgd, va);
25287 pmd = pmd_offset(pud, va);
25288- if (!pmd_present(*pmd))
25289+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25290 break;
25291
25292 pte = pte_offset_kernel(pmd, va);
25293@@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
25294
25295 static void __init pagetable_init(void)
25296 {
25297- pgd_t *pgd_base = swapper_pg_dir;
25298-
25299- permanent_kmaps_init(pgd_base);
25300+ permanent_kmaps_init(swapper_pg_dir);
25301 }
25302
25303-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25304+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25305 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25306
25307 /* user-defined highmem size */
25308@@ -735,6 +734,12 @@ void __init mem_init(void)
25309
25310 pci_iommu_alloc();
25311
25312+#ifdef CONFIG_PAX_PER_CPU_PGD
25313+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25314+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25315+ KERNEL_PGD_PTRS);
25316+#endif
25317+
25318 #ifdef CONFIG_FLATMEM
25319 BUG_ON(!mem_map);
25320 #endif
25321@@ -761,7 +766,7 @@ void __init mem_init(void)
25322 reservedpages++;
25323
25324 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25325- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25326+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25327 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25328
25329 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25330@@ -802,10 +807,10 @@ void __init mem_init(void)
25331 ((unsigned long)&__init_end -
25332 (unsigned long)&__init_begin) >> 10,
25333
25334- (unsigned long)&_etext, (unsigned long)&_edata,
25335- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25336+ (unsigned long)&_sdata, (unsigned long)&_edata,
25337+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25338
25339- (unsigned long)&_text, (unsigned long)&_etext,
25340+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25341 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25342
25343 /*
25344@@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
25345 if (!kernel_set_to_readonly)
25346 return;
25347
25348+ start = ktla_ktva(start);
25349 pr_debug("Set kernel text: %lx - %lx for read write\n",
25350 start, start+size);
25351
25352@@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
25353 if (!kernel_set_to_readonly)
25354 return;
25355
25356+ start = ktla_ktva(start);
25357 pr_debug("Set kernel text: %lx - %lx for read only\n",
25358 start, start+size);
25359
25360@@ -925,6 +932,7 @@ void mark_rodata_ro(void)
25361 unsigned long start = PFN_ALIGN(_text);
25362 unsigned long size = PFN_ALIGN(_etext) - start;
25363
25364+ start = ktla_ktva(start);
25365 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25366 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25367 size >> 10);
25368diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25369index 436a030..4f97ffc 100644
25370--- a/arch/x86/mm/init_64.c
25371+++ b/arch/x86/mm/init_64.c
25372@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25373 * around without checking the pgd every time.
25374 */
25375
25376-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25377+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25378 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25379
25380 int force_personality32;
25381@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25382
25383 for (address = start; address <= end; address += PGDIR_SIZE) {
25384 const pgd_t *pgd_ref = pgd_offset_k(address);
25385+
25386+#ifdef CONFIG_PAX_PER_CPU_PGD
25387+ unsigned long cpu;
25388+#else
25389 struct page *page;
25390+#endif
25391
25392 if (pgd_none(*pgd_ref))
25393 continue;
25394
25395 spin_lock(&pgd_lock);
25396+
25397+#ifdef CONFIG_PAX_PER_CPU_PGD
25398+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25399+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25400+#else
25401 list_for_each_entry(page, &pgd_list, lru) {
25402 pgd_t *pgd;
25403 spinlock_t *pgt_lock;
25404@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25405 /* the pgt_lock only for Xen */
25406 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25407 spin_lock(pgt_lock);
25408+#endif
25409
25410 if (pgd_none(*pgd))
25411 set_pgd(pgd, *pgd_ref);
25412@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25413 BUG_ON(pgd_page_vaddr(*pgd)
25414 != pgd_page_vaddr(*pgd_ref));
25415
25416+#ifndef CONFIG_PAX_PER_CPU_PGD
25417 spin_unlock(pgt_lock);
25418+#endif
25419+
25420 }
25421 spin_unlock(&pgd_lock);
25422 }
25423@@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25424 {
25425 if (pgd_none(*pgd)) {
25426 pud_t *pud = (pud_t *)spp_getpage();
25427- pgd_populate(&init_mm, pgd, pud);
25428+ pgd_populate_kernel(&init_mm, pgd, pud);
25429 if (pud != pud_offset(pgd, 0))
25430 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25431 pud, pud_offset(pgd, 0));
25432@@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25433 {
25434 if (pud_none(*pud)) {
25435 pmd_t *pmd = (pmd_t *) spp_getpage();
25436- pud_populate(&init_mm, pud, pmd);
25437+ pud_populate_kernel(&init_mm, pud, pmd);
25438 if (pmd != pmd_offset(pud, 0))
25439 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25440 pmd, pmd_offset(pud, 0));
25441@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25442 pmd = fill_pmd(pud, vaddr);
25443 pte = fill_pte(pmd, vaddr);
25444
25445+ pax_open_kernel();
25446 set_pte(pte, new_pte);
25447+ pax_close_kernel();
25448
25449 /*
25450 * It's enough to flush this one mapping.
25451@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25452 pgd = pgd_offset_k((unsigned long)__va(phys));
25453 if (pgd_none(*pgd)) {
25454 pud = (pud_t *) spp_getpage();
25455- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25456- _PAGE_USER));
25457+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25458 }
25459 pud = pud_offset(pgd, (unsigned long)__va(phys));
25460 if (pud_none(*pud)) {
25461 pmd = (pmd_t *) spp_getpage();
25462- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25463- _PAGE_USER));
25464+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25465 }
25466 pmd = pmd_offset(pud, phys);
25467 BUG_ON(!pmd_none(*pmd));
25468@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25469 if (pfn >= pgt_buf_top)
25470 panic("alloc_low_page: ran out of memory");
25471
25472- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25473+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25474 clear_page(adr);
25475 *phys = pfn * PAGE_SIZE;
25476 return adr;
25477@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
25478
25479 phys = __pa(virt);
25480 left = phys & (PAGE_SIZE - 1);
25481- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25482+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25483 adr = (void *)(((unsigned long)adr) | left);
25484
25485 return adr;
25486@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25487 unmap_low_page(pmd);
25488
25489 spin_lock(&init_mm.page_table_lock);
25490- pud_populate(&init_mm, pud, __va(pmd_phys));
25491+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25492 spin_unlock(&init_mm.page_table_lock);
25493 }
25494 __flush_tlb_all();
25495@@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
25496 unmap_low_page(pud);
25497
25498 spin_lock(&init_mm.page_table_lock);
25499- pgd_populate(&init_mm, pgd, __va(pud_phys));
25500+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25501 spin_unlock(&init_mm.page_table_lock);
25502 pgd_changed = true;
25503 }
25504@@ -684,6 +698,12 @@ void __init mem_init(void)
25505
25506 pci_iommu_alloc();
25507
25508+#ifdef CONFIG_PAX_PER_CPU_PGD
25509+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25510+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25511+ KERNEL_PGD_PTRS);
25512+#endif
25513+
25514 /* clear_bss() already clear the empty_zero_page */
25515
25516 reservedpages = 0;
25517@@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
25518 static struct vm_area_struct gate_vma = {
25519 .vm_start = VSYSCALL_START,
25520 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25521- .vm_page_prot = PAGE_READONLY_EXEC,
25522- .vm_flags = VM_READ | VM_EXEC
25523+ .vm_page_prot = PAGE_READONLY,
25524+ .vm_flags = VM_READ
25525 };
25526
25527 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25528@@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
25529
25530 const char *arch_vma_name(struct vm_area_struct *vma)
25531 {
25532- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25533+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25534 return "[vdso]";
25535 if (vma == &gate_vma)
25536 return "[vsyscall]";
25537diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25538index 7b179b4..6bd1777 100644
25539--- a/arch/x86/mm/iomap_32.c
25540+++ b/arch/x86/mm/iomap_32.c
25541@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25542 type = kmap_atomic_idx_push();
25543 idx = type + KM_TYPE_NR * smp_processor_id();
25544 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25545+
25546+ pax_open_kernel();
25547 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25548+ pax_close_kernel();
25549+
25550 arch_flush_lazy_mmu_mode();
25551
25552 return (void *)vaddr;
25553diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25554index be1ef57..55f0160 100644
25555--- a/arch/x86/mm/ioremap.c
25556+++ b/arch/x86/mm/ioremap.c
25557@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25558 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25559 int is_ram = page_is_ram(pfn);
25560
25561- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25562+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25563 return NULL;
25564 WARN_ON_ONCE(is_ram);
25565 }
25566@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25567
25568 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25569 if (page_is_ram(start >> PAGE_SHIFT))
25570+#ifdef CONFIG_HIGHMEM
25571+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25572+#endif
25573 return __va(phys);
25574
25575 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25576@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25577 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25578
25579 static __initdata int after_paging_init;
25580-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25581+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25582
25583 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25584 {
25585@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25586 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25587
25588 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25589- memset(bm_pte, 0, sizeof(bm_pte));
25590- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25591+ pmd_populate_user(&init_mm, pmd, bm_pte);
25592
25593 /*
25594 * The boot-ioremap range spans multiple pmds, for which
25595diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25596index d87dd6d..bf3fa66 100644
25597--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25598+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25599@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25600 * memory (e.g. tracked pages)? For now, we need this to avoid
25601 * invoking kmemcheck for PnP BIOS calls.
25602 */
25603- if (regs->flags & X86_VM_MASK)
25604+ if (v8086_mode(regs))
25605 return false;
25606- if (regs->cs != __KERNEL_CS)
25607+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25608 return false;
25609
25610 pte = kmemcheck_pte_lookup(address);
25611diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25612index 845df68..1d8d29f 100644
25613--- a/arch/x86/mm/mmap.c
25614+++ b/arch/x86/mm/mmap.c
25615@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25616 * Leave an at least ~128 MB hole with possible stack randomization.
25617 */
25618 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25619-#define MAX_GAP (TASK_SIZE/6*5)
25620+#define MAX_GAP (pax_task_size/6*5)
25621
25622 static int mmap_is_legacy(void)
25623 {
25624@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25625 return rnd << PAGE_SHIFT;
25626 }
25627
25628-static unsigned long mmap_base(void)
25629+static unsigned long mmap_base(struct mm_struct *mm)
25630 {
25631 unsigned long gap = rlimit(RLIMIT_STACK);
25632+ unsigned long pax_task_size = TASK_SIZE;
25633+
25634+#ifdef CONFIG_PAX_SEGMEXEC
25635+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25636+ pax_task_size = SEGMEXEC_TASK_SIZE;
25637+#endif
25638
25639 if (gap < MIN_GAP)
25640 gap = MIN_GAP;
25641 else if (gap > MAX_GAP)
25642 gap = MAX_GAP;
25643
25644- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25645+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25646 }
25647
25648 /*
25649 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25650 * does, but not when emulating X86_32
25651 */
25652-static unsigned long mmap_legacy_base(void)
25653+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25654 {
25655- if (mmap_is_ia32())
25656+ if (mmap_is_ia32()) {
25657+
25658+#ifdef CONFIG_PAX_SEGMEXEC
25659+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25660+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25661+ else
25662+#endif
25663+
25664 return TASK_UNMAPPED_BASE;
25665- else
25666+ } else
25667 return TASK_UNMAPPED_BASE + mmap_rnd();
25668 }
25669
25670@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25671 void arch_pick_mmap_layout(struct mm_struct *mm)
25672 {
25673 if (mmap_is_legacy()) {
25674- mm->mmap_base = mmap_legacy_base();
25675+ mm->mmap_base = mmap_legacy_base(mm);
25676+
25677+#ifdef CONFIG_PAX_RANDMMAP
25678+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25679+ mm->mmap_base += mm->delta_mmap;
25680+#endif
25681+
25682 mm->get_unmapped_area = arch_get_unmapped_area;
25683 mm->unmap_area = arch_unmap_area;
25684 } else {
25685- mm->mmap_base = mmap_base();
25686+ mm->mmap_base = mmap_base(mm);
25687+
25688+#ifdef CONFIG_PAX_RANDMMAP
25689+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25690+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25691+#endif
25692+
25693 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25694 mm->unmap_area = arch_unmap_area_topdown;
25695 }
25696diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25697index dc0b727..dc9d71a 100644
25698--- a/arch/x86/mm/mmio-mod.c
25699+++ b/arch/x86/mm/mmio-mod.c
25700@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25701 break;
25702 default:
25703 {
25704- unsigned char *ip = (unsigned char *)instptr;
25705+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25706 my_trace->opcode = MMIO_UNKNOWN_OP;
25707 my_trace->width = 0;
25708 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25709@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25710 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25711 void __iomem *addr)
25712 {
25713- static atomic_t next_id;
25714+ static atomic_unchecked_t next_id;
25715 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25716 /* These are page-unaligned. */
25717 struct mmiotrace_map map = {
25718@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25719 .private = trace
25720 },
25721 .phys = offset,
25722- .id = atomic_inc_return(&next_id)
25723+ .id = atomic_inc_return_unchecked(&next_id)
25724 };
25725 map.map_id = trace->id;
25726
25727diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25728index b008656..773eac2 100644
25729--- a/arch/x86/mm/pageattr-test.c
25730+++ b/arch/x86/mm/pageattr-test.c
25731@@ -36,7 +36,7 @@ enum {
25732
25733 static int pte_testbit(pte_t pte)
25734 {
25735- return pte_flags(pte) & _PAGE_UNUSED1;
25736+ return pte_flags(pte) & _PAGE_CPA_TEST;
25737 }
25738
25739 struct split_state {
25740diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25741index e1ebde3..b1e1db38 100644
25742--- a/arch/x86/mm/pageattr.c
25743+++ b/arch/x86/mm/pageattr.c
25744@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25745 */
25746 #ifdef CONFIG_PCI_BIOS
25747 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25748- pgprot_val(forbidden) |= _PAGE_NX;
25749+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25750 #endif
25751
25752 /*
25753@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25754 * Does not cover __inittext since that is gone later on. On
25755 * 64bit we do not enforce !NX on the low mapping
25756 */
25757- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25758- pgprot_val(forbidden) |= _PAGE_NX;
25759+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25760+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25761
25762+#ifdef CONFIG_DEBUG_RODATA
25763 /*
25764 * The .rodata section needs to be read-only. Using the pfn
25765 * catches all aliases.
25766@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25767 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25768 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25769 pgprot_val(forbidden) |= _PAGE_RW;
25770+#endif
25771
25772 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25773 /*
25774@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25775 }
25776 #endif
25777
25778+#ifdef CONFIG_PAX_KERNEXEC
25779+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25780+ pgprot_val(forbidden) |= _PAGE_RW;
25781+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25782+ }
25783+#endif
25784+
25785 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25786
25787 return prot;
25788@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25789 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25790 {
25791 /* change init_mm */
25792+ pax_open_kernel();
25793 set_pte_atomic(kpte, pte);
25794+
25795 #ifdef CONFIG_X86_32
25796 if (!SHARED_KERNEL_PMD) {
25797+
25798+#ifdef CONFIG_PAX_PER_CPU_PGD
25799+ unsigned long cpu;
25800+#else
25801 struct page *page;
25802+#endif
25803
25804+#ifdef CONFIG_PAX_PER_CPU_PGD
25805+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25806+ pgd_t *pgd = get_cpu_pgd(cpu);
25807+#else
25808 list_for_each_entry(page, &pgd_list, lru) {
25809- pgd_t *pgd;
25810+ pgd_t *pgd = (pgd_t *)page_address(page);
25811+#endif
25812+
25813 pud_t *pud;
25814 pmd_t *pmd;
25815
25816- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25817+ pgd += pgd_index(address);
25818 pud = pud_offset(pgd, address);
25819 pmd = pmd_offset(pud, address);
25820 set_pte_atomic((pte_t *)pmd, pte);
25821 }
25822 }
25823 #endif
25824+ pax_close_kernel();
25825 }
25826
25827 static int
25828diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25829index f6ff57b..481690f 100644
25830--- a/arch/x86/mm/pat.c
25831+++ b/arch/x86/mm/pat.c
25832@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25833
25834 if (!entry) {
25835 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25836- current->comm, current->pid, start, end);
25837+ current->comm, task_pid_nr(current), start, end);
25838 return -EINVAL;
25839 }
25840
25841@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25842 while (cursor < to) {
25843 if (!devmem_is_allowed(pfn)) {
25844 printk(KERN_INFO
25845- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25846- current->comm, from, to);
25847+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25848+ current->comm, from, to, cursor);
25849 return 0;
25850 }
25851 cursor += PAGE_SIZE;
25852@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25853 printk(KERN_INFO
25854 "%s:%d ioremap_change_attr failed %s "
25855 "for %Lx-%Lx\n",
25856- current->comm, current->pid,
25857+ current->comm, task_pid_nr(current),
25858 cattr_name(flags),
25859 base, (unsigned long long)(base + size));
25860 return -EINVAL;
25861@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25862 if (want_flags != flags) {
25863 printk(KERN_WARNING
25864 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25865- current->comm, current->pid,
25866+ current->comm, task_pid_nr(current),
25867 cattr_name(want_flags),
25868 (unsigned long long)paddr,
25869 (unsigned long long)(paddr + size),
25870@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25871 free_memtype(paddr, paddr + size);
25872 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25873 " for %Lx-%Lx, got %s\n",
25874- current->comm, current->pid,
25875+ current->comm, task_pid_nr(current),
25876 cattr_name(want_flags),
25877 (unsigned long long)paddr,
25878 (unsigned long long)(paddr + size),
25879diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25880index 9f0614d..92ae64a 100644
25881--- a/arch/x86/mm/pf_in.c
25882+++ b/arch/x86/mm/pf_in.c
25883@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25884 int i;
25885 enum reason_type rv = OTHERS;
25886
25887- p = (unsigned char *)ins_addr;
25888+ p = (unsigned char *)ktla_ktva(ins_addr);
25889 p += skip_prefix(p, &prf);
25890 p += get_opcode(p, &opcode);
25891
25892@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25893 struct prefix_bits prf;
25894 int i;
25895
25896- p = (unsigned char *)ins_addr;
25897+ p = (unsigned char *)ktla_ktva(ins_addr);
25898 p += skip_prefix(p, &prf);
25899 p += get_opcode(p, &opcode);
25900
25901@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25902 struct prefix_bits prf;
25903 int i;
25904
25905- p = (unsigned char *)ins_addr;
25906+ p = (unsigned char *)ktla_ktva(ins_addr);
25907 p += skip_prefix(p, &prf);
25908 p += get_opcode(p, &opcode);
25909
25910@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25911 struct prefix_bits prf;
25912 int i;
25913
25914- p = (unsigned char *)ins_addr;
25915+ p = (unsigned char *)ktla_ktva(ins_addr);
25916 p += skip_prefix(p, &prf);
25917 p += get_opcode(p, &opcode);
25918 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25919@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25920 struct prefix_bits prf;
25921 int i;
25922
25923- p = (unsigned char *)ins_addr;
25924+ p = (unsigned char *)ktla_ktva(ins_addr);
25925 p += skip_prefix(p, &prf);
25926 p += get_opcode(p, &opcode);
25927 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25928diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25929index 8573b83..7d9628f 100644
25930--- a/arch/x86/mm/pgtable.c
25931+++ b/arch/x86/mm/pgtable.c
25932@@ -84,10 +84,60 @@ static inline void pgd_list_del(pgd_t *pgd)
25933 list_del(&page->lru);
25934 }
25935
25936-#define UNSHARED_PTRS_PER_PGD \
25937- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25938+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25939+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25940
25941+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25942+{
25943+ while (count--)
25944+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25945+}
25946+#endif
25947
25948+#ifdef CONFIG_PAX_PER_CPU_PGD
25949+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25950+{
25951+ while (count--) {
25952+ pgd_t pgd;
25953+
25954+#ifdef CONFIG_X86_64
25955+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25956+#else
25957+ pgd = *src++;
25958+#endif
25959+
25960+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25961+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25962+#endif
25963+
25964+ *dst++ = pgd;
25965+ }
25966+
25967+}
25968+#endif
25969+
25970+#ifdef CONFIG_X86_64
25971+#define pxd_t pud_t
25972+#define pyd_t pgd_t
25973+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25974+#define pxd_free(mm, pud) pud_free((mm), (pud))
25975+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25976+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25977+#define PYD_SIZE PGDIR_SIZE
25978+#else
25979+#define pxd_t pmd_t
25980+#define pyd_t pud_t
25981+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25982+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25983+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25984+#define pyd_offset(mm, address) pud_offset((mm), (address))
25985+#define PYD_SIZE PUD_SIZE
25986+#endif
25987+
25988+#ifdef CONFIG_PAX_PER_CPU_PGD
25989+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25990+static inline void pgd_dtor(pgd_t *pgd) {}
25991+#else
25992 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25993 {
25994 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25995@@ -128,6 +178,7 @@ static void pgd_dtor(pgd_t *pgd)
25996 pgd_list_del(pgd);
25997 spin_unlock(&pgd_lock);
25998 }
25999+#endif
26000
26001 /*
26002 * List of all pgd's needed for non-PAE so it can invalidate entries
26003@@ -140,7 +191,7 @@ static void pgd_dtor(pgd_t *pgd)
26004 * -- wli
26005 */
26006
26007-#ifdef CONFIG_X86_PAE
26008+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26009 /*
26010 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26011 * updating the top-level pagetable entries to guarantee the
26012@@ -152,7 +203,7 @@ static void pgd_dtor(pgd_t *pgd)
26013 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26014 * and initialize the kernel pmds here.
26015 */
26016-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26017+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26018
26019 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26020 {
26021@@ -170,36 +221,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26022 */
26023 flush_tlb_mm(mm);
26024 }
26025+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26026+#define PREALLOCATED_PXDS USER_PGD_PTRS
26027 #else /* !CONFIG_X86_PAE */
26028
26029 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26030-#define PREALLOCATED_PMDS 0
26031+#define PREALLOCATED_PXDS 0
26032
26033 #endif /* CONFIG_X86_PAE */
26034
26035-static void free_pmds(pmd_t *pmds[])
26036+static void free_pxds(pxd_t *pxds[])
26037 {
26038 int i;
26039
26040- for(i = 0; i < PREALLOCATED_PMDS; i++)
26041- if (pmds[i])
26042- free_page((unsigned long)pmds[i]);
26043+ for(i = 0; i < PREALLOCATED_PXDS; i++)
26044+ if (pxds[i])
26045+ free_page((unsigned long)pxds[i]);
26046 }
26047
26048-static int preallocate_pmds(pmd_t *pmds[])
26049+static int preallocate_pxds(pxd_t *pxds[])
26050 {
26051 int i;
26052 bool failed = false;
26053
26054- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26055- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26056- if (pmd == NULL)
26057+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26058+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26059+ if (pxd == NULL)
26060 failed = true;
26061- pmds[i] = pmd;
26062+ pxds[i] = pxd;
26063 }
26064
26065 if (failed) {
26066- free_pmds(pmds);
26067+ free_pxds(pxds);
26068 return -ENOMEM;
26069 }
26070
26071@@ -212,51 +265,55 @@ static int preallocate_pmds(pmd_t *pmds[])
26072 * preallocate which never got a corresponding vma will need to be
26073 * freed manually.
26074 */
26075-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26076+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26077 {
26078 int i;
26079
26080- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26081+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26082 pgd_t pgd = pgdp[i];
26083
26084 if (pgd_val(pgd) != 0) {
26085- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26086+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26087
26088- pgdp[i] = native_make_pgd(0);
26089+ set_pgd(pgdp + i, native_make_pgd(0));
26090
26091- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26092- pmd_free(mm, pmd);
26093+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26094+ pxd_free(mm, pxd);
26095 }
26096 }
26097 }
26098
26099-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26100+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26101 {
26102- pud_t *pud;
26103+ pyd_t *pyd;
26104 unsigned long addr;
26105 int i;
26106
26107- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26108+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26109 return;
26110
26111- pud = pud_offset(pgd, 0);
26112+#ifdef CONFIG_X86_64
26113+ pyd = pyd_offset(mm, 0L);
26114+#else
26115+ pyd = pyd_offset(pgd, 0L);
26116+#endif
26117
26118- for (addr = i = 0; i < PREALLOCATED_PMDS;
26119- i++, pud++, addr += PUD_SIZE) {
26120- pmd_t *pmd = pmds[i];
26121+ for (addr = i = 0; i < PREALLOCATED_PXDS;
26122+ i++, pyd++, addr += PYD_SIZE) {
26123+ pxd_t *pxd = pxds[i];
26124
26125 if (i >= KERNEL_PGD_BOUNDARY)
26126- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26127- sizeof(pmd_t) * PTRS_PER_PMD);
26128+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26129+ sizeof(pxd_t) * PTRS_PER_PMD);
26130
26131- pud_populate(mm, pud, pmd);
26132+ pyd_populate(mm, pyd, pxd);
26133 }
26134 }
26135
26136 pgd_t *pgd_alloc(struct mm_struct *mm)
26137 {
26138 pgd_t *pgd;
26139- pmd_t *pmds[PREALLOCATED_PMDS];
26140+ pxd_t *pxds[PREALLOCATED_PXDS];
26141
26142 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26143
26144@@ -265,11 +322,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26145
26146 mm->pgd = pgd;
26147
26148- if (preallocate_pmds(pmds) != 0)
26149+ if (preallocate_pxds(pxds) != 0)
26150 goto out_free_pgd;
26151
26152 if (paravirt_pgd_alloc(mm) != 0)
26153- goto out_free_pmds;
26154+ goto out_free_pxds;
26155
26156 /*
26157 * Make sure that pre-populating the pmds is atomic with
26158@@ -279,14 +336,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26159 spin_lock(&pgd_lock);
26160
26161 pgd_ctor(mm, pgd);
26162- pgd_prepopulate_pmd(mm, pgd, pmds);
26163+ pgd_prepopulate_pxd(mm, pgd, pxds);
26164
26165 spin_unlock(&pgd_lock);
26166
26167 return pgd;
26168
26169-out_free_pmds:
26170- free_pmds(pmds);
26171+out_free_pxds:
26172+ free_pxds(pxds);
26173 out_free_pgd:
26174 free_page((unsigned long)pgd);
26175 out:
26176@@ -295,7 +352,7 @@ out:
26177
26178 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26179 {
26180- pgd_mop_up_pmds(mm, pgd);
26181+ pgd_mop_up_pxds(mm, pgd);
26182 pgd_dtor(pgd);
26183 paravirt_pgd_free(mm, pgd);
26184 free_page((unsigned long)pgd);
26185diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26186index cac7184..09a39fa 100644
26187--- a/arch/x86/mm/pgtable_32.c
26188+++ b/arch/x86/mm/pgtable_32.c
26189@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26190 return;
26191 }
26192 pte = pte_offset_kernel(pmd, vaddr);
26193+
26194+ pax_open_kernel();
26195 if (pte_val(pteval))
26196 set_pte_at(&init_mm, vaddr, pte, pteval);
26197 else
26198 pte_clear(&init_mm, vaddr, pte);
26199+ pax_close_kernel();
26200
26201 /*
26202 * It's enough to flush this one mapping.
26203diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26204index 410531d..0f16030 100644
26205--- a/arch/x86/mm/setup_nx.c
26206+++ b/arch/x86/mm/setup_nx.c
26207@@ -5,8 +5,10 @@
26208 #include <asm/pgtable.h>
26209 #include <asm/proto.h>
26210
26211+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26212 static int disable_nx __cpuinitdata;
26213
26214+#ifndef CONFIG_PAX_PAGEEXEC
26215 /*
26216 * noexec = on|off
26217 *
26218@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
26219 return 0;
26220 }
26221 early_param("noexec", noexec_setup);
26222+#endif
26223+
26224+#endif
26225
26226 void __cpuinit x86_configure_nx(void)
26227 {
26228+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26229 if (cpu_has_nx && !disable_nx)
26230 __supported_pte_mask |= _PAGE_NX;
26231 else
26232+#endif
26233 __supported_pte_mask &= ~_PAGE_NX;
26234 }
26235
26236diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26237index d6c0418..06a0ad5 100644
26238--- a/arch/x86/mm/tlb.c
26239+++ b/arch/x86/mm/tlb.c
26240@@ -65,7 +65,11 @@ void leave_mm(int cpu)
26241 BUG();
26242 cpumask_clear_cpu(cpu,
26243 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26244+
26245+#ifndef CONFIG_PAX_PER_CPU_PGD
26246 load_cr3(swapper_pg_dir);
26247+#endif
26248+
26249 }
26250 EXPORT_SYMBOL_GPL(leave_mm);
26251
26252diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
26253index 6687022..ceabcfa 100644
26254--- a/arch/x86/net/bpf_jit.S
26255+++ b/arch/x86/net/bpf_jit.S
26256@@ -9,6 +9,7 @@
26257 */
26258 #include <linux/linkage.h>
26259 #include <asm/dwarf2.h>
26260+#include <asm/alternative-asm.h>
26261
26262 /*
26263 * Calling convention :
26264@@ -35,6 +36,7 @@ sk_load_word:
26265 jle bpf_slow_path_word
26266 mov (SKBDATA,%rsi),%eax
26267 bswap %eax /* ntohl() */
26268+ pax_force_retaddr
26269 ret
26270
26271
26272@@ -53,6 +55,7 @@ sk_load_half:
26273 jle bpf_slow_path_half
26274 movzwl (SKBDATA,%rsi),%eax
26275 rol $8,%ax # ntohs()
26276+ pax_force_retaddr
26277 ret
26278
26279 sk_load_byte_ind:
26280@@ -66,6 +69,7 @@ sk_load_byte:
26281 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
26282 jle bpf_slow_path_byte
26283 movzbl (SKBDATA,%rsi),%eax
26284+ pax_force_retaddr
26285 ret
26286
26287 /**
26288@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
26289 movzbl (SKBDATA,%rsi),%ebx
26290 and $15,%bl
26291 shl $2,%bl
26292+ pax_force_retaddr
26293 ret
26294 CFI_ENDPROC
26295 ENDPROC(sk_load_byte_msh)
26296@@ -91,6 +96,7 @@ bpf_error:
26297 xor %eax,%eax
26298 mov -8(%rbp),%rbx
26299 leaveq
26300+ pax_force_retaddr
26301 ret
26302
26303 /* rsi contains offset and can be scratched */
26304@@ -113,6 +119,7 @@ bpf_slow_path_word:
26305 js bpf_error
26306 mov -12(%rbp),%eax
26307 bswap %eax
26308+ pax_force_retaddr
26309 ret
26310
26311 bpf_slow_path_half:
26312@@ -121,12 +128,14 @@ bpf_slow_path_half:
26313 mov -12(%rbp),%ax
26314 rol $8,%ax
26315 movzwl %ax,%eax
26316+ pax_force_retaddr
26317 ret
26318
26319 bpf_slow_path_byte:
26320 bpf_slow_path_common(1)
26321 js bpf_error
26322 movzbl -12(%rbp),%eax
26323+ pax_force_retaddr
26324 ret
26325
26326 bpf_slow_path_byte_msh:
26327@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
26328 and $15,%al
26329 shl $2,%al
26330 xchg %eax,%ebx
26331+ pax_force_retaddr
26332 ret
26333diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26334index 5a5b6e4..201d42e 100644
26335--- a/arch/x86/net/bpf_jit_comp.c
26336+++ b/arch/x86/net/bpf_jit_comp.c
26337@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
26338 set_fs(old_fs);
26339 }
26340
26341+struct bpf_jit_work {
26342+ struct work_struct work;
26343+ void *image;
26344+};
26345
26346 void bpf_jit_compile(struct sk_filter *fp)
26347 {
26348@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26349 if (addrs == NULL)
26350 return;
26351
26352+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26353+ if (!fp->work)
26354+ goto out;
26355+
26356 /* Before first pass, make a rough estimation of addrs[]
26357 * each bpf instruction is translated to less than 64 bytes
26358 */
26359@@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
26360 common_load: seen |= SEEN_DATAREF;
26361 if ((int)K < 0) {
26362 /* Abort the JIT because __load_pointer() is needed. */
26363- goto out;
26364+ goto error;
26365 }
26366 t_offset = func - (image + addrs[i]);
26367 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
26368@@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
26369 case BPF_S_LDX_B_MSH:
26370 if ((int)K < 0) {
26371 /* Abort the JIT because __load_pointer() is needed. */
26372- goto out;
26373+ goto error;
26374 }
26375 seen |= SEEN_DATAREF | SEEN_XREG;
26376 t_offset = sk_load_byte_msh - (image + addrs[i]);
26377@@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26378 break;
26379 default:
26380 /* hmm, too complex filter, give up with jit compiler */
26381- goto out;
26382+ goto error;
26383 }
26384 ilen = prog - temp;
26385 if (image) {
26386 if (unlikely(proglen + ilen > oldproglen)) {
26387 pr_err("bpb_jit_compile fatal error\n");
26388- kfree(addrs);
26389- module_free(NULL, image);
26390- return;
26391+ module_free_exec(NULL, image);
26392+ goto error;
26393 }
26394+ pax_open_kernel();
26395 memcpy(image + proglen, temp, ilen);
26396+ pax_close_kernel();
26397 }
26398 proglen += ilen;
26399 addrs[i] = proglen;
26400@@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26401 break;
26402 }
26403 if (proglen == oldproglen) {
26404- image = module_alloc(max_t(unsigned int,
26405- proglen,
26406- sizeof(struct work_struct)));
26407+ image = module_alloc_exec(proglen);
26408 if (!image)
26409- goto out;
26410+ goto error;
26411 }
26412 oldproglen = proglen;
26413 }
26414@@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26415 bpf_flush_icache(image, image + proglen);
26416
26417 fp->bpf_func = (void *)image;
26418- }
26419+ } else
26420+error:
26421+ kfree(fp->work);
26422+
26423 out:
26424 kfree(addrs);
26425 return;
26426@@ -641,18 +651,20 @@ out:
26427
26428 static void jit_free_defer(struct work_struct *arg)
26429 {
26430- module_free(NULL, arg);
26431+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26432+ kfree(arg);
26433 }
26434
26435 /* run from softirq, we must use a work_struct to call
26436- * module_free() from process context
26437+ * module_free_exec() from process context
26438 */
26439 void bpf_jit_free(struct sk_filter *fp)
26440 {
26441 if (fp->bpf_func != sk_run_filter) {
26442- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26443+ struct work_struct *work = &fp->work->work;
26444
26445 INIT_WORK(work, jit_free_defer);
26446+ fp->work->image = fp->bpf_func;
26447 schedule_work(work);
26448 }
26449 }
26450diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26451index bff89df..377758a 100644
26452--- a/arch/x86/oprofile/backtrace.c
26453+++ b/arch/x86/oprofile/backtrace.c
26454@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26455 struct stack_frame_ia32 *fp;
26456 unsigned long bytes;
26457
26458- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26459+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26460 if (bytes != sizeof(bufhead))
26461 return NULL;
26462
26463- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26464+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26465
26466 oprofile_add_trace(bufhead[0].return_address);
26467
26468@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26469 struct stack_frame bufhead[2];
26470 unsigned long bytes;
26471
26472- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26473+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26474 if (bytes != sizeof(bufhead))
26475 return NULL;
26476
26477@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26478 {
26479 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26480
26481- if (!user_mode_vm(regs)) {
26482+ if (!user_mode(regs)) {
26483 unsigned long stack = kernel_stack_pointer(regs);
26484 if (depth)
26485 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26486diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26487index cb29191..036766d 100644
26488--- a/arch/x86/pci/mrst.c
26489+++ b/arch/x86/pci/mrst.c
26490@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
26491 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
26492 pci_mmcfg_late_init();
26493 pcibios_enable_irq = mrst_pci_irq_enable;
26494- pci_root_ops = pci_mrst_ops;
26495+ pax_open_kernel();
26496+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26497+ pax_close_kernel();
26498 /* Continue with standard init */
26499 return 1;
26500 }
26501diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26502index da8fe05..7ee6704 100644
26503--- a/arch/x86/pci/pcbios.c
26504+++ b/arch/x86/pci/pcbios.c
26505@@ -79,50 +79,93 @@ union bios32 {
26506 static struct {
26507 unsigned long address;
26508 unsigned short segment;
26509-} bios32_indirect = { 0, __KERNEL_CS };
26510+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26511
26512 /*
26513 * Returns the entry point for the given service, NULL on error
26514 */
26515
26516-static unsigned long bios32_service(unsigned long service)
26517+static unsigned long __devinit bios32_service(unsigned long service)
26518 {
26519 unsigned char return_code; /* %al */
26520 unsigned long address; /* %ebx */
26521 unsigned long length; /* %ecx */
26522 unsigned long entry; /* %edx */
26523 unsigned long flags;
26524+ struct desc_struct d, *gdt;
26525
26526 local_irq_save(flags);
26527- __asm__("lcall *(%%edi); cld"
26528+
26529+ gdt = get_cpu_gdt_table(smp_processor_id());
26530+
26531+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26532+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26533+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26534+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26535+
26536+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26537 : "=a" (return_code),
26538 "=b" (address),
26539 "=c" (length),
26540 "=d" (entry)
26541 : "0" (service),
26542 "1" (0),
26543- "D" (&bios32_indirect));
26544+ "D" (&bios32_indirect),
26545+ "r"(__PCIBIOS_DS)
26546+ : "memory");
26547+
26548+ pax_open_kernel();
26549+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26550+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26551+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26552+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26553+ pax_close_kernel();
26554+
26555 local_irq_restore(flags);
26556
26557 switch (return_code) {
26558- case 0:
26559- return address + entry;
26560- case 0x80: /* Not present */
26561- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26562- return 0;
26563- default: /* Shouldn't happen */
26564- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26565- service, return_code);
26566+ case 0: {
26567+ int cpu;
26568+ unsigned char flags;
26569+
26570+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26571+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26572+ printk(KERN_WARNING "bios32_service: not valid\n");
26573 return 0;
26574+ }
26575+ address = address + PAGE_OFFSET;
26576+ length += 16UL; /* some BIOSs underreport this... */
26577+ flags = 4;
26578+ if (length >= 64*1024*1024) {
26579+ length >>= PAGE_SHIFT;
26580+ flags |= 8;
26581+ }
26582+
26583+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26584+ gdt = get_cpu_gdt_table(cpu);
26585+ pack_descriptor(&d, address, length, 0x9b, flags);
26586+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26587+ pack_descriptor(&d, address, length, 0x93, flags);
26588+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26589+ }
26590+ return entry;
26591+ }
26592+ case 0x80: /* Not present */
26593+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26594+ return 0;
26595+ default: /* Shouldn't happen */
26596+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26597+ service, return_code);
26598+ return 0;
26599 }
26600 }
26601
26602 static struct {
26603 unsigned long address;
26604 unsigned short segment;
26605-} pci_indirect = { 0, __KERNEL_CS };
26606+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26607
26608-static int pci_bios_present;
26609+static int pci_bios_present __read_only;
26610
26611 static int __devinit check_pcibios(void)
26612 {
26613@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26614 unsigned long flags, pcibios_entry;
26615
26616 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26617- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26618+ pci_indirect.address = pcibios_entry;
26619
26620 local_irq_save(flags);
26621- __asm__(
26622- "lcall *(%%edi); cld\n\t"
26623+ __asm__("movw %w6, %%ds\n\t"
26624+ "lcall *%%ss:(%%edi); cld\n\t"
26625+ "push %%ss\n\t"
26626+ "pop %%ds\n\t"
26627 "jc 1f\n\t"
26628 "xor %%ah, %%ah\n"
26629 "1:"
26630@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26631 "=b" (ebx),
26632 "=c" (ecx)
26633 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26634- "D" (&pci_indirect)
26635+ "D" (&pci_indirect),
26636+ "r" (__PCIBIOS_DS)
26637 : "memory");
26638 local_irq_restore(flags);
26639
26640@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26641
26642 switch (len) {
26643 case 1:
26644- __asm__("lcall *(%%esi); cld\n\t"
26645+ __asm__("movw %w6, %%ds\n\t"
26646+ "lcall *%%ss:(%%esi); cld\n\t"
26647+ "push %%ss\n\t"
26648+ "pop %%ds\n\t"
26649 "jc 1f\n\t"
26650 "xor %%ah, %%ah\n"
26651 "1:"
26652@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26653 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26654 "b" (bx),
26655 "D" ((long)reg),
26656- "S" (&pci_indirect));
26657+ "S" (&pci_indirect),
26658+ "r" (__PCIBIOS_DS));
26659 /*
26660 * Zero-extend the result beyond 8 bits, do not trust the
26661 * BIOS having done it:
26662@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26663 *value &= 0xff;
26664 break;
26665 case 2:
26666- __asm__("lcall *(%%esi); cld\n\t"
26667+ __asm__("movw %w6, %%ds\n\t"
26668+ "lcall *%%ss:(%%esi); cld\n\t"
26669+ "push %%ss\n\t"
26670+ "pop %%ds\n\t"
26671 "jc 1f\n\t"
26672 "xor %%ah, %%ah\n"
26673 "1:"
26674@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26675 : "1" (PCIBIOS_READ_CONFIG_WORD),
26676 "b" (bx),
26677 "D" ((long)reg),
26678- "S" (&pci_indirect));
26679+ "S" (&pci_indirect),
26680+ "r" (__PCIBIOS_DS));
26681 /*
26682 * Zero-extend the result beyond 16 bits, do not trust the
26683 * BIOS having done it:
26684@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26685 *value &= 0xffff;
26686 break;
26687 case 4:
26688- __asm__("lcall *(%%esi); cld\n\t"
26689+ __asm__("movw %w6, %%ds\n\t"
26690+ "lcall *%%ss:(%%esi); cld\n\t"
26691+ "push %%ss\n\t"
26692+ "pop %%ds\n\t"
26693 "jc 1f\n\t"
26694 "xor %%ah, %%ah\n"
26695 "1:"
26696@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26697 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26698 "b" (bx),
26699 "D" ((long)reg),
26700- "S" (&pci_indirect));
26701+ "S" (&pci_indirect),
26702+ "r" (__PCIBIOS_DS));
26703 break;
26704 }
26705
26706@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26707
26708 switch (len) {
26709 case 1:
26710- __asm__("lcall *(%%esi); cld\n\t"
26711+ __asm__("movw %w6, %%ds\n\t"
26712+ "lcall *%%ss:(%%esi); cld\n\t"
26713+ "push %%ss\n\t"
26714+ "pop %%ds\n\t"
26715 "jc 1f\n\t"
26716 "xor %%ah, %%ah\n"
26717 "1:"
26718@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26719 "c" (value),
26720 "b" (bx),
26721 "D" ((long)reg),
26722- "S" (&pci_indirect));
26723+ "S" (&pci_indirect),
26724+ "r" (__PCIBIOS_DS));
26725 break;
26726 case 2:
26727- __asm__("lcall *(%%esi); cld\n\t"
26728+ __asm__("movw %w6, %%ds\n\t"
26729+ "lcall *%%ss:(%%esi); cld\n\t"
26730+ "push %%ss\n\t"
26731+ "pop %%ds\n\t"
26732 "jc 1f\n\t"
26733 "xor %%ah, %%ah\n"
26734 "1:"
26735@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26736 "c" (value),
26737 "b" (bx),
26738 "D" ((long)reg),
26739- "S" (&pci_indirect));
26740+ "S" (&pci_indirect),
26741+ "r" (__PCIBIOS_DS));
26742 break;
26743 case 4:
26744- __asm__("lcall *(%%esi); cld\n\t"
26745+ __asm__("movw %w6, %%ds\n\t"
26746+ "lcall *%%ss:(%%esi); cld\n\t"
26747+ "push %%ss\n\t"
26748+ "pop %%ds\n\t"
26749 "jc 1f\n\t"
26750 "xor %%ah, %%ah\n"
26751 "1:"
26752@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26753 "c" (value),
26754 "b" (bx),
26755 "D" ((long)reg),
26756- "S" (&pci_indirect));
26757+ "S" (&pci_indirect),
26758+ "r" (__PCIBIOS_DS));
26759 break;
26760 }
26761
26762@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26763
26764 DBG("PCI: Fetching IRQ routing table... ");
26765 __asm__("push %%es\n\t"
26766+ "movw %w8, %%ds\n\t"
26767 "push %%ds\n\t"
26768 "pop %%es\n\t"
26769- "lcall *(%%esi); cld\n\t"
26770+ "lcall *%%ss:(%%esi); cld\n\t"
26771 "pop %%es\n\t"
26772+ "push %%ss\n\t"
26773+ "pop %%ds\n"
26774 "jc 1f\n\t"
26775 "xor %%ah, %%ah\n"
26776 "1:"
26777@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26778 "1" (0),
26779 "D" ((long) &opt),
26780 "S" (&pci_indirect),
26781- "m" (opt)
26782+ "m" (opt),
26783+ "r" (__PCIBIOS_DS)
26784 : "memory");
26785 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26786 if (ret & 0xff00)
26787@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26788 {
26789 int ret;
26790
26791- __asm__("lcall *(%%esi); cld\n\t"
26792+ __asm__("movw %w5, %%ds\n\t"
26793+ "lcall *%%ss:(%%esi); cld\n\t"
26794+ "push %%ss\n\t"
26795+ "pop %%ds\n"
26796 "jc 1f\n\t"
26797 "xor %%ah, %%ah\n"
26798 "1:"
26799@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26800 : "0" (PCIBIOS_SET_PCI_HW_INT),
26801 "b" ((dev->bus->number << 8) | dev->devfn),
26802 "c" ((irq << 8) | (pin + 10)),
26803- "S" (&pci_indirect));
26804+ "S" (&pci_indirect),
26805+ "r" (__PCIBIOS_DS));
26806 return !(ret & 0xff00);
26807 }
26808 EXPORT_SYMBOL(pcibios_set_irq_routing);
26809diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26810index 40e4469..1ab536e 100644
26811--- a/arch/x86/platform/efi/efi_32.c
26812+++ b/arch/x86/platform/efi/efi_32.c
26813@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26814 {
26815 struct desc_ptr gdt_descr;
26816
26817+#ifdef CONFIG_PAX_KERNEXEC
26818+ struct desc_struct d;
26819+#endif
26820+
26821 local_irq_save(efi_rt_eflags);
26822
26823 load_cr3(initial_page_table);
26824 __flush_tlb_all();
26825
26826+#ifdef CONFIG_PAX_KERNEXEC
26827+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26828+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26829+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26830+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26831+#endif
26832+
26833 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26834 gdt_descr.size = GDT_SIZE - 1;
26835 load_gdt(&gdt_descr);
26836@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26837 {
26838 struct desc_ptr gdt_descr;
26839
26840+#ifdef CONFIG_PAX_KERNEXEC
26841+ struct desc_struct d;
26842+
26843+ memset(&d, 0, sizeof d);
26844+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26845+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26846+#endif
26847+
26848 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26849 gdt_descr.size = GDT_SIZE - 1;
26850 load_gdt(&gdt_descr);
26851diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26852index fbe66e6..c5c0dd2 100644
26853--- a/arch/x86/platform/efi/efi_stub_32.S
26854+++ b/arch/x86/platform/efi/efi_stub_32.S
26855@@ -6,7 +6,9 @@
26856 */
26857
26858 #include <linux/linkage.h>
26859+#include <linux/init.h>
26860 #include <asm/page_types.h>
26861+#include <asm/segment.h>
26862
26863 /*
26864 * efi_call_phys(void *, ...) is a function with variable parameters.
26865@@ -20,7 +22,7 @@
26866 * service functions will comply with gcc calling convention, too.
26867 */
26868
26869-.text
26870+__INIT
26871 ENTRY(efi_call_phys)
26872 /*
26873 * 0. The function can only be called in Linux kernel. So CS has been
26874@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26875 * The mapping of lower virtual memory has been created in prelog and
26876 * epilog.
26877 */
26878- movl $1f, %edx
26879- subl $__PAGE_OFFSET, %edx
26880- jmp *%edx
26881+ movl $(__KERNEXEC_EFI_DS), %edx
26882+ mov %edx, %ds
26883+ mov %edx, %es
26884+ mov %edx, %ss
26885+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26886 1:
26887
26888 /*
26889@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26890 * parameter 2, ..., param n. To make things easy, we save the return
26891 * address of efi_call_phys in a global variable.
26892 */
26893- popl %edx
26894- movl %edx, saved_return_addr
26895- /* get the function pointer into ECX*/
26896- popl %ecx
26897- movl %ecx, efi_rt_function_ptr
26898- movl $2f, %edx
26899- subl $__PAGE_OFFSET, %edx
26900- pushl %edx
26901+ popl (saved_return_addr)
26902+ popl (efi_rt_function_ptr)
26903
26904 /*
26905 * 3. Clear PG bit in %CR0.
26906@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26907 /*
26908 * 5. Call the physical function.
26909 */
26910- jmp *%ecx
26911+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26912
26913-2:
26914 /*
26915 * 6. After EFI runtime service returns, control will return to
26916 * following instruction. We'd better readjust stack pointer first.
26917@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26918 movl %cr0, %edx
26919 orl $0x80000000, %edx
26920 movl %edx, %cr0
26921- jmp 1f
26922-1:
26923+
26924 /*
26925 * 8. Now restore the virtual mode from flat mode by
26926 * adding EIP with PAGE_OFFSET.
26927 */
26928- movl $1f, %edx
26929- jmp *%edx
26930+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26931 1:
26932+ movl $(__KERNEL_DS), %edx
26933+ mov %edx, %ds
26934+ mov %edx, %es
26935+ mov %edx, %ss
26936
26937 /*
26938 * 9. Balance the stack. And because EAX contain the return value,
26939 * we'd better not clobber it.
26940 */
26941- leal efi_rt_function_ptr, %edx
26942- movl (%edx), %ecx
26943- pushl %ecx
26944+ pushl (efi_rt_function_ptr)
26945
26946 /*
26947- * 10. Push the saved return address onto the stack and return.
26948+ * 10. Return to the saved return address.
26949 */
26950- leal saved_return_addr, %edx
26951- movl (%edx), %ecx
26952- pushl %ecx
26953- ret
26954+ jmpl *(saved_return_addr)
26955 ENDPROC(efi_call_phys)
26956 .previous
26957
26958-.data
26959+__INITDATA
26960 saved_return_addr:
26961 .long 0
26962 efi_rt_function_ptr:
26963diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26964index 4c07cca..2c8427d 100644
26965--- a/arch/x86/platform/efi/efi_stub_64.S
26966+++ b/arch/x86/platform/efi/efi_stub_64.S
26967@@ -7,6 +7,7 @@
26968 */
26969
26970 #include <linux/linkage.h>
26971+#include <asm/alternative-asm.h>
26972
26973 #define SAVE_XMM \
26974 mov %rsp, %rax; \
26975@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26976 call *%rdi
26977 addq $32, %rsp
26978 RESTORE_XMM
26979+ pax_force_retaddr 0, 1
26980 ret
26981 ENDPROC(efi_call0)
26982
26983@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26984 call *%rdi
26985 addq $32, %rsp
26986 RESTORE_XMM
26987+ pax_force_retaddr 0, 1
26988 ret
26989 ENDPROC(efi_call1)
26990
26991@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26992 call *%rdi
26993 addq $32, %rsp
26994 RESTORE_XMM
26995+ pax_force_retaddr 0, 1
26996 ret
26997 ENDPROC(efi_call2)
26998
26999@@ -71,6 +75,7 @@ ENTRY(efi_call3)
27000 call *%rdi
27001 addq $32, %rsp
27002 RESTORE_XMM
27003+ pax_force_retaddr 0, 1
27004 ret
27005 ENDPROC(efi_call3)
27006
27007@@ -83,6 +88,7 @@ ENTRY(efi_call4)
27008 call *%rdi
27009 addq $32, %rsp
27010 RESTORE_XMM
27011+ pax_force_retaddr 0, 1
27012 ret
27013 ENDPROC(efi_call4)
27014
27015@@ -96,6 +102,7 @@ ENTRY(efi_call5)
27016 call *%rdi
27017 addq $48, %rsp
27018 RESTORE_XMM
27019+ pax_force_retaddr 0, 1
27020 ret
27021 ENDPROC(efi_call5)
27022
27023@@ -112,5 +119,6 @@ ENTRY(efi_call6)
27024 call *%rdi
27025 addq $48, %rsp
27026 RESTORE_XMM
27027+ pax_force_retaddr 0, 1
27028 ret
27029 ENDPROC(efi_call6)
27030diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
27031index 475e2cd..1b8e708 100644
27032--- a/arch/x86/platform/mrst/mrst.c
27033+++ b/arch/x86/platform/mrst/mrst.c
27034@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
27035 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
27036 int sfi_mrtc_num;
27037
27038-static void mrst_power_off(void)
27039+static __noreturn void mrst_power_off(void)
27040 {
27041 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
27042 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
27043+ BUG();
27044 }
27045
27046-static void mrst_reboot(void)
27047+static __noreturn void mrst_reboot(void)
27048 {
27049 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
27050 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
27051 else
27052 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
27053+ BUG();
27054 }
27055
27056 /* parse all the mtimer info to a static mtimer array */
27057diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
27058index 3ae0e61..4202d86 100644
27059--- a/arch/x86/platform/uv/tlb_uv.c
27060+++ b/arch/x86/platform/uv/tlb_uv.c
27061@@ -1424,6 +1424,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
27062 * 0: display meaning of the statistics
27063 */
27064 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
27065+ size_t count, loff_t *data) __size_overflow(3);
27066+static ssize_t ptc_proc_write(struct file *file, const char __user *user,
27067 size_t count, loff_t *data)
27068 {
27069 int cpu;
27070@@ -1539,6 +1541,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
27071 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
27072 */
27073 static ssize_t tunables_write(struct file *file, const char __user *user,
27074+ size_t count, loff_t *data) __size_overflow(3);
27075+static ssize_t tunables_write(struct file *file, const char __user *user,
27076 size_t count, loff_t *data)
27077 {
27078 int cpu;
27079diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
27080index f10c0af..3ec1f95 100644
27081--- a/arch/x86/power/cpu.c
27082+++ b/arch/x86/power/cpu.c
27083@@ -131,7 +131,7 @@ static void do_fpu_end(void)
27084 static void fix_processor_context(void)
27085 {
27086 int cpu = smp_processor_id();
27087- struct tss_struct *t = &per_cpu(init_tss, cpu);
27088+ struct tss_struct *t = init_tss + cpu;
27089
27090 set_tss_desc(cpu, t); /*
27091 * This just modifies memory; should not be
27092@@ -141,7 +141,9 @@ static void fix_processor_context(void)
27093 */
27094
27095 #ifdef CONFIG_X86_64
27096+ pax_open_kernel();
27097 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
27098+ pax_close_kernel();
27099
27100 syscall_init(); /* This sets MSR_*STAR and related */
27101 #endif
27102diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27103index 5d17950..2253fc9 100644
27104--- a/arch/x86/vdso/Makefile
27105+++ b/arch/x86/vdso/Makefile
27106@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
27107 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
27108 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
27109
27110-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27111+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27112 GCOV_PROFILE := n
27113
27114 #
27115diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27116index 468d591..8e80a0a 100644
27117--- a/arch/x86/vdso/vdso32-setup.c
27118+++ b/arch/x86/vdso/vdso32-setup.c
27119@@ -25,6 +25,7 @@
27120 #include <asm/tlbflush.h>
27121 #include <asm/vdso.h>
27122 #include <asm/proto.h>
27123+#include <asm/mman.h>
27124
27125 enum {
27126 VDSO_DISABLED = 0,
27127@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27128 void enable_sep_cpu(void)
27129 {
27130 int cpu = get_cpu();
27131- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27132+ struct tss_struct *tss = init_tss + cpu;
27133
27134 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27135 put_cpu();
27136@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27137 gate_vma.vm_start = FIXADDR_USER_START;
27138 gate_vma.vm_end = FIXADDR_USER_END;
27139 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27140- gate_vma.vm_page_prot = __P101;
27141+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27142 /*
27143 * Make sure the vDSO gets into every core dump.
27144 * Dumping its contents makes post-mortem fully interpretable later
27145@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27146 if (compat)
27147 addr = VDSO_HIGH_BASE;
27148 else {
27149- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27150+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27151 if (IS_ERR_VALUE(addr)) {
27152 ret = addr;
27153 goto up_fail;
27154 }
27155 }
27156
27157- current->mm->context.vdso = (void *)addr;
27158+ current->mm->context.vdso = addr;
27159
27160 if (compat_uses_vma || !compat) {
27161 /*
27162@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27163 }
27164
27165 current_thread_info()->sysenter_return =
27166- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27167+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27168
27169 up_fail:
27170 if (ret)
27171- current->mm->context.vdso = NULL;
27172+ current->mm->context.vdso = 0;
27173
27174 up_write(&mm->mmap_sem);
27175
27176@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
27177
27178 const char *arch_vma_name(struct vm_area_struct *vma)
27179 {
27180- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27181+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27182 return "[vdso]";
27183+
27184+#ifdef CONFIG_PAX_SEGMEXEC
27185+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27186+ return "[vdso]";
27187+#endif
27188+
27189 return NULL;
27190 }
27191
27192@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27193 * Check to see if the corresponding task was created in compat vdso
27194 * mode.
27195 */
27196- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27197+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27198 return &gate_vma;
27199 return NULL;
27200 }
27201diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27202index 153407c..611cba9 100644
27203--- a/arch/x86/vdso/vma.c
27204+++ b/arch/x86/vdso/vma.c
27205@@ -16,8 +16,6 @@
27206 #include <asm/vdso.h>
27207 #include <asm/page.h>
27208
27209-unsigned int __read_mostly vdso_enabled = 1;
27210-
27211 extern char vdso_start[], vdso_end[];
27212 extern unsigned short vdso_sync_cpuid;
27213
27214@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27215 * unaligned here as a result of stack start randomization.
27216 */
27217 addr = PAGE_ALIGN(addr);
27218- addr = align_addr(addr, NULL, ALIGN_VDSO);
27219
27220 return addr;
27221 }
27222@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27223 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27224 {
27225 struct mm_struct *mm = current->mm;
27226- unsigned long addr;
27227+ unsigned long addr = 0;
27228 int ret;
27229
27230- if (!vdso_enabled)
27231- return 0;
27232-
27233 down_write(&mm->mmap_sem);
27234+
27235+#ifdef CONFIG_PAX_RANDMMAP
27236+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27237+#endif
27238+
27239 addr = vdso_addr(mm->start_stack, vdso_size);
27240+ addr = align_addr(addr, NULL, ALIGN_VDSO);
27241 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27242 if (IS_ERR_VALUE(addr)) {
27243 ret = addr;
27244 goto up_fail;
27245 }
27246
27247- current->mm->context.vdso = (void *)addr;
27248+ mm->context.vdso = addr;
27249
27250 ret = install_special_mapping(mm, addr, vdso_size,
27251 VM_READ|VM_EXEC|
27252 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
27253 VM_ALWAYSDUMP,
27254 vdso_pages);
27255- if (ret) {
27256- current->mm->context.vdso = NULL;
27257- goto up_fail;
27258- }
27259+
27260+ if (ret)
27261+ mm->context.vdso = 0;
27262
27263 up_fail:
27264 up_write(&mm->mmap_sem);
27265 return ret;
27266 }
27267-
27268-static __init int vdso_setup(char *s)
27269-{
27270- vdso_enabled = simple_strtoul(s, NULL, 0);
27271- return 0;
27272-}
27273-__setup("vdso=", vdso_setup);
27274diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27275index 4e517d4..68a48f5 100644
27276--- a/arch/x86/xen/enlighten.c
27277+++ b/arch/x86/xen/enlighten.c
27278@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27279
27280 struct shared_info xen_dummy_shared_info;
27281
27282-void *xen_initial_gdt;
27283-
27284 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27285 __read_mostly int xen_have_vector_callback;
27286 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27287@@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27288 #endif
27289 };
27290
27291-static void xen_reboot(int reason)
27292+static __noreturn void xen_reboot(int reason)
27293 {
27294 struct sched_shutdown r = { .reason = reason };
27295
27296- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27297- BUG();
27298+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27299+ BUG();
27300 }
27301
27302-static void xen_restart(char *msg)
27303+static __noreturn void xen_restart(char *msg)
27304 {
27305 xen_reboot(SHUTDOWN_reboot);
27306 }
27307
27308-static void xen_emergency_restart(void)
27309+static __noreturn void xen_emergency_restart(void)
27310 {
27311 xen_reboot(SHUTDOWN_reboot);
27312 }
27313
27314-static void xen_machine_halt(void)
27315+static __noreturn void xen_machine_halt(void)
27316 {
27317 xen_reboot(SHUTDOWN_poweroff);
27318 }
27319
27320-static void xen_machine_power_off(void)
27321+static __noreturn void xen_machine_power_off(void)
27322 {
27323 if (pm_power_off)
27324 pm_power_off();
27325@@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void)
27326 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27327
27328 /* Work out if we support NX */
27329- x86_configure_nx();
27330+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27331+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27332+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27333+ unsigned l, h;
27334+
27335+ __supported_pte_mask |= _PAGE_NX;
27336+ rdmsr(MSR_EFER, l, h);
27337+ l |= EFER_NX;
27338+ wrmsr(MSR_EFER, l, h);
27339+ }
27340+#endif
27341
27342 xen_setup_features();
27343
27344@@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void)
27345
27346 machine_ops = xen_machine_ops;
27347
27348- /*
27349- * The only reliable way to retain the initial address of the
27350- * percpu gdt_page is to remember it here, so we can go and
27351- * mark it RW later, when the initial percpu area is freed.
27352- */
27353- xen_initial_gdt = &per_cpu(gdt_page, 0);
27354-
27355 xen_smp_init();
27356
27357 #ifdef CONFIG_ACPI_NUMA
27358diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27359index dc19347..1b07a2c 100644
27360--- a/arch/x86/xen/mmu.c
27361+++ b/arch/x86/xen/mmu.c
27362@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27363 convert_pfn_mfn(init_level4_pgt);
27364 convert_pfn_mfn(level3_ident_pgt);
27365 convert_pfn_mfn(level3_kernel_pgt);
27366+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27367+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27368+ convert_pfn_mfn(level3_vmemmap_pgt);
27369
27370 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27371 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27372@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27373 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27374 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27375 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27376+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27377+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27378+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27379 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27380+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27381 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27382 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27383
27384@@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void)
27385 pv_mmu_ops.set_pud = xen_set_pud;
27386 #if PAGETABLE_LEVELS == 4
27387 pv_mmu_ops.set_pgd = xen_set_pgd;
27388+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27389 #endif
27390
27391 /* This will work as long as patching hasn't happened yet
27392@@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27393 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27394 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27395 .set_pgd = xen_set_pgd_hyper,
27396+ .set_pgd_batched = xen_set_pgd_hyper,
27397
27398 .alloc_pud = xen_alloc_pmd_init,
27399 .release_pud = xen_release_pmd_init,
27400diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27401index f2ce60a..14e08dc 100644
27402--- a/arch/x86/xen/smp.c
27403+++ b/arch/x86/xen/smp.c
27404@@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27405 {
27406 BUG_ON(smp_processor_id() != 0);
27407 native_smp_prepare_boot_cpu();
27408-
27409- /* We've switched to the "real" per-cpu gdt, so make sure the
27410- old memory can be recycled */
27411- make_lowmem_page_readwrite(xen_initial_gdt);
27412-
27413 xen_filter_cpu_maps();
27414 xen_setup_vcpu_info_placement();
27415 }
27416@@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27417 gdt = get_cpu_gdt_table(cpu);
27418
27419 ctxt->flags = VGCF_IN_KERNEL;
27420- ctxt->user_regs.ds = __USER_DS;
27421- ctxt->user_regs.es = __USER_DS;
27422+ ctxt->user_regs.ds = __KERNEL_DS;
27423+ ctxt->user_regs.es = __KERNEL_DS;
27424 ctxt->user_regs.ss = __KERNEL_DS;
27425 #ifdef CONFIG_X86_32
27426 ctxt->user_regs.fs = __KERNEL_PERCPU;
27427- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27428+ savesegment(gs, ctxt->user_regs.gs);
27429 #else
27430 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27431 #endif
27432@@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27433 int rc;
27434
27435 per_cpu(current_task, cpu) = idle;
27436+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27437 #ifdef CONFIG_X86_32
27438 irq_ctx_init(cpu);
27439 #else
27440 clear_tsk_thread_flag(idle, TIF_FORK);
27441- per_cpu(kernel_stack, cpu) =
27442- (unsigned long)task_stack_page(idle) -
27443- KERNEL_STACK_OFFSET + THREAD_SIZE;
27444+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27445 #endif
27446 xen_setup_runstate_info(cpu);
27447 xen_setup_timer(cpu);
27448diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27449index b040b0e..8cc4fe0 100644
27450--- a/arch/x86/xen/xen-asm_32.S
27451+++ b/arch/x86/xen/xen-asm_32.S
27452@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27453 ESP_OFFSET=4 # bytes pushed onto stack
27454
27455 /*
27456- * Store vcpu_info pointer for easy access. Do it this way to
27457- * avoid having to reload %fs
27458+ * Store vcpu_info pointer for easy access.
27459 */
27460 #ifdef CONFIG_SMP
27461- GET_THREAD_INFO(%eax)
27462- movl TI_cpu(%eax), %eax
27463- movl __per_cpu_offset(,%eax,4), %eax
27464- mov xen_vcpu(%eax), %eax
27465+ push %fs
27466+ mov $(__KERNEL_PERCPU), %eax
27467+ mov %eax, %fs
27468+ mov PER_CPU_VAR(xen_vcpu), %eax
27469+ pop %fs
27470 #else
27471 movl xen_vcpu, %eax
27472 #endif
27473diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27474index aaa7291..3f77960 100644
27475--- a/arch/x86/xen/xen-head.S
27476+++ b/arch/x86/xen/xen-head.S
27477@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27478 #ifdef CONFIG_X86_32
27479 mov %esi,xen_start_info
27480 mov $init_thread_union+THREAD_SIZE,%esp
27481+#ifdef CONFIG_SMP
27482+ movl $cpu_gdt_table,%edi
27483+ movl $__per_cpu_load,%eax
27484+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27485+ rorl $16,%eax
27486+ movb %al,__KERNEL_PERCPU + 4(%edi)
27487+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27488+ movl $__per_cpu_end - 1,%eax
27489+ subl $__per_cpu_start,%eax
27490+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27491+#endif
27492 #else
27493 mov %rsi,xen_start_info
27494 mov $init_thread_union+THREAD_SIZE,%rsp
27495diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27496index b095739..8c17bcd 100644
27497--- a/arch/x86/xen/xen-ops.h
27498+++ b/arch/x86/xen/xen-ops.h
27499@@ -10,8 +10,6 @@
27500 extern const char xen_hypervisor_callback[];
27501 extern const char xen_failsafe_callback[];
27502
27503-extern void *xen_initial_gdt;
27504-
27505 struct trap_info;
27506 void xen_copy_trap_info(struct trap_info *traps);
27507
27508diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27509index 525bd3d..ef888b1 100644
27510--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27511+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27512@@ -119,9 +119,9 @@
27513 ----------------------------------------------------------------------*/
27514
27515 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27516-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27517 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27518 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27519+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27520
27521 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27522 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27523diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27524index 2f33760..835e50a 100644
27525--- a/arch/xtensa/variants/fsf/include/variant/core.h
27526+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27527@@ -11,6 +11,7 @@
27528 #ifndef _XTENSA_CORE_H
27529 #define _XTENSA_CORE_H
27530
27531+#include <linux/const.h>
27532
27533 /****************************************************************************
27534 Parameters Useful for Any Code, USER or PRIVILEGED
27535@@ -112,9 +113,9 @@
27536 ----------------------------------------------------------------------*/
27537
27538 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27539-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27540 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27541 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27542+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27543
27544 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27545 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27546diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27547index af00795..2bb8105 100644
27548--- a/arch/xtensa/variants/s6000/include/variant/core.h
27549+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27550@@ -11,6 +11,7 @@
27551 #ifndef _XTENSA_CORE_CONFIGURATION_H
27552 #define _XTENSA_CORE_CONFIGURATION_H
27553
27554+#include <linux/const.h>
27555
27556 /****************************************************************************
27557 Parameters Useful for Any Code, USER or PRIVILEGED
27558@@ -118,9 +119,9 @@
27559 ----------------------------------------------------------------------*/
27560
27561 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27562-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27563 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27564 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27565+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27566
27567 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27568 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27569diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27570index 58916af..9cb880b 100644
27571--- a/block/blk-iopoll.c
27572+++ b/block/blk-iopoll.c
27573@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27574 }
27575 EXPORT_SYMBOL(blk_iopoll_complete);
27576
27577-static void blk_iopoll_softirq(struct softirq_action *h)
27578+static void blk_iopoll_softirq(void)
27579 {
27580 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27581 int rearm = 0, budget = blk_iopoll_budget;
27582diff --git a/block/blk-map.c b/block/blk-map.c
27583index 623e1cd..ca1e109 100644
27584--- a/block/blk-map.c
27585+++ b/block/blk-map.c
27586@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27587 if (!len || !kbuf)
27588 return -EINVAL;
27589
27590- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27591+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27592 if (do_copy)
27593 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27594 else
27595diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27596index 1366a89..e17f54b 100644
27597--- a/block/blk-softirq.c
27598+++ b/block/blk-softirq.c
27599@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27600 * Softirq action handler - move entries to local list and loop over them
27601 * while passing them to the queue registered handler.
27602 */
27603-static void blk_done_softirq(struct softirq_action *h)
27604+static void blk_done_softirq(void)
27605 {
27606 struct list_head *cpu_list, local_list;
27607
27608diff --git a/block/bsg.c b/block/bsg.c
27609index ff64ae3..593560c 100644
27610--- a/block/bsg.c
27611+++ b/block/bsg.c
27612@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27613 struct sg_io_v4 *hdr, struct bsg_device *bd,
27614 fmode_t has_write_perm)
27615 {
27616+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27617+ unsigned char *cmdptr;
27618+
27619 if (hdr->request_len > BLK_MAX_CDB) {
27620 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27621 if (!rq->cmd)
27622 return -ENOMEM;
27623- }
27624+ cmdptr = rq->cmd;
27625+ } else
27626+ cmdptr = tmpcmd;
27627
27628- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27629+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27630 hdr->request_len))
27631 return -EFAULT;
27632
27633+ if (cmdptr != rq->cmd)
27634+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27635+
27636 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27637 if (blk_verify_command(rq->cmd, has_write_perm))
27638 return -EPERM;
27639diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27640index 7c668c8..db3521c 100644
27641--- a/block/compat_ioctl.c
27642+++ b/block/compat_ioctl.c
27643@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27644 err |= __get_user(f->spec1, &uf->spec1);
27645 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27646 err |= __get_user(name, &uf->name);
27647- f->name = compat_ptr(name);
27648+ f->name = (void __force_kernel *)compat_ptr(name);
27649 if (err) {
27650 err = -EFAULT;
27651 goto out;
27652diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27653index 6296b40..417c00f 100644
27654--- a/block/partitions/efi.c
27655+++ b/block/partitions/efi.c
27656@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27657 if (!gpt)
27658 return NULL;
27659
27660+ if (!le32_to_cpu(gpt->num_partition_entries))
27661+ return NULL;
27662+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27663+ if (!pte)
27664+ return NULL;
27665+
27666 count = le32_to_cpu(gpt->num_partition_entries) *
27667 le32_to_cpu(gpt->sizeof_partition_entry);
27668- if (!count)
27669- return NULL;
27670- pte = kzalloc(count, GFP_KERNEL);
27671- if (!pte)
27672- return NULL;
27673-
27674 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27675 (u8 *) pte,
27676 count) < count) {
27677diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27678index 260fa80..e8f3caf 100644
27679--- a/block/scsi_ioctl.c
27680+++ b/block/scsi_ioctl.c
27681@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27682 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27683 struct sg_io_hdr *hdr, fmode_t mode)
27684 {
27685- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27686+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27687+ unsigned char *cmdptr;
27688+
27689+ if (rq->cmd != rq->__cmd)
27690+ cmdptr = rq->cmd;
27691+ else
27692+ cmdptr = tmpcmd;
27693+
27694+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27695 return -EFAULT;
27696+
27697+ if (cmdptr != rq->cmd)
27698+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27699+
27700 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27701 return -EPERM;
27702
27703@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27704 int err;
27705 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27706 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27707+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27708+ unsigned char *cmdptr;
27709
27710 if (!sic)
27711 return -EINVAL;
27712@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27713 */
27714 err = -EFAULT;
27715 rq->cmd_len = cmdlen;
27716- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27717+
27718+ if (rq->cmd != rq->__cmd)
27719+ cmdptr = rq->cmd;
27720+ else
27721+ cmdptr = tmpcmd;
27722+
27723+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27724 goto error;
27725
27726+ if (rq->cmd != cmdptr)
27727+ memcpy(rq->cmd, cmdptr, cmdlen);
27728+
27729 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27730 goto error;
27731
27732diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
27733index a0f768c..1da9c73 100644
27734--- a/crypto/ablkcipher.c
27735+++ b/crypto/ablkcipher.c
27736@@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req,
27737 EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
27738
27739 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27740+ unsigned int keylen) __size_overflow(3);
27741+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27742 unsigned int keylen)
27743 {
27744 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27745@@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27746 }
27747
27748 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27749+ unsigned int keylen) __size_overflow(3);
27750+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27751 unsigned int keylen)
27752 {
27753 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27754diff --git a/crypto/aead.c b/crypto/aead.c
27755index 04add3dc..983032f 100644
27756--- a/crypto/aead.c
27757+++ b/crypto/aead.c
27758@@ -27,6 +27,8 @@
27759 #include "internal.h"
27760
27761 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27762+ unsigned int keylen) __size_overflow(3);
27763+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27764 unsigned int keylen)
27765 {
27766 struct aead_alg *aead = crypto_aead_alg(tfm);
27767@@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27768 return ret;
27769 }
27770
27771+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27772 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
27773 {
27774 struct aead_alg *aead = crypto_aead_alg(tfm);
27775diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
27776index 1e61d1a..cf06b86 100644
27777--- a/crypto/blkcipher.c
27778+++ b/crypto/blkcipher.c
27779@@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
27780 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
27781
27782 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27783+ unsigned int keylen) __size_overflow(3);
27784+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27785 unsigned int keylen)
27786 {
27787 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27788@@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27789 return ret;
27790 }
27791
27792+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27793 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27794 {
27795 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27796diff --git a/crypto/cipher.c b/crypto/cipher.c
27797index 39541e0..802d956 100644
27798--- a/crypto/cipher.c
27799+++ b/crypto/cipher.c
27800@@ -21,6 +21,8 @@
27801 #include "internal.h"
27802
27803 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27804+ unsigned int keylen) __size_overflow(3);
27805+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27806 unsigned int keylen)
27807 {
27808 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27809@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27810
27811 }
27812
27813+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27814 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27815 {
27816 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27817diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27818index 671d4d6..5f24030 100644
27819--- a/crypto/cryptd.c
27820+++ b/crypto/cryptd.c
27821@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27822
27823 struct cryptd_blkcipher_request_ctx {
27824 crypto_completion_t complete;
27825-};
27826+} __no_const;
27827
27828 struct cryptd_hash_ctx {
27829 struct crypto_shash *child;
27830@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27831
27832 struct cryptd_aead_request_ctx {
27833 crypto_completion_t complete;
27834-};
27835+} __no_const;
27836
27837 static void cryptd_queue_worker(struct work_struct *work);
27838
27839diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27840index 5d41894..22021e4 100644
27841--- a/drivers/acpi/apei/cper.c
27842+++ b/drivers/acpi/apei/cper.c
27843@@ -38,12 +38,12 @@
27844 */
27845 u64 cper_next_record_id(void)
27846 {
27847- static atomic64_t seq;
27848+ static atomic64_unchecked_t seq;
27849
27850- if (!atomic64_read(&seq))
27851- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27852+ if (!atomic64_read_unchecked(&seq))
27853+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27854
27855- return atomic64_inc_return(&seq);
27856+ return atomic64_inc_return_unchecked(&seq);
27857 }
27858 EXPORT_SYMBOL_GPL(cper_next_record_id);
27859
27860diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27861index 86933ca..5cb1a69 100644
27862--- a/drivers/acpi/battery.c
27863+++ b/drivers/acpi/battery.c
27864@@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
27865
27866 static ssize_t acpi_battery_write_alarm(struct file *file,
27867 const char __user * buffer,
27868+ size_t count, loff_t * ppos) __size_overflow(3);
27869+static ssize_t acpi_battery_write_alarm(struct file *file,
27870+ const char __user * buffer,
27871 size_t count, loff_t * ppos)
27872 {
27873 int result = 0;
27874diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27875index b258cab..3fb7da7 100644
27876--- a/drivers/acpi/ec_sys.c
27877+++ b/drivers/acpi/ec_sys.c
27878@@ -12,6 +12,7 @@
27879 #include <linux/acpi.h>
27880 #include <linux/debugfs.h>
27881 #include <linux/module.h>
27882+#include <linux/uaccess.h>
27883 #include "internal.h"
27884
27885 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27886@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27887 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27888 */
27889 unsigned int size = EC_SPACE_SIZE;
27890- u8 *data = (u8 *) buf;
27891+ u8 data;
27892 loff_t init_off = *off;
27893 int err = 0;
27894
27895@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27896 size = count;
27897
27898 while (size) {
27899- err = ec_read(*off, &data[*off - init_off]);
27900+ err = ec_read(*off, &data);
27901 if (err)
27902 return err;
27903+ if (put_user(data, &buf[*off - init_off]))
27904+ return -EFAULT;
27905 *off += 1;
27906 size--;
27907 }
27908@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27909
27910 unsigned int size = count;
27911 loff_t init_off = *off;
27912- u8 *data = (u8 *) buf;
27913 int err = 0;
27914
27915 if (*off >= EC_SPACE_SIZE)
27916@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27917 }
27918
27919 while (size) {
27920- u8 byte_write = data[*off - init_off];
27921+ u8 byte_write;
27922+ if (get_user(byte_write, &buf[*off - init_off]))
27923+ return -EFAULT;
27924 err = ec_write(*off, byte_write);
27925 if (err)
27926 return err;
27927diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27928index 251c7b62..000462d 100644
27929--- a/drivers/acpi/proc.c
27930+++ b/drivers/acpi/proc.c
27931@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27932 size_t count, loff_t * ppos)
27933 {
27934 struct list_head *node, *next;
27935- char strbuf[5];
27936- char str[5] = "";
27937- unsigned int len = count;
27938+ char strbuf[5] = {0};
27939
27940- if (len > 4)
27941- len = 4;
27942- if (len < 0)
27943+ if (count > 4)
27944+ count = 4;
27945+ if (copy_from_user(strbuf, buffer, count))
27946 return -EFAULT;
27947-
27948- if (copy_from_user(strbuf, buffer, len))
27949- return -EFAULT;
27950- strbuf[len] = '\0';
27951- sscanf(strbuf, "%s", str);
27952+ strbuf[count] = '\0';
27953
27954 mutex_lock(&acpi_device_lock);
27955 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27956@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27957 if (!dev->wakeup.flags.valid)
27958 continue;
27959
27960- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27961+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27962 if (device_can_wakeup(&dev->dev)) {
27963 bool enable = !device_may_wakeup(&dev->dev);
27964 device_set_wakeup_enable(&dev->dev, enable);
27965diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27966index 8ae05ce..7dbbed9 100644
27967--- a/drivers/acpi/processor_driver.c
27968+++ b/drivers/acpi/processor_driver.c
27969@@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27970 return 0;
27971 #endif
27972
27973- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27974+ BUG_ON(pr->id >= nr_cpu_ids);
27975
27976 /*
27977 * Buggy BIOS check
27978diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
27979index 6e36d0c..f319944 100644
27980--- a/drivers/acpi/sbs.c
27981+++ b/drivers/acpi/sbs.c
27982@@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
27983
27984 static ssize_t
27985 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27986+ size_t count, loff_t * ppos) __size_overflow(3);
27987+static ssize_t
27988+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27989 size_t count, loff_t * ppos)
27990 {
27991 struct seq_file *seq = file->private_data;
27992diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27993index c06e0ec..a2c06ba 100644
27994--- a/drivers/ata/libata-core.c
27995+++ b/drivers/ata/libata-core.c
27996@@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27997 struct ata_port *ap;
27998 unsigned int tag;
27999
28000- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28001+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28002 ap = qc->ap;
28003
28004 qc->flags = 0;
28005@@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
28006 struct ata_port *ap;
28007 struct ata_link *link;
28008
28009- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28010+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28011 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
28012 ap = qc->ap;
28013 link = qc->dev->link;
28014@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28015 return;
28016
28017 spin_lock(&lock);
28018+ pax_open_kernel();
28019
28020 for (cur = ops->inherits; cur; cur = cur->inherits) {
28021 void **inherit = (void **)cur;
28022@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28023 if (IS_ERR(*pp))
28024 *pp = NULL;
28025
28026- ops->inherits = NULL;
28027+ *(struct ata_port_operations **)&ops->inherits = NULL;
28028
28029+ pax_close_kernel();
28030 spin_unlock(&lock);
28031 }
28032
28033diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
28034index 048589f..4002b98 100644
28035--- a/drivers/ata/pata_arasan_cf.c
28036+++ b/drivers/ata/pata_arasan_cf.c
28037@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
28038 /* Handle platform specific quirks */
28039 if (pdata->quirk) {
28040 if (pdata->quirk & CF_BROKEN_PIO) {
28041- ap->ops->set_piomode = NULL;
28042+ pax_open_kernel();
28043+ *(void **)&ap->ops->set_piomode = NULL;
28044+ pax_close_kernel();
28045 ap->pio_mask = 0;
28046 }
28047 if (pdata->quirk & CF_BROKEN_MWDMA)
28048diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
28049index f9b983a..887b9d8 100644
28050--- a/drivers/atm/adummy.c
28051+++ b/drivers/atm/adummy.c
28052@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
28053 vcc->pop(vcc, skb);
28054 else
28055 dev_kfree_skb_any(skb);
28056- atomic_inc(&vcc->stats->tx);
28057+ atomic_inc_unchecked(&vcc->stats->tx);
28058
28059 return 0;
28060 }
28061diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
28062index f8f41e0..1f987dd 100644
28063--- a/drivers/atm/ambassador.c
28064+++ b/drivers/atm/ambassador.c
28065@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
28066 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
28067
28068 // VC layer stats
28069- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28070+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28071
28072 // free the descriptor
28073 kfree (tx_descr);
28074@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
28075 dump_skb ("<<<", vc, skb);
28076
28077 // VC layer stats
28078- atomic_inc(&atm_vcc->stats->rx);
28079+ atomic_inc_unchecked(&atm_vcc->stats->rx);
28080 __net_timestamp(skb);
28081 // end of our responsibility
28082 atm_vcc->push (atm_vcc, skb);
28083@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
28084 } else {
28085 PRINTK (KERN_INFO, "dropped over-size frame");
28086 // should we count this?
28087- atomic_inc(&atm_vcc->stats->rx_drop);
28088+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28089 }
28090
28091 } else {
28092@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
28093 }
28094
28095 if (check_area (skb->data, skb->len)) {
28096- atomic_inc(&atm_vcc->stats->tx_err);
28097+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
28098 return -ENOMEM; // ?
28099 }
28100
28101diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
28102index b22d71c..d6e1049 100644
28103--- a/drivers/atm/atmtcp.c
28104+++ b/drivers/atm/atmtcp.c
28105@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
28106 if (vcc->pop) vcc->pop(vcc,skb);
28107 else dev_kfree_skb(skb);
28108 if (dev_data) return 0;
28109- atomic_inc(&vcc->stats->tx_err);
28110+ atomic_inc_unchecked(&vcc->stats->tx_err);
28111 return -ENOLINK;
28112 }
28113 size = skb->len+sizeof(struct atmtcp_hdr);
28114@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
28115 if (!new_skb) {
28116 if (vcc->pop) vcc->pop(vcc,skb);
28117 else dev_kfree_skb(skb);
28118- atomic_inc(&vcc->stats->tx_err);
28119+ atomic_inc_unchecked(&vcc->stats->tx_err);
28120 return -ENOBUFS;
28121 }
28122 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
28123@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
28124 if (vcc->pop) vcc->pop(vcc,skb);
28125 else dev_kfree_skb(skb);
28126 out_vcc->push(out_vcc,new_skb);
28127- atomic_inc(&vcc->stats->tx);
28128- atomic_inc(&out_vcc->stats->rx);
28129+ atomic_inc_unchecked(&vcc->stats->tx);
28130+ atomic_inc_unchecked(&out_vcc->stats->rx);
28131 return 0;
28132 }
28133
28134@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
28135 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
28136 read_unlock(&vcc_sklist_lock);
28137 if (!out_vcc) {
28138- atomic_inc(&vcc->stats->tx_err);
28139+ atomic_inc_unchecked(&vcc->stats->tx_err);
28140 goto done;
28141 }
28142 skb_pull(skb,sizeof(struct atmtcp_hdr));
28143@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
28144 __net_timestamp(new_skb);
28145 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
28146 out_vcc->push(out_vcc,new_skb);
28147- atomic_inc(&vcc->stats->tx);
28148- atomic_inc(&out_vcc->stats->rx);
28149+ atomic_inc_unchecked(&vcc->stats->tx);
28150+ atomic_inc_unchecked(&out_vcc->stats->rx);
28151 done:
28152 if (vcc->pop) vcc->pop(vcc,skb);
28153 else dev_kfree_skb(skb);
28154diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
28155index 956e9ac..133516d 100644
28156--- a/drivers/atm/eni.c
28157+++ b/drivers/atm/eni.c
28158@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
28159 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
28160 vcc->dev->number);
28161 length = 0;
28162- atomic_inc(&vcc->stats->rx_err);
28163+ atomic_inc_unchecked(&vcc->stats->rx_err);
28164 }
28165 else {
28166 length = ATM_CELL_SIZE-1; /* no HEC */
28167@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28168 size);
28169 }
28170 eff = length = 0;
28171- atomic_inc(&vcc->stats->rx_err);
28172+ atomic_inc_unchecked(&vcc->stats->rx_err);
28173 }
28174 else {
28175 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
28176@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
28177 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
28178 vcc->dev->number,vcc->vci,length,size << 2,descr);
28179 length = eff = 0;
28180- atomic_inc(&vcc->stats->rx_err);
28181+ atomic_inc_unchecked(&vcc->stats->rx_err);
28182 }
28183 }
28184 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
28185@@ -771,7 +771,7 @@ rx_dequeued++;
28186 vcc->push(vcc,skb);
28187 pushed++;
28188 }
28189- atomic_inc(&vcc->stats->rx);
28190+ atomic_inc_unchecked(&vcc->stats->rx);
28191 }
28192 wake_up(&eni_dev->rx_wait);
28193 }
28194@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
28195 PCI_DMA_TODEVICE);
28196 if (vcc->pop) vcc->pop(vcc,skb);
28197 else dev_kfree_skb_irq(skb);
28198- atomic_inc(&vcc->stats->tx);
28199+ atomic_inc_unchecked(&vcc->stats->tx);
28200 wake_up(&eni_dev->tx_wait);
28201 dma_complete++;
28202 }
28203@@ -1569,7 +1569,7 @@ tx_complete++;
28204 /*--------------------------------- entries ---------------------------------*/
28205
28206
28207-static const char *media_name[] __devinitdata = {
28208+static const char *media_name[] __devinitconst = {
28209 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
28210 "UTP", "05?", "06?", "07?", /* 4- 7 */
28211 "TAXI","09?", "10?", "11?", /* 8-11 */
28212diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
28213index 5072f8a..fa52520d 100644
28214--- a/drivers/atm/firestream.c
28215+++ b/drivers/atm/firestream.c
28216@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
28217 }
28218 }
28219
28220- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28221+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28222
28223 fs_dprintk (FS_DEBUG_TXMEM, "i");
28224 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
28225@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28226 #endif
28227 skb_put (skb, qe->p1 & 0xffff);
28228 ATM_SKB(skb)->vcc = atm_vcc;
28229- atomic_inc(&atm_vcc->stats->rx);
28230+ atomic_inc_unchecked(&atm_vcc->stats->rx);
28231 __net_timestamp(skb);
28232 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
28233 atm_vcc->push (atm_vcc, skb);
28234@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28235 kfree (pe);
28236 }
28237 if (atm_vcc)
28238- atomic_inc(&atm_vcc->stats->rx_drop);
28239+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28240 break;
28241 case 0x1f: /* Reassembly abort: no buffers. */
28242 /* Silently increment error counter. */
28243 if (atm_vcc)
28244- atomic_inc(&atm_vcc->stats->rx_drop);
28245+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28246 break;
28247 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28248 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
28249diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28250index 361f5ae..7fc552d 100644
28251--- a/drivers/atm/fore200e.c
28252+++ b/drivers/atm/fore200e.c
28253@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
28254 #endif
28255 /* check error condition */
28256 if (*entry->status & STATUS_ERROR)
28257- atomic_inc(&vcc->stats->tx_err);
28258+ atomic_inc_unchecked(&vcc->stats->tx_err);
28259 else
28260- atomic_inc(&vcc->stats->tx);
28261+ atomic_inc_unchecked(&vcc->stats->tx);
28262 }
28263 }
28264
28265@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28266 if (skb == NULL) {
28267 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28268
28269- atomic_inc(&vcc->stats->rx_drop);
28270+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28271 return -ENOMEM;
28272 }
28273
28274@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28275
28276 dev_kfree_skb_any(skb);
28277
28278- atomic_inc(&vcc->stats->rx_drop);
28279+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28280 return -ENOMEM;
28281 }
28282
28283 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28284
28285 vcc->push(vcc, skb);
28286- atomic_inc(&vcc->stats->rx);
28287+ atomic_inc_unchecked(&vcc->stats->rx);
28288
28289 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28290
28291@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28292 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28293 fore200e->atm_dev->number,
28294 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28295- atomic_inc(&vcc->stats->rx_err);
28296+ atomic_inc_unchecked(&vcc->stats->rx_err);
28297 }
28298 }
28299
28300@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28301 goto retry_here;
28302 }
28303
28304- atomic_inc(&vcc->stats->tx_err);
28305+ atomic_inc_unchecked(&vcc->stats->tx_err);
28306
28307 fore200e->tx_sat++;
28308 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28309diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28310index b182c2f..1c6fa8a 100644
28311--- a/drivers/atm/he.c
28312+++ b/drivers/atm/he.c
28313@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28314
28315 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28316 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28317- atomic_inc(&vcc->stats->rx_drop);
28318+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28319 goto return_host_buffers;
28320 }
28321
28322@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28323 RBRQ_LEN_ERR(he_dev->rbrq_head)
28324 ? "LEN_ERR" : "",
28325 vcc->vpi, vcc->vci);
28326- atomic_inc(&vcc->stats->rx_err);
28327+ atomic_inc_unchecked(&vcc->stats->rx_err);
28328 goto return_host_buffers;
28329 }
28330
28331@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28332 vcc->push(vcc, skb);
28333 spin_lock(&he_dev->global_lock);
28334
28335- atomic_inc(&vcc->stats->rx);
28336+ atomic_inc_unchecked(&vcc->stats->rx);
28337
28338 return_host_buffers:
28339 ++pdus_assembled;
28340@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28341 tpd->vcc->pop(tpd->vcc, tpd->skb);
28342 else
28343 dev_kfree_skb_any(tpd->skb);
28344- atomic_inc(&tpd->vcc->stats->tx_err);
28345+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28346 }
28347 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28348 return;
28349@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28350 vcc->pop(vcc, skb);
28351 else
28352 dev_kfree_skb_any(skb);
28353- atomic_inc(&vcc->stats->tx_err);
28354+ atomic_inc_unchecked(&vcc->stats->tx_err);
28355 return -EINVAL;
28356 }
28357
28358@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28359 vcc->pop(vcc, skb);
28360 else
28361 dev_kfree_skb_any(skb);
28362- atomic_inc(&vcc->stats->tx_err);
28363+ atomic_inc_unchecked(&vcc->stats->tx_err);
28364 return -EINVAL;
28365 }
28366 #endif
28367@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28368 vcc->pop(vcc, skb);
28369 else
28370 dev_kfree_skb_any(skb);
28371- atomic_inc(&vcc->stats->tx_err);
28372+ atomic_inc_unchecked(&vcc->stats->tx_err);
28373 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28374 return -ENOMEM;
28375 }
28376@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28377 vcc->pop(vcc, skb);
28378 else
28379 dev_kfree_skb_any(skb);
28380- atomic_inc(&vcc->stats->tx_err);
28381+ atomic_inc_unchecked(&vcc->stats->tx_err);
28382 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28383 return -ENOMEM;
28384 }
28385@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28386 __enqueue_tpd(he_dev, tpd, cid);
28387 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28388
28389- atomic_inc(&vcc->stats->tx);
28390+ atomic_inc_unchecked(&vcc->stats->tx);
28391
28392 return 0;
28393 }
28394diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28395index b812103..e391a49 100644
28396--- a/drivers/atm/horizon.c
28397+++ b/drivers/atm/horizon.c
28398@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28399 {
28400 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28401 // VC layer stats
28402- atomic_inc(&vcc->stats->rx);
28403+ atomic_inc_unchecked(&vcc->stats->rx);
28404 __net_timestamp(skb);
28405 // end of our responsibility
28406 vcc->push (vcc, skb);
28407@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28408 dev->tx_iovec = NULL;
28409
28410 // VC layer stats
28411- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28412+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28413
28414 // free the skb
28415 hrz_kfree_skb (skb);
28416diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28417index 1c05212..c28e200 100644
28418--- a/drivers/atm/idt77252.c
28419+++ b/drivers/atm/idt77252.c
28420@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28421 else
28422 dev_kfree_skb(skb);
28423
28424- atomic_inc(&vcc->stats->tx);
28425+ atomic_inc_unchecked(&vcc->stats->tx);
28426 }
28427
28428 atomic_dec(&scq->used);
28429@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28430 if ((sb = dev_alloc_skb(64)) == NULL) {
28431 printk("%s: Can't allocate buffers for aal0.\n",
28432 card->name);
28433- atomic_add(i, &vcc->stats->rx_drop);
28434+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28435 break;
28436 }
28437 if (!atm_charge(vcc, sb->truesize)) {
28438 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28439 card->name);
28440- atomic_add(i - 1, &vcc->stats->rx_drop);
28441+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28442 dev_kfree_skb(sb);
28443 break;
28444 }
28445@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28446 ATM_SKB(sb)->vcc = vcc;
28447 __net_timestamp(sb);
28448 vcc->push(vcc, sb);
28449- atomic_inc(&vcc->stats->rx);
28450+ atomic_inc_unchecked(&vcc->stats->rx);
28451
28452 cell += ATM_CELL_PAYLOAD;
28453 }
28454@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28455 "(CDC: %08x)\n",
28456 card->name, len, rpp->len, readl(SAR_REG_CDC));
28457 recycle_rx_pool_skb(card, rpp);
28458- atomic_inc(&vcc->stats->rx_err);
28459+ atomic_inc_unchecked(&vcc->stats->rx_err);
28460 return;
28461 }
28462 if (stat & SAR_RSQE_CRC) {
28463 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28464 recycle_rx_pool_skb(card, rpp);
28465- atomic_inc(&vcc->stats->rx_err);
28466+ atomic_inc_unchecked(&vcc->stats->rx_err);
28467 return;
28468 }
28469 if (skb_queue_len(&rpp->queue) > 1) {
28470@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28471 RXPRINTK("%s: Can't alloc RX skb.\n",
28472 card->name);
28473 recycle_rx_pool_skb(card, rpp);
28474- atomic_inc(&vcc->stats->rx_err);
28475+ atomic_inc_unchecked(&vcc->stats->rx_err);
28476 return;
28477 }
28478 if (!atm_charge(vcc, skb->truesize)) {
28479@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28480 __net_timestamp(skb);
28481
28482 vcc->push(vcc, skb);
28483- atomic_inc(&vcc->stats->rx);
28484+ atomic_inc_unchecked(&vcc->stats->rx);
28485
28486 return;
28487 }
28488@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28489 __net_timestamp(skb);
28490
28491 vcc->push(vcc, skb);
28492- atomic_inc(&vcc->stats->rx);
28493+ atomic_inc_unchecked(&vcc->stats->rx);
28494
28495 if (skb->truesize > SAR_FB_SIZE_3)
28496 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28497@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28498 if (vcc->qos.aal != ATM_AAL0) {
28499 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28500 card->name, vpi, vci);
28501- atomic_inc(&vcc->stats->rx_drop);
28502+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28503 goto drop;
28504 }
28505
28506 if ((sb = dev_alloc_skb(64)) == NULL) {
28507 printk("%s: Can't allocate buffers for AAL0.\n",
28508 card->name);
28509- atomic_inc(&vcc->stats->rx_err);
28510+ atomic_inc_unchecked(&vcc->stats->rx_err);
28511 goto drop;
28512 }
28513
28514@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28515 ATM_SKB(sb)->vcc = vcc;
28516 __net_timestamp(sb);
28517 vcc->push(vcc, sb);
28518- atomic_inc(&vcc->stats->rx);
28519+ atomic_inc_unchecked(&vcc->stats->rx);
28520
28521 drop:
28522 skb_pull(queue, 64);
28523@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28524
28525 if (vc == NULL) {
28526 printk("%s: NULL connection in send().\n", card->name);
28527- atomic_inc(&vcc->stats->tx_err);
28528+ atomic_inc_unchecked(&vcc->stats->tx_err);
28529 dev_kfree_skb(skb);
28530 return -EINVAL;
28531 }
28532 if (!test_bit(VCF_TX, &vc->flags)) {
28533 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28534- atomic_inc(&vcc->stats->tx_err);
28535+ atomic_inc_unchecked(&vcc->stats->tx_err);
28536 dev_kfree_skb(skb);
28537 return -EINVAL;
28538 }
28539@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28540 break;
28541 default:
28542 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28543- atomic_inc(&vcc->stats->tx_err);
28544+ atomic_inc_unchecked(&vcc->stats->tx_err);
28545 dev_kfree_skb(skb);
28546 return -EINVAL;
28547 }
28548
28549 if (skb_shinfo(skb)->nr_frags != 0) {
28550 printk("%s: No scatter-gather yet.\n", card->name);
28551- atomic_inc(&vcc->stats->tx_err);
28552+ atomic_inc_unchecked(&vcc->stats->tx_err);
28553 dev_kfree_skb(skb);
28554 return -EINVAL;
28555 }
28556@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28557
28558 err = queue_skb(card, vc, skb, oam);
28559 if (err) {
28560- atomic_inc(&vcc->stats->tx_err);
28561+ atomic_inc_unchecked(&vcc->stats->tx_err);
28562 dev_kfree_skb(skb);
28563 return err;
28564 }
28565@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28566 skb = dev_alloc_skb(64);
28567 if (!skb) {
28568 printk("%s: Out of memory in send_oam().\n", card->name);
28569- atomic_inc(&vcc->stats->tx_err);
28570+ atomic_inc_unchecked(&vcc->stats->tx_err);
28571 return -ENOMEM;
28572 }
28573 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28574diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28575index 9e373ba..cf93727 100644
28576--- a/drivers/atm/iphase.c
28577+++ b/drivers/atm/iphase.c
28578@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
28579 status = (u_short) (buf_desc_ptr->desc_mode);
28580 if (status & (RX_CER | RX_PTE | RX_OFL))
28581 {
28582- atomic_inc(&vcc->stats->rx_err);
28583+ atomic_inc_unchecked(&vcc->stats->rx_err);
28584 IF_ERR(printk("IA: bad packet, dropping it");)
28585 if (status & RX_CER) {
28586 IF_ERR(printk(" cause: packet CRC error\n");)
28587@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
28588 len = dma_addr - buf_addr;
28589 if (len > iadev->rx_buf_sz) {
28590 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28591- atomic_inc(&vcc->stats->rx_err);
28592+ atomic_inc_unchecked(&vcc->stats->rx_err);
28593 goto out_free_desc;
28594 }
28595
28596@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28597 ia_vcc = INPH_IA_VCC(vcc);
28598 if (ia_vcc == NULL)
28599 {
28600- atomic_inc(&vcc->stats->rx_err);
28601+ atomic_inc_unchecked(&vcc->stats->rx_err);
28602 atm_return(vcc, skb->truesize);
28603 dev_kfree_skb_any(skb);
28604 goto INCR_DLE;
28605@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28606 if ((length > iadev->rx_buf_sz) || (length >
28607 (skb->len - sizeof(struct cpcs_trailer))))
28608 {
28609- atomic_inc(&vcc->stats->rx_err);
28610+ atomic_inc_unchecked(&vcc->stats->rx_err);
28611 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28612 length, skb->len);)
28613 atm_return(vcc, skb->truesize);
28614@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28615
28616 IF_RX(printk("rx_dle_intr: skb push");)
28617 vcc->push(vcc,skb);
28618- atomic_inc(&vcc->stats->rx);
28619+ atomic_inc_unchecked(&vcc->stats->rx);
28620 iadev->rx_pkt_cnt++;
28621 }
28622 INCR_DLE:
28623@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28624 {
28625 struct k_sonet_stats *stats;
28626 stats = &PRIV(_ia_dev[board])->sonet_stats;
28627- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28628- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28629- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28630- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28631- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28632- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28633- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28634- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28635- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28636+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28637+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28638+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28639+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28640+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28641+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28642+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28643+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28644+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28645 }
28646 ia_cmds.status = 0;
28647 break;
28648@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28649 if ((desc == 0) || (desc > iadev->num_tx_desc))
28650 {
28651 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28652- atomic_inc(&vcc->stats->tx);
28653+ atomic_inc_unchecked(&vcc->stats->tx);
28654 if (vcc->pop)
28655 vcc->pop(vcc, skb);
28656 else
28657@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28658 ATM_DESC(skb) = vcc->vci;
28659 skb_queue_tail(&iadev->tx_dma_q, skb);
28660
28661- atomic_inc(&vcc->stats->tx);
28662+ atomic_inc_unchecked(&vcc->stats->tx);
28663 iadev->tx_pkt_cnt++;
28664 /* Increment transaction counter */
28665 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28666
28667 #if 0
28668 /* add flow control logic */
28669- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28670+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28671 if (iavcc->vc_desc_cnt > 10) {
28672 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28673 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28674diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28675index f556969..0da15eb 100644
28676--- a/drivers/atm/lanai.c
28677+++ b/drivers/atm/lanai.c
28678@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28679 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28680 lanai_endtx(lanai, lvcc);
28681 lanai_free_skb(lvcc->tx.atmvcc, skb);
28682- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28683+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28684 }
28685
28686 /* Try to fill the buffer - don't call unless there is backlog */
28687@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28688 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28689 __net_timestamp(skb);
28690 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28691- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28692+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28693 out:
28694 lvcc->rx.buf.ptr = end;
28695 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28696@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28697 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28698 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28699 lanai->stats.service_rxnotaal5++;
28700- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28701+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28702 return 0;
28703 }
28704 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28705@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28706 int bytes;
28707 read_unlock(&vcc_sklist_lock);
28708 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28709- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28710+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28711 lvcc->stats.x.aal5.service_trash++;
28712 bytes = (SERVICE_GET_END(s) * 16) -
28713 (((unsigned long) lvcc->rx.buf.ptr) -
28714@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28715 }
28716 if (s & SERVICE_STREAM) {
28717 read_unlock(&vcc_sklist_lock);
28718- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28719+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28720 lvcc->stats.x.aal5.service_stream++;
28721 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28722 "PDU on VCI %d!\n", lanai->number, vci);
28723@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28724 return 0;
28725 }
28726 DPRINTK("got rx crc error on vci %d\n", vci);
28727- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28728+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28729 lvcc->stats.x.aal5.service_rxcrc++;
28730 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28731 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28732diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28733index 1c70c45..300718d 100644
28734--- a/drivers/atm/nicstar.c
28735+++ b/drivers/atm/nicstar.c
28736@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28737 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28738 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28739 card->index);
28740- atomic_inc(&vcc->stats->tx_err);
28741+ atomic_inc_unchecked(&vcc->stats->tx_err);
28742 dev_kfree_skb_any(skb);
28743 return -EINVAL;
28744 }
28745@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28746 if (!vc->tx) {
28747 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28748 card->index);
28749- atomic_inc(&vcc->stats->tx_err);
28750+ atomic_inc_unchecked(&vcc->stats->tx_err);
28751 dev_kfree_skb_any(skb);
28752 return -EINVAL;
28753 }
28754@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28755 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28756 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28757 card->index);
28758- atomic_inc(&vcc->stats->tx_err);
28759+ atomic_inc_unchecked(&vcc->stats->tx_err);
28760 dev_kfree_skb_any(skb);
28761 return -EINVAL;
28762 }
28763
28764 if (skb_shinfo(skb)->nr_frags != 0) {
28765 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28766- atomic_inc(&vcc->stats->tx_err);
28767+ atomic_inc_unchecked(&vcc->stats->tx_err);
28768 dev_kfree_skb_any(skb);
28769 return -EINVAL;
28770 }
28771@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28772 }
28773
28774 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28775- atomic_inc(&vcc->stats->tx_err);
28776+ atomic_inc_unchecked(&vcc->stats->tx_err);
28777 dev_kfree_skb_any(skb);
28778 return -EIO;
28779 }
28780- atomic_inc(&vcc->stats->tx);
28781+ atomic_inc_unchecked(&vcc->stats->tx);
28782
28783 return 0;
28784 }
28785@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28786 printk
28787 ("nicstar%d: Can't allocate buffers for aal0.\n",
28788 card->index);
28789- atomic_add(i, &vcc->stats->rx_drop);
28790+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28791 break;
28792 }
28793 if (!atm_charge(vcc, sb->truesize)) {
28794 RXPRINTK
28795 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28796 card->index);
28797- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28798+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28799 dev_kfree_skb_any(sb);
28800 break;
28801 }
28802@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28803 ATM_SKB(sb)->vcc = vcc;
28804 __net_timestamp(sb);
28805 vcc->push(vcc, sb);
28806- atomic_inc(&vcc->stats->rx);
28807+ atomic_inc_unchecked(&vcc->stats->rx);
28808 cell += ATM_CELL_PAYLOAD;
28809 }
28810
28811@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28812 if (iovb == NULL) {
28813 printk("nicstar%d: Out of iovec buffers.\n",
28814 card->index);
28815- atomic_inc(&vcc->stats->rx_drop);
28816+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28817 recycle_rx_buf(card, skb);
28818 return;
28819 }
28820@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28821 small or large buffer itself. */
28822 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28823 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28824- atomic_inc(&vcc->stats->rx_err);
28825+ atomic_inc_unchecked(&vcc->stats->rx_err);
28826 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28827 NS_MAX_IOVECS);
28828 NS_PRV_IOVCNT(iovb) = 0;
28829@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28830 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28831 card->index);
28832 which_list(card, skb);
28833- atomic_inc(&vcc->stats->rx_err);
28834+ atomic_inc_unchecked(&vcc->stats->rx_err);
28835 recycle_rx_buf(card, skb);
28836 vc->rx_iov = NULL;
28837 recycle_iov_buf(card, iovb);
28838@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28839 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28840 card->index);
28841 which_list(card, skb);
28842- atomic_inc(&vcc->stats->rx_err);
28843+ atomic_inc_unchecked(&vcc->stats->rx_err);
28844 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28845 NS_PRV_IOVCNT(iovb));
28846 vc->rx_iov = NULL;
28847@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28848 printk(" - PDU size mismatch.\n");
28849 else
28850 printk(".\n");
28851- atomic_inc(&vcc->stats->rx_err);
28852+ atomic_inc_unchecked(&vcc->stats->rx_err);
28853 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28854 NS_PRV_IOVCNT(iovb));
28855 vc->rx_iov = NULL;
28856@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28857 /* skb points to a small buffer */
28858 if (!atm_charge(vcc, skb->truesize)) {
28859 push_rxbufs(card, skb);
28860- atomic_inc(&vcc->stats->rx_drop);
28861+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28862 } else {
28863 skb_put(skb, len);
28864 dequeue_sm_buf(card, skb);
28865@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28866 ATM_SKB(skb)->vcc = vcc;
28867 __net_timestamp(skb);
28868 vcc->push(vcc, skb);
28869- atomic_inc(&vcc->stats->rx);
28870+ atomic_inc_unchecked(&vcc->stats->rx);
28871 }
28872 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28873 struct sk_buff *sb;
28874@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28875 if (len <= NS_SMBUFSIZE) {
28876 if (!atm_charge(vcc, sb->truesize)) {
28877 push_rxbufs(card, sb);
28878- atomic_inc(&vcc->stats->rx_drop);
28879+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28880 } else {
28881 skb_put(sb, len);
28882 dequeue_sm_buf(card, sb);
28883@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28884 ATM_SKB(sb)->vcc = vcc;
28885 __net_timestamp(sb);
28886 vcc->push(vcc, sb);
28887- atomic_inc(&vcc->stats->rx);
28888+ atomic_inc_unchecked(&vcc->stats->rx);
28889 }
28890
28891 push_rxbufs(card, skb);
28892@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28893
28894 if (!atm_charge(vcc, skb->truesize)) {
28895 push_rxbufs(card, skb);
28896- atomic_inc(&vcc->stats->rx_drop);
28897+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28898 } else {
28899 dequeue_lg_buf(card, skb);
28900 #ifdef NS_USE_DESTRUCTORS
28901@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28902 ATM_SKB(skb)->vcc = vcc;
28903 __net_timestamp(skb);
28904 vcc->push(vcc, skb);
28905- atomic_inc(&vcc->stats->rx);
28906+ atomic_inc_unchecked(&vcc->stats->rx);
28907 }
28908
28909 push_rxbufs(card, sb);
28910@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28911 printk
28912 ("nicstar%d: Out of huge buffers.\n",
28913 card->index);
28914- atomic_inc(&vcc->stats->rx_drop);
28915+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28916 recycle_iovec_rx_bufs(card,
28917 (struct iovec *)
28918 iovb->data,
28919@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28920 card->hbpool.count++;
28921 } else
28922 dev_kfree_skb_any(hb);
28923- atomic_inc(&vcc->stats->rx_drop);
28924+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28925 } else {
28926 /* Copy the small buffer to the huge buffer */
28927 sb = (struct sk_buff *)iov->iov_base;
28928@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28929 #endif /* NS_USE_DESTRUCTORS */
28930 __net_timestamp(hb);
28931 vcc->push(vcc, hb);
28932- atomic_inc(&vcc->stats->rx);
28933+ atomic_inc_unchecked(&vcc->stats->rx);
28934 }
28935 }
28936
28937diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28938index e8cd652..bbbd1fc 100644
28939--- a/drivers/atm/solos-pci.c
28940+++ b/drivers/atm/solos-pci.c
28941@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28942 }
28943 atm_charge(vcc, skb->truesize);
28944 vcc->push(vcc, skb);
28945- atomic_inc(&vcc->stats->rx);
28946+ atomic_inc_unchecked(&vcc->stats->rx);
28947 break;
28948
28949 case PKT_STATUS:
28950@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28951 vcc = SKB_CB(oldskb)->vcc;
28952
28953 if (vcc) {
28954- atomic_inc(&vcc->stats->tx);
28955+ atomic_inc_unchecked(&vcc->stats->tx);
28956 solos_pop(vcc, oldskb);
28957 } else
28958 dev_kfree_skb_irq(oldskb);
28959diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28960index 90f1ccc..04c4a1e 100644
28961--- a/drivers/atm/suni.c
28962+++ b/drivers/atm/suni.c
28963@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28964
28965
28966 #define ADD_LIMITED(s,v) \
28967- atomic_add((v),&stats->s); \
28968- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28969+ atomic_add_unchecked((v),&stats->s); \
28970+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28971
28972
28973 static void suni_hz(unsigned long from_timer)
28974diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28975index 5120a96..e2572bd 100644
28976--- a/drivers/atm/uPD98402.c
28977+++ b/drivers/atm/uPD98402.c
28978@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28979 struct sonet_stats tmp;
28980 int error = 0;
28981
28982- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28983+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28984 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28985 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28986 if (zero && !error) {
28987@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28988
28989
28990 #define ADD_LIMITED(s,v) \
28991- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28992- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28993- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28994+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28995+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28996+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28997
28998
28999 static void stat_event(struct atm_dev *dev)
29000@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
29001 if (reason & uPD98402_INT_PFM) stat_event(dev);
29002 if (reason & uPD98402_INT_PCO) {
29003 (void) GET(PCOCR); /* clear interrupt cause */
29004- atomic_add(GET(HECCT),
29005+ atomic_add_unchecked(GET(HECCT),
29006 &PRIV(dev)->sonet_stats.uncorr_hcs);
29007 }
29008 if ((reason & uPD98402_INT_RFO) &&
29009@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
29010 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
29011 uPD98402_INT_LOS),PIMR); /* enable them */
29012 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
29013- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
29014- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
29015- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
29016+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
29017+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
29018+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
29019 return 0;
29020 }
29021
29022diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
29023index d889f56..17eb71e 100644
29024--- a/drivers/atm/zatm.c
29025+++ b/drivers/atm/zatm.c
29026@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
29027 }
29028 if (!size) {
29029 dev_kfree_skb_irq(skb);
29030- if (vcc) atomic_inc(&vcc->stats->rx_err);
29031+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
29032 continue;
29033 }
29034 if (!atm_charge(vcc,skb->truesize)) {
29035@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
29036 skb->len = size;
29037 ATM_SKB(skb)->vcc = vcc;
29038 vcc->push(vcc,skb);
29039- atomic_inc(&vcc->stats->rx);
29040+ atomic_inc_unchecked(&vcc->stats->rx);
29041 }
29042 zout(pos & 0xffff,MTA(mbx));
29043 #if 0 /* probably a stupid idea */
29044@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
29045 skb_queue_head(&zatm_vcc->backlog,skb);
29046 break;
29047 }
29048- atomic_inc(&vcc->stats->tx);
29049+ atomic_inc_unchecked(&vcc->stats->tx);
29050 wake_up(&zatm_vcc->tx_wait);
29051 }
29052
29053diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
29054index 8493536..31adee0 100644
29055--- a/drivers/base/devtmpfs.c
29056+++ b/drivers/base/devtmpfs.c
29057@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
29058 if (!thread)
29059 return 0;
29060
29061- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
29062+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
29063 if (err)
29064 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
29065 else
29066diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
29067index caf995f..6f76697 100644
29068--- a/drivers/base/power/wakeup.c
29069+++ b/drivers/base/power/wakeup.c
29070@@ -30,14 +30,14 @@ bool events_check_enabled;
29071 * They need to be modified together atomically, so it's better to use one
29072 * atomic variable to hold them both.
29073 */
29074-static atomic_t combined_event_count = ATOMIC_INIT(0);
29075+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
29076
29077 #define IN_PROGRESS_BITS (sizeof(int) * 4)
29078 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
29079
29080 static void split_counters(unsigned int *cnt, unsigned int *inpr)
29081 {
29082- unsigned int comb = atomic_read(&combined_event_count);
29083+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
29084
29085 *cnt = (comb >> IN_PROGRESS_BITS);
29086 *inpr = comb & MAX_IN_PROGRESS;
29087@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
29088 ws->last_time = ktime_get();
29089
29090 /* Increment the counter of events in progress. */
29091- atomic_inc(&combined_event_count);
29092+ atomic_inc_unchecked(&combined_event_count);
29093 }
29094
29095 /**
29096@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
29097 * Increment the counter of registered wakeup events and decrement the
29098 * couter of wakeup events in progress simultaneously.
29099 */
29100- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
29101+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
29102 }
29103
29104 /**
29105diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
29106index b0f553b..77b928b 100644
29107--- a/drivers/block/cciss.c
29108+++ b/drivers/block/cciss.c
29109@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
29110 int err;
29111 u32 cp;
29112
29113+ memset(&arg64, 0, sizeof(arg64));
29114+
29115 err = 0;
29116 err |=
29117 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
29118@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
29119 while (!list_empty(&h->reqQ)) {
29120 c = list_entry(h->reqQ.next, CommandList_struct, list);
29121 /* can't do anything if fifo is full */
29122- if ((h->access.fifo_full(h))) {
29123+ if ((h->access->fifo_full(h))) {
29124 dev_warn(&h->pdev->dev, "fifo full\n");
29125 break;
29126 }
29127@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
29128 h->Qdepth--;
29129
29130 /* Tell the controller execute command */
29131- h->access.submit_command(h, c);
29132+ h->access->submit_command(h, c);
29133
29134 /* Put job onto the completed Q */
29135 addQ(&h->cmpQ, c);
29136@@ -3443,17 +3445,17 @@ startio:
29137
29138 static inline unsigned long get_next_completion(ctlr_info_t *h)
29139 {
29140- return h->access.command_completed(h);
29141+ return h->access->command_completed(h);
29142 }
29143
29144 static inline int interrupt_pending(ctlr_info_t *h)
29145 {
29146- return h->access.intr_pending(h);
29147+ return h->access->intr_pending(h);
29148 }
29149
29150 static inline long interrupt_not_for_us(ctlr_info_t *h)
29151 {
29152- return ((h->access.intr_pending(h) == 0) ||
29153+ return ((h->access->intr_pending(h) == 0) ||
29154 (h->interrupts_enabled == 0));
29155 }
29156
29157@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
29158 u32 a;
29159
29160 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
29161- return h->access.command_completed(h);
29162+ return h->access->command_completed(h);
29163
29164 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
29165 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
29166@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
29167 trans_support & CFGTBL_Trans_use_short_tags);
29168
29169 /* Change the access methods to the performant access methods */
29170- h->access = SA5_performant_access;
29171+ h->access = &SA5_performant_access;
29172 h->transMethod = CFGTBL_Trans_Performant;
29173
29174 return;
29175@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
29176 if (prod_index < 0)
29177 return -ENODEV;
29178 h->product_name = products[prod_index].product_name;
29179- h->access = *(products[prod_index].access);
29180+ h->access = products[prod_index].access;
29181
29182 if (cciss_board_disabled(h)) {
29183 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
29184@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
29185 }
29186
29187 /* make sure the board interrupts are off */
29188- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29189+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29190 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
29191 if (rc)
29192 goto clean2;
29193@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
29194 * fake ones to scoop up any residual completions.
29195 */
29196 spin_lock_irqsave(&h->lock, flags);
29197- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29198+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29199 spin_unlock_irqrestore(&h->lock, flags);
29200 free_irq(h->intr[h->intr_mode], h);
29201 rc = cciss_request_irq(h, cciss_msix_discard_completions,
29202@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
29203 dev_info(&h->pdev->dev, "Board READY.\n");
29204 dev_info(&h->pdev->dev,
29205 "Waiting for stale completions to drain.\n");
29206- h->access.set_intr_mask(h, CCISS_INTR_ON);
29207+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29208 msleep(10000);
29209- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29210+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29211
29212 rc = controller_reset_failed(h->cfgtable);
29213 if (rc)
29214@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
29215 cciss_scsi_setup(h);
29216
29217 /* Turn the interrupts on so we can service requests */
29218- h->access.set_intr_mask(h, CCISS_INTR_ON);
29219+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29220
29221 /* Get the firmware version */
29222 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
29223@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
29224 kfree(flush_buf);
29225 if (return_code != IO_OK)
29226 dev_warn(&h->pdev->dev, "Error flushing cache\n");
29227- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29228+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29229 free_irq(h->intr[h->intr_mode], h);
29230 }
29231
29232diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
29233index 7fda30e..eb5dfe0 100644
29234--- a/drivers/block/cciss.h
29235+++ b/drivers/block/cciss.h
29236@@ -101,7 +101,7 @@ struct ctlr_info
29237 /* information about each logical volume */
29238 drive_info_struct *drv[CISS_MAX_LUN];
29239
29240- struct access_method access;
29241+ struct access_method *access;
29242
29243 /* queue and queue Info */
29244 struct list_head reqQ;
29245diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
29246index 9125bbe..eede5c8 100644
29247--- a/drivers/block/cpqarray.c
29248+++ b/drivers/block/cpqarray.c
29249@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29250 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29251 goto Enomem4;
29252 }
29253- hba[i]->access.set_intr_mask(hba[i], 0);
29254+ hba[i]->access->set_intr_mask(hba[i], 0);
29255 if (request_irq(hba[i]->intr, do_ida_intr,
29256 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29257 {
29258@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29259 add_timer(&hba[i]->timer);
29260
29261 /* Enable IRQ now that spinlock and rate limit timer are set up */
29262- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29263+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29264
29265 for(j=0; j<NWD; j++) {
29266 struct gendisk *disk = ida_gendisk[i][j];
29267@@ -694,7 +694,7 @@ DBGINFO(
29268 for(i=0; i<NR_PRODUCTS; i++) {
29269 if (board_id == products[i].board_id) {
29270 c->product_name = products[i].product_name;
29271- c->access = *(products[i].access);
29272+ c->access = products[i].access;
29273 break;
29274 }
29275 }
29276@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29277 hba[ctlr]->intr = intr;
29278 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29279 hba[ctlr]->product_name = products[j].product_name;
29280- hba[ctlr]->access = *(products[j].access);
29281+ hba[ctlr]->access = products[j].access;
29282 hba[ctlr]->ctlr = ctlr;
29283 hba[ctlr]->board_id = board_id;
29284 hba[ctlr]->pci_dev = NULL; /* not PCI */
29285@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29286
29287 while((c = h->reqQ) != NULL) {
29288 /* Can't do anything if we're busy */
29289- if (h->access.fifo_full(h) == 0)
29290+ if (h->access->fifo_full(h) == 0)
29291 return;
29292
29293 /* Get the first entry from the request Q */
29294@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29295 h->Qdepth--;
29296
29297 /* Tell the controller to do our bidding */
29298- h->access.submit_command(h, c);
29299+ h->access->submit_command(h, c);
29300
29301 /* Get onto the completion Q */
29302 addQ(&h->cmpQ, c);
29303@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29304 unsigned long flags;
29305 __u32 a,a1;
29306
29307- istat = h->access.intr_pending(h);
29308+ istat = h->access->intr_pending(h);
29309 /* Is this interrupt for us? */
29310 if (istat == 0)
29311 return IRQ_NONE;
29312@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29313 */
29314 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29315 if (istat & FIFO_NOT_EMPTY) {
29316- while((a = h->access.command_completed(h))) {
29317+ while((a = h->access->command_completed(h))) {
29318 a1 = a; a &= ~3;
29319 if ((c = h->cmpQ) == NULL)
29320 {
29321@@ -1449,11 +1449,11 @@ static int sendcmd(
29322 /*
29323 * Disable interrupt
29324 */
29325- info_p->access.set_intr_mask(info_p, 0);
29326+ info_p->access->set_intr_mask(info_p, 0);
29327 /* Make sure there is room in the command FIFO */
29328 /* Actually it should be completely empty at this time. */
29329 for (i = 200000; i > 0; i--) {
29330- temp = info_p->access.fifo_full(info_p);
29331+ temp = info_p->access->fifo_full(info_p);
29332 if (temp != 0) {
29333 break;
29334 }
29335@@ -1466,7 +1466,7 @@ DBG(
29336 /*
29337 * Send the cmd
29338 */
29339- info_p->access.submit_command(info_p, c);
29340+ info_p->access->submit_command(info_p, c);
29341 complete = pollcomplete(ctlr);
29342
29343 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29344@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29345 * we check the new geometry. Then turn interrupts back on when
29346 * we're done.
29347 */
29348- host->access.set_intr_mask(host, 0);
29349+ host->access->set_intr_mask(host, 0);
29350 getgeometry(ctlr);
29351- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29352+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29353
29354 for(i=0; i<NWD; i++) {
29355 struct gendisk *disk = ida_gendisk[ctlr][i];
29356@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29357 /* Wait (up to 2 seconds) for a command to complete */
29358
29359 for (i = 200000; i > 0; i--) {
29360- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29361+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29362 if (done == 0) {
29363 udelay(10); /* a short fixed delay */
29364 } else
29365diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29366index be73e9d..7fbf140 100644
29367--- a/drivers/block/cpqarray.h
29368+++ b/drivers/block/cpqarray.h
29369@@ -99,7 +99,7 @@ struct ctlr_info {
29370 drv_info_t drv[NWD];
29371 struct proc_dir_entry *proc;
29372
29373- struct access_method access;
29374+ struct access_method *access;
29375
29376 cmdlist_t *reqQ;
29377 cmdlist_t *cmpQ;
29378diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29379index 8d68056..e67050f 100644
29380--- a/drivers/block/drbd/drbd_int.h
29381+++ b/drivers/block/drbd/drbd_int.h
29382@@ -736,7 +736,7 @@ struct drbd_request;
29383 struct drbd_epoch {
29384 struct list_head list;
29385 unsigned int barrier_nr;
29386- atomic_t epoch_size; /* increased on every request added. */
29387+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29388 atomic_t active; /* increased on every req. added, and dec on every finished. */
29389 unsigned long flags;
29390 };
29391@@ -1108,7 +1108,7 @@ struct drbd_conf {
29392 void *int_dig_in;
29393 void *int_dig_vv;
29394 wait_queue_head_t seq_wait;
29395- atomic_t packet_seq;
29396+ atomic_unchecked_t packet_seq;
29397 unsigned int peer_seq;
29398 spinlock_t peer_seq_lock;
29399 unsigned int minor;
29400@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29401
29402 static inline void drbd_tcp_cork(struct socket *sock)
29403 {
29404- int __user val = 1;
29405+ int val = 1;
29406 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29407- (char __user *)&val, sizeof(val));
29408+ (char __force_user *)&val, sizeof(val));
29409 }
29410
29411 static inline void drbd_tcp_uncork(struct socket *sock)
29412 {
29413- int __user val = 0;
29414+ int val = 0;
29415 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29416- (char __user *)&val, sizeof(val));
29417+ (char __force_user *)&val, sizeof(val));
29418 }
29419
29420 static inline void drbd_tcp_nodelay(struct socket *sock)
29421 {
29422- int __user val = 1;
29423+ int val = 1;
29424 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29425- (char __user *)&val, sizeof(val));
29426+ (char __force_user *)&val, sizeof(val));
29427 }
29428
29429 static inline void drbd_tcp_quickack(struct socket *sock)
29430 {
29431- int __user val = 2;
29432+ int val = 2;
29433 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29434- (char __user *)&val, sizeof(val));
29435+ (char __force_user *)&val, sizeof(val));
29436 }
29437
29438 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29439diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29440index 211fc44..c5116f1 100644
29441--- a/drivers/block/drbd/drbd_main.c
29442+++ b/drivers/block/drbd/drbd_main.c
29443@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29444 p.sector = sector;
29445 p.block_id = block_id;
29446 p.blksize = blksize;
29447- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29448+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29449
29450 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29451 return false;
29452@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29453 p.sector = cpu_to_be64(req->sector);
29454 p.block_id = (unsigned long)req;
29455 p.seq_num = cpu_to_be32(req->seq_num =
29456- atomic_add_return(1, &mdev->packet_seq));
29457+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29458
29459 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29460
29461@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29462 atomic_set(&mdev->unacked_cnt, 0);
29463 atomic_set(&mdev->local_cnt, 0);
29464 atomic_set(&mdev->net_cnt, 0);
29465- atomic_set(&mdev->packet_seq, 0);
29466+ atomic_set_unchecked(&mdev->packet_seq, 0);
29467 atomic_set(&mdev->pp_in_use, 0);
29468 atomic_set(&mdev->pp_in_use_by_net, 0);
29469 atomic_set(&mdev->rs_sect_in, 0);
29470@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29471 mdev->receiver.t_state);
29472
29473 /* no need to lock it, I'm the only thread alive */
29474- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29475- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29476+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29477+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29478 mdev->al_writ_cnt =
29479 mdev->bm_writ_cnt =
29480 mdev->read_cnt =
29481diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29482index af2a250..219c74b 100644
29483--- a/drivers/block/drbd/drbd_nl.c
29484+++ b/drivers/block/drbd/drbd_nl.c
29485@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29486 module_put(THIS_MODULE);
29487 }
29488
29489-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29490+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29491
29492 static unsigned short *
29493 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29494@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29495 cn_reply->id.idx = CN_IDX_DRBD;
29496 cn_reply->id.val = CN_VAL_DRBD;
29497
29498- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29499+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29500 cn_reply->ack = 0; /* not used here. */
29501 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29502 (int)((char *)tl - (char *)reply->tag_list);
29503@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29504 cn_reply->id.idx = CN_IDX_DRBD;
29505 cn_reply->id.val = CN_VAL_DRBD;
29506
29507- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29508+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29509 cn_reply->ack = 0; /* not used here. */
29510 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29511 (int)((char *)tl - (char *)reply->tag_list);
29512@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29513 cn_reply->id.idx = CN_IDX_DRBD;
29514 cn_reply->id.val = CN_VAL_DRBD;
29515
29516- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29517+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29518 cn_reply->ack = 0; // not used here.
29519 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29520 (int)((char*)tl - (char*)reply->tag_list);
29521@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29522 cn_reply->id.idx = CN_IDX_DRBD;
29523 cn_reply->id.val = CN_VAL_DRBD;
29524
29525- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29526+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29527 cn_reply->ack = 0; /* not used here. */
29528 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29529 (int)((char *)tl - (char *)reply->tag_list);
29530diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29531index 43beaca..4a5b1dd 100644
29532--- a/drivers/block/drbd/drbd_receiver.c
29533+++ b/drivers/block/drbd/drbd_receiver.c
29534@@ -894,7 +894,7 @@ retry:
29535 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29536 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29537
29538- atomic_set(&mdev->packet_seq, 0);
29539+ atomic_set_unchecked(&mdev->packet_seq, 0);
29540 mdev->peer_seq = 0;
29541
29542 drbd_thread_start(&mdev->asender);
29543@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29544 do {
29545 next_epoch = NULL;
29546
29547- epoch_size = atomic_read(&epoch->epoch_size);
29548+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29549
29550 switch (ev & ~EV_CLEANUP) {
29551 case EV_PUT:
29552@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29553 rv = FE_DESTROYED;
29554 } else {
29555 epoch->flags = 0;
29556- atomic_set(&epoch->epoch_size, 0);
29557+ atomic_set_unchecked(&epoch->epoch_size, 0);
29558 /* atomic_set(&epoch->active, 0); is already zero */
29559 if (rv == FE_STILL_LIVE)
29560 rv = FE_RECYCLED;
29561@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29562 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29563 drbd_flush(mdev);
29564
29565- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29566+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29567 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29568 if (epoch)
29569 break;
29570 }
29571
29572 epoch = mdev->current_epoch;
29573- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29574+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29575
29576 D_ASSERT(atomic_read(&epoch->active) == 0);
29577 D_ASSERT(epoch->flags == 0);
29578@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29579 }
29580
29581 epoch->flags = 0;
29582- atomic_set(&epoch->epoch_size, 0);
29583+ atomic_set_unchecked(&epoch->epoch_size, 0);
29584 atomic_set(&epoch->active, 0);
29585
29586 spin_lock(&mdev->epoch_lock);
29587- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29588+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29589 list_add(&epoch->list, &mdev->current_epoch->list);
29590 mdev->current_epoch = epoch;
29591 mdev->epochs++;
29592@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29593 spin_unlock(&mdev->peer_seq_lock);
29594
29595 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29596- atomic_inc(&mdev->current_epoch->epoch_size);
29597+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29598 return drbd_drain_block(mdev, data_size);
29599 }
29600
29601@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29602
29603 spin_lock(&mdev->epoch_lock);
29604 e->epoch = mdev->current_epoch;
29605- atomic_inc(&e->epoch->epoch_size);
29606+ atomic_inc_unchecked(&e->epoch->epoch_size);
29607 atomic_inc(&e->epoch->active);
29608 spin_unlock(&mdev->epoch_lock);
29609
29610@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29611 D_ASSERT(list_empty(&mdev->done_ee));
29612
29613 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29614- atomic_set(&mdev->current_epoch->epoch_size, 0);
29615+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29616 D_ASSERT(list_empty(&mdev->current_epoch->list));
29617 }
29618
29619diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29620index cd50435..ba1ffb5 100644
29621--- a/drivers/block/loop.c
29622+++ b/drivers/block/loop.c
29623@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29624 mm_segment_t old_fs = get_fs();
29625
29626 set_fs(get_ds());
29627- bw = file->f_op->write(file, buf, len, &pos);
29628+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29629 set_fs(old_fs);
29630 if (likely(bw == len))
29631 return 0;
29632diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29633index 4364303..9adf4ee 100644
29634--- a/drivers/char/Kconfig
29635+++ b/drivers/char/Kconfig
29636@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29637
29638 config DEVKMEM
29639 bool "/dev/kmem virtual device support"
29640- default y
29641+ default n
29642+ depends on !GRKERNSEC_KMEM
29643 help
29644 Say Y here if you want to support the /dev/kmem device. The
29645 /dev/kmem device is rarely used, but can be used for certain
29646@@ -596,6 +597,7 @@ config DEVPORT
29647 bool
29648 depends on !M68K
29649 depends on ISA || PCI
29650+ depends on !GRKERNSEC_KMEM
29651 default y
29652
29653 source "drivers/s390/char/Kconfig"
29654diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29655index 2e04433..22afc64 100644
29656--- a/drivers/char/agp/frontend.c
29657+++ b/drivers/char/agp/frontend.c
29658@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29659 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29660 return -EFAULT;
29661
29662- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29663+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29664 return -EFAULT;
29665
29666 client = agp_find_client_by_pid(reserve.pid);
29667diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
29668index 095ab90..afad0a4 100644
29669--- a/drivers/char/briq_panel.c
29670+++ b/drivers/char/briq_panel.c
29671@@ -9,6 +9,7 @@
29672 #include <linux/types.h>
29673 #include <linux/errno.h>
29674 #include <linux/tty.h>
29675+#include <linux/mutex.h>
29676 #include <linux/timer.h>
29677 #include <linux/kernel.h>
29678 #include <linux/wait.h>
29679@@ -34,6 +35,7 @@ static int vfd_is_open;
29680 static unsigned char vfd[40];
29681 static int vfd_cursor;
29682 static unsigned char ledpb, led;
29683+static DEFINE_MUTEX(vfd_mutex);
29684
29685 static void update_vfd(void)
29686 {
29687@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29688 if (!vfd_is_open)
29689 return -EBUSY;
29690
29691+ mutex_lock(&vfd_mutex);
29692 for (;;) {
29693 char c;
29694 if (!indx)
29695 break;
29696- if (get_user(c, buf))
29697+ if (get_user(c, buf)) {
29698+ mutex_unlock(&vfd_mutex);
29699 return -EFAULT;
29700+ }
29701 if (esc) {
29702 set_led(c);
29703 esc = 0;
29704@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29705 buf++;
29706 }
29707 update_vfd();
29708+ mutex_unlock(&vfd_mutex);
29709
29710 return len;
29711 }
29712diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29713index f773a9d..65cd683 100644
29714--- a/drivers/char/genrtc.c
29715+++ b/drivers/char/genrtc.c
29716@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
29717 switch (cmd) {
29718
29719 case RTC_PLL_GET:
29720+ memset(&pll, 0, sizeof(pll));
29721 if (get_rtc_pll(&pll))
29722 return -EINVAL;
29723 else
29724diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29725index 0833896..cccce52 100644
29726--- a/drivers/char/hpet.c
29727+++ b/drivers/char/hpet.c
29728@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29729 }
29730
29731 static int
29732-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29733+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29734 struct hpet_info *info)
29735 {
29736 struct hpet_timer __iomem *timer;
29737diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29738index 58c0e63..46c16bf 100644
29739--- a/drivers/char/ipmi/ipmi_msghandler.c
29740+++ b/drivers/char/ipmi/ipmi_msghandler.c
29741@@ -415,7 +415,7 @@ struct ipmi_smi {
29742 struct proc_dir_entry *proc_dir;
29743 char proc_dir_name[10];
29744
29745- atomic_t stats[IPMI_NUM_STATS];
29746+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29747
29748 /*
29749 * run_to_completion duplicate of smb_info, smi_info
29750@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29751
29752
29753 #define ipmi_inc_stat(intf, stat) \
29754- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29755+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29756 #define ipmi_get_stat(intf, stat) \
29757- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29758+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29759
29760 static int is_lan_addr(struct ipmi_addr *addr)
29761 {
29762@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29763 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29764 init_waitqueue_head(&intf->waitq);
29765 for (i = 0; i < IPMI_NUM_STATS; i++)
29766- atomic_set(&intf->stats[i], 0);
29767+ atomic_set_unchecked(&intf->stats[i], 0);
29768
29769 intf->proc_dir = NULL;
29770
29771diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29772index 50fcf9c..91b5528 100644
29773--- a/drivers/char/ipmi/ipmi_si_intf.c
29774+++ b/drivers/char/ipmi/ipmi_si_intf.c
29775@@ -277,7 +277,7 @@ struct smi_info {
29776 unsigned char slave_addr;
29777
29778 /* Counters and things for the proc filesystem. */
29779- atomic_t stats[SI_NUM_STATS];
29780+ atomic_unchecked_t stats[SI_NUM_STATS];
29781
29782 struct task_struct *thread;
29783
29784@@ -286,9 +286,9 @@ struct smi_info {
29785 };
29786
29787 #define smi_inc_stat(smi, stat) \
29788- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29789+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29790 #define smi_get_stat(smi, stat) \
29791- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29792+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29793
29794 #define SI_MAX_PARMS 4
29795
29796@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
29797 atomic_set(&new_smi->req_events, 0);
29798 new_smi->run_to_completion = 0;
29799 for (i = 0; i < SI_NUM_STATS; i++)
29800- atomic_set(&new_smi->stats[i], 0);
29801+ atomic_set_unchecked(&new_smi->stats[i], 0);
29802
29803 new_smi->interrupt_disabled = 1;
29804 atomic_set(&new_smi->stop_operation, 0);
29805diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29806index 1aeaaba..e018570 100644
29807--- a/drivers/char/mbcs.c
29808+++ b/drivers/char/mbcs.c
29809@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
29810 return 0;
29811 }
29812
29813-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29814+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29815 {
29816 .part_num = MBCS_PART_NUM,
29817 .mfg_num = MBCS_MFG_NUM,
29818diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29819index d6e9d08..4493e89 100644
29820--- a/drivers/char/mem.c
29821+++ b/drivers/char/mem.c
29822@@ -18,6 +18,7 @@
29823 #include <linux/raw.h>
29824 #include <linux/tty.h>
29825 #include <linux/capability.h>
29826+#include <linux/security.h>
29827 #include <linux/ptrace.h>
29828 #include <linux/device.h>
29829 #include <linux/highmem.h>
29830@@ -35,6 +36,10 @@
29831 # include <linux/efi.h>
29832 #endif
29833
29834+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29835+extern const struct file_operations grsec_fops;
29836+#endif
29837+
29838 static inline unsigned long size_inside_page(unsigned long start,
29839 unsigned long size)
29840 {
29841@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29842
29843 while (cursor < to) {
29844 if (!devmem_is_allowed(pfn)) {
29845+#ifdef CONFIG_GRKERNSEC_KMEM
29846+ gr_handle_mem_readwrite(from, to);
29847+#else
29848 printk(KERN_INFO
29849 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29850 current->comm, from, to);
29851+#endif
29852 return 0;
29853 }
29854 cursor += PAGE_SIZE;
29855@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29856 }
29857 return 1;
29858 }
29859+#elif defined(CONFIG_GRKERNSEC_KMEM)
29860+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29861+{
29862+ return 0;
29863+}
29864 #else
29865 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29866 {
29867@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29868
29869 while (count > 0) {
29870 unsigned long remaining;
29871+ char *temp;
29872
29873 sz = size_inside_page(p, count);
29874
29875@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29876 if (!ptr)
29877 return -EFAULT;
29878
29879- remaining = copy_to_user(buf, ptr, sz);
29880+#ifdef CONFIG_PAX_USERCOPY
29881+ temp = kmalloc(sz, GFP_KERNEL);
29882+ if (!temp) {
29883+ unxlate_dev_mem_ptr(p, ptr);
29884+ return -ENOMEM;
29885+ }
29886+ memcpy(temp, ptr, sz);
29887+#else
29888+ temp = ptr;
29889+#endif
29890+
29891+ remaining = copy_to_user(buf, temp, sz);
29892+
29893+#ifdef CONFIG_PAX_USERCOPY
29894+ kfree(temp);
29895+#endif
29896+
29897 unxlate_dev_mem_ptr(p, ptr);
29898 if (remaining)
29899 return -EFAULT;
29900@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29901 size_t count, loff_t *ppos)
29902 {
29903 unsigned long p = *ppos;
29904- ssize_t low_count, read, sz;
29905+ ssize_t low_count, read, sz, err = 0;
29906 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29907- int err = 0;
29908
29909 read = 0;
29910 if (p < (unsigned long) high_memory) {
29911@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29912 }
29913 #endif
29914 while (low_count > 0) {
29915+ char *temp;
29916+
29917 sz = size_inside_page(p, low_count);
29918
29919 /*
29920@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29921 */
29922 kbuf = xlate_dev_kmem_ptr((char *)p);
29923
29924- if (copy_to_user(buf, kbuf, sz))
29925+#ifdef CONFIG_PAX_USERCOPY
29926+ temp = kmalloc(sz, GFP_KERNEL);
29927+ if (!temp)
29928+ return -ENOMEM;
29929+ memcpy(temp, kbuf, sz);
29930+#else
29931+ temp = kbuf;
29932+#endif
29933+
29934+ err = copy_to_user(buf, temp, sz);
29935+
29936+#ifdef CONFIG_PAX_USERCOPY
29937+ kfree(temp);
29938+#endif
29939+
29940+ if (err)
29941 return -EFAULT;
29942 buf += sz;
29943 p += sz;
29944@@ -867,6 +914,9 @@ static const struct memdev {
29945 #ifdef CONFIG_CRASH_DUMP
29946 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29947 #endif
29948+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29949+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29950+#endif
29951 };
29952
29953 static int memory_open(struct inode *inode, struct file *filp)
29954diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29955index da3cfee..a5a6606 100644
29956--- a/drivers/char/nvram.c
29957+++ b/drivers/char/nvram.c
29958@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29959
29960 spin_unlock_irq(&rtc_lock);
29961
29962- if (copy_to_user(buf, contents, tmp - contents))
29963+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29964 return -EFAULT;
29965
29966 *ppos = i;
29967diff --git a/drivers/char/random.c b/drivers/char/random.c
29968index 54ca8b2..4a092ed 100644
29969--- a/drivers/char/random.c
29970+++ b/drivers/char/random.c
29971@@ -261,8 +261,13 @@
29972 /*
29973 * Configuration information
29974 */
29975+#ifdef CONFIG_GRKERNSEC_RANDNET
29976+#define INPUT_POOL_WORDS 512
29977+#define OUTPUT_POOL_WORDS 128
29978+#else
29979 #define INPUT_POOL_WORDS 128
29980 #define OUTPUT_POOL_WORDS 32
29981+#endif
29982 #define SEC_XFER_SIZE 512
29983 #define EXTRACT_SIZE 10
29984
29985@@ -300,10 +305,17 @@ static struct poolinfo {
29986 int poolwords;
29987 int tap1, tap2, tap3, tap4, tap5;
29988 } poolinfo_table[] = {
29989+#ifdef CONFIG_GRKERNSEC_RANDNET
29990+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29991+ { 512, 411, 308, 208, 104, 1 },
29992+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29993+ { 128, 103, 76, 51, 25, 1 },
29994+#else
29995 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29996 { 128, 103, 76, 51, 25, 1 },
29997 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29998 { 32, 26, 20, 14, 7, 1 },
29999+#endif
30000 #if 0
30001 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
30002 { 2048, 1638, 1231, 819, 411, 1 },
30003@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
30004
30005 extract_buf(r, tmp);
30006 i = min_t(int, nbytes, EXTRACT_SIZE);
30007- if (copy_to_user(buf, tmp, i)) {
30008+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
30009 ret = -EFAULT;
30010 break;
30011 }
30012@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
30013 #include <linux/sysctl.h>
30014
30015 static int min_read_thresh = 8, min_write_thresh;
30016-static int max_read_thresh = INPUT_POOL_WORDS * 32;
30017+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
30018 static int max_write_thresh = INPUT_POOL_WORDS * 32;
30019 static char sysctl_bootid[16];
30020
30021@@ -1260,10 +1272,15 @@ static int proc_do_uuid(ctl_table *table, int write,
30022 uuid = table->data;
30023 if (!uuid) {
30024 uuid = tmp_uuid;
30025- uuid[8] = 0;
30026- }
30027- if (uuid[8] == 0)
30028 generate_random_uuid(uuid);
30029+ } else {
30030+ static DEFINE_SPINLOCK(bootid_spinlock);
30031+
30032+ spin_lock(&bootid_spinlock);
30033+ if (!uuid[8])
30034+ generate_random_uuid(uuid);
30035+ spin_unlock(&bootid_spinlock);
30036+ }
30037
30038 sprintf(buf, "%pU", uuid);
30039
30040diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
30041index 1ee8ce7..b778bef 100644
30042--- a/drivers/char/sonypi.c
30043+++ b/drivers/char/sonypi.c
30044@@ -55,6 +55,7 @@
30045 #include <asm/uaccess.h>
30046 #include <asm/io.h>
30047 #include <asm/system.h>
30048+#include <asm/local.h>
30049
30050 #include <linux/sonypi.h>
30051
30052@@ -491,7 +492,7 @@ static struct sonypi_device {
30053 spinlock_t fifo_lock;
30054 wait_queue_head_t fifo_proc_list;
30055 struct fasync_struct *fifo_async;
30056- int open_count;
30057+ local_t open_count;
30058 int model;
30059 struct input_dev *input_jog_dev;
30060 struct input_dev *input_key_dev;
30061@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
30062 static int sonypi_misc_release(struct inode *inode, struct file *file)
30063 {
30064 mutex_lock(&sonypi_device.lock);
30065- sonypi_device.open_count--;
30066+ local_dec(&sonypi_device.open_count);
30067 mutex_unlock(&sonypi_device.lock);
30068 return 0;
30069 }
30070@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
30071 {
30072 mutex_lock(&sonypi_device.lock);
30073 /* Flush input queue on first open */
30074- if (!sonypi_device.open_count)
30075+ if (!local_read(&sonypi_device.open_count))
30076 kfifo_reset(&sonypi_device.fifo);
30077- sonypi_device.open_count++;
30078+ local_inc(&sonypi_device.open_count);
30079 mutex_unlock(&sonypi_device.lock);
30080
30081 return 0;
30082diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
30083index ad7c732..5aa8054 100644
30084--- a/drivers/char/tpm/tpm.c
30085+++ b/drivers/char/tpm/tpm.c
30086@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
30087 chip->vendor.req_complete_val)
30088 goto out_recv;
30089
30090- if ((status == chip->vendor.req_canceled)) {
30091+ if (status == chip->vendor.req_canceled) {
30092 dev_err(chip->dev, "Operation Canceled\n");
30093 rc = -ECANCELED;
30094 goto out;
30095diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
30096index 0636520..169c1d0 100644
30097--- a/drivers/char/tpm/tpm_bios.c
30098+++ b/drivers/char/tpm/tpm_bios.c
30099@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
30100 event = addr;
30101
30102 if ((event->event_type == 0 && event->event_size == 0) ||
30103- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
30104+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
30105 return NULL;
30106
30107 return addr;
30108@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
30109 return NULL;
30110
30111 if ((event->event_type == 0 && event->event_size == 0) ||
30112- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
30113+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
30114 return NULL;
30115
30116 (*pos)++;
30117@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
30118 int i;
30119
30120 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
30121- seq_putc(m, data[i]);
30122+ if (!seq_putc(m, data[i]))
30123+ return -EFAULT;
30124
30125 return 0;
30126 }
30127@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
30128 log->bios_event_log_end = log->bios_event_log + len;
30129
30130 virt = acpi_os_map_memory(start, len);
30131+ if (!virt) {
30132+ kfree(log->bios_event_log);
30133+ log->bios_event_log = NULL;
30134+ return -EFAULT;
30135+ }
30136
30137- memcpy(log->bios_event_log, virt, len);
30138+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
30139
30140 acpi_os_unmap_memory(virt, len);
30141 return 0;
30142diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
30143index b58b561..c9088c8 100644
30144--- a/drivers/char/virtio_console.c
30145+++ b/drivers/char/virtio_console.c
30146@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
30147 if (to_user) {
30148 ssize_t ret;
30149
30150- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
30151+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
30152 if (ret)
30153 return -EFAULT;
30154 } else {
30155@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
30156 if (!port_has_data(port) && !port->host_connected)
30157 return 0;
30158
30159- return fill_readbuf(port, ubuf, count, true);
30160+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
30161 }
30162
30163 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
30164diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
30165index c9eee6d..f9d5280 100644
30166--- a/drivers/edac/amd64_edac.c
30167+++ b/drivers/edac/amd64_edac.c
30168@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
30169 * PCI core identifies what devices are on a system during boot, and then
30170 * inquiry this table to see if this driver is for a given device found.
30171 */
30172-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
30173+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
30174 {
30175 .vendor = PCI_VENDOR_ID_AMD,
30176 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
30177diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
30178index e47e73b..348e0bd 100644
30179--- a/drivers/edac/amd76x_edac.c
30180+++ b/drivers/edac/amd76x_edac.c
30181@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
30182 edac_mc_free(mci);
30183 }
30184
30185-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
30186+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
30187 {
30188 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30189 AMD762},
30190diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
30191index 1af531a..3a8ff27 100644
30192--- a/drivers/edac/e752x_edac.c
30193+++ b/drivers/edac/e752x_edac.c
30194@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
30195 edac_mc_free(mci);
30196 }
30197
30198-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
30199+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
30200 {
30201 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30202 E7520},
30203diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
30204index 6ffb6d2..383d8d7 100644
30205--- a/drivers/edac/e7xxx_edac.c
30206+++ b/drivers/edac/e7xxx_edac.c
30207@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
30208 edac_mc_free(mci);
30209 }
30210
30211-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
30212+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
30213 {
30214 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30215 E7205},
30216diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
30217index 97f5064..202b6e6 100644
30218--- a/drivers/edac/edac_pci_sysfs.c
30219+++ b/drivers/edac/edac_pci_sysfs.c
30220@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
30221 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
30222 static int edac_pci_poll_msec = 1000; /* one second workq period */
30223
30224-static atomic_t pci_parity_count = ATOMIC_INIT(0);
30225-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30226+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
30227+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
30228
30229 static struct kobject *edac_pci_top_main_kobj;
30230 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
30231@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30232 edac_printk(KERN_CRIT, EDAC_PCI,
30233 "Signaled System Error on %s\n",
30234 pci_name(dev));
30235- atomic_inc(&pci_nonparity_count);
30236+ atomic_inc_unchecked(&pci_nonparity_count);
30237 }
30238
30239 if (status & (PCI_STATUS_PARITY)) {
30240@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30241 "Master Data Parity Error on %s\n",
30242 pci_name(dev));
30243
30244- atomic_inc(&pci_parity_count);
30245+ atomic_inc_unchecked(&pci_parity_count);
30246 }
30247
30248 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30249@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30250 "Detected Parity Error on %s\n",
30251 pci_name(dev));
30252
30253- atomic_inc(&pci_parity_count);
30254+ atomic_inc_unchecked(&pci_parity_count);
30255 }
30256 }
30257
30258@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30259 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
30260 "Signaled System Error on %s\n",
30261 pci_name(dev));
30262- atomic_inc(&pci_nonparity_count);
30263+ atomic_inc_unchecked(&pci_nonparity_count);
30264 }
30265
30266 if (status & (PCI_STATUS_PARITY)) {
30267@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30268 "Master Data Parity Error on "
30269 "%s\n", pci_name(dev));
30270
30271- atomic_inc(&pci_parity_count);
30272+ atomic_inc_unchecked(&pci_parity_count);
30273 }
30274
30275 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30276@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30277 "Detected Parity Error on %s\n",
30278 pci_name(dev));
30279
30280- atomic_inc(&pci_parity_count);
30281+ atomic_inc_unchecked(&pci_parity_count);
30282 }
30283 }
30284 }
30285@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
30286 if (!check_pci_errors)
30287 return;
30288
30289- before_count = atomic_read(&pci_parity_count);
30290+ before_count = atomic_read_unchecked(&pci_parity_count);
30291
30292 /* scan all PCI devices looking for a Parity Error on devices and
30293 * bridges.
30294@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
30295 /* Only if operator has selected panic on PCI Error */
30296 if (edac_pci_get_panic_on_pe()) {
30297 /* If the count is different 'after' from 'before' */
30298- if (before_count != atomic_read(&pci_parity_count))
30299+ if (before_count != atomic_read_unchecked(&pci_parity_count))
30300 panic("EDAC: PCI Parity Error");
30301 }
30302 }
30303diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
30304index c0510b3..6e2a954 100644
30305--- a/drivers/edac/i3000_edac.c
30306+++ b/drivers/edac/i3000_edac.c
30307@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
30308 edac_mc_free(mci);
30309 }
30310
30311-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
30312+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
30313 {
30314 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30315 I3000},
30316diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
30317index 73f55e200..5faaf59 100644
30318--- a/drivers/edac/i3200_edac.c
30319+++ b/drivers/edac/i3200_edac.c
30320@@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
30321 edac_mc_free(mci);
30322 }
30323
30324-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
30325+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
30326 {
30327 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30328 I3200},
30329diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
30330index 4dc3ac2..67d05a6 100644
30331--- a/drivers/edac/i5000_edac.c
30332+++ b/drivers/edac/i5000_edac.c
30333@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
30334 *
30335 * The "E500P" device is the first device supported.
30336 */
30337-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
30338+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
30339 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
30340 .driver_data = I5000P},
30341
30342diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
30343index bcbdeec..9886d16 100644
30344--- a/drivers/edac/i5100_edac.c
30345+++ b/drivers/edac/i5100_edac.c
30346@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
30347 edac_mc_free(mci);
30348 }
30349
30350-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
30351+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
30352 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
30353 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
30354 { 0, }
30355diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
30356index 74d6ec34..baff517 100644
30357--- a/drivers/edac/i5400_edac.c
30358+++ b/drivers/edac/i5400_edac.c
30359@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
30360 *
30361 * The "E500P" device is the first device supported.
30362 */
30363-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
30364+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
30365 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
30366 {0,} /* 0 terminated list. */
30367 };
30368diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
30369index 6104dba..e7ea8e1 100644
30370--- a/drivers/edac/i7300_edac.c
30371+++ b/drivers/edac/i7300_edac.c
30372@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
30373 *
30374 * Has only 8086:360c PCI ID
30375 */
30376-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
30377+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
30378 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
30379 {0,} /* 0 terminated list. */
30380 };
30381diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
30382index 8568d9b..42b2fa8 100644
30383--- a/drivers/edac/i7core_edac.c
30384+++ b/drivers/edac/i7core_edac.c
30385@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
30386 /*
30387 * pci_device_id table for which devices we are looking for
30388 */
30389-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
30390+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
30391 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
30392 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
30393 {0,} /* 0 terminated list. */
30394diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
30395index 4329d39..f3022ef 100644
30396--- a/drivers/edac/i82443bxgx_edac.c
30397+++ b/drivers/edac/i82443bxgx_edac.c
30398@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
30399
30400 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
30401
30402-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
30403+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
30404 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
30405 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
30406 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
30407diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
30408index 931a057..fd28340 100644
30409--- a/drivers/edac/i82860_edac.c
30410+++ b/drivers/edac/i82860_edac.c
30411@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
30412 edac_mc_free(mci);
30413 }
30414
30415-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
30416+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
30417 {
30418 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30419 I82860},
30420diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
30421index 33864c6..01edc61 100644
30422--- a/drivers/edac/i82875p_edac.c
30423+++ b/drivers/edac/i82875p_edac.c
30424@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
30425 edac_mc_free(mci);
30426 }
30427
30428-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
30429+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
30430 {
30431 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30432 I82875P},
30433diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
30434index 4184e01..dcb2cd3 100644
30435--- a/drivers/edac/i82975x_edac.c
30436+++ b/drivers/edac/i82975x_edac.c
30437@@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
30438 edac_mc_free(mci);
30439 }
30440
30441-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
30442+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
30443 {
30444 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30445 I82975X
30446diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30447index 0106747..0b40417 100644
30448--- a/drivers/edac/mce_amd.h
30449+++ b/drivers/edac/mce_amd.h
30450@@ -83,7 +83,7 @@ struct amd_decoder_ops {
30451 bool (*dc_mce)(u16, u8);
30452 bool (*ic_mce)(u16, u8);
30453 bool (*nb_mce)(u16, u8);
30454-};
30455+} __no_const;
30456
30457 void amd_report_gart_errors(bool);
30458 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30459diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
30460index e294e1b..a41b05b 100644
30461--- a/drivers/edac/r82600_edac.c
30462+++ b/drivers/edac/r82600_edac.c
30463@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
30464 edac_mc_free(mci);
30465 }
30466
30467-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
30468+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
30469 {
30470 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
30471 },
30472diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
30473index 1dc118d..8c68af9 100644
30474--- a/drivers/edac/sb_edac.c
30475+++ b/drivers/edac/sb_edac.c
30476@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
30477 /*
30478 * pci_device_id table for which devices we are looking for
30479 */
30480-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
30481+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
30482 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
30483 {0,} /* 0 terminated list. */
30484 };
30485diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
30486index b6f47de..c5acf3a 100644
30487--- a/drivers/edac/x38_edac.c
30488+++ b/drivers/edac/x38_edac.c
30489@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
30490 edac_mc_free(mci);
30491 }
30492
30493-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
30494+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
30495 {
30496 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30497 X38},
30498diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30499index 85661b0..c784559a 100644
30500--- a/drivers/firewire/core-card.c
30501+++ b/drivers/firewire/core-card.c
30502@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
30503
30504 void fw_core_remove_card(struct fw_card *card)
30505 {
30506- struct fw_card_driver dummy_driver = dummy_driver_template;
30507+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
30508
30509 card->driver->update_phy_reg(card, 4,
30510 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30511diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30512index 4799393..37bd3ab 100644
30513--- a/drivers/firewire/core-cdev.c
30514+++ b/drivers/firewire/core-cdev.c
30515@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
30516 int ret;
30517
30518 if ((request->channels == 0 && request->bandwidth == 0) ||
30519- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30520- request->bandwidth < 0)
30521+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30522 return -EINVAL;
30523
30524 r = kmalloc(sizeof(*r), GFP_KERNEL);
30525diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30526index 855ab3f..11f4bbd 100644
30527--- a/drivers/firewire/core-transaction.c
30528+++ b/drivers/firewire/core-transaction.c
30529@@ -37,6 +37,7 @@
30530 #include <linux/timer.h>
30531 #include <linux/types.h>
30532 #include <linux/workqueue.h>
30533+#include <linux/sched.h>
30534
30535 #include <asm/byteorder.h>
30536
30537diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30538index b45be57..5fad18b 100644
30539--- a/drivers/firewire/core.h
30540+++ b/drivers/firewire/core.h
30541@@ -101,6 +101,7 @@ struct fw_card_driver {
30542
30543 int (*stop_iso)(struct fw_iso_context *ctx);
30544 };
30545+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30546
30547 void fw_card_initialize(struct fw_card *card,
30548 const struct fw_card_driver *driver, struct device *device);
30549diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30550index 153980b..4b4d046 100644
30551--- a/drivers/firmware/dmi_scan.c
30552+++ b/drivers/firmware/dmi_scan.c
30553@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30554 }
30555 }
30556 else {
30557- /*
30558- * no iounmap() for that ioremap(); it would be a no-op, but
30559- * it's so early in setup that sucker gets confused into doing
30560- * what it shouldn't if we actually call it.
30561- */
30562 p = dmi_ioremap(0xF0000, 0x10000);
30563 if (p == NULL)
30564 goto error;
30565@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30566 if (buf == NULL)
30567 return -1;
30568
30569- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30570+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30571
30572 iounmap(buf);
30573 return 0;
30574diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30575index 82d5c20..44a7177 100644
30576--- a/drivers/gpio/gpio-vr41xx.c
30577+++ b/drivers/gpio/gpio-vr41xx.c
30578@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30579 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30580 maskl, pendl, maskh, pendh);
30581
30582- atomic_inc(&irq_err_count);
30583+ atomic_inc_unchecked(&irq_err_count);
30584
30585 return -EINVAL;
30586 }
30587diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30588index 84a4a80..ce0306e 100644
30589--- a/drivers/gpu/drm/drm_crtc_helper.c
30590+++ b/drivers/gpu/drm/drm_crtc_helper.c
30591@@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30592 struct drm_crtc *tmp;
30593 int crtc_mask = 1;
30594
30595- WARN(!crtc, "checking null crtc?\n");
30596+ BUG_ON(!crtc);
30597
30598 dev = crtc->dev;
30599
30600diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30601index ebf7d3f..d64c436 100644
30602--- a/drivers/gpu/drm/drm_drv.c
30603+++ b/drivers/gpu/drm/drm_drv.c
30604@@ -312,7 +312,7 @@ module_exit(drm_core_exit);
30605 /**
30606 * Copy and IOCTL return string to user space
30607 */
30608-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30609+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30610 {
30611 int len;
30612
30613@@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
30614
30615 dev = file_priv->minor->dev;
30616 atomic_inc(&dev->ioctl_count);
30617- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30618+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30619 ++file_priv->ioctl_count;
30620
30621 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30622diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30623index 6263b01..7987f55 100644
30624--- a/drivers/gpu/drm/drm_fops.c
30625+++ b/drivers/gpu/drm/drm_fops.c
30626@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30627 }
30628
30629 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30630- atomic_set(&dev->counts[i], 0);
30631+ atomic_set_unchecked(&dev->counts[i], 0);
30632
30633 dev->sigdata.lock = NULL;
30634
30635@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
30636
30637 retcode = drm_open_helper(inode, filp, dev);
30638 if (!retcode) {
30639- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30640- if (!dev->open_count++)
30641+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30642+ if (local_inc_return(&dev->open_count) == 1)
30643 retcode = drm_setup(dev);
30644 }
30645 if (!retcode) {
30646@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
30647
30648 mutex_lock(&drm_global_mutex);
30649
30650- DRM_DEBUG("open_count = %d\n", dev->open_count);
30651+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30652
30653 if (dev->driver->preclose)
30654 dev->driver->preclose(dev, file_priv);
30655@@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
30656 * Begin inline drm_release
30657 */
30658
30659- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30660+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30661 task_pid_nr(current),
30662 (long)old_encode_dev(file_priv->minor->device),
30663- dev->open_count);
30664+ local_read(&dev->open_count));
30665
30666 /* Release any auth tokens that might point to this file_priv,
30667 (do that under the drm_global_mutex) */
30668@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
30669 * End inline drm_release
30670 */
30671
30672- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30673- if (!--dev->open_count) {
30674+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30675+ if (local_dec_and_test(&dev->open_count)) {
30676 if (atomic_read(&dev->ioctl_count)) {
30677 DRM_ERROR("Device busy: %d\n",
30678 atomic_read(&dev->ioctl_count));
30679diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30680index c87dc96..326055d 100644
30681--- a/drivers/gpu/drm/drm_global.c
30682+++ b/drivers/gpu/drm/drm_global.c
30683@@ -36,7 +36,7 @@
30684 struct drm_global_item {
30685 struct mutex mutex;
30686 void *object;
30687- int refcount;
30688+ atomic_t refcount;
30689 };
30690
30691 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30692@@ -49,7 +49,7 @@ void drm_global_init(void)
30693 struct drm_global_item *item = &glob[i];
30694 mutex_init(&item->mutex);
30695 item->object = NULL;
30696- item->refcount = 0;
30697+ atomic_set(&item->refcount, 0);
30698 }
30699 }
30700
30701@@ -59,7 +59,7 @@ void drm_global_release(void)
30702 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30703 struct drm_global_item *item = &glob[i];
30704 BUG_ON(item->object != NULL);
30705- BUG_ON(item->refcount != 0);
30706+ BUG_ON(atomic_read(&item->refcount) != 0);
30707 }
30708 }
30709
30710@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30711 void *object;
30712
30713 mutex_lock(&item->mutex);
30714- if (item->refcount == 0) {
30715+ if (atomic_read(&item->refcount) == 0) {
30716 item->object = kzalloc(ref->size, GFP_KERNEL);
30717 if (unlikely(item->object == NULL)) {
30718 ret = -ENOMEM;
30719@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30720 goto out_err;
30721
30722 }
30723- ++item->refcount;
30724+ atomic_inc(&item->refcount);
30725 ref->object = item->object;
30726 object = item->object;
30727 mutex_unlock(&item->mutex);
30728@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30729 struct drm_global_item *item = &glob[ref->global_type];
30730
30731 mutex_lock(&item->mutex);
30732- BUG_ON(item->refcount == 0);
30733+ BUG_ON(atomic_read(&item->refcount) == 0);
30734 BUG_ON(ref->object != item->object);
30735- if (--item->refcount == 0) {
30736+ if (atomic_dec_and_test(&item->refcount)) {
30737 ref->release(ref);
30738 item->object = NULL;
30739 }
30740diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30741index ab1162d..42587b2 100644
30742--- a/drivers/gpu/drm/drm_info.c
30743+++ b/drivers/gpu/drm/drm_info.c
30744@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30745 struct drm_local_map *map;
30746 struct drm_map_list *r_list;
30747
30748- /* Hardcoded from _DRM_FRAME_BUFFER,
30749- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30750- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30751- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30752+ static const char * const types[] = {
30753+ [_DRM_FRAME_BUFFER] = "FB",
30754+ [_DRM_REGISTERS] = "REG",
30755+ [_DRM_SHM] = "SHM",
30756+ [_DRM_AGP] = "AGP",
30757+ [_DRM_SCATTER_GATHER] = "SG",
30758+ [_DRM_CONSISTENT] = "PCI",
30759+ [_DRM_GEM] = "GEM" };
30760 const char *type;
30761 int i;
30762
30763@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30764 map = r_list->map;
30765 if (!map)
30766 continue;
30767- if (map->type < 0 || map->type > 5)
30768+ if (map->type >= ARRAY_SIZE(types))
30769 type = "??";
30770 else
30771 type = types[map->type];
30772@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30773 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30774 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30775 vma->vm_flags & VM_IO ? 'i' : '-',
30776+#ifdef CONFIG_GRKERNSEC_HIDESYM
30777+ 0);
30778+#else
30779 vma->vm_pgoff);
30780+#endif
30781
30782 #if defined(__i386__)
30783 pgprot = pgprot_val(vma->vm_page_prot);
30784diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30785index 637fcc3..e890b33 100644
30786--- a/drivers/gpu/drm/drm_ioc32.c
30787+++ b/drivers/gpu/drm/drm_ioc32.c
30788@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30789 request = compat_alloc_user_space(nbytes);
30790 if (!access_ok(VERIFY_WRITE, request, nbytes))
30791 return -EFAULT;
30792- list = (struct drm_buf_desc *) (request + 1);
30793+ list = (struct drm_buf_desc __user *) (request + 1);
30794
30795 if (__put_user(count, &request->count)
30796 || __put_user(list, &request->list))
30797@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30798 request = compat_alloc_user_space(nbytes);
30799 if (!access_ok(VERIFY_WRITE, request, nbytes))
30800 return -EFAULT;
30801- list = (struct drm_buf_pub *) (request + 1);
30802+ list = (struct drm_buf_pub __user *) (request + 1);
30803
30804 if (__put_user(count, &request->count)
30805 || __put_user(list, &request->list))
30806diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30807index 956fd38..e52167a 100644
30808--- a/drivers/gpu/drm/drm_ioctl.c
30809+++ b/drivers/gpu/drm/drm_ioctl.c
30810@@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30811 stats->data[i].value =
30812 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30813 else
30814- stats->data[i].value = atomic_read(&dev->counts[i]);
30815+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30816 stats->data[i].type = dev->types[i];
30817 }
30818
30819diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30820index c79c713..2048588 100644
30821--- a/drivers/gpu/drm/drm_lock.c
30822+++ b/drivers/gpu/drm/drm_lock.c
30823@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30824 if (drm_lock_take(&master->lock, lock->context)) {
30825 master->lock.file_priv = file_priv;
30826 master->lock.lock_time = jiffies;
30827- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30828+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30829 break; /* Got lock */
30830 }
30831
30832@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30833 return -EINVAL;
30834 }
30835
30836- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30837+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30838
30839 if (drm_lock_free(&master->lock, lock->context)) {
30840 /* FIXME: Should really bail out here. */
30841diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30842index 7f4b4e1..bf4def2 100644
30843--- a/drivers/gpu/drm/i810/i810_dma.c
30844+++ b/drivers/gpu/drm/i810/i810_dma.c
30845@@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30846 dma->buflist[vertex->idx],
30847 vertex->discard, vertex->used);
30848
30849- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30850- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30851+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30852+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30853 sarea_priv->last_enqueue = dev_priv->counter - 1;
30854 sarea_priv->last_dispatch = (int)hw_status[5];
30855
30856@@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30857 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30858 mc->last_render);
30859
30860- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30861- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30862+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30863+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30864 sarea_priv->last_enqueue = dev_priv->counter - 1;
30865 sarea_priv->last_dispatch = (int)hw_status[5];
30866
30867diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30868index c9339f4..f5e1b9d 100644
30869--- a/drivers/gpu/drm/i810/i810_drv.h
30870+++ b/drivers/gpu/drm/i810/i810_drv.h
30871@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30872 int page_flipping;
30873
30874 wait_queue_head_t irq_queue;
30875- atomic_t irq_received;
30876- atomic_t irq_emitted;
30877+ atomic_unchecked_t irq_received;
30878+ atomic_unchecked_t irq_emitted;
30879
30880 int front_offset;
30881 } drm_i810_private_t;
30882diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30883index deaa657..e0fd296 100644
30884--- a/drivers/gpu/drm/i915/i915_debugfs.c
30885+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30886@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30887 I915_READ(GTIMR));
30888 }
30889 seq_printf(m, "Interrupts received: %d\n",
30890- atomic_read(&dev_priv->irq_received));
30891+ atomic_read_unchecked(&dev_priv->irq_received));
30892 for (i = 0; i < I915_NUM_RINGS; i++) {
30893 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30894 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30895@@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30896 return ret;
30897
30898 if (opregion->header)
30899- seq_write(m, opregion->header, OPREGION_SIZE);
30900+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30901
30902 mutex_unlock(&dev->struct_mutex);
30903
30904diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30905index ddfe3d9..f6e6b21 100644
30906--- a/drivers/gpu/drm/i915/i915_dma.c
30907+++ b/drivers/gpu/drm/i915/i915_dma.c
30908@@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30909 bool can_switch;
30910
30911 spin_lock(&dev->count_lock);
30912- can_switch = (dev->open_count == 0);
30913+ can_switch = (local_read(&dev->open_count) == 0);
30914 spin_unlock(&dev->count_lock);
30915 return can_switch;
30916 }
30917diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30918index 9689ca3..294f9c1 100644
30919--- a/drivers/gpu/drm/i915/i915_drv.h
30920+++ b/drivers/gpu/drm/i915/i915_drv.h
30921@@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
30922 /* render clock increase/decrease */
30923 /* display clock increase/decrease */
30924 /* pll clock increase/decrease */
30925-};
30926+} __no_const;
30927
30928 struct intel_device_info {
30929 u8 gen;
30930@@ -320,7 +320,7 @@ typedef struct drm_i915_private {
30931 int current_page;
30932 int page_flipping;
30933
30934- atomic_t irq_received;
30935+ atomic_unchecked_t irq_received;
30936
30937 /* protects the irq masks */
30938 spinlock_t irq_lock;
30939@@ -896,7 +896,7 @@ struct drm_i915_gem_object {
30940 * will be page flipped away on the next vblank. When it
30941 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30942 */
30943- atomic_t pending_flip;
30944+ atomic_unchecked_t pending_flip;
30945 };
30946
30947 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30948@@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30949 extern void intel_teardown_gmbus(struct drm_device *dev);
30950 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30951 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30952-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30953+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30954 {
30955 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30956 }
30957diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30958index e159e33..cdcc663 100644
30959--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30960+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30961@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30962 i915_gem_clflush_object(obj);
30963
30964 if (obj->base.pending_write_domain)
30965- cd->flips |= atomic_read(&obj->pending_flip);
30966+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30967
30968 /* The actual obj->write_domain will be updated with
30969 * pending_write_domain after we emit the accumulated flush for all
30970@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30971
30972 static int
30973 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30974- int count)
30975+ unsigned int count)
30976 {
30977- int i;
30978+ unsigned int i;
30979
30980 for (i = 0; i < count; i++) {
30981 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30982diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30983index 5bd4361..0241a42 100644
30984--- a/drivers/gpu/drm/i915/i915_irq.c
30985+++ b/drivers/gpu/drm/i915/i915_irq.c
30986@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30987 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30988 struct drm_i915_master_private *master_priv;
30989
30990- atomic_inc(&dev_priv->irq_received);
30991+ atomic_inc_unchecked(&dev_priv->irq_received);
30992
30993 /* disable master interrupt before clearing iir */
30994 de_ier = I915_READ(DEIER);
30995@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30996 struct drm_i915_master_private *master_priv;
30997 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30998
30999- atomic_inc(&dev_priv->irq_received);
31000+ atomic_inc_unchecked(&dev_priv->irq_received);
31001
31002 if (IS_GEN6(dev))
31003 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
31004@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
31005 int ret = IRQ_NONE, pipe;
31006 bool blc_event = false;
31007
31008- atomic_inc(&dev_priv->irq_received);
31009+ atomic_inc_unchecked(&dev_priv->irq_received);
31010
31011 iir = I915_READ(IIR);
31012
31013@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
31014 {
31015 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
31016
31017- atomic_set(&dev_priv->irq_received, 0);
31018+ atomic_set_unchecked(&dev_priv->irq_received, 0);
31019
31020 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
31021 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
31022@@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
31023 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
31024 int pipe;
31025
31026- atomic_set(&dev_priv->irq_received, 0);
31027+ atomic_set_unchecked(&dev_priv->irq_received, 0);
31028
31029 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
31030 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
31031diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
31032index 2163818..cede019 100644
31033--- a/drivers/gpu/drm/i915/intel_display.c
31034+++ b/drivers/gpu/drm/i915/intel_display.c
31035@@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
31036
31037 wait_event(dev_priv->pending_flip_queue,
31038 atomic_read(&dev_priv->mm.wedged) ||
31039- atomic_read(&obj->pending_flip) == 0);
31040+ atomic_read_unchecked(&obj->pending_flip) == 0);
31041
31042 /* Big Hammer, we also need to ensure that any pending
31043 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
31044@@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
31045 obj = to_intel_framebuffer(crtc->fb)->obj;
31046 dev_priv = crtc->dev->dev_private;
31047 wait_event(dev_priv->pending_flip_queue,
31048- atomic_read(&obj->pending_flip) == 0);
31049+ atomic_read_unchecked(&obj->pending_flip) == 0);
31050 }
31051
31052 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
31053@@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
31054
31055 atomic_clear_mask(1 << intel_crtc->plane,
31056 &obj->pending_flip.counter);
31057- if (atomic_read(&obj->pending_flip) == 0)
31058+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
31059 wake_up(&dev_priv->pending_flip_queue);
31060
31061 schedule_work(&work->work);
31062@@ -7354,7 +7354,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
31063 OUT_RING(fb->pitches[0] | obj->tiling_mode);
31064 OUT_RING(obj->gtt_offset);
31065
31066- pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
31067+ /* Contrary to the suggestions in the documentation,
31068+ * "Enable Panel Fitter" does not seem to be required when page
31069+ * flipping with a non-native mode, and worse causes a normal
31070+ * modeset to fail.
31071+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
31072+ */
31073+ pf = 0;
31074 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
31075 OUT_RING(pf | pipesrc);
31076 ADVANCE_LP_RING();
31077@@ -7461,7 +7467,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
31078 /* Block clients from rendering to the new back buffer until
31079 * the flip occurs and the object is no longer visible.
31080 */
31081- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31082+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31083
31084 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
31085 if (ret)
31086@@ -7475,7 +7481,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
31087 return 0;
31088
31089 cleanup_pending:
31090- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31091+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
31092 drm_gem_object_unreference(&work->old_fb_obj->base);
31093 drm_gem_object_unreference(&obj->base);
31094 mutex_unlock(&dev->struct_mutex);
31095diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
31096index 54558a0..2d97005 100644
31097--- a/drivers/gpu/drm/mga/mga_drv.h
31098+++ b/drivers/gpu/drm/mga/mga_drv.h
31099@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
31100 u32 clear_cmd;
31101 u32 maccess;
31102
31103- atomic_t vbl_received; /**< Number of vblanks received. */
31104+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
31105 wait_queue_head_t fence_queue;
31106- atomic_t last_fence_retired;
31107+ atomic_unchecked_t last_fence_retired;
31108 u32 next_fence_to_post;
31109
31110 unsigned int fb_cpp;
31111diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
31112index 2581202..f230a8d9 100644
31113--- a/drivers/gpu/drm/mga/mga_irq.c
31114+++ b/drivers/gpu/drm/mga/mga_irq.c
31115@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
31116 if (crtc != 0)
31117 return 0;
31118
31119- return atomic_read(&dev_priv->vbl_received);
31120+ return atomic_read_unchecked(&dev_priv->vbl_received);
31121 }
31122
31123
31124@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
31125 /* VBLANK interrupt */
31126 if (status & MGA_VLINEPEN) {
31127 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
31128- atomic_inc(&dev_priv->vbl_received);
31129+ atomic_inc_unchecked(&dev_priv->vbl_received);
31130 drm_handle_vblank(dev, 0);
31131 handled = 1;
31132 }
31133@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
31134 if ((prim_start & ~0x03) != (prim_end & ~0x03))
31135 MGA_WRITE(MGA_PRIMEND, prim_end);
31136
31137- atomic_inc(&dev_priv->last_fence_retired);
31138+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
31139 DRM_WAKEUP(&dev_priv->fence_queue);
31140 handled = 1;
31141 }
31142@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
31143 * using fences.
31144 */
31145 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
31146- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
31147+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
31148 - *sequence) <= (1 << 23)));
31149
31150 *sequence = cur_fence;
31151diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
31152index e5cbead..6c354a3 100644
31153--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
31154+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
31155@@ -199,7 +199,7 @@ struct methods {
31156 const char desc[8];
31157 void (*loadbios)(struct drm_device *, uint8_t *);
31158 const bool rw;
31159-};
31160+} __do_const;
31161
31162 static struct methods shadow_methods[] = {
31163 { "PRAMIN", load_vbios_pramin, true },
31164@@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
31165 struct bit_table {
31166 const char id;
31167 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
31168-};
31169+} __no_const;
31170
31171 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
31172
31173diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
31174index b827098..c31a797 100644
31175--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
31176+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
31177@@ -242,7 +242,7 @@ struct nouveau_channel {
31178 struct list_head pending;
31179 uint32_t sequence;
31180 uint32_t sequence_ack;
31181- atomic_t last_sequence_irq;
31182+ atomic_unchecked_t last_sequence_irq;
31183 struct nouveau_vma vma;
31184 } fence;
31185
31186@@ -323,7 +323,7 @@ struct nouveau_exec_engine {
31187 u32 handle, u16 class);
31188 void (*set_tile_region)(struct drm_device *dev, int i);
31189 void (*tlb_flush)(struct drm_device *, int engine);
31190-};
31191+} __no_const;
31192
31193 struct nouveau_instmem_engine {
31194 void *priv;
31195@@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
31196 struct nouveau_mc_engine {
31197 int (*init)(struct drm_device *dev);
31198 void (*takedown)(struct drm_device *dev);
31199-};
31200+} __no_const;
31201
31202 struct nouveau_timer_engine {
31203 int (*init)(struct drm_device *dev);
31204 void (*takedown)(struct drm_device *dev);
31205 uint64_t (*read)(struct drm_device *dev);
31206-};
31207+} __no_const;
31208
31209 struct nouveau_fb_engine {
31210 int num_tiles;
31211@@ -566,7 +566,7 @@ struct nouveau_vram_engine {
31212 void (*put)(struct drm_device *, struct nouveau_mem **);
31213
31214 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
31215-};
31216+} __no_const;
31217
31218 struct nouveau_engine {
31219 struct nouveau_instmem_engine instmem;
31220@@ -714,7 +714,7 @@ struct drm_nouveau_private {
31221 struct drm_global_reference mem_global_ref;
31222 struct ttm_bo_global_ref bo_global_ref;
31223 struct ttm_bo_device bdev;
31224- atomic_t validate_sequence;
31225+ atomic_unchecked_t validate_sequence;
31226 } ttm;
31227
31228 struct {
31229diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
31230index 2f6daae..c9d7b9e 100644
31231--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
31232+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
31233@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
31234 if (USE_REFCNT(dev))
31235 sequence = nvchan_rd32(chan, 0x48);
31236 else
31237- sequence = atomic_read(&chan->fence.last_sequence_irq);
31238+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
31239
31240 if (chan->fence.sequence_ack == sequence)
31241 goto out;
31242@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
31243 return ret;
31244 }
31245
31246- atomic_set(&chan->fence.last_sequence_irq, 0);
31247+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
31248 return 0;
31249 }
31250
31251diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
31252index 7ce3fde..cb3ea04 100644
31253--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
31254+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
31255@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
31256 int trycnt = 0;
31257 int ret, i;
31258
31259- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
31260+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
31261 retry:
31262 if (++trycnt > 100000) {
31263 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
31264diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
31265index f80c5e0..936baa7 100644
31266--- a/drivers/gpu/drm/nouveau/nouveau_state.c
31267+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
31268@@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
31269 bool can_switch;
31270
31271 spin_lock(&dev->count_lock);
31272- can_switch = (dev->open_count == 0);
31273+ can_switch = (local_read(&dev->open_count) == 0);
31274 spin_unlock(&dev->count_lock);
31275 return can_switch;
31276 }
31277diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
31278index dbdea8e..cd6eeeb 100644
31279--- a/drivers/gpu/drm/nouveau/nv04_graph.c
31280+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
31281@@ -554,7 +554,7 @@ static int
31282 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
31283 u32 class, u32 mthd, u32 data)
31284 {
31285- atomic_set(&chan->fence.last_sequence_irq, data);
31286+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
31287 return 0;
31288 }
31289
31290diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
31291index bcac90b..53bfc76 100644
31292--- a/drivers/gpu/drm/r128/r128_cce.c
31293+++ b/drivers/gpu/drm/r128/r128_cce.c
31294@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
31295
31296 /* GH: Simple idle check.
31297 */
31298- atomic_set(&dev_priv->idle_count, 0);
31299+ atomic_set_unchecked(&dev_priv->idle_count, 0);
31300
31301 /* We don't support anything other than bus-mastering ring mode,
31302 * but the ring can be in either AGP or PCI space for the ring
31303diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
31304index 930c71b..499aded 100644
31305--- a/drivers/gpu/drm/r128/r128_drv.h
31306+++ b/drivers/gpu/drm/r128/r128_drv.h
31307@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
31308 int is_pci;
31309 unsigned long cce_buffers_offset;
31310
31311- atomic_t idle_count;
31312+ atomic_unchecked_t idle_count;
31313
31314 int page_flipping;
31315 int current_page;
31316 u32 crtc_offset;
31317 u32 crtc_offset_cntl;
31318
31319- atomic_t vbl_received;
31320+ atomic_unchecked_t vbl_received;
31321
31322 u32 color_fmt;
31323 unsigned int front_offset;
31324diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
31325index 429d5a0..7e899ed 100644
31326--- a/drivers/gpu/drm/r128/r128_irq.c
31327+++ b/drivers/gpu/drm/r128/r128_irq.c
31328@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
31329 if (crtc != 0)
31330 return 0;
31331
31332- return atomic_read(&dev_priv->vbl_received);
31333+ return atomic_read_unchecked(&dev_priv->vbl_received);
31334 }
31335
31336 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31337@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
31338 /* VBLANK interrupt */
31339 if (status & R128_CRTC_VBLANK_INT) {
31340 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
31341- atomic_inc(&dev_priv->vbl_received);
31342+ atomic_inc_unchecked(&dev_priv->vbl_received);
31343 drm_handle_vblank(dev, 0);
31344 return IRQ_HANDLED;
31345 }
31346diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
31347index a9e33ce..09edd4b 100644
31348--- a/drivers/gpu/drm/r128/r128_state.c
31349+++ b/drivers/gpu/drm/r128/r128_state.c
31350@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
31351
31352 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
31353 {
31354- if (atomic_read(&dev_priv->idle_count) == 0)
31355+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
31356 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
31357 else
31358- atomic_set(&dev_priv->idle_count, 0);
31359+ atomic_set_unchecked(&dev_priv->idle_count, 0);
31360 }
31361
31362 #endif
31363diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31364index 5a82b6b..9e69c73 100644
31365--- a/drivers/gpu/drm/radeon/mkregtable.c
31366+++ b/drivers/gpu/drm/radeon/mkregtable.c
31367@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
31368 regex_t mask_rex;
31369 regmatch_t match[4];
31370 char buf[1024];
31371- size_t end;
31372+ long end;
31373 int len;
31374 int done = 0;
31375 int r;
31376 unsigned o;
31377 struct offset *offset;
31378 char last_reg_s[10];
31379- int last_reg;
31380+ unsigned long last_reg;
31381
31382 if (regcomp
31383 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31384diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31385index 1668ec1..30ebdab 100644
31386--- a/drivers/gpu/drm/radeon/radeon.h
31387+++ b/drivers/gpu/drm/radeon/radeon.h
31388@@ -250,7 +250,7 @@ struct radeon_fence_driver {
31389 uint32_t scratch_reg;
31390 uint64_t gpu_addr;
31391 volatile uint32_t *cpu_addr;
31392- atomic_t seq;
31393+ atomic_unchecked_t seq;
31394 uint32_t last_seq;
31395 unsigned long last_jiffies;
31396 unsigned long last_timeout;
31397@@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
31398 int x2, int y2);
31399 void (*draw_auto)(struct radeon_device *rdev);
31400 void (*set_default_state)(struct radeon_device *rdev);
31401-};
31402+} __no_const;
31403
31404 struct r600_blit {
31405 struct mutex mutex;
31406@@ -1201,7 +1201,7 @@ struct radeon_asic {
31407 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
31408 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31409 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31410-};
31411+} __no_const;
31412
31413 /*
31414 * Asic structures
31415diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31416index 49f7cb7..2fcb48f 100644
31417--- a/drivers/gpu/drm/radeon/radeon_device.c
31418+++ b/drivers/gpu/drm/radeon/radeon_device.c
31419@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31420 bool can_switch;
31421
31422 spin_lock(&dev->count_lock);
31423- can_switch = (dev->open_count == 0);
31424+ can_switch = (local_read(&dev->open_count) == 0);
31425 spin_unlock(&dev->count_lock);
31426 return can_switch;
31427 }
31428diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31429index a1b59ca..86f2d44 100644
31430--- a/drivers/gpu/drm/radeon/radeon_drv.h
31431+++ b/drivers/gpu/drm/radeon/radeon_drv.h
31432@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31433
31434 /* SW interrupt */
31435 wait_queue_head_t swi_queue;
31436- atomic_t swi_emitted;
31437+ atomic_unchecked_t swi_emitted;
31438 int vblank_crtc;
31439 uint32_t irq_enable_reg;
31440 uint32_t r500_disp_irq_reg;
31441diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
31442index 4bd36a3..e66fe9c 100644
31443--- a/drivers/gpu/drm/radeon/radeon_fence.c
31444+++ b/drivers/gpu/drm/radeon/radeon_fence.c
31445@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
31446 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
31447 return 0;
31448 }
31449- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
31450+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
31451 if (!rdev->ring[fence->ring].ready)
31452 /* FIXME: cp is not running assume everythings is done right
31453 * away
31454@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
31455 }
31456 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
31457 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
31458- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
31459+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
31460 rdev->fence_drv[ring].initialized = true;
31461 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
31462 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
31463@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
31464 rdev->fence_drv[ring].scratch_reg = -1;
31465 rdev->fence_drv[ring].cpu_addr = NULL;
31466 rdev->fence_drv[ring].gpu_addr = 0;
31467- atomic_set(&rdev->fence_drv[ring].seq, 0);
31468+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
31469 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
31470 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
31471 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
31472diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31473index 48b7cea..342236f 100644
31474--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31475+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31476@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31477 request = compat_alloc_user_space(sizeof(*request));
31478 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31479 || __put_user(req32.param, &request->param)
31480- || __put_user((void __user *)(unsigned long)req32.value,
31481+ || __put_user((unsigned long)req32.value,
31482 &request->value))
31483 return -EFAULT;
31484
31485diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31486index 00da384..32f972d 100644
31487--- a/drivers/gpu/drm/radeon/radeon_irq.c
31488+++ b/drivers/gpu/drm/radeon/radeon_irq.c
31489@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31490 unsigned int ret;
31491 RING_LOCALS;
31492
31493- atomic_inc(&dev_priv->swi_emitted);
31494- ret = atomic_read(&dev_priv->swi_emitted);
31495+ atomic_inc_unchecked(&dev_priv->swi_emitted);
31496+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31497
31498 BEGIN_RING(4);
31499 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31500@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31501 drm_radeon_private_t *dev_priv =
31502 (drm_radeon_private_t *) dev->dev_private;
31503
31504- atomic_set(&dev_priv->swi_emitted, 0);
31505+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31506 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31507
31508 dev->max_vblank_count = 0x001fffff;
31509diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31510index e8422ae..d22d4a8 100644
31511--- a/drivers/gpu/drm/radeon/radeon_state.c
31512+++ b/drivers/gpu/drm/radeon/radeon_state.c
31513@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31514 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31515 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31516
31517- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31518+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31519 sarea_priv->nbox * sizeof(depth_boxes[0])))
31520 return -EFAULT;
31521
31522@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31523 {
31524 drm_radeon_private_t *dev_priv = dev->dev_private;
31525 drm_radeon_getparam_t *param = data;
31526- int value;
31527+ int value = 0;
31528
31529 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31530
31531diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31532index c421e77..e6bf2e8 100644
31533--- a/drivers/gpu/drm/radeon/radeon_ttm.c
31534+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31535@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31536 }
31537 if (unlikely(ttm_vm_ops == NULL)) {
31538 ttm_vm_ops = vma->vm_ops;
31539- radeon_ttm_vm_ops = *ttm_vm_ops;
31540- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31541+ pax_open_kernel();
31542+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31543+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31544+ pax_close_kernel();
31545 }
31546 vma->vm_ops = &radeon_ttm_vm_ops;
31547 return 0;
31548diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31549index f68dff2..8df955c 100644
31550--- a/drivers/gpu/drm/radeon/rs690.c
31551+++ b/drivers/gpu/drm/radeon/rs690.c
31552@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31553 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31554 rdev->pm.sideport_bandwidth.full)
31555 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31556- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31557+ read_delay_latency.full = dfixed_const(800 * 1000);
31558 read_delay_latency.full = dfixed_div(read_delay_latency,
31559 rdev->pm.igp_sideport_mclk);
31560+ a.full = dfixed_const(370);
31561+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31562 } else {
31563 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31564 rdev->pm.k8_bandwidth.full)
31565diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31566index 499debd..66fce72 100644
31567--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31568+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31569@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
31570 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31571 struct shrink_control *sc)
31572 {
31573- static atomic_t start_pool = ATOMIC_INIT(0);
31574+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31575 unsigned i;
31576- unsigned pool_offset = atomic_add_return(1, &start_pool);
31577+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31578 struct ttm_page_pool *pool;
31579 int shrink_pages = sc->nr_to_scan;
31580
31581diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31582index 88edacc..1e5412b 100644
31583--- a/drivers/gpu/drm/via/via_drv.h
31584+++ b/drivers/gpu/drm/via/via_drv.h
31585@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31586 typedef uint32_t maskarray_t[5];
31587
31588 typedef struct drm_via_irq {
31589- atomic_t irq_received;
31590+ atomic_unchecked_t irq_received;
31591 uint32_t pending_mask;
31592 uint32_t enable_mask;
31593 wait_queue_head_t irq_queue;
31594@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31595 struct timeval last_vblank;
31596 int last_vblank_valid;
31597 unsigned usec_per_vblank;
31598- atomic_t vbl_received;
31599+ atomic_unchecked_t vbl_received;
31600 drm_via_state_t hc_state;
31601 char pci_buf[VIA_PCI_BUF_SIZE];
31602 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31603diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31604index d391f48..10c8ca3 100644
31605--- a/drivers/gpu/drm/via/via_irq.c
31606+++ b/drivers/gpu/drm/via/via_irq.c
31607@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31608 if (crtc != 0)
31609 return 0;
31610
31611- return atomic_read(&dev_priv->vbl_received);
31612+ return atomic_read_unchecked(&dev_priv->vbl_received);
31613 }
31614
31615 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31616@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31617
31618 status = VIA_READ(VIA_REG_INTERRUPT);
31619 if (status & VIA_IRQ_VBLANK_PENDING) {
31620- atomic_inc(&dev_priv->vbl_received);
31621- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31622+ atomic_inc_unchecked(&dev_priv->vbl_received);
31623+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31624 do_gettimeofday(&cur_vblank);
31625 if (dev_priv->last_vblank_valid) {
31626 dev_priv->usec_per_vblank =
31627@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31628 dev_priv->last_vblank = cur_vblank;
31629 dev_priv->last_vblank_valid = 1;
31630 }
31631- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31632+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31633 DRM_DEBUG("US per vblank is: %u\n",
31634 dev_priv->usec_per_vblank);
31635 }
31636@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31637
31638 for (i = 0; i < dev_priv->num_irqs; ++i) {
31639 if (status & cur_irq->pending_mask) {
31640- atomic_inc(&cur_irq->irq_received);
31641+ atomic_inc_unchecked(&cur_irq->irq_received);
31642 DRM_WAKEUP(&cur_irq->irq_queue);
31643 handled = 1;
31644 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31645@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31646 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31647 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31648 masks[irq][4]));
31649- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31650+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31651 } else {
31652 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31653 (((cur_irq_sequence =
31654- atomic_read(&cur_irq->irq_received)) -
31655+ atomic_read_unchecked(&cur_irq->irq_received)) -
31656 *sequence) <= (1 << 23)));
31657 }
31658 *sequence = cur_irq_sequence;
31659@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31660 }
31661
31662 for (i = 0; i < dev_priv->num_irqs; ++i) {
31663- atomic_set(&cur_irq->irq_received, 0);
31664+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31665 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31666 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31667 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31668@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31669 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31670 case VIA_IRQ_RELATIVE:
31671 irqwait->request.sequence +=
31672- atomic_read(&cur_irq->irq_received);
31673+ atomic_read_unchecked(&cur_irq->irq_received);
31674 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31675 case VIA_IRQ_ABSOLUTE:
31676 break;
31677diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31678index dc27970..f18b008 100644
31679--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31680+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31681@@ -260,7 +260,7 @@ struct vmw_private {
31682 * Fencing and IRQs.
31683 */
31684
31685- atomic_t marker_seq;
31686+ atomic_unchecked_t marker_seq;
31687 wait_queue_head_t fence_queue;
31688 wait_queue_head_t fifo_queue;
31689 int fence_queue_waiters; /* Protected by hw_mutex */
31690diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31691index a0c2f12..68ae6cb 100644
31692--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31693+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31694@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31695 (unsigned int) min,
31696 (unsigned int) fifo->capabilities);
31697
31698- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31699+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31700 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31701 vmw_marker_queue_init(&fifo->marker_queue);
31702 return vmw_fifo_send_fence(dev_priv, &dummy);
31703@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31704 if (reserveable)
31705 iowrite32(bytes, fifo_mem +
31706 SVGA_FIFO_RESERVED);
31707- return fifo_mem + (next_cmd >> 2);
31708+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31709 } else {
31710 need_bounce = true;
31711 }
31712@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31713
31714 fm = vmw_fifo_reserve(dev_priv, bytes);
31715 if (unlikely(fm == NULL)) {
31716- *seqno = atomic_read(&dev_priv->marker_seq);
31717+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31718 ret = -ENOMEM;
31719 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31720 false, 3*HZ);
31721@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31722 }
31723
31724 do {
31725- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31726+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31727 } while (*seqno == 0);
31728
31729 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31730diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31731index cabc95f..14b3d77 100644
31732--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31733+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31734@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31735 * emitted. Then the fence is stale and signaled.
31736 */
31737
31738- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31739+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31740 > VMW_FENCE_WRAP);
31741
31742 return ret;
31743@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31744
31745 if (fifo_idle)
31746 down_read(&fifo_state->rwsem);
31747- signal_seq = atomic_read(&dev_priv->marker_seq);
31748+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31749 ret = 0;
31750
31751 for (;;) {
31752diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31753index 8a8725c..afed796 100644
31754--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31755+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31756@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31757 while (!vmw_lag_lt(queue, us)) {
31758 spin_lock(&queue->lock);
31759 if (list_empty(&queue->head))
31760- seqno = atomic_read(&dev_priv->marker_seq);
31761+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31762 else {
31763 marker = list_first_entry(&queue->head,
31764 struct vmw_marker, head);
31765diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31766index 75dbe34..f9204a8 100644
31767--- a/drivers/hid/hid-core.c
31768+++ b/drivers/hid/hid-core.c
31769@@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
31770
31771 int hid_add_device(struct hid_device *hdev)
31772 {
31773- static atomic_t id = ATOMIC_INIT(0);
31774+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31775 int ret;
31776
31777 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31778@@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
31779 /* XXX hack, any other cleaner solution after the driver core
31780 * is converted to allow more than 20 bytes as the device name? */
31781 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31782- hdev->vendor, hdev->product, atomic_inc_return(&id));
31783+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31784
31785 hid_debug_register(hdev, dev_name(&hdev->dev));
31786 ret = device_add(&hdev->dev);
31787diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31788index b1ec0e2..c295a61 100644
31789--- a/drivers/hid/usbhid/hiddev.c
31790+++ b/drivers/hid/usbhid/hiddev.c
31791@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31792 break;
31793
31794 case HIDIOCAPPLICATION:
31795- if (arg < 0 || arg >= hid->maxapplication)
31796+ if (arg >= hid->maxapplication)
31797 break;
31798
31799 for (i = 0; i < hid->maxcollection; i++)
31800diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31801index 4065374..10ed7dc 100644
31802--- a/drivers/hv/channel.c
31803+++ b/drivers/hv/channel.c
31804@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31805 int ret = 0;
31806 int t;
31807
31808- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31809- atomic_inc(&vmbus_connection.next_gpadl_handle);
31810+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31811+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31812
31813 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31814 if (ret)
31815diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31816index 12aa97f..c0679f7 100644
31817--- a/drivers/hv/hv.c
31818+++ b/drivers/hv/hv.c
31819@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31820 u64 output_address = (output) ? virt_to_phys(output) : 0;
31821 u32 output_address_hi = output_address >> 32;
31822 u32 output_address_lo = output_address & 0xFFFFFFFF;
31823- void *hypercall_page = hv_context.hypercall_page;
31824+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31825
31826 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31827 "=a"(hv_status_lo) : "d" (control_hi),
31828diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31829index 6d7d286..92b0873 100644
31830--- a/drivers/hv/hyperv_vmbus.h
31831+++ b/drivers/hv/hyperv_vmbus.h
31832@@ -556,7 +556,7 @@ enum vmbus_connect_state {
31833 struct vmbus_connection {
31834 enum vmbus_connect_state conn_state;
31835
31836- atomic_t next_gpadl_handle;
31837+ atomic_unchecked_t next_gpadl_handle;
31838
31839 /*
31840 * Represents channel interrupts. Each bit position represents a
31841diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31842index a220e57..428f54d 100644
31843--- a/drivers/hv/vmbus_drv.c
31844+++ b/drivers/hv/vmbus_drv.c
31845@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31846 {
31847 int ret = 0;
31848
31849- static atomic_t device_num = ATOMIC_INIT(0);
31850+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31851
31852 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31853- atomic_inc_return(&device_num));
31854+ atomic_inc_return_unchecked(&device_num));
31855
31856 child_device_obj->device.bus = &hv_bus;
31857 child_device_obj->device.parent = &hv_acpi_dev->dev;
31858diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31859index 554f046..f8b4729 100644
31860--- a/drivers/hwmon/acpi_power_meter.c
31861+++ b/drivers/hwmon/acpi_power_meter.c
31862@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31863 return res;
31864
31865 temp /= 1000;
31866- if (temp < 0)
31867- return -EINVAL;
31868
31869 mutex_lock(&resource->lock);
31870 resource->trip[attr->index - 7] = temp;
31871diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31872index 91fdd1f..b66a686 100644
31873--- a/drivers/hwmon/sht15.c
31874+++ b/drivers/hwmon/sht15.c
31875@@ -166,7 +166,7 @@ struct sht15_data {
31876 int supply_uV;
31877 bool supply_uV_valid;
31878 struct work_struct update_supply_work;
31879- atomic_t interrupt_handled;
31880+ atomic_unchecked_t interrupt_handled;
31881 };
31882
31883 /**
31884@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31885 return ret;
31886
31887 gpio_direction_input(data->pdata->gpio_data);
31888- atomic_set(&data->interrupt_handled, 0);
31889+ atomic_set_unchecked(&data->interrupt_handled, 0);
31890
31891 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31892 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31893 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31894 /* Only relevant if the interrupt hasn't occurred. */
31895- if (!atomic_read(&data->interrupt_handled))
31896+ if (!atomic_read_unchecked(&data->interrupt_handled))
31897 schedule_work(&data->read_work);
31898 }
31899 ret = wait_event_timeout(data->wait_queue,
31900@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31901
31902 /* First disable the interrupt */
31903 disable_irq_nosync(irq);
31904- atomic_inc(&data->interrupt_handled);
31905+ atomic_inc_unchecked(&data->interrupt_handled);
31906 /* Then schedule a reading work struct */
31907 if (data->state != SHT15_READING_NOTHING)
31908 schedule_work(&data->read_work);
31909@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31910 * If not, then start the interrupt again - care here as could
31911 * have gone low in meantime so verify it hasn't!
31912 */
31913- atomic_set(&data->interrupt_handled, 0);
31914+ atomic_set_unchecked(&data->interrupt_handled, 0);
31915 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31916 /* If still not occurred or another handler has been scheduled */
31917 if (gpio_get_value(data->pdata->gpio_data)
31918- || atomic_read(&data->interrupt_handled))
31919+ || atomic_read_unchecked(&data->interrupt_handled))
31920 return;
31921 }
31922
31923diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31924index 378fcb5..5e91fa8 100644
31925--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31926+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31927@@ -43,7 +43,7 @@
31928 extern struct i2c_adapter amd756_smbus;
31929
31930 static struct i2c_adapter *s4882_adapter;
31931-static struct i2c_algorithm *s4882_algo;
31932+static i2c_algorithm_no_const *s4882_algo;
31933
31934 /* Wrapper access functions for multiplexed SMBus */
31935 static DEFINE_MUTEX(amd756_lock);
31936diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31937index 29015eb..af2d8e9 100644
31938--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31939+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31940@@ -41,7 +41,7 @@
31941 extern struct i2c_adapter *nforce2_smbus;
31942
31943 static struct i2c_adapter *s4985_adapter;
31944-static struct i2c_algorithm *s4985_algo;
31945+static i2c_algorithm_no_const *s4985_algo;
31946
31947 /* Wrapper access functions for multiplexed SMBus */
31948 static DEFINE_MUTEX(nforce2_lock);
31949diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31950index d7a4833..7fae376 100644
31951--- a/drivers/i2c/i2c-mux.c
31952+++ b/drivers/i2c/i2c-mux.c
31953@@ -28,7 +28,7 @@
31954 /* multiplexer per channel data */
31955 struct i2c_mux_priv {
31956 struct i2c_adapter adap;
31957- struct i2c_algorithm algo;
31958+ i2c_algorithm_no_const algo;
31959
31960 struct i2c_adapter *parent;
31961 void *mux_dev; /* the mux chip/device */
31962diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31963index 57d00ca..0145194 100644
31964--- a/drivers/ide/aec62xx.c
31965+++ b/drivers/ide/aec62xx.c
31966@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31967 .cable_detect = atp86x_cable_detect,
31968 };
31969
31970-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31971+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31972 { /* 0: AEC6210 */
31973 .name = DRV_NAME,
31974 .init_chipset = init_chipset_aec62xx,
31975diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31976index 2c8016a..911a27c 100644
31977--- a/drivers/ide/alim15x3.c
31978+++ b/drivers/ide/alim15x3.c
31979@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31980 .dma_sff_read_status = ide_dma_sff_read_status,
31981 };
31982
31983-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31984+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31985 .name = DRV_NAME,
31986 .init_chipset = init_chipset_ali15x3,
31987 .init_hwif = init_hwif_ali15x3,
31988diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31989index 3747b25..56fc995 100644
31990--- a/drivers/ide/amd74xx.c
31991+++ b/drivers/ide/amd74xx.c
31992@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31993 .udma_mask = udma, \
31994 }
31995
31996-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31997+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31998 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31999 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
32000 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
32001diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
32002index 15f0ead..cb43480 100644
32003--- a/drivers/ide/atiixp.c
32004+++ b/drivers/ide/atiixp.c
32005@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
32006 .cable_detect = atiixp_cable_detect,
32007 };
32008
32009-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
32010+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
32011 { /* 0: IXP200/300/400/700 */
32012 .name = DRV_NAME,
32013 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
32014diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
32015index 5f80312..d1fc438 100644
32016--- a/drivers/ide/cmd64x.c
32017+++ b/drivers/ide/cmd64x.c
32018@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
32019 .dma_sff_read_status = ide_dma_sff_read_status,
32020 };
32021
32022-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
32023+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
32024 { /* 0: CMD643 */
32025 .name = DRV_NAME,
32026 .init_chipset = init_chipset_cmd64x,
32027diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
32028index 2c1e5f7..1444762 100644
32029--- a/drivers/ide/cs5520.c
32030+++ b/drivers/ide/cs5520.c
32031@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
32032 .set_dma_mode = cs5520_set_dma_mode,
32033 };
32034
32035-static const struct ide_port_info cyrix_chipset __devinitdata = {
32036+static const struct ide_port_info cyrix_chipset __devinitconst = {
32037 .name = DRV_NAME,
32038 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
32039 .port_ops = &cs5520_port_ops,
32040diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
32041index 4dc4eb9..49b40ad 100644
32042--- a/drivers/ide/cs5530.c
32043+++ b/drivers/ide/cs5530.c
32044@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
32045 .udma_filter = cs5530_udma_filter,
32046 };
32047
32048-static const struct ide_port_info cs5530_chipset __devinitdata = {
32049+static const struct ide_port_info cs5530_chipset __devinitconst = {
32050 .name = DRV_NAME,
32051 .init_chipset = init_chipset_cs5530,
32052 .init_hwif = init_hwif_cs5530,
32053diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
32054index 5059faf..18d4c85 100644
32055--- a/drivers/ide/cs5535.c
32056+++ b/drivers/ide/cs5535.c
32057@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
32058 .cable_detect = cs5535_cable_detect,
32059 };
32060
32061-static const struct ide_port_info cs5535_chipset __devinitdata = {
32062+static const struct ide_port_info cs5535_chipset __devinitconst = {
32063 .name = DRV_NAME,
32064 .port_ops = &cs5535_port_ops,
32065 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
32066diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
32067index 847553f..3ffb49d 100644
32068--- a/drivers/ide/cy82c693.c
32069+++ b/drivers/ide/cy82c693.c
32070@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
32071 .set_dma_mode = cy82c693_set_dma_mode,
32072 };
32073
32074-static const struct ide_port_info cy82c693_chipset __devinitdata = {
32075+static const struct ide_port_info cy82c693_chipset __devinitconst = {
32076 .name = DRV_NAME,
32077 .init_iops = init_iops_cy82c693,
32078 .port_ops = &cy82c693_port_ops,
32079diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
32080index 58c51cd..4aec3b8 100644
32081--- a/drivers/ide/hpt366.c
32082+++ b/drivers/ide/hpt366.c
32083@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
32084 }
32085 };
32086
32087-static const struct hpt_info hpt36x __devinitdata = {
32088+static const struct hpt_info hpt36x __devinitconst = {
32089 .chip_name = "HPT36x",
32090 .chip_type = HPT36x,
32091 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
32092@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
32093 .timings = &hpt36x_timings
32094 };
32095
32096-static const struct hpt_info hpt370 __devinitdata = {
32097+static const struct hpt_info hpt370 __devinitconst = {
32098 .chip_name = "HPT370",
32099 .chip_type = HPT370,
32100 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
32101@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
32102 .timings = &hpt37x_timings
32103 };
32104
32105-static const struct hpt_info hpt370a __devinitdata = {
32106+static const struct hpt_info hpt370a __devinitconst = {
32107 .chip_name = "HPT370A",
32108 .chip_type = HPT370A,
32109 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
32110@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
32111 .timings = &hpt37x_timings
32112 };
32113
32114-static const struct hpt_info hpt374 __devinitdata = {
32115+static const struct hpt_info hpt374 __devinitconst = {
32116 .chip_name = "HPT374",
32117 .chip_type = HPT374,
32118 .udma_mask = ATA_UDMA5,
32119@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
32120 .timings = &hpt37x_timings
32121 };
32122
32123-static const struct hpt_info hpt372 __devinitdata = {
32124+static const struct hpt_info hpt372 __devinitconst = {
32125 .chip_name = "HPT372",
32126 .chip_type = HPT372,
32127 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32128@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
32129 .timings = &hpt37x_timings
32130 };
32131
32132-static const struct hpt_info hpt372a __devinitdata = {
32133+static const struct hpt_info hpt372a __devinitconst = {
32134 .chip_name = "HPT372A",
32135 .chip_type = HPT372A,
32136 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32137@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
32138 .timings = &hpt37x_timings
32139 };
32140
32141-static const struct hpt_info hpt302 __devinitdata = {
32142+static const struct hpt_info hpt302 __devinitconst = {
32143 .chip_name = "HPT302",
32144 .chip_type = HPT302,
32145 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32146@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
32147 .timings = &hpt37x_timings
32148 };
32149
32150-static const struct hpt_info hpt371 __devinitdata = {
32151+static const struct hpt_info hpt371 __devinitconst = {
32152 .chip_name = "HPT371",
32153 .chip_type = HPT371,
32154 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32155@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
32156 .timings = &hpt37x_timings
32157 };
32158
32159-static const struct hpt_info hpt372n __devinitdata = {
32160+static const struct hpt_info hpt372n __devinitconst = {
32161 .chip_name = "HPT372N",
32162 .chip_type = HPT372N,
32163 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32164@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
32165 .timings = &hpt37x_timings
32166 };
32167
32168-static const struct hpt_info hpt302n __devinitdata = {
32169+static const struct hpt_info hpt302n __devinitconst = {
32170 .chip_name = "HPT302N",
32171 .chip_type = HPT302N,
32172 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32173@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
32174 .timings = &hpt37x_timings
32175 };
32176
32177-static const struct hpt_info hpt371n __devinitdata = {
32178+static const struct hpt_info hpt371n __devinitconst = {
32179 .chip_name = "HPT371N",
32180 .chip_type = HPT371N,
32181 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
32182@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
32183 .dma_sff_read_status = ide_dma_sff_read_status,
32184 };
32185
32186-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
32187+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
32188 { /* 0: HPT36x */
32189 .name = DRV_NAME,
32190 .init_chipset = init_chipset_hpt366,
32191diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
32192index 8126824..55a2798 100644
32193--- a/drivers/ide/ide-cd.c
32194+++ b/drivers/ide/ide-cd.c
32195@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
32196 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
32197 if ((unsigned long)buf & alignment
32198 || blk_rq_bytes(rq) & q->dma_pad_mask
32199- || object_is_on_stack(buf))
32200+ || object_starts_on_stack(buf))
32201 drive->dma = 0;
32202 }
32203 }
32204diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
32205index 7f56b73..dab5b67 100644
32206--- a/drivers/ide/ide-pci-generic.c
32207+++ b/drivers/ide/ide-pci-generic.c
32208@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
32209 .udma_mask = ATA_UDMA6, \
32210 }
32211
32212-static const struct ide_port_info generic_chipsets[] __devinitdata = {
32213+static const struct ide_port_info generic_chipsets[] __devinitconst = {
32214 /* 0: Unknown */
32215 DECLARE_GENERIC_PCI_DEV(0),
32216
32217diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
32218index 560e66d..d5dd180 100644
32219--- a/drivers/ide/it8172.c
32220+++ b/drivers/ide/it8172.c
32221@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
32222 .set_dma_mode = it8172_set_dma_mode,
32223 };
32224
32225-static const struct ide_port_info it8172_port_info __devinitdata = {
32226+static const struct ide_port_info it8172_port_info __devinitconst = {
32227 .name = DRV_NAME,
32228 .port_ops = &it8172_port_ops,
32229 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
32230diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
32231index 46816ba..1847aeb 100644
32232--- a/drivers/ide/it8213.c
32233+++ b/drivers/ide/it8213.c
32234@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
32235 .cable_detect = it8213_cable_detect,
32236 };
32237
32238-static const struct ide_port_info it8213_chipset __devinitdata = {
32239+static const struct ide_port_info it8213_chipset __devinitconst = {
32240 .name = DRV_NAME,
32241 .enablebits = { {0x41, 0x80, 0x80} },
32242 .port_ops = &it8213_port_ops,
32243diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
32244index 2e3169f..c5611db 100644
32245--- a/drivers/ide/it821x.c
32246+++ b/drivers/ide/it821x.c
32247@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
32248 .cable_detect = it821x_cable_detect,
32249 };
32250
32251-static const struct ide_port_info it821x_chipset __devinitdata = {
32252+static const struct ide_port_info it821x_chipset __devinitconst = {
32253 .name = DRV_NAME,
32254 .init_chipset = init_chipset_it821x,
32255 .init_hwif = init_hwif_it821x,
32256diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
32257index 74c2c4a..efddd7d 100644
32258--- a/drivers/ide/jmicron.c
32259+++ b/drivers/ide/jmicron.c
32260@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
32261 .cable_detect = jmicron_cable_detect,
32262 };
32263
32264-static const struct ide_port_info jmicron_chipset __devinitdata = {
32265+static const struct ide_port_info jmicron_chipset __devinitconst = {
32266 .name = DRV_NAME,
32267 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
32268 .port_ops = &jmicron_port_ops,
32269diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
32270index 95327a2..73f78d8 100644
32271--- a/drivers/ide/ns87415.c
32272+++ b/drivers/ide/ns87415.c
32273@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
32274 .dma_sff_read_status = superio_dma_sff_read_status,
32275 };
32276
32277-static const struct ide_port_info ns87415_chipset __devinitdata = {
32278+static const struct ide_port_info ns87415_chipset __devinitconst = {
32279 .name = DRV_NAME,
32280 .init_hwif = init_hwif_ns87415,
32281 .tp_ops = &ns87415_tp_ops,
32282diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
32283index 1a53a4c..39edc66 100644
32284--- a/drivers/ide/opti621.c
32285+++ b/drivers/ide/opti621.c
32286@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
32287 .set_pio_mode = opti621_set_pio_mode,
32288 };
32289
32290-static const struct ide_port_info opti621_chipset __devinitdata = {
32291+static const struct ide_port_info opti621_chipset __devinitconst = {
32292 .name = DRV_NAME,
32293 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
32294 .port_ops = &opti621_port_ops,
32295diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
32296index 9546fe2..2e5ceb6 100644
32297--- a/drivers/ide/pdc202xx_new.c
32298+++ b/drivers/ide/pdc202xx_new.c
32299@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
32300 .udma_mask = udma, \
32301 }
32302
32303-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
32304+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
32305 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
32306 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
32307 };
32308diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
32309index 3a35ec6..5634510 100644
32310--- a/drivers/ide/pdc202xx_old.c
32311+++ b/drivers/ide/pdc202xx_old.c
32312@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
32313 .max_sectors = sectors, \
32314 }
32315
32316-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
32317+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
32318 { /* 0: PDC20246 */
32319 .name = DRV_NAME,
32320 .init_chipset = init_chipset_pdc202xx,
32321diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
32322index 1892e81..fe0fd60 100644
32323--- a/drivers/ide/piix.c
32324+++ b/drivers/ide/piix.c
32325@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
32326 .udma_mask = udma, \
32327 }
32328
32329-static const struct ide_port_info piix_pci_info[] __devinitdata = {
32330+static const struct ide_port_info piix_pci_info[] __devinitconst = {
32331 /* 0: MPIIX */
32332 { /*
32333 * MPIIX actually has only a single IDE channel mapped to
32334diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
32335index a6414a8..c04173e 100644
32336--- a/drivers/ide/rz1000.c
32337+++ b/drivers/ide/rz1000.c
32338@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
32339 }
32340 }
32341
32342-static const struct ide_port_info rz1000_chipset __devinitdata = {
32343+static const struct ide_port_info rz1000_chipset __devinitconst = {
32344 .name = DRV_NAME,
32345 .host_flags = IDE_HFLAG_NO_DMA,
32346 };
32347diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
32348index 356b9b5..d4758eb 100644
32349--- a/drivers/ide/sc1200.c
32350+++ b/drivers/ide/sc1200.c
32351@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
32352 .dma_sff_read_status = ide_dma_sff_read_status,
32353 };
32354
32355-static const struct ide_port_info sc1200_chipset __devinitdata = {
32356+static const struct ide_port_info sc1200_chipset __devinitconst = {
32357 .name = DRV_NAME,
32358 .port_ops = &sc1200_port_ops,
32359 .dma_ops = &sc1200_dma_ops,
32360diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
32361index b7f5b0c..9701038 100644
32362--- a/drivers/ide/scc_pata.c
32363+++ b/drivers/ide/scc_pata.c
32364@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
32365 .dma_sff_read_status = scc_dma_sff_read_status,
32366 };
32367
32368-static const struct ide_port_info scc_chipset __devinitdata = {
32369+static const struct ide_port_info scc_chipset __devinitconst = {
32370 .name = "sccIDE",
32371 .init_iops = init_iops_scc,
32372 .init_dma = scc_init_dma,
32373diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32374index 35fb8da..24d72ef 100644
32375--- a/drivers/ide/serverworks.c
32376+++ b/drivers/ide/serverworks.c
32377@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
32378 .cable_detect = svwks_cable_detect,
32379 };
32380
32381-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32382+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32383 { /* 0: OSB4 */
32384 .name = DRV_NAME,
32385 .init_chipset = init_chipset_svwks,
32386diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32387index ddeda44..46f7e30 100644
32388--- a/drivers/ide/siimage.c
32389+++ b/drivers/ide/siimage.c
32390@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
32391 .udma_mask = ATA_UDMA6, \
32392 }
32393
32394-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32395+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32396 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32397 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32398 };
32399diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32400index 4a00225..09e61b4 100644
32401--- a/drivers/ide/sis5513.c
32402+++ b/drivers/ide/sis5513.c
32403@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
32404 .cable_detect = sis_cable_detect,
32405 };
32406
32407-static const struct ide_port_info sis5513_chipset __devinitdata = {
32408+static const struct ide_port_info sis5513_chipset __devinitconst = {
32409 .name = DRV_NAME,
32410 .init_chipset = init_chipset_sis5513,
32411 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32412diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32413index f21dc2a..d051cd2 100644
32414--- a/drivers/ide/sl82c105.c
32415+++ b/drivers/ide/sl82c105.c
32416@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32417 .dma_sff_read_status = ide_dma_sff_read_status,
32418 };
32419
32420-static const struct ide_port_info sl82c105_chipset __devinitdata = {
32421+static const struct ide_port_info sl82c105_chipset __devinitconst = {
32422 .name = DRV_NAME,
32423 .init_chipset = init_chipset_sl82c105,
32424 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32425diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32426index 864ffe0..863a5e9 100644
32427--- a/drivers/ide/slc90e66.c
32428+++ b/drivers/ide/slc90e66.c
32429@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32430 .cable_detect = slc90e66_cable_detect,
32431 };
32432
32433-static const struct ide_port_info slc90e66_chipset __devinitdata = {
32434+static const struct ide_port_info slc90e66_chipset __devinitconst = {
32435 .name = DRV_NAME,
32436 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32437 .port_ops = &slc90e66_port_ops,
32438diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32439index 4799d5c..1794678 100644
32440--- a/drivers/ide/tc86c001.c
32441+++ b/drivers/ide/tc86c001.c
32442@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32443 .dma_sff_read_status = ide_dma_sff_read_status,
32444 };
32445
32446-static const struct ide_port_info tc86c001_chipset __devinitdata = {
32447+static const struct ide_port_info tc86c001_chipset __devinitconst = {
32448 .name = DRV_NAME,
32449 .init_hwif = init_hwif_tc86c001,
32450 .port_ops = &tc86c001_port_ops,
32451diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32452index 281c914..55ce1b8 100644
32453--- a/drivers/ide/triflex.c
32454+++ b/drivers/ide/triflex.c
32455@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32456 .set_dma_mode = triflex_set_mode,
32457 };
32458
32459-static const struct ide_port_info triflex_device __devinitdata = {
32460+static const struct ide_port_info triflex_device __devinitconst = {
32461 .name = DRV_NAME,
32462 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32463 .port_ops = &triflex_port_ops,
32464diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32465index 4b42ca0..e494a98 100644
32466--- a/drivers/ide/trm290.c
32467+++ b/drivers/ide/trm290.c
32468@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32469 .dma_check = trm290_dma_check,
32470 };
32471
32472-static const struct ide_port_info trm290_chipset __devinitdata = {
32473+static const struct ide_port_info trm290_chipset __devinitconst = {
32474 .name = DRV_NAME,
32475 .init_hwif = init_hwif_trm290,
32476 .tp_ops = &trm290_tp_ops,
32477diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32478index f46f49c..eb77678 100644
32479--- a/drivers/ide/via82cxxx.c
32480+++ b/drivers/ide/via82cxxx.c
32481@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32482 .cable_detect = via82cxxx_cable_detect,
32483 };
32484
32485-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32486+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32487 .name = DRV_NAME,
32488 .init_chipset = init_chipset_via82cxxx,
32489 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32490diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32491index 73d4531..c90cd2d 100644
32492--- a/drivers/ieee802154/fakehard.c
32493+++ b/drivers/ieee802154/fakehard.c
32494@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32495 phy->transmit_power = 0xbf;
32496
32497 dev->netdev_ops = &fake_ops;
32498- dev->ml_priv = &fake_mlme;
32499+ dev->ml_priv = (void *)&fake_mlme;
32500
32501 priv = netdev_priv(dev);
32502 priv->phy = phy;
32503diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32504index c889aae..6cf5aa7 100644
32505--- a/drivers/infiniband/core/cm.c
32506+++ b/drivers/infiniband/core/cm.c
32507@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32508
32509 struct cm_counter_group {
32510 struct kobject obj;
32511- atomic_long_t counter[CM_ATTR_COUNT];
32512+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32513 };
32514
32515 struct cm_counter_attribute {
32516@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32517 struct ib_mad_send_buf *msg = NULL;
32518 int ret;
32519
32520- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32521+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32522 counter[CM_REQ_COUNTER]);
32523
32524 /* Quick state check to discard duplicate REQs. */
32525@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32526 if (!cm_id_priv)
32527 return;
32528
32529- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32530+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32531 counter[CM_REP_COUNTER]);
32532 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32533 if (ret)
32534@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32535 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32536 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32537 spin_unlock_irq(&cm_id_priv->lock);
32538- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32539+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32540 counter[CM_RTU_COUNTER]);
32541 goto out;
32542 }
32543@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32544 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32545 dreq_msg->local_comm_id);
32546 if (!cm_id_priv) {
32547- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32548+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32549 counter[CM_DREQ_COUNTER]);
32550 cm_issue_drep(work->port, work->mad_recv_wc);
32551 return -EINVAL;
32552@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32553 case IB_CM_MRA_REP_RCVD:
32554 break;
32555 case IB_CM_TIMEWAIT:
32556- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32557+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32558 counter[CM_DREQ_COUNTER]);
32559 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32560 goto unlock;
32561@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32562 cm_free_msg(msg);
32563 goto deref;
32564 case IB_CM_DREQ_RCVD:
32565- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32566+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32567 counter[CM_DREQ_COUNTER]);
32568 goto unlock;
32569 default:
32570@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32571 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32572 cm_id_priv->msg, timeout)) {
32573 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32574- atomic_long_inc(&work->port->
32575+ atomic_long_inc_unchecked(&work->port->
32576 counter_group[CM_RECV_DUPLICATES].
32577 counter[CM_MRA_COUNTER]);
32578 goto out;
32579@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32580 break;
32581 case IB_CM_MRA_REQ_RCVD:
32582 case IB_CM_MRA_REP_RCVD:
32583- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32584+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32585 counter[CM_MRA_COUNTER]);
32586 /* fall through */
32587 default:
32588@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32589 case IB_CM_LAP_IDLE:
32590 break;
32591 case IB_CM_MRA_LAP_SENT:
32592- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32593+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32594 counter[CM_LAP_COUNTER]);
32595 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32596 goto unlock;
32597@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32598 cm_free_msg(msg);
32599 goto deref;
32600 case IB_CM_LAP_RCVD:
32601- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32602+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32603 counter[CM_LAP_COUNTER]);
32604 goto unlock;
32605 default:
32606@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32607 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32608 if (cur_cm_id_priv) {
32609 spin_unlock_irq(&cm.lock);
32610- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32611+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32612 counter[CM_SIDR_REQ_COUNTER]);
32613 goto out; /* Duplicate message. */
32614 }
32615@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32616 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32617 msg->retries = 1;
32618
32619- atomic_long_add(1 + msg->retries,
32620+ atomic_long_add_unchecked(1 + msg->retries,
32621 &port->counter_group[CM_XMIT].counter[attr_index]);
32622 if (msg->retries)
32623- atomic_long_add(msg->retries,
32624+ atomic_long_add_unchecked(msg->retries,
32625 &port->counter_group[CM_XMIT_RETRIES].
32626 counter[attr_index]);
32627
32628@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32629 }
32630
32631 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32632- atomic_long_inc(&port->counter_group[CM_RECV].
32633+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32634 counter[attr_id - CM_ATTR_ID_OFFSET]);
32635
32636 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32637@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32638 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32639
32640 return sprintf(buf, "%ld\n",
32641- atomic_long_read(&group->counter[cm_attr->index]));
32642+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32643 }
32644
32645 static const struct sysfs_ops cm_counter_ops = {
32646diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32647index 176c8f9..2627b62 100644
32648--- a/drivers/infiniband/core/fmr_pool.c
32649+++ b/drivers/infiniband/core/fmr_pool.c
32650@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32651
32652 struct task_struct *thread;
32653
32654- atomic_t req_ser;
32655- atomic_t flush_ser;
32656+ atomic_unchecked_t req_ser;
32657+ atomic_unchecked_t flush_ser;
32658
32659 wait_queue_head_t force_wait;
32660 };
32661@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32662 struct ib_fmr_pool *pool = pool_ptr;
32663
32664 do {
32665- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32666+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32667 ib_fmr_batch_release(pool);
32668
32669- atomic_inc(&pool->flush_ser);
32670+ atomic_inc_unchecked(&pool->flush_ser);
32671 wake_up_interruptible(&pool->force_wait);
32672
32673 if (pool->flush_function)
32674@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32675 }
32676
32677 set_current_state(TASK_INTERRUPTIBLE);
32678- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32679+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32680 !kthread_should_stop())
32681 schedule();
32682 __set_current_state(TASK_RUNNING);
32683@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32684 pool->dirty_watermark = params->dirty_watermark;
32685 pool->dirty_len = 0;
32686 spin_lock_init(&pool->pool_lock);
32687- atomic_set(&pool->req_ser, 0);
32688- atomic_set(&pool->flush_ser, 0);
32689+ atomic_set_unchecked(&pool->req_ser, 0);
32690+ atomic_set_unchecked(&pool->flush_ser, 0);
32691 init_waitqueue_head(&pool->force_wait);
32692
32693 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32694@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32695 }
32696 spin_unlock_irq(&pool->pool_lock);
32697
32698- serial = atomic_inc_return(&pool->req_ser);
32699+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32700 wake_up_process(pool->thread);
32701
32702 if (wait_event_interruptible(pool->force_wait,
32703- atomic_read(&pool->flush_ser) - serial >= 0))
32704+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32705 return -EINTR;
32706
32707 return 0;
32708@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32709 } else {
32710 list_add_tail(&fmr->list, &pool->dirty_list);
32711 if (++pool->dirty_len >= pool->dirty_watermark) {
32712- atomic_inc(&pool->req_ser);
32713+ atomic_inc_unchecked(&pool->req_ser);
32714 wake_up_process(pool->thread);
32715 }
32716 }
32717diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32718index 40c8353..946b0e4 100644
32719--- a/drivers/infiniband/hw/cxgb4/mem.c
32720+++ b/drivers/infiniband/hw/cxgb4/mem.c
32721@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32722 int err;
32723 struct fw_ri_tpte tpt;
32724 u32 stag_idx;
32725- static atomic_t key;
32726+ static atomic_unchecked_t key;
32727
32728 if (c4iw_fatal_error(rdev))
32729 return -EIO;
32730@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32731 &rdev->resource.tpt_fifo_lock);
32732 if (!stag_idx)
32733 return -ENOMEM;
32734- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32735+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32736 }
32737 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32738 __func__, stag_state, type, pdid, stag_idx);
32739diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
32740index a4de9d5..5fa20c3 100644
32741--- a/drivers/infiniband/hw/ipath/ipath_fs.c
32742+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
32743@@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = {
32744 };
32745
32746 static ssize_t flash_read(struct file *file, char __user *buf,
32747+ size_t count, loff_t *ppos) __size_overflow(3);
32748+static ssize_t flash_read(struct file *file, char __user *buf,
32749 size_t count, loff_t *ppos)
32750 {
32751 struct ipath_devdata *dd;
32752@@ -177,6 +179,8 @@ bail:
32753 }
32754
32755 static ssize_t flash_write(struct file *file, const char __user *buf,
32756+ size_t count, loff_t *ppos) __size_overflow(3);
32757+static ssize_t flash_write(struct file *file, const char __user *buf,
32758 size_t count, loff_t *ppos)
32759 {
32760 struct ipath_devdata *dd;
32761diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32762index 79b3dbc..96e5fcc 100644
32763--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32764+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32765@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32766 struct ib_atomic_eth *ateth;
32767 struct ipath_ack_entry *e;
32768 u64 vaddr;
32769- atomic64_t *maddr;
32770+ atomic64_unchecked_t *maddr;
32771 u64 sdata;
32772 u32 rkey;
32773 u8 next;
32774@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32775 IB_ACCESS_REMOTE_ATOMIC)))
32776 goto nack_acc_unlck;
32777 /* Perform atomic OP and save result. */
32778- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32779+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32780 sdata = be64_to_cpu(ateth->swap_data);
32781 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32782 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32783- (u64) atomic64_add_return(sdata, maddr) - sdata :
32784+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32785 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32786 be64_to_cpu(ateth->compare_data),
32787 sdata);
32788diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32789index 1f95bba..9530f87 100644
32790--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32791+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32792@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32793 unsigned long flags;
32794 struct ib_wc wc;
32795 u64 sdata;
32796- atomic64_t *maddr;
32797+ atomic64_unchecked_t *maddr;
32798 enum ib_wc_status send_status;
32799
32800 /*
32801@@ -382,11 +382,11 @@ again:
32802 IB_ACCESS_REMOTE_ATOMIC)))
32803 goto acc_err;
32804 /* Perform atomic OP and save result. */
32805- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32806+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32807 sdata = wqe->wr.wr.atomic.compare_add;
32808 *(u64 *) sqp->s_sge.sge.vaddr =
32809 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32810- (u64) atomic64_add_return(sdata, maddr) - sdata :
32811+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32812 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32813 sdata, wqe->wr.wr.atomic.swap);
32814 goto send_comp;
32815diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32816index 7140199..da60063 100644
32817--- a/drivers/infiniband/hw/nes/nes.c
32818+++ b/drivers/infiniband/hw/nes/nes.c
32819@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32820 LIST_HEAD(nes_adapter_list);
32821 static LIST_HEAD(nes_dev_list);
32822
32823-atomic_t qps_destroyed;
32824+atomic_unchecked_t qps_destroyed;
32825
32826 static unsigned int ee_flsh_adapter;
32827 static unsigned int sysfs_nonidx_addr;
32828@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32829 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32830 struct nes_adapter *nesadapter = nesdev->nesadapter;
32831
32832- atomic_inc(&qps_destroyed);
32833+ atomic_inc_unchecked(&qps_destroyed);
32834
32835 /* Free the control structures */
32836
32837diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32838index c438e46..ca30356 100644
32839--- a/drivers/infiniband/hw/nes/nes.h
32840+++ b/drivers/infiniband/hw/nes/nes.h
32841@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32842 extern unsigned int wqm_quanta;
32843 extern struct list_head nes_adapter_list;
32844
32845-extern atomic_t cm_connects;
32846-extern atomic_t cm_accepts;
32847-extern atomic_t cm_disconnects;
32848-extern atomic_t cm_closes;
32849-extern atomic_t cm_connecteds;
32850-extern atomic_t cm_connect_reqs;
32851-extern atomic_t cm_rejects;
32852-extern atomic_t mod_qp_timouts;
32853-extern atomic_t qps_created;
32854-extern atomic_t qps_destroyed;
32855-extern atomic_t sw_qps_destroyed;
32856+extern atomic_unchecked_t cm_connects;
32857+extern atomic_unchecked_t cm_accepts;
32858+extern atomic_unchecked_t cm_disconnects;
32859+extern atomic_unchecked_t cm_closes;
32860+extern atomic_unchecked_t cm_connecteds;
32861+extern atomic_unchecked_t cm_connect_reqs;
32862+extern atomic_unchecked_t cm_rejects;
32863+extern atomic_unchecked_t mod_qp_timouts;
32864+extern atomic_unchecked_t qps_created;
32865+extern atomic_unchecked_t qps_destroyed;
32866+extern atomic_unchecked_t sw_qps_destroyed;
32867 extern u32 mh_detected;
32868 extern u32 mh_pauses_sent;
32869 extern u32 cm_packets_sent;
32870@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32871 extern u32 cm_packets_received;
32872 extern u32 cm_packets_dropped;
32873 extern u32 cm_packets_retrans;
32874-extern atomic_t cm_listens_created;
32875-extern atomic_t cm_listens_destroyed;
32876+extern atomic_unchecked_t cm_listens_created;
32877+extern atomic_unchecked_t cm_listens_destroyed;
32878 extern u32 cm_backlog_drops;
32879-extern atomic_t cm_loopbacks;
32880-extern atomic_t cm_nodes_created;
32881-extern atomic_t cm_nodes_destroyed;
32882-extern atomic_t cm_accel_dropped_pkts;
32883-extern atomic_t cm_resets_recvd;
32884-extern atomic_t pau_qps_created;
32885-extern atomic_t pau_qps_destroyed;
32886+extern atomic_unchecked_t cm_loopbacks;
32887+extern atomic_unchecked_t cm_nodes_created;
32888+extern atomic_unchecked_t cm_nodes_destroyed;
32889+extern atomic_unchecked_t cm_accel_dropped_pkts;
32890+extern atomic_unchecked_t cm_resets_recvd;
32891+extern atomic_unchecked_t pau_qps_created;
32892+extern atomic_unchecked_t pau_qps_destroyed;
32893
32894 extern u32 int_mod_timer_init;
32895 extern u32 int_mod_cq_depth_256;
32896diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32897index a4972ab..1bcfc31 100644
32898--- a/drivers/infiniband/hw/nes/nes_cm.c
32899+++ b/drivers/infiniband/hw/nes/nes_cm.c
32900@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32901 u32 cm_packets_retrans;
32902 u32 cm_packets_created;
32903 u32 cm_packets_received;
32904-atomic_t cm_listens_created;
32905-atomic_t cm_listens_destroyed;
32906+atomic_unchecked_t cm_listens_created;
32907+atomic_unchecked_t cm_listens_destroyed;
32908 u32 cm_backlog_drops;
32909-atomic_t cm_loopbacks;
32910-atomic_t cm_nodes_created;
32911-atomic_t cm_nodes_destroyed;
32912-atomic_t cm_accel_dropped_pkts;
32913-atomic_t cm_resets_recvd;
32914+atomic_unchecked_t cm_loopbacks;
32915+atomic_unchecked_t cm_nodes_created;
32916+atomic_unchecked_t cm_nodes_destroyed;
32917+atomic_unchecked_t cm_accel_dropped_pkts;
32918+atomic_unchecked_t cm_resets_recvd;
32919
32920 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32921 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32922@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32923
32924 static struct nes_cm_core *g_cm_core;
32925
32926-atomic_t cm_connects;
32927-atomic_t cm_accepts;
32928-atomic_t cm_disconnects;
32929-atomic_t cm_closes;
32930-atomic_t cm_connecteds;
32931-atomic_t cm_connect_reqs;
32932-atomic_t cm_rejects;
32933+atomic_unchecked_t cm_connects;
32934+atomic_unchecked_t cm_accepts;
32935+atomic_unchecked_t cm_disconnects;
32936+atomic_unchecked_t cm_closes;
32937+atomic_unchecked_t cm_connecteds;
32938+atomic_unchecked_t cm_connect_reqs;
32939+atomic_unchecked_t cm_rejects;
32940
32941 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32942 {
32943@@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32944 kfree(listener);
32945 listener = NULL;
32946 ret = 0;
32947- atomic_inc(&cm_listens_destroyed);
32948+ atomic_inc_unchecked(&cm_listens_destroyed);
32949 } else {
32950 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32951 }
32952@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32953 cm_node->rem_mac);
32954
32955 add_hte_node(cm_core, cm_node);
32956- atomic_inc(&cm_nodes_created);
32957+ atomic_inc_unchecked(&cm_nodes_created);
32958
32959 return cm_node;
32960 }
32961@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32962 }
32963
32964 atomic_dec(&cm_core->node_cnt);
32965- atomic_inc(&cm_nodes_destroyed);
32966+ atomic_inc_unchecked(&cm_nodes_destroyed);
32967 nesqp = cm_node->nesqp;
32968 if (nesqp) {
32969 nesqp->cm_node = NULL;
32970@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32971
32972 static void drop_packet(struct sk_buff *skb)
32973 {
32974- atomic_inc(&cm_accel_dropped_pkts);
32975+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32976 dev_kfree_skb_any(skb);
32977 }
32978
32979@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32980 {
32981
32982 int reset = 0; /* whether to send reset in case of err.. */
32983- atomic_inc(&cm_resets_recvd);
32984+ atomic_inc_unchecked(&cm_resets_recvd);
32985 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32986 " refcnt=%d\n", cm_node, cm_node->state,
32987 atomic_read(&cm_node->ref_count));
32988@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32989 rem_ref_cm_node(cm_node->cm_core, cm_node);
32990 return NULL;
32991 }
32992- atomic_inc(&cm_loopbacks);
32993+ atomic_inc_unchecked(&cm_loopbacks);
32994 loopbackremotenode->loopbackpartner = cm_node;
32995 loopbackremotenode->tcp_cntxt.rcv_wscale =
32996 NES_CM_DEFAULT_RCV_WND_SCALE;
32997@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32998 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32999 else {
33000 rem_ref_cm_node(cm_core, cm_node);
33001- atomic_inc(&cm_accel_dropped_pkts);
33002+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
33003 dev_kfree_skb_any(skb);
33004 }
33005 break;
33006@@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33007
33008 if ((cm_id) && (cm_id->event_handler)) {
33009 if (issue_disconn) {
33010- atomic_inc(&cm_disconnects);
33011+ atomic_inc_unchecked(&cm_disconnects);
33012 cm_event.event = IW_CM_EVENT_DISCONNECT;
33013 cm_event.status = disconn_status;
33014 cm_event.local_addr = cm_id->local_addr;
33015@@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33016 }
33017
33018 if (issue_close) {
33019- atomic_inc(&cm_closes);
33020+ atomic_inc_unchecked(&cm_closes);
33021 nes_disconnect(nesqp, 1);
33022
33023 cm_id->provider_data = nesqp;
33024@@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33025
33026 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
33027 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
33028- atomic_inc(&cm_accepts);
33029+ atomic_inc_unchecked(&cm_accepts);
33030
33031 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
33032 netdev_refcnt_read(nesvnic->netdev));
33033@@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
33034 struct nes_cm_core *cm_core;
33035 u8 *start_buff;
33036
33037- atomic_inc(&cm_rejects);
33038+ atomic_inc_unchecked(&cm_rejects);
33039 cm_node = (struct nes_cm_node *)cm_id->provider_data;
33040 loopback = cm_node->loopbackpartner;
33041 cm_core = cm_node->cm_core;
33042@@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33043 ntohl(cm_id->local_addr.sin_addr.s_addr),
33044 ntohs(cm_id->local_addr.sin_port));
33045
33046- atomic_inc(&cm_connects);
33047+ atomic_inc_unchecked(&cm_connects);
33048 nesqp->active_conn = 1;
33049
33050 /* cache the cm_id in the qp */
33051@@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
33052 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
33053 return err;
33054 }
33055- atomic_inc(&cm_listens_created);
33056+ atomic_inc_unchecked(&cm_listens_created);
33057 }
33058
33059 cm_id->add_ref(cm_id);
33060@@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
33061
33062 if (nesqp->destroyed)
33063 return;
33064- atomic_inc(&cm_connecteds);
33065+ atomic_inc_unchecked(&cm_connecteds);
33066 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
33067 " local port 0x%04X. jiffies = %lu.\n",
33068 nesqp->hwqp.qp_id,
33069@@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
33070
33071 cm_id->add_ref(cm_id);
33072 ret = cm_id->event_handler(cm_id, &cm_event);
33073- atomic_inc(&cm_closes);
33074+ atomic_inc_unchecked(&cm_closes);
33075 cm_event.event = IW_CM_EVENT_CLOSE;
33076 cm_event.status = 0;
33077 cm_event.provider_data = cm_id->provider_data;
33078@@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
33079 return;
33080 cm_id = cm_node->cm_id;
33081
33082- atomic_inc(&cm_connect_reqs);
33083+ atomic_inc_unchecked(&cm_connect_reqs);
33084 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33085 cm_node, cm_id, jiffies);
33086
33087@@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
33088 return;
33089 cm_id = cm_node->cm_id;
33090
33091- atomic_inc(&cm_connect_reqs);
33092+ atomic_inc_unchecked(&cm_connect_reqs);
33093 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33094 cm_node, cm_id, jiffies);
33095
33096diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
33097index 3ba7be3..c81f6ff 100644
33098--- a/drivers/infiniband/hw/nes/nes_mgt.c
33099+++ b/drivers/infiniband/hw/nes/nes_mgt.c
33100@@ -40,8 +40,8 @@
33101 #include "nes.h"
33102 #include "nes_mgt.h"
33103
33104-atomic_t pau_qps_created;
33105-atomic_t pau_qps_destroyed;
33106+atomic_unchecked_t pau_qps_created;
33107+atomic_unchecked_t pau_qps_destroyed;
33108
33109 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
33110 {
33111@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
33112 {
33113 struct sk_buff *skb;
33114 unsigned long flags;
33115- atomic_inc(&pau_qps_destroyed);
33116+ atomic_inc_unchecked(&pau_qps_destroyed);
33117
33118 /* Free packets that have not yet been forwarded */
33119 /* Lock is acquired by skb_dequeue when removing the skb */
33120@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
33121 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
33122 skb_queue_head_init(&nesqp->pau_list);
33123 spin_lock_init(&nesqp->pau_lock);
33124- atomic_inc(&pau_qps_created);
33125+ atomic_inc_unchecked(&pau_qps_created);
33126 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
33127 }
33128
33129diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
33130index f3a3ecf..57d311d 100644
33131--- a/drivers/infiniband/hw/nes/nes_nic.c
33132+++ b/drivers/infiniband/hw/nes/nes_nic.c
33133@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
33134 target_stat_values[++index] = mh_detected;
33135 target_stat_values[++index] = mh_pauses_sent;
33136 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
33137- target_stat_values[++index] = atomic_read(&cm_connects);
33138- target_stat_values[++index] = atomic_read(&cm_accepts);
33139- target_stat_values[++index] = atomic_read(&cm_disconnects);
33140- target_stat_values[++index] = atomic_read(&cm_connecteds);
33141- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
33142- target_stat_values[++index] = atomic_read(&cm_rejects);
33143- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
33144- target_stat_values[++index] = atomic_read(&qps_created);
33145- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
33146- target_stat_values[++index] = atomic_read(&qps_destroyed);
33147- target_stat_values[++index] = atomic_read(&cm_closes);
33148+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
33149+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
33150+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
33151+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
33152+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
33153+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
33154+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
33155+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
33156+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
33157+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
33158+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
33159 target_stat_values[++index] = cm_packets_sent;
33160 target_stat_values[++index] = cm_packets_bounced;
33161 target_stat_values[++index] = cm_packets_created;
33162 target_stat_values[++index] = cm_packets_received;
33163 target_stat_values[++index] = cm_packets_dropped;
33164 target_stat_values[++index] = cm_packets_retrans;
33165- target_stat_values[++index] = atomic_read(&cm_listens_created);
33166- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
33167+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
33168+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
33169 target_stat_values[++index] = cm_backlog_drops;
33170- target_stat_values[++index] = atomic_read(&cm_loopbacks);
33171- target_stat_values[++index] = atomic_read(&cm_nodes_created);
33172- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
33173- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
33174- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
33175+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
33176+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
33177+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
33178+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
33179+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
33180 target_stat_values[++index] = nesadapter->free_4kpbl;
33181 target_stat_values[++index] = nesadapter->free_256pbl;
33182 target_stat_values[++index] = int_mod_timer_init;
33183 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
33184 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
33185 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
33186- target_stat_values[++index] = atomic_read(&pau_qps_created);
33187- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
33188+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
33189+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
33190 }
33191
33192 /**
33193diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
33194index 0927b5c..ed67986 100644
33195--- a/drivers/infiniband/hw/nes/nes_verbs.c
33196+++ b/drivers/infiniband/hw/nes/nes_verbs.c
33197@@ -46,9 +46,9 @@
33198
33199 #include <rdma/ib_umem.h>
33200
33201-atomic_t mod_qp_timouts;
33202-atomic_t qps_created;
33203-atomic_t sw_qps_destroyed;
33204+atomic_unchecked_t mod_qp_timouts;
33205+atomic_unchecked_t qps_created;
33206+atomic_unchecked_t sw_qps_destroyed;
33207
33208 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
33209
33210@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
33211 if (init_attr->create_flags)
33212 return ERR_PTR(-EINVAL);
33213
33214- atomic_inc(&qps_created);
33215+ atomic_inc_unchecked(&qps_created);
33216 switch (init_attr->qp_type) {
33217 case IB_QPT_RC:
33218 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
33219@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
33220 struct iw_cm_event cm_event;
33221 int ret = 0;
33222
33223- atomic_inc(&sw_qps_destroyed);
33224+ atomic_inc_unchecked(&sw_qps_destroyed);
33225 nesqp->destroyed = 1;
33226
33227 /* Blow away the connection if it exists. */
33228diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
33229index b881bdc..c2e360c 100644
33230--- a/drivers/infiniband/hw/qib/qib.h
33231+++ b/drivers/infiniband/hw/qib/qib.h
33232@@ -51,6 +51,7 @@
33233 #include <linux/completion.h>
33234 #include <linux/kref.h>
33235 #include <linux/sched.h>
33236+#include <linux/slab.h>
33237
33238 #include "qib_common.h"
33239 #include "qib_verbs.h"
33240diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
33241index 05e0f17..0275789 100644
33242--- a/drivers/infiniband/hw/qib/qib_fs.c
33243+++ b/drivers/infiniband/hw/qib/qib_fs.c
33244@@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = {
33245 };
33246
33247 static ssize_t flash_read(struct file *file, char __user *buf,
33248+ size_t count, loff_t *ppos) __size_overflow(3);
33249+static ssize_t flash_read(struct file *file, char __user *buf,
33250 size_t count, loff_t *ppos)
33251 {
33252 struct qib_devdata *dd;
33253@@ -318,6 +320,8 @@ bail:
33254 }
33255
33256 static ssize_t flash_write(struct file *file, const char __user *buf,
33257+ size_t count, loff_t *ppos) __size_overflow(3);
33258+static ssize_t flash_write(struct file *file, const char __user *buf,
33259 size_t count, loff_t *ppos)
33260 {
33261 struct qib_devdata *dd;
33262diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
33263index c351aa4..e6967c2 100644
33264--- a/drivers/input/gameport/gameport.c
33265+++ b/drivers/input/gameport/gameport.c
33266@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
33267 */
33268 static void gameport_init_port(struct gameport *gameport)
33269 {
33270- static atomic_t gameport_no = ATOMIC_INIT(0);
33271+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
33272
33273 __module_get(THIS_MODULE);
33274
33275 mutex_init(&gameport->drv_mutex);
33276 device_initialize(&gameport->dev);
33277 dev_set_name(&gameport->dev, "gameport%lu",
33278- (unsigned long)atomic_inc_return(&gameport_no) - 1);
33279+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
33280 gameport->dev.bus = &gameport_bus;
33281 gameport->dev.release = gameport_release_port;
33282 if (gameport->parent)
33283diff --git a/drivers/input/input.c b/drivers/input/input.c
33284index 1f78c95..3cddc6c 100644
33285--- a/drivers/input/input.c
33286+++ b/drivers/input/input.c
33287@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
33288 */
33289 int input_register_device(struct input_dev *dev)
33290 {
33291- static atomic_t input_no = ATOMIC_INIT(0);
33292+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
33293 struct input_handler *handler;
33294 const char *path;
33295 int error;
33296@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
33297 dev->setkeycode = input_default_setkeycode;
33298
33299 dev_set_name(&dev->dev, "input%ld",
33300- (unsigned long) atomic_inc_return(&input_no) - 1);
33301+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
33302
33303 error = device_add(&dev->dev);
33304 if (error)
33305diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
33306index b8d8611..7a4a04b 100644
33307--- a/drivers/input/joystick/sidewinder.c
33308+++ b/drivers/input/joystick/sidewinder.c
33309@@ -30,6 +30,7 @@
33310 #include <linux/kernel.h>
33311 #include <linux/module.h>
33312 #include <linux/slab.h>
33313+#include <linux/sched.h>
33314 #include <linux/init.h>
33315 #include <linux/input.h>
33316 #include <linux/gameport.h>
33317diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
33318index fd7a0d5..a4af10c 100644
33319--- a/drivers/input/joystick/xpad.c
33320+++ b/drivers/input/joystick/xpad.c
33321@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
33322
33323 static int xpad_led_probe(struct usb_xpad *xpad)
33324 {
33325- static atomic_t led_seq = ATOMIC_INIT(0);
33326+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
33327 long led_no;
33328 struct xpad_led *led;
33329 struct led_classdev *led_cdev;
33330@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
33331 if (!led)
33332 return -ENOMEM;
33333
33334- led_no = (long)atomic_inc_return(&led_seq) - 1;
33335+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
33336
33337 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
33338 led->xpad = xpad;
33339diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
33340index 0110b5a..d3ad144 100644
33341--- a/drivers/input/mousedev.c
33342+++ b/drivers/input/mousedev.c
33343@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
33344
33345 spin_unlock_irq(&client->packet_lock);
33346
33347- if (copy_to_user(buffer, data, count))
33348+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
33349 return -EFAULT;
33350
33351 return count;
33352diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
33353index ba70058..571d25d 100644
33354--- a/drivers/input/serio/serio.c
33355+++ b/drivers/input/serio/serio.c
33356@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
33357 */
33358 static void serio_init_port(struct serio *serio)
33359 {
33360- static atomic_t serio_no = ATOMIC_INIT(0);
33361+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
33362
33363 __module_get(THIS_MODULE);
33364
33365@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
33366 mutex_init(&serio->drv_mutex);
33367 device_initialize(&serio->dev);
33368 dev_set_name(&serio->dev, "serio%ld",
33369- (long)atomic_inc_return(&serio_no) - 1);
33370+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
33371 serio->dev.bus = &serio_bus;
33372 serio->dev.release = serio_release_port;
33373 serio->dev.groups = serio_device_attr_groups;
33374diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
33375index e44933d..9ba484a 100644
33376--- a/drivers/isdn/capi/capi.c
33377+++ b/drivers/isdn/capi/capi.c
33378@@ -83,8 +83,8 @@ struct capiminor {
33379
33380 struct capi20_appl *ap;
33381 u32 ncci;
33382- atomic_t datahandle;
33383- atomic_t msgid;
33384+ atomic_unchecked_t datahandle;
33385+ atomic_unchecked_t msgid;
33386
33387 struct tty_port port;
33388 int ttyinstop;
33389@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
33390 capimsg_setu16(s, 2, mp->ap->applid);
33391 capimsg_setu8 (s, 4, CAPI_DATA_B3);
33392 capimsg_setu8 (s, 5, CAPI_RESP);
33393- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
33394+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
33395 capimsg_setu32(s, 8, mp->ncci);
33396 capimsg_setu16(s, 12, datahandle);
33397 }
33398@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
33399 mp->outbytes -= len;
33400 spin_unlock_bh(&mp->outlock);
33401
33402- datahandle = atomic_inc_return(&mp->datahandle);
33403+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
33404 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
33405 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33406 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33407 capimsg_setu16(skb->data, 2, mp->ap->applid);
33408 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33409 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33410- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33411+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33412 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33413 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33414 capimsg_setu16(skb->data, 16, len); /* Data length */
33415diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
33416index db621db..825ea1a 100644
33417--- a/drivers/isdn/gigaset/common.c
33418+++ b/drivers/isdn/gigaset/common.c
33419@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
33420 cs->commands_pending = 0;
33421 cs->cur_at_seq = 0;
33422 cs->gotfwver = -1;
33423- cs->open_count = 0;
33424+ local_set(&cs->open_count, 0);
33425 cs->dev = NULL;
33426 cs->tty = NULL;
33427 cs->tty_dev = NULL;
33428diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
33429index 212efaf..f187c6b 100644
33430--- a/drivers/isdn/gigaset/gigaset.h
33431+++ b/drivers/isdn/gigaset/gigaset.h
33432@@ -35,6 +35,7 @@
33433 #include <linux/tty_driver.h>
33434 #include <linux/list.h>
33435 #include <linux/atomic.h>
33436+#include <asm/local.h>
33437
33438 #define GIG_VERSION {0, 5, 0, 0}
33439 #define GIG_COMPAT {0, 4, 0, 0}
33440@@ -433,7 +434,7 @@ struct cardstate {
33441 spinlock_t cmdlock;
33442 unsigned curlen, cmdbytes;
33443
33444- unsigned open_count;
33445+ local_t open_count;
33446 struct tty_struct *tty;
33447 struct tasklet_struct if_wake_tasklet;
33448 unsigned control_state;
33449diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
33450index ee0a549..a7c9798 100644
33451--- a/drivers/isdn/gigaset/interface.c
33452+++ b/drivers/isdn/gigaset/interface.c
33453@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
33454 }
33455 tty->driver_data = cs;
33456
33457- ++cs->open_count;
33458-
33459- if (cs->open_count == 1) {
33460+ if (local_inc_return(&cs->open_count) == 1) {
33461 spin_lock_irqsave(&cs->lock, flags);
33462 cs->tty = tty;
33463 spin_unlock_irqrestore(&cs->lock, flags);
33464@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33465
33466 if (!cs->connected)
33467 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33468- else if (!cs->open_count)
33469+ else if (!local_read(&cs->open_count))
33470 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33471 else {
33472- if (!--cs->open_count) {
33473+ if (!local_dec_return(&cs->open_count)) {
33474 spin_lock_irqsave(&cs->lock, flags);
33475 cs->tty = NULL;
33476 spin_unlock_irqrestore(&cs->lock, flags);
33477@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
33478 if (!cs->connected) {
33479 gig_dbg(DEBUG_IF, "not connected");
33480 retval = -ENODEV;
33481- } else if (!cs->open_count)
33482+ } else if (!local_read(&cs->open_count))
33483 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33484 else {
33485 retval = 0;
33486@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
33487 retval = -ENODEV;
33488 goto done;
33489 }
33490- if (!cs->open_count) {
33491+ if (!local_read(&cs->open_count)) {
33492 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33493 retval = -ENODEV;
33494 goto done;
33495@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
33496 if (!cs->connected) {
33497 gig_dbg(DEBUG_IF, "not connected");
33498 retval = -ENODEV;
33499- } else if (!cs->open_count)
33500+ } else if (!local_read(&cs->open_count))
33501 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33502 else if (cs->mstate != MS_LOCKED) {
33503 dev_warn(cs->dev, "can't write to unlocked device\n");
33504@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
33505
33506 if (!cs->connected)
33507 gig_dbg(DEBUG_IF, "not connected");
33508- else if (!cs->open_count)
33509+ else if (!local_read(&cs->open_count))
33510 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33511 else if (cs->mstate != MS_LOCKED)
33512 dev_warn(cs->dev, "can't write to unlocked device\n");
33513@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
33514
33515 if (!cs->connected)
33516 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33517- else if (!cs->open_count)
33518+ else if (!local_read(&cs->open_count))
33519 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33520 else
33521 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33522@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
33523
33524 if (!cs->connected)
33525 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33526- else if (!cs->open_count)
33527+ else if (!local_read(&cs->open_count))
33528 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33529 else
33530 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33531@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
33532 goto out;
33533 }
33534
33535- if (!cs->open_count) {
33536+ if (!local_read(&cs->open_count)) {
33537 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33538 goto out;
33539 }
33540diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33541index 2a57da59..e7a12ed 100644
33542--- a/drivers/isdn/hardware/avm/b1.c
33543+++ b/drivers/isdn/hardware/avm/b1.c
33544@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
33545 }
33546 if (left) {
33547 if (t4file->user) {
33548- if (copy_from_user(buf, dp, left))
33549+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33550 return -EFAULT;
33551 } else {
33552 memcpy(buf, dp, left);
33553@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
33554 }
33555 if (left) {
33556 if (config->user) {
33557- if (copy_from_user(buf, dp, left))
33558+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33559 return -EFAULT;
33560 } else {
33561 memcpy(buf, dp, left);
33562diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33563index 85784a7..a19ca98 100644
33564--- a/drivers/isdn/hardware/eicon/divasync.h
33565+++ b/drivers/isdn/hardware/eicon/divasync.h
33566@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33567 } diva_didd_add_adapter_t;
33568 typedef struct _diva_didd_remove_adapter {
33569 IDI_CALL p_request;
33570-} diva_didd_remove_adapter_t;
33571+} __no_const diva_didd_remove_adapter_t;
33572 typedef struct _diva_didd_read_adapter_array {
33573 void * buffer;
33574 dword length;
33575diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33576index a3bd163..8956575 100644
33577--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33578+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33579@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33580 typedef struct _diva_os_idi_adapter_interface {
33581 diva_init_card_proc_t cleanup_adapter_proc;
33582 diva_cmd_card_proc_t cmd_proc;
33583-} diva_os_idi_adapter_interface_t;
33584+} __no_const diva_os_idi_adapter_interface_t;
33585
33586 typedef struct _diva_os_xdi_adapter {
33587 struct list_head link;
33588diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33589index 1f355bb..43f1fea 100644
33590--- a/drivers/isdn/icn/icn.c
33591+++ b/drivers/isdn/icn/icn.c
33592@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
33593 if (count > len)
33594 count = len;
33595 if (user) {
33596- if (copy_from_user(msg, buf, count))
33597+ if (count > sizeof msg || copy_from_user(msg, buf, count))
33598 return -EFAULT;
33599 } else
33600 memcpy(msg, buf, count);
33601diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33602index b5fdcb7..5b6c59f 100644
33603--- a/drivers/lguest/core.c
33604+++ b/drivers/lguest/core.c
33605@@ -92,9 +92,17 @@ static __init int map_switcher(void)
33606 * it's worked so far. The end address needs +1 because __get_vm_area
33607 * allocates an extra guard page, so we need space for that.
33608 */
33609+
33610+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33611+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33612+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33613+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33614+#else
33615 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33616 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33617 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33618+#endif
33619+
33620 if (!switcher_vma) {
33621 err = -ENOMEM;
33622 printk("lguest: could not map switcher pages high\n");
33623@@ -119,7 +127,7 @@ static __init int map_switcher(void)
33624 * Now the Switcher is mapped at the right address, we can't fail!
33625 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33626 */
33627- memcpy(switcher_vma->addr, start_switcher_text,
33628+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33629 end_switcher_text - start_switcher_text);
33630
33631 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33632diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
33633index ff4a0bc..f5fdd9c 100644
33634--- a/drivers/lguest/lguest_user.c
33635+++ b/drivers/lguest/lguest_user.c
33636@@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
33637 * Once our Guest is initialized, the Launcher makes it run by reading
33638 * from /dev/lguest.
33639 */
33640+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
33641 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
33642 {
33643 struct lguest *lg = file->private_data;
33644diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33645index 3980903..ce25c5e 100644
33646--- a/drivers/lguest/x86/core.c
33647+++ b/drivers/lguest/x86/core.c
33648@@ -59,7 +59,7 @@ static struct {
33649 /* Offset from where switcher.S was compiled to where we've copied it */
33650 static unsigned long switcher_offset(void)
33651 {
33652- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33653+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33654 }
33655
33656 /* This cpu's struct lguest_pages. */
33657@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33658 * These copies are pretty cheap, so we do them unconditionally: */
33659 /* Save the current Host top-level page directory.
33660 */
33661+
33662+#ifdef CONFIG_PAX_PER_CPU_PGD
33663+ pages->state.host_cr3 = read_cr3();
33664+#else
33665 pages->state.host_cr3 = __pa(current->mm->pgd);
33666+#endif
33667+
33668 /*
33669 * Set up the Guest's page tables to see this CPU's pages (and no
33670 * other CPU's pages).
33671@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33672 * compiled-in switcher code and the high-mapped copy we just made.
33673 */
33674 for (i = 0; i < IDT_ENTRIES; i++)
33675- default_idt_entries[i] += switcher_offset();
33676+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33677
33678 /*
33679 * Set up the Switcher's per-cpu areas.
33680@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33681 * it will be undisturbed when we switch. To change %cs and jump we
33682 * need this structure to feed to Intel's "lcall" instruction.
33683 */
33684- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33685+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33686 lguest_entry.segment = LGUEST_CS;
33687
33688 /*
33689diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33690index 40634b0..4f5855e 100644
33691--- a/drivers/lguest/x86/switcher_32.S
33692+++ b/drivers/lguest/x86/switcher_32.S
33693@@ -87,6 +87,7 @@
33694 #include <asm/page.h>
33695 #include <asm/segment.h>
33696 #include <asm/lguest.h>
33697+#include <asm/processor-flags.h>
33698
33699 // We mark the start of the code to copy
33700 // It's placed in .text tho it's never run here
33701@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33702 // Changes type when we load it: damn Intel!
33703 // For after we switch over our page tables
33704 // That entry will be read-only: we'd crash.
33705+
33706+#ifdef CONFIG_PAX_KERNEXEC
33707+ mov %cr0, %edx
33708+ xor $X86_CR0_WP, %edx
33709+ mov %edx, %cr0
33710+#endif
33711+
33712 movl $(GDT_ENTRY_TSS*8), %edx
33713 ltr %dx
33714
33715@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33716 // Let's clear it again for our return.
33717 // The GDT descriptor of the Host
33718 // Points to the table after two "size" bytes
33719- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33720+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33721 // Clear "used" from type field (byte 5, bit 2)
33722- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33723+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33724+
33725+#ifdef CONFIG_PAX_KERNEXEC
33726+ mov %cr0, %eax
33727+ xor $X86_CR0_WP, %eax
33728+ mov %eax, %cr0
33729+#endif
33730
33731 // Once our page table's switched, the Guest is live!
33732 // The Host fades as we run this final step.
33733@@ -295,13 +309,12 @@ deliver_to_host:
33734 // I consulted gcc, and it gave
33735 // These instructions, which I gladly credit:
33736 leal (%edx,%ebx,8), %eax
33737- movzwl (%eax),%edx
33738- movl 4(%eax), %eax
33739- xorw %ax, %ax
33740- orl %eax, %edx
33741+ movl 4(%eax), %edx
33742+ movw (%eax), %dx
33743 // Now the address of the handler's in %edx
33744 // We call it now: its "iret" drops us home.
33745- jmp *%edx
33746+ ljmp $__KERNEL_CS, $1f
33747+1: jmp *%edx
33748
33749 // Every interrupt can come to us here
33750 // But we must truly tell each apart.
33751diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33752index 4daf9e5..b8d1d0f 100644
33753--- a/drivers/macintosh/macio_asic.c
33754+++ b/drivers/macintosh/macio_asic.c
33755@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33756 * MacIO is matched against any Apple ID, it's probe() function
33757 * will then decide wether it applies or not
33758 */
33759-static const struct pci_device_id __devinitdata pci_ids [] = { {
33760+static const struct pci_device_id __devinitconst pci_ids [] = { {
33761 .vendor = PCI_VENDOR_ID_APPLE,
33762 .device = PCI_ANY_ID,
33763 .subvendor = PCI_ANY_ID,
33764diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33765index 1ce84ed..0fdd40a 100644
33766--- a/drivers/md/dm-ioctl.c
33767+++ b/drivers/md/dm-ioctl.c
33768@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33769 cmd == DM_LIST_VERSIONS_CMD)
33770 return 0;
33771
33772- if ((cmd == DM_DEV_CREATE_CMD)) {
33773+ if (cmd == DM_DEV_CREATE_CMD) {
33774 if (!*param->name) {
33775 DMWARN("name not supplied when creating device");
33776 return -EINVAL;
33777diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33778index 9bfd057..5373ff3 100644
33779--- a/drivers/md/dm-raid1.c
33780+++ b/drivers/md/dm-raid1.c
33781@@ -40,7 +40,7 @@ enum dm_raid1_error {
33782
33783 struct mirror {
33784 struct mirror_set *ms;
33785- atomic_t error_count;
33786+ atomic_unchecked_t error_count;
33787 unsigned long error_type;
33788 struct dm_dev *dev;
33789 sector_t offset;
33790@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33791 struct mirror *m;
33792
33793 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33794- if (!atomic_read(&m->error_count))
33795+ if (!atomic_read_unchecked(&m->error_count))
33796 return m;
33797
33798 return NULL;
33799@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33800 * simple way to tell if a device has encountered
33801 * errors.
33802 */
33803- atomic_inc(&m->error_count);
33804+ atomic_inc_unchecked(&m->error_count);
33805
33806 if (test_and_set_bit(error_type, &m->error_type))
33807 return;
33808@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33809 struct mirror *m = get_default_mirror(ms);
33810
33811 do {
33812- if (likely(!atomic_read(&m->error_count)))
33813+ if (likely(!atomic_read_unchecked(&m->error_count)))
33814 return m;
33815
33816 if (m-- == ms->mirror)
33817@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33818 {
33819 struct mirror *default_mirror = get_default_mirror(m->ms);
33820
33821- return !atomic_read(&default_mirror->error_count);
33822+ return !atomic_read_unchecked(&default_mirror->error_count);
33823 }
33824
33825 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33826@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33827 */
33828 if (likely(region_in_sync(ms, region, 1)))
33829 m = choose_mirror(ms, bio->bi_sector);
33830- else if (m && atomic_read(&m->error_count))
33831+ else if (m && atomic_read_unchecked(&m->error_count))
33832 m = NULL;
33833
33834 if (likely(m))
33835@@ -848,6 +848,10 @@ static void do_mirror(struct work_struct *work)
33836 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33837 uint32_t region_size,
33838 struct dm_target *ti,
33839+ struct dm_dirty_log *dl) __size_overflow(1);
33840+static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33841+ uint32_t region_size,
33842+ struct dm_target *ti,
33843 struct dm_dirty_log *dl)
33844 {
33845 size_t len;
33846@@ -937,7 +941,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33847 }
33848
33849 ms->mirror[mirror].ms = ms;
33850- atomic_set(&(ms->mirror[mirror].error_count), 0);
33851+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33852 ms->mirror[mirror].error_type = 0;
33853 ms->mirror[mirror].offset = offset;
33854
33855@@ -1347,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33856 */
33857 static char device_status_char(struct mirror *m)
33858 {
33859- if (!atomic_read(&(m->error_count)))
33860+ if (!atomic_read_unchecked(&(m->error_count)))
33861 return 'A';
33862
33863 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33864diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33865index 3d80cf0..7d98e1a 100644
33866--- a/drivers/md/dm-stripe.c
33867+++ b/drivers/md/dm-stripe.c
33868@@ -20,7 +20,7 @@ struct stripe {
33869 struct dm_dev *dev;
33870 sector_t physical_start;
33871
33872- atomic_t error_count;
33873+ atomic_unchecked_t error_count;
33874 };
33875
33876 struct stripe_c {
33877@@ -55,6 +55,7 @@ static void trigger_event(struct work_struct *work)
33878 dm_table_event(sc->ti->table);
33879 }
33880
33881+static inline struct stripe_c *alloc_context(unsigned int stripes) __size_overflow(1);
33882 static inline struct stripe_c *alloc_context(unsigned int stripes)
33883 {
33884 size_t len;
33885@@ -192,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33886 kfree(sc);
33887 return r;
33888 }
33889- atomic_set(&(sc->stripe[i].error_count), 0);
33890+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33891 }
33892
33893 ti->private = sc;
33894@@ -314,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33895 DMEMIT("%d ", sc->stripes);
33896 for (i = 0; i < sc->stripes; i++) {
33897 DMEMIT("%s ", sc->stripe[i].dev->name);
33898- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33899+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33900 'D' : 'A';
33901 }
33902 buffer[i] = '\0';
33903@@ -361,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33904 */
33905 for (i = 0; i < sc->stripes; i++)
33906 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33907- atomic_inc(&(sc->stripe[i].error_count));
33908- if (atomic_read(&(sc->stripe[i].error_count)) <
33909+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33910+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33911 DM_IO_ERROR_THRESHOLD)
33912 schedule_work(&sc->trigger_event);
33913 }
33914diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33915index 63cc542..8d45caf3 100644
33916--- a/drivers/md/dm-table.c
33917+++ b/drivers/md/dm-table.c
33918@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33919 if (!dev_size)
33920 return 0;
33921
33922- if ((start >= dev_size) || (start + len > dev_size)) {
33923+ if ((start >= dev_size) || (len > dev_size - start)) {
33924 DMWARN("%s: %s too small for target: "
33925 "start=%llu, len=%llu, dev_size=%llu",
33926 dm_device_name(ti->table->md), bdevname(bdev, b),
33927diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33928index 237571a..fb6d19b 100644
33929--- a/drivers/md/dm-thin-metadata.c
33930+++ b/drivers/md/dm-thin-metadata.c
33931@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33932
33933 pmd->info.tm = tm;
33934 pmd->info.levels = 2;
33935- pmd->info.value_type.context = pmd->data_sm;
33936+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33937 pmd->info.value_type.size = sizeof(__le64);
33938 pmd->info.value_type.inc = data_block_inc;
33939 pmd->info.value_type.dec = data_block_dec;
33940@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33941
33942 pmd->bl_info.tm = tm;
33943 pmd->bl_info.levels = 1;
33944- pmd->bl_info.value_type.context = pmd->data_sm;
33945+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33946 pmd->bl_info.value_type.size = sizeof(__le64);
33947 pmd->bl_info.value_type.inc = data_block_inc;
33948 pmd->bl_info.value_type.dec = data_block_dec;
33949diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33950index b89c548..2af3ce4 100644
33951--- a/drivers/md/dm.c
33952+++ b/drivers/md/dm.c
33953@@ -176,9 +176,9 @@ struct mapped_device {
33954 /*
33955 * Event handling.
33956 */
33957- atomic_t event_nr;
33958+ atomic_unchecked_t event_nr;
33959 wait_queue_head_t eventq;
33960- atomic_t uevent_seq;
33961+ atomic_unchecked_t uevent_seq;
33962 struct list_head uevent_list;
33963 spinlock_t uevent_lock; /* Protect access to uevent_list */
33964
33965@@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
33966 rwlock_init(&md->map_lock);
33967 atomic_set(&md->holders, 1);
33968 atomic_set(&md->open_count, 0);
33969- atomic_set(&md->event_nr, 0);
33970- atomic_set(&md->uevent_seq, 0);
33971+ atomic_set_unchecked(&md->event_nr, 0);
33972+ atomic_set_unchecked(&md->uevent_seq, 0);
33973 INIT_LIST_HEAD(&md->uevent_list);
33974 spin_lock_init(&md->uevent_lock);
33975
33976@@ -1979,7 +1979,7 @@ static void event_callback(void *context)
33977
33978 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33979
33980- atomic_inc(&md->event_nr);
33981+ atomic_inc_unchecked(&md->event_nr);
33982 wake_up(&md->eventq);
33983 }
33984
33985@@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33986
33987 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33988 {
33989- return atomic_add_return(1, &md->uevent_seq);
33990+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33991 }
33992
33993 uint32_t dm_get_event_nr(struct mapped_device *md)
33994 {
33995- return atomic_read(&md->event_nr);
33996+ return atomic_read_unchecked(&md->event_nr);
33997 }
33998
33999 int dm_wait_event(struct mapped_device *md, int event_nr)
34000 {
34001 return wait_event_interruptible(md->eventq,
34002- (event_nr != atomic_read(&md->event_nr)));
34003+ (event_nr != atomic_read_unchecked(&md->event_nr)));
34004 }
34005
34006 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
34007diff --git a/drivers/md/md.c b/drivers/md/md.c
34008index 58027d8..d9cddcd 100644
34009--- a/drivers/md/md.c
34010+++ b/drivers/md/md.c
34011@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
34012 * start build, activate spare
34013 */
34014 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
34015-static atomic_t md_event_count;
34016+static atomic_unchecked_t md_event_count;
34017 void md_new_event(struct mddev *mddev)
34018 {
34019- atomic_inc(&md_event_count);
34020+ atomic_inc_unchecked(&md_event_count);
34021 wake_up(&md_event_waiters);
34022 }
34023 EXPORT_SYMBOL_GPL(md_new_event);
34024@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
34025 */
34026 static void md_new_event_inintr(struct mddev *mddev)
34027 {
34028- atomic_inc(&md_event_count);
34029+ atomic_inc_unchecked(&md_event_count);
34030 wake_up(&md_event_waiters);
34031 }
34032
34033@@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
34034
34035 rdev->preferred_minor = 0xffff;
34036 rdev->data_offset = le64_to_cpu(sb->data_offset);
34037- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34038+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34039
34040 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
34041 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
34042@@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
34043 else
34044 sb->resync_offset = cpu_to_le64(0);
34045
34046- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
34047+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
34048
34049 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
34050 sb->size = cpu_to_le64(mddev->dev_sectors);
34051@@ -2689,7 +2689,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
34052 static ssize_t
34053 errors_show(struct md_rdev *rdev, char *page)
34054 {
34055- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
34056+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
34057 }
34058
34059 static ssize_t
34060@@ -2698,7 +2698,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
34061 char *e;
34062 unsigned long n = simple_strtoul(buf, &e, 10);
34063 if (*buf && (*e == 0 || *e == '\n')) {
34064- atomic_set(&rdev->corrected_errors, n);
34065+ atomic_set_unchecked(&rdev->corrected_errors, n);
34066 return len;
34067 }
34068 return -EINVAL;
34069@@ -3084,8 +3084,8 @@ int md_rdev_init(struct md_rdev *rdev)
34070 rdev->sb_loaded = 0;
34071 rdev->bb_page = NULL;
34072 atomic_set(&rdev->nr_pending, 0);
34073- atomic_set(&rdev->read_errors, 0);
34074- atomic_set(&rdev->corrected_errors, 0);
34075+ atomic_set_unchecked(&rdev->read_errors, 0);
34076+ atomic_set_unchecked(&rdev->corrected_errors, 0);
34077
34078 INIT_LIST_HEAD(&rdev->same_set);
34079 init_waitqueue_head(&rdev->blocked_wait);
34080@@ -6736,7 +6736,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
34081
34082 spin_unlock(&pers_lock);
34083 seq_printf(seq, "\n");
34084- seq->poll_event = atomic_read(&md_event_count);
34085+ seq->poll_event = atomic_read_unchecked(&md_event_count);
34086 return 0;
34087 }
34088 if (v == (void*)2) {
34089@@ -6828,7 +6828,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
34090 chunk_kb ? "KB" : "B");
34091 if (bitmap->file) {
34092 seq_printf(seq, ", file: ");
34093- seq_path(seq, &bitmap->file->f_path, " \t\n");
34094+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
34095 }
34096
34097 seq_printf(seq, "\n");
34098@@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
34099 return error;
34100
34101 seq = file->private_data;
34102- seq->poll_event = atomic_read(&md_event_count);
34103+ seq->poll_event = atomic_read_unchecked(&md_event_count);
34104 return error;
34105 }
34106
34107@@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
34108 /* always allow read */
34109 mask = POLLIN | POLLRDNORM;
34110
34111- if (seq->poll_event != atomic_read(&md_event_count))
34112+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
34113 mask |= POLLERR | POLLPRI;
34114 return mask;
34115 }
34116@@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
34117 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
34118 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
34119 (int)part_stat_read(&disk->part0, sectors[1]) -
34120- atomic_read(&disk->sync_io);
34121+ atomic_read_unchecked(&disk->sync_io);
34122 /* sync IO will cause sync_io to increase before the disk_stats
34123 * as sync_io is counted when a request starts, and
34124 * disk_stats is counted when it completes.
34125diff --git a/drivers/md/md.h b/drivers/md/md.h
34126index 44c63df..b795d1a 100644
34127--- a/drivers/md/md.h
34128+++ b/drivers/md/md.h
34129@@ -93,13 +93,13 @@ struct md_rdev {
34130 * only maintained for arrays that
34131 * support hot removal
34132 */
34133- atomic_t read_errors; /* number of consecutive read errors that
34134+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
34135 * we have tried to ignore.
34136 */
34137 struct timespec last_read_error; /* monotonic time since our
34138 * last read error
34139 */
34140- atomic_t corrected_errors; /* number of corrected read errors,
34141+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
34142 * for reporting to userspace and storing
34143 * in superblock.
34144 */
34145@@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
34146
34147 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
34148 {
34149- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34150+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34151 }
34152
34153 struct md_personality
34154diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
34155index 50ed53b..4f29d7d 100644
34156--- a/drivers/md/persistent-data/dm-space-map-checker.c
34157+++ b/drivers/md/persistent-data/dm-space-map-checker.c
34158@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
34159 /*----------------------------------------------------------------*/
34160
34161 struct sm_checker {
34162- struct dm_space_map sm;
34163+ dm_space_map_no_const sm;
34164
34165 struct count_array old_counts;
34166 struct count_array counts;
34167diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
34168index fc469ba..2d91555 100644
34169--- a/drivers/md/persistent-data/dm-space-map-disk.c
34170+++ b/drivers/md/persistent-data/dm-space-map-disk.c
34171@@ -23,7 +23,7 @@
34172 * Space map interface.
34173 */
34174 struct sm_disk {
34175- struct dm_space_map sm;
34176+ dm_space_map_no_const sm;
34177
34178 struct ll_disk ll;
34179 struct ll_disk old_ll;
34180diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
34181index e89ae5e..062e4c2 100644
34182--- a/drivers/md/persistent-data/dm-space-map-metadata.c
34183+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
34184@@ -43,7 +43,7 @@ struct block_op {
34185 };
34186
34187 struct sm_metadata {
34188- struct dm_space_map sm;
34189+ dm_space_map_no_const sm;
34190
34191 struct ll_disk ll;
34192 struct ll_disk old_ll;
34193diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
34194index 1cbfc6b..56e1dbb 100644
34195--- a/drivers/md/persistent-data/dm-space-map.h
34196+++ b/drivers/md/persistent-data/dm-space-map.h
34197@@ -60,6 +60,7 @@ struct dm_space_map {
34198 int (*root_size)(struct dm_space_map *sm, size_t *result);
34199 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
34200 };
34201+typedef struct dm_space_map __no_const dm_space_map_no_const;
34202
34203 /*----------------------------------------------------------------*/
34204
34205diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
34206index edc735a..e9b97f1 100644
34207--- a/drivers/md/raid1.c
34208+++ b/drivers/md/raid1.c
34209@@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
34210 if (r1_sync_page_io(rdev, sect, s,
34211 bio->bi_io_vec[idx].bv_page,
34212 READ) != 0)
34213- atomic_add(s, &rdev->corrected_errors);
34214+ atomic_add_unchecked(s, &rdev->corrected_errors);
34215 }
34216 sectors -= s;
34217 sect += s;
34218@@ -1859,7 +1859,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
34219 test_bit(In_sync, &rdev->flags)) {
34220 if (r1_sync_page_io(rdev, sect, s,
34221 conf->tmppage, READ)) {
34222- atomic_add(s, &rdev->corrected_errors);
34223+ atomic_add_unchecked(s, &rdev->corrected_errors);
34224 printk(KERN_INFO
34225 "md/raid1:%s: read error corrected "
34226 "(%d sectors at %llu on %s)\n",
34227diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
34228index 1898389..a3aa617 100644
34229--- a/drivers/md/raid10.c
34230+++ b/drivers/md/raid10.c
34231@@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
34232 /* The write handler will notice the lack of
34233 * R10BIO_Uptodate and record any errors etc
34234 */
34235- atomic_add(r10_bio->sectors,
34236+ atomic_add_unchecked(r10_bio->sectors,
34237 &conf->mirrors[d].rdev->corrected_errors);
34238
34239 /* for reconstruct, we always reschedule after a read.
34240@@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
34241 {
34242 struct timespec cur_time_mon;
34243 unsigned long hours_since_last;
34244- unsigned int read_errors = atomic_read(&rdev->read_errors);
34245+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
34246
34247 ktime_get_ts(&cur_time_mon);
34248
34249@@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
34250 * overflowing the shift of read_errors by hours_since_last.
34251 */
34252 if (hours_since_last >= 8 * sizeof(read_errors))
34253- atomic_set(&rdev->read_errors, 0);
34254+ atomic_set_unchecked(&rdev->read_errors, 0);
34255 else
34256- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
34257+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
34258 }
34259
34260 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
34261@@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34262 return;
34263
34264 check_decay_read_errors(mddev, rdev);
34265- atomic_inc(&rdev->read_errors);
34266- if (atomic_read(&rdev->read_errors) > max_read_errors) {
34267+ atomic_inc_unchecked(&rdev->read_errors);
34268+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
34269 char b[BDEVNAME_SIZE];
34270 bdevname(rdev->bdev, b);
34271
34272@@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34273 "md/raid10:%s: %s: Raid device exceeded "
34274 "read_error threshold [cur %d:max %d]\n",
34275 mdname(mddev), b,
34276- atomic_read(&rdev->read_errors), max_read_errors);
34277+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
34278 printk(KERN_NOTICE
34279 "md/raid10:%s: %s: Failing raid device\n",
34280 mdname(mddev), b);
34281@@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34282 (unsigned long long)(
34283 sect + rdev->data_offset),
34284 bdevname(rdev->bdev, b));
34285- atomic_add(s, &rdev->corrected_errors);
34286+ atomic_add_unchecked(s, &rdev->corrected_errors);
34287 }
34288
34289 rdev_dec_pending(rdev, mddev);
34290diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
34291index d1162e5..c7cd902 100644
34292--- a/drivers/md/raid5.c
34293+++ b/drivers/md/raid5.c
34294@@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
34295 (unsigned long long)(sh->sector
34296 + rdev->data_offset),
34297 bdevname(rdev->bdev, b));
34298- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
34299+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
34300 clear_bit(R5_ReadError, &sh->dev[i].flags);
34301 clear_bit(R5_ReWrite, &sh->dev[i].flags);
34302 }
34303- if (atomic_read(&rdev->read_errors))
34304- atomic_set(&rdev->read_errors, 0);
34305+ if (atomic_read_unchecked(&rdev->read_errors))
34306+ atomic_set_unchecked(&rdev->read_errors, 0);
34307 } else {
34308 const char *bdn = bdevname(rdev->bdev, b);
34309 int retry = 0;
34310
34311 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
34312- atomic_inc(&rdev->read_errors);
34313+ atomic_inc_unchecked(&rdev->read_errors);
34314 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
34315 printk_ratelimited(
34316 KERN_WARNING
34317@@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
34318 (unsigned long long)(sh->sector
34319 + rdev->data_offset),
34320 bdn);
34321- else if (atomic_read(&rdev->read_errors)
34322+ else if (atomic_read_unchecked(&rdev->read_errors)
34323 > conf->max_nr_stripes)
34324 printk(KERN_WARNING
34325 "md/raid:%s: Too many read errors, failing device %s.\n",
34326diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
34327index ce4f858..7bcfb46 100644
34328--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
34329+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
34330@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
34331 .subvendor = _subvend, .subdevice = _subdev, \
34332 .driver_data = (unsigned long)&_driverdata }
34333
34334-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
34335+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
34336 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
34337 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
34338 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
34339diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
34340index a7d876f..8c21b61 100644
34341--- a/drivers/media/dvb/dvb-core/dvb_demux.h
34342+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
34343@@ -73,7 +73,7 @@ struct dvb_demux_feed {
34344 union {
34345 dmx_ts_cb ts;
34346 dmx_section_cb sec;
34347- } cb;
34348+ } __no_const cb;
34349
34350 struct dvb_demux *demux;
34351 void *priv;
34352diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
34353index 00a6732..70a682e 100644
34354--- a/drivers/media/dvb/dvb-core/dvbdev.c
34355+++ b/drivers/media/dvb/dvb-core/dvbdev.c
34356@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
34357 const struct dvb_device *template, void *priv, int type)
34358 {
34359 struct dvb_device *dvbdev;
34360- struct file_operations *dvbdevfops;
34361+ file_operations_no_const *dvbdevfops;
34362 struct device *clsdev;
34363 int minor;
34364 int id;
34365diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
34366index 3940bb0..fb3952a 100644
34367--- a/drivers/media/dvb/dvb-usb/cxusb.c
34368+++ b/drivers/media/dvb/dvb-usb/cxusb.c
34369@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
34370
34371 struct dib0700_adapter_state {
34372 int (*set_param_save) (struct dvb_frontend *);
34373-};
34374+} __no_const;
34375
34376 static int dib7070_set_param_override(struct dvb_frontend *fe)
34377 {
34378diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
34379index 451c5a7..649f711 100644
34380--- a/drivers/media/dvb/dvb-usb/dw2102.c
34381+++ b/drivers/media/dvb/dvb-usb/dw2102.c
34382@@ -95,7 +95,7 @@ struct su3000_state {
34383
34384 struct s6x0_state {
34385 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
34386-};
34387+} __no_const;
34388
34389 /* debug */
34390 static int dvb_usb_dw2102_debug;
34391diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
34392index 404f63a..4796533 100644
34393--- a/drivers/media/dvb/frontends/dib3000.h
34394+++ b/drivers/media/dvb/frontends/dib3000.h
34395@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
34396 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
34397 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
34398 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
34399-};
34400+} __no_const;
34401
34402 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
34403 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
34404diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
34405index 8418c02..8555013 100644
34406--- a/drivers/media/dvb/ngene/ngene-cards.c
34407+++ b/drivers/media/dvb/ngene/ngene-cards.c
34408@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
34409
34410 /****************************************************************************/
34411
34412-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
34413+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
34414 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
34415 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
34416 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
34417diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
34418index 16a089f..ab1667d 100644
34419--- a/drivers/media/radio/radio-cadet.c
34420+++ b/drivers/media/radio/radio-cadet.c
34421@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
34422 unsigned char readbuf[RDS_BUFFER];
34423 int i = 0;
34424
34425+ if (count > RDS_BUFFER)
34426+ return -EFAULT;
34427 mutex_lock(&dev->lock);
34428 if (dev->rdsstat == 0) {
34429 dev->rdsstat = 1;
34430diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
34431index 9cde353..8c6a1c3 100644
34432--- a/drivers/media/video/au0828/au0828.h
34433+++ b/drivers/media/video/au0828/au0828.h
34434@@ -191,7 +191,7 @@ struct au0828_dev {
34435
34436 /* I2C */
34437 struct i2c_adapter i2c_adap;
34438- struct i2c_algorithm i2c_algo;
34439+ i2c_algorithm_no_const i2c_algo;
34440 struct i2c_client i2c_client;
34441 u32 i2c_rc;
34442
34443diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
34444index ee91e295..04ad048 100644
34445--- a/drivers/media/video/cpia2/cpia2_core.c
34446+++ b/drivers/media/video/cpia2/cpia2_core.c
34447@@ -86,6 +86,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
34448 return ret;
34449 }
34450
34451+static void *rvmalloc(unsigned long size) __size_overflow(1);
34452 static void *rvmalloc(unsigned long size)
34453 {
34454 void *mem;
34455diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
34456index 82d195b..181103c 100644
34457--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
34458+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
34459@@ -229,6 +229,8 @@ static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
34460
34461
34462 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34463+ size_t size) __size_overflow(2);
34464+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34465 size_t size)
34466 {
34467 struct snd_pcm_runtime *runtime = subs->runtime;
34468diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
34469index a2c2b7d..8f1bec7 100644
34470--- a/drivers/media/video/cx231xx/cx231xx-audio.c
34471+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
34472@@ -389,6 +389,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
34473 }
34474
34475 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34476+ size_t size) __size_overflow(2);
34477+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34478 size_t size)
34479 {
34480 struct snd_pcm_runtime *runtime = subs->runtime;
34481diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
34482index 04bf662..e0ac026 100644
34483--- a/drivers/media/video/cx88/cx88-alsa.c
34484+++ b/drivers/media/video/cx88/cx88-alsa.c
34485@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34486 * Only boards with eeprom and byte 1 at eeprom=1 have it
34487 */
34488
34489-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34490+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34491 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34492 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34493 {0, }
34494diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
34495index e2a7b77..753d0ee 100644
34496--- a/drivers/media/video/em28xx/em28xx-audio.c
34497+++ b/drivers/media/video/em28xx/em28xx-audio.c
34498@@ -225,6 +225,8 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
34499 }
34500
34501 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34502+ size_t size) __size_overflow(2);
34503+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34504 size_t size)
34505 {
34506 struct snd_pcm_runtime *runtime = subs->runtime;
34507diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
34508index b09a3c8..6dcba0a 100644
34509--- a/drivers/media/video/meye.c
34510+++ b/drivers/media/video/meye.c
34511@@ -72,6 +72,7 @@ static struct meye meye;
34512 /****************************************************************************/
34513 /* Memory allocation routines (stolen from bttv-driver.c) */
34514 /****************************************************************************/
34515+static void *rvmalloc(unsigned long size) __size_overflow(1);
34516 static void *rvmalloc(unsigned long size)
34517 {
34518 void *mem;
34519diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
34520index 1fb7d5b..3901e77 100644
34521--- a/drivers/media/video/omap/omap_vout.c
34522+++ b/drivers/media/video/omap/omap_vout.c
34523@@ -64,7 +64,6 @@ enum omap_vout_channels {
34524 OMAP_VIDEO2,
34525 };
34526
34527-static struct videobuf_queue_ops video_vbq_ops;
34528 /* Variables configurable through module params*/
34529 static u32 video1_numbuffers = 3;
34530 static u32 video2_numbuffers = 3;
34531@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
34532 {
34533 struct videobuf_queue *q;
34534 struct omap_vout_device *vout = NULL;
34535+ static struct videobuf_queue_ops video_vbq_ops = {
34536+ .buf_setup = omap_vout_buffer_setup,
34537+ .buf_prepare = omap_vout_buffer_prepare,
34538+ .buf_release = omap_vout_buffer_release,
34539+ .buf_queue = omap_vout_buffer_queue,
34540+ };
34541
34542 vout = video_drvdata(file);
34543 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34544@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
34545 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34546
34547 q = &vout->vbq;
34548- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34549- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34550- video_vbq_ops.buf_release = omap_vout_buffer_release;
34551- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34552 spin_lock_init(&vout->vbq_lock);
34553
34554 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
34555diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34556index 305e6aa..0143317 100644
34557--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34558+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34559@@ -196,7 +196,7 @@ struct pvr2_hdw {
34560
34561 /* I2C stuff */
34562 struct i2c_adapter i2c_adap;
34563- struct i2c_algorithm i2c_algo;
34564+ i2c_algorithm_no_const i2c_algo;
34565 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34566 int i2c_cx25840_hack_state;
34567 int i2c_linked;
34568diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
34569index 2fd38a0..ddec3c4 100644
34570--- a/drivers/media/video/saa7164/saa7164-encoder.c
34571+++ b/drivers/media/video/saa7164/saa7164-encoder.c
34572@@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
34573 }
34574
34575 static ssize_t fops_read(struct file *file, char __user *buffer,
34576+ size_t count, loff_t *pos) __size_overflow(3);
34577+static ssize_t fops_read(struct file *file, char __user *buffer,
34578 size_t count, loff_t *pos)
34579 {
34580 struct saa7164_encoder_fh *fh = file->private_data;
34581diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
34582index e2e0341..b80056c 100644
34583--- a/drivers/media/video/saa7164/saa7164-vbi.c
34584+++ b/drivers/media/video/saa7164/saa7164-vbi.c
34585@@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
34586 }
34587
34588 static ssize_t fops_read(struct file *file, char __user *buffer,
34589+ size_t count, loff_t *pos) __size_overflow(3);
34590+static ssize_t fops_read(struct file *file, char __user *buffer,
34591 size_t count, loff_t *pos)
34592 {
34593 struct saa7164_vbi_fh *fh = file->private_data;
34594diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34595index 4ed1c7c2..8f15e13 100644
34596--- a/drivers/media/video/timblogiw.c
34597+++ b/drivers/media/video/timblogiw.c
34598@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34599
34600 /* Platform device functions */
34601
34602-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34603+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34604 .vidioc_querycap = timblogiw_querycap,
34605 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34606 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34607@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34608 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34609 };
34610
34611-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34612+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34613 .owner = THIS_MODULE,
34614 .open = timblogiw_open,
34615 .release = timblogiw_close,
34616diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
34617index c969111..a7910f4 100644
34618--- a/drivers/media/video/videobuf-dma-contig.c
34619+++ b/drivers/media/video/videobuf-dma-contig.c
34620@@ -184,6 +184,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
34621 return ret;
34622 }
34623
34624+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34625 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34626 {
34627 struct videobuf_dma_contig_memory *mem;
34628diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
34629index f300dea..5fc9c4a 100644
34630--- a/drivers/media/video/videobuf-dma-sg.c
34631+++ b/drivers/media/video/videobuf-dma-sg.c
34632@@ -419,6 +419,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34633 struct videobuf_dma_sg_memory
34634 */
34635
34636+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34637 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34638 {
34639 struct videobuf_dma_sg_memory *mem;
34640diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
34641index df14258..12cc7a3 100644
34642--- a/drivers/media/video/videobuf-vmalloc.c
34643+++ b/drivers/media/video/videobuf-vmalloc.c
34644@@ -135,6 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34645 struct videobuf_dma_sg_memory
34646 */
34647
34648+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34649 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34650 {
34651 struct videobuf_vmalloc_memory *mem;
34652diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34653index a7dc467..a55c423 100644
34654--- a/drivers/message/fusion/mptbase.c
34655+++ b/drivers/message/fusion/mptbase.c
34656@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34657 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34658 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34659
34660+#ifdef CONFIG_GRKERNSEC_HIDESYM
34661+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34662+#else
34663 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34664 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34665+#endif
34666+
34667 /*
34668 * Rounding UP to nearest 4-kB boundary here...
34669 */
34670diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34671index 551262e..7551198 100644
34672--- a/drivers/message/fusion/mptsas.c
34673+++ b/drivers/message/fusion/mptsas.c
34674@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34675 return 0;
34676 }
34677
34678+static inline void
34679+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34680+{
34681+ if (phy_info->port_details) {
34682+ phy_info->port_details->rphy = rphy;
34683+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34684+ ioc->name, rphy));
34685+ }
34686+
34687+ if (rphy) {
34688+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34689+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34690+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34691+ ioc->name, rphy, rphy->dev.release));
34692+ }
34693+}
34694+
34695 /* no mutex */
34696 static void
34697 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34698@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34699 return NULL;
34700 }
34701
34702-static inline void
34703-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34704-{
34705- if (phy_info->port_details) {
34706- phy_info->port_details->rphy = rphy;
34707- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34708- ioc->name, rphy));
34709- }
34710-
34711- if (rphy) {
34712- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34713- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34714- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34715- ioc->name, rphy, rphy->dev.release));
34716- }
34717-}
34718-
34719 static inline struct sas_port *
34720 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34721 {
34722diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34723index 0c3ced7..1fe34ec 100644
34724--- a/drivers/message/fusion/mptscsih.c
34725+++ b/drivers/message/fusion/mptscsih.c
34726@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34727
34728 h = shost_priv(SChost);
34729
34730- if (h) {
34731- if (h->info_kbuf == NULL)
34732- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34733- return h->info_kbuf;
34734- h->info_kbuf[0] = '\0';
34735+ if (!h)
34736+ return NULL;
34737
34738- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34739- h->info_kbuf[size-1] = '\0';
34740- }
34741+ if (h->info_kbuf == NULL)
34742+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34743+ return h->info_kbuf;
34744+ h->info_kbuf[0] = '\0';
34745+
34746+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34747+ h->info_kbuf[size-1] = '\0';
34748
34749 return h->info_kbuf;
34750 }
34751diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34752index 6d115c7..58ff7fd 100644
34753--- a/drivers/message/i2o/i2o_proc.c
34754+++ b/drivers/message/i2o/i2o_proc.c
34755@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34756 "Array Controller Device"
34757 };
34758
34759-static char *chtostr(u8 * chars, int n)
34760-{
34761- char tmp[256];
34762- tmp[0] = 0;
34763- return strncat(tmp, (char *)chars, n);
34764-}
34765-
34766 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34767 char *group)
34768 {
34769@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34770
34771 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34772 seq_printf(seq, "%-#8x", ddm_table.module_id);
34773- seq_printf(seq, "%-29s",
34774- chtostr(ddm_table.module_name_version, 28));
34775+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34776 seq_printf(seq, "%9d ", ddm_table.data_size);
34777 seq_printf(seq, "%8d", ddm_table.code_size);
34778
34779@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34780
34781 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34782 seq_printf(seq, "%-#8x", dst->module_id);
34783- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34784- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34785+ seq_printf(seq, "%-.28s", dst->module_name_version);
34786+ seq_printf(seq, "%-.8s", dst->date);
34787 seq_printf(seq, "%8d ", dst->module_size);
34788 seq_printf(seq, "%8d ", dst->mpb_size);
34789 seq_printf(seq, "0x%04x", dst->module_flags);
34790@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34791 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34792 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34793 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34794- seq_printf(seq, "Vendor info : %s\n",
34795- chtostr((u8 *) (work32 + 2), 16));
34796- seq_printf(seq, "Product info : %s\n",
34797- chtostr((u8 *) (work32 + 6), 16));
34798- seq_printf(seq, "Description : %s\n",
34799- chtostr((u8 *) (work32 + 10), 16));
34800- seq_printf(seq, "Product rev. : %s\n",
34801- chtostr((u8 *) (work32 + 14), 8));
34802+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34803+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34804+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34805+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34806
34807 seq_printf(seq, "Serial number : ");
34808 print_serial_number(seq, (u8 *) (work32 + 16),
34809@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34810 }
34811
34812 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34813- seq_printf(seq, "Module name : %s\n",
34814- chtostr(result.module_name, 24));
34815- seq_printf(seq, "Module revision : %s\n",
34816- chtostr(result.module_rev, 8));
34817+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34818+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34819
34820 seq_printf(seq, "Serial number : ");
34821 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34822@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34823 return 0;
34824 }
34825
34826- seq_printf(seq, "Device name : %s\n",
34827- chtostr(result.device_name, 64));
34828- seq_printf(seq, "Service name : %s\n",
34829- chtostr(result.service_name, 64));
34830- seq_printf(seq, "Physical name : %s\n",
34831- chtostr(result.physical_location, 64));
34832- seq_printf(seq, "Instance number : %s\n",
34833- chtostr(result.instance_number, 4));
34834+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34835+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34836+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34837+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34838
34839 return 0;
34840 }
34841diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34842index a8c08f3..155fe3d 100644
34843--- a/drivers/message/i2o/iop.c
34844+++ b/drivers/message/i2o/iop.c
34845@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34846
34847 spin_lock_irqsave(&c->context_list_lock, flags);
34848
34849- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34850- atomic_inc(&c->context_list_counter);
34851+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34852+ atomic_inc_unchecked(&c->context_list_counter);
34853
34854- entry->context = atomic_read(&c->context_list_counter);
34855+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34856
34857 list_add(&entry->list, &c->context_list);
34858
34859@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34860
34861 #if BITS_PER_LONG == 64
34862 spin_lock_init(&c->context_list_lock);
34863- atomic_set(&c->context_list_counter, 0);
34864+ atomic_set_unchecked(&c->context_list_counter, 0);
34865 INIT_LIST_HEAD(&c->context_list);
34866 #endif
34867
34868diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34869index 7ce65f4..e66e9bc 100644
34870--- a/drivers/mfd/abx500-core.c
34871+++ b/drivers/mfd/abx500-core.c
34872@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34873
34874 struct abx500_device_entry {
34875 struct list_head list;
34876- struct abx500_ops ops;
34877+ abx500_ops_no_const ops;
34878 struct device *dev;
34879 };
34880
34881diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34882index a9223ed..4127b13 100644
34883--- a/drivers/mfd/janz-cmodio.c
34884+++ b/drivers/mfd/janz-cmodio.c
34885@@ -13,6 +13,7 @@
34886
34887 #include <linux/kernel.h>
34888 #include <linux/module.h>
34889+#include <linux/slab.h>
34890 #include <linux/init.h>
34891 #include <linux/pci.h>
34892 #include <linux/interrupt.h>
34893diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34894index a981e2a..5ca0c8b 100644
34895--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34896+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34897@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34898 * the lid is closed. This leads to interrupts as soon as a little move
34899 * is done.
34900 */
34901- atomic_inc(&lis3->count);
34902+ atomic_inc_unchecked(&lis3->count);
34903
34904 wake_up_interruptible(&lis3->misc_wait);
34905 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34906@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34907 if (lis3->pm_dev)
34908 pm_runtime_get_sync(lis3->pm_dev);
34909
34910- atomic_set(&lis3->count, 0);
34911+ atomic_set_unchecked(&lis3->count, 0);
34912 return 0;
34913 }
34914
34915@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34916 add_wait_queue(&lis3->misc_wait, &wait);
34917 while (true) {
34918 set_current_state(TASK_INTERRUPTIBLE);
34919- data = atomic_xchg(&lis3->count, 0);
34920+ data = atomic_xchg_unchecked(&lis3->count, 0);
34921 if (data)
34922 break;
34923
34924@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34925 struct lis3lv02d, miscdev);
34926
34927 poll_wait(file, &lis3->misc_wait, wait);
34928- if (atomic_read(&lis3->count))
34929+ if (atomic_read_unchecked(&lis3->count))
34930 return POLLIN | POLLRDNORM;
34931 return 0;
34932 }
34933diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34934index 2b1482a..5d33616 100644
34935--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34936+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34937@@ -266,7 +266,7 @@ struct lis3lv02d {
34938 struct input_polled_dev *idev; /* input device */
34939 struct platform_device *pdev; /* platform device */
34940 struct regulator_bulk_data regulators[2];
34941- atomic_t count; /* interrupt count after last read */
34942+ atomic_unchecked_t count; /* interrupt count after last read */
34943 union axis_conversion ac; /* hw -> logical axis */
34944 int mapped_btns[3];
34945
34946diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34947index 2f30bad..c4c13d0 100644
34948--- a/drivers/misc/sgi-gru/gruhandles.c
34949+++ b/drivers/misc/sgi-gru/gruhandles.c
34950@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34951 unsigned long nsec;
34952
34953 nsec = CLKS2NSEC(clks);
34954- atomic_long_inc(&mcs_op_statistics[op].count);
34955- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34956+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34957+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34958 if (mcs_op_statistics[op].max < nsec)
34959 mcs_op_statistics[op].max = nsec;
34960 }
34961diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34962index 950dbe9..eeef0f8 100644
34963--- a/drivers/misc/sgi-gru/gruprocfs.c
34964+++ b/drivers/misc/sgi-gru/gruprocfs.c
34965@@ -32,9 +32,9 @@
34966
34967 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34968
34969-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34970+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34971 {
34972- unsigned long val = atomic_long_read(v);
34973+ unsigned long val = atomic_long_read_unchecked(v);
34974
34975 seq_printf(s, "%16lu %s\n", val, id);
34976 }
34977@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34978
34979 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34980 for (op = 0; op < mcsop_last; op++) {
34981- count = atomic_long_read(&mcs_op_statistics[op].count);
34982- total = atomic_long_read(&mcs_op_statistics[op].total);
34983+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34984+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34985 max = mcs_op_statistics[op].max;
34986 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34987 count ? total / count : 0, max);
34988diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34989index 5c3ce24..4915ccb 100644
34990--- a/drivers/misc/sgi-gru/grutables.h
34991+++ b/drivers/misc/sgi-gru/grutables.h
34992@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34993 * GRU statistics.
34994 */
34995 struct gru_stats_s {
34996- atomic_long_t vdata_alloc;
34997- atomic_long_t vdata_free;
34998- atomic_long_t gts_alloc;
34999- atomic_long_t gts_free;
35000- atomic_long_t gms_alloc;
35001- atomic_long_t gms_free;
35002- atomic_long_t gts_double_allocate;
35003- atomic_long_t assign_context;
35004- atomic_long_t assign_context_failed;
35005- atomic_long_t free_context;
35006- atomic_long_t load_user_context;
35007- atomic_long_t load_kernel_context;
35008- atomic_long_t lock_kernel_context;
35009- atomic_long_t unlock_kernel_context;
35010- atomic_long_t steal_user_context;
35011- atomic_long_t steal_kernel_context;
35012- atomic_long_t steal_context_failed;
35013- atomic_long_t nopfn;
35014- atomic_long_t asid_new;
35015- atomic_long_t asid_next;
35016- atomic_long_t asid_wrap;
35017- atomic_long_t asid_reuse;
35018- atomic_long_t intr;
35019- atomic_long_t intr_cbr;
35020- atomic_long_t intr_tfh;
35021- atomic_long_t intr_spurious;
35022- atomic_long_t intr_mm_lock_failed;
35023- atomic_long_t call_os;
35024- atomic_long_t call_os_wait_queue;
35025- atomic_long_t user_flush_tlb;
35026- atomic_long_t user_unload_context;
35027- atomic_long_t user_exception;
35028- atomic_long_t set_context_option;
35029- atomic_long_t check_context_retarget_intr;
35030- atomic_long_t check_context_unload;
35031- atomic_long_t tlb_dropin;
35032- atomic_long_t tlb_preload_page;
35033- atomic_long_t tlb_dropin_fail_no_asid;
35034- atomic_long_t tlb_dropin_fail_upm;
35035- atomic_long_t tlb_dropin_fail_invalid;
35036- atomic_long_t tlb_dropin_fail_range_active;
35037- atomic_long_t tlb_dropin_fail_idle;
35038- atomic_long_t tlb_dropin_fail_fmm;
35039- atomic_long_t tlb_dropin_fail_no_exception;
35040- atomic_long_t tfh_stale_on_fault;
35041- atomic_long_t mmu_invalidate_range;
35042- atomic_long_t mmu_invalidate_page;
35043- atomic_long_t flush_tlb;
35044- atomic_long_t flush_tlb_gru;
35045- atomic_long_t flush_tlb_gru_tgh;
35046- atomic_long_t flush_tlb_gru_zero_asid;
35047+ atomic_long_unchecked_t vdata_alloc;
35048+ atomic_long_unchecked_t vdata_free;
35049+ atomic_long_unchecked_t gts_alloc;
35050+ atomic_long_unchecked_t gts_free;
35051+ atomic_long_unchecked_t gms_alloc;
35052+ atomic_long_unchecked_t gms_free;
35053+ atomic_long_unchecked_t gts_double_allocate;
35054+ atomic_long_unchecked_t assign_context;
35055+ atomic_long_unchecked_t assign_context_failed;
35056+ atomic_long_unchecked_t free_context;
35057+ atomic_long_unchecked_t load_user_context;
35058+ atomic_long_unchecked_t load_kernel_context;
35059+ atomic_long_unchecked_t lock_kernel_context;
35060+ atomic_long_unchecked_t unlock_kernel_context;
35061+ atomic_long_unchecked_t steal_user_context;
35062+ atomic_long_unchecked_t steal_kernel_context;
35063+ atomic_long_unchecked_t steal_context_failed;
35064+ atomic_long_unchecked_t nopfn;
35065+ atomic_long_unchecked_t asid_new;
35066+ atomic_long_unchecked_t asid_next;
35067+ atomic_long_unchecked_t asid_wrap;
35068+ atomic_long_unchecked_t asid_reuse;
35069+ atomic_long_unchecked_t intr;
35070+ atomic_long_unchecked_t intr_cbr;
35071+ atomic_long_unchecked_t intr_tfh;
35072+ atomic_long_unchecked_t intr_spurious;
35073+ atomic_long_unchecked_t intr_mm_lock_failed;
35074+ atomic_long_unchecked_t call_os;
35075+ atomic_long_unchecked_t call_os_wait_queue;
35076+ atomic_long_unchecked_t user_flush_tlb;
35077+ atomic_long_unchecked_t user_unload_context;
35078+ atomic_long_unchecked_t user_exception;
35079+ atomic_long_unchecked_t set_context_option;
35080+ atomic_long_unchecked_t check_context_retarget_intr;
35081+ atomic_long_unchecked_t check_context_unload;
35082+ atomic_long_unchecked_t tlb_dropin;
35083+ atomic_long_unchecked_t tlb_preload_page;
35084+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
35085+ atomic_long_unchecked_t tlb_dropin_fail_upm;
35086+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
35087+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
35088+ atomic_long_unchecked_t tlb_dropin_fail_idle;
35089+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
35090+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
35091+ atomic_long_unchecked_t tfh_stale_on_fault;
35092+ atomic_long_unchecked_t mmu_invalidate_range;
35093+ atomic_long_unchecked_t mmu_invalidate_page;
35094+ atomic_long_unchecked_t flush_tlb;
35095+ atomic_long_unchecked_t flush_tlb_gru;
35096+ atomic_long_unchecked_t flush_tlb_gru_tgh;
35097+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
35098
35099- atomic_long_t copy_gpa;
35100- atomic_long_t read_gpa;
35101+ atomic_long_unchecked_t copy_gpa;
35102+ atomic_long_unchecked_t read_gpa;
35103
35104- atomic_long_t mesq_receive;
35105- atomic_long_t mesq_receive_none;
35106- atomic_long_t mesq_send;
35107- atomic_long_t mesq_send_failed;
35108- atomic_long_t mesq_noop;
35109- atomic_long_t mesq_send_unexpected_error;
35110- atomic_long_t mesq_send_lb_overflow;
35111- atomic_long_t mesq_send_qlimit_reached;
35112- atomic_long_t mesq_send_amo_nacked;
35113- atomic_long_t mesq_send_put_nacked;
35114- atomic_long_t mesq_page_overflow;
35115- atomic_long_t mesq_qf_locked;
35116- atomic_long_t mesq_qf_noop_not_full;
35117- atomic_long_t mesq_qf_switch_head_failed;
35118- atomic_long_t mesq_qf_unexpected_error;
35119- atomic_long_t mesq_noop_unexpected_error;
35120- atomic_long_t mesq_noop_lb_overflow;
35121- atomic_long_t mesq_noop_qlimit_reached;
35122- atomic_long_t mesq_noop_amo_nacked;
35123- atomic_long_t mesq_noop_put_nacked;
35124- atomic_long_t mesq_noop_page_overflow;
35125+ atomic_long_unchecked_t mesq_receive;
35126+ atomic_long_unchecked_t mesq_receive_none;
35127+ atomic_long_unchecked_t mesq_send;
35128+ atomic_long_unchecked_t mesq_send_failed;
35129+ atomic_long_unchecked_t mesq_noop;
35130+ atomic_long_unchecked_t mesq_send_unexpected_error;
35131+ atomic_long_unchecked_t mesq_send_lb_overflow;
35132+ atomic_long_unchecked_t mesq_send_qlimit_reached;
35133+ atomic_long_unchecked_t mesq_send_amo_nacked;
35134+ atomic_long_unchecked_t mesq_send_put_nacked;
35135+ atomic_long_unchecked_t mesq_page_overflow;
35136+ atomic_long_unchecked_t mesq_qf_locked;
35137+ atomic_long_unchecked_t mesq_qf_noop_not_full;
35138+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
35139+ atomic_long_unchecked_t mesq_qf_unexpected_error;
35140+ atomic_long_unchecked_t mesq_noop_unexpected_error;
35141+ atomic_long_unchecked_t mesq_noop_lb_overflow;
35142+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
35143+ atomic_long_unchecked_t mesq_noop_amo_nacked;
35144+ atomic_long_unchecked_t mesq_noop_put_nacked;
35145+ atomic_long_unchecked_t mesq_noop_page_overflow;
35146
35147 };
35148
35149@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
35150 tghop_invalidate, mcsop_last};
35151
35152 struct mcs_op_statistic {
35153- atomic_long_t count;
35154- atomic_long_t total;
35155+ atomic_long_unchecked_t count;
35156+ atomic_long_unchecked_t total;
35157 unsigned long max;
35158 };
35159
35160@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
35161
35162 #define STAT(id) do { \
35163 if (gru_options & OPT_STATS) \
35164- atomic_long_inc(&gru_stats.id); \
35165+ atomic_long_inc_unchecked(&gru_stats.id); \
35166 } while (0)
35167
35168 #ifdef CONFIG_SGI_GRU_DEBUG
35169diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
35170index 851b2f2..a4ec097 100644
35171--- a/drivers/misc/sgi-xp/xp.h
35172+++ b/drivers/misc/sgi-xp/xp.h
35173@@ -289,7 +289,7 @@ struct xpc_interface {
35174 xpc_notify_func, void *);
35175 void (*received) (short, int, void *);
35176 enum xp_retval (*partid_to_nasids) (short, void *);
35177-};
35178+} __no_const;
35179
35180 extern struct xpc_interface xpc_interface;
35181
35182diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
35183index b94d5f7..7f494c5 100644
35184--- a/drivers/misc/sgi-xp/xpc.h
35185+++ b/drivers/misc/sgi-xp/xpc.h
35186@@ -835,6 +835,7 @@ struct xpc_arch_operations {
35187 void (*received_payload) (struct xpc_channel *, void *);
35188 void (*notify_senders_of_disconnect) (struct xpc_channel *);
35189 };
35190+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
35191
35192 /* struct xpc_partition act_state values (for XPC HB) */
35193
35194@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
35195 /* found in xpc_main.c */
35196 extern struct device *xpc_part;
35197 extern struct device *xpc_chan;
35198-extern struct xpc_arch_operations xpc_arch_ops;
35199+extern xpc_arch_operations_no_const xpc_arch_ops;
35200 extern int xpc_disengage_timelimit;
35201 extern int xpc_disengage_timedout;
35202 extern int xpc_activate_IRQ_rcvd;
35203diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
35204index 8d082b4..aa749ae 100644
35205--- a/drivers/misc/sgi-xp/xpc_main.c
35206+++ b/drivers/misc/sgi-xp/xpc_main.c
35207@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
35208 .notifier_call = xpc_system_die,
35209 };
35210
35211-struct xpc_arch_operations xpc_arch_ops;
35212+xpc_arch_operations_no_const xpc_arch_ops;
35213
35214 /*
35215 * Timer function to enforce the timelimit on the partition disengage.
35216diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
35217index 6ebdc40..9edf5d8 100644
35218--- a/drivers/mmc/host/sdhci-pci.c
35219+++ b/drivers/mmc/host/sdhci-pci.c
35220@@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
35221 .probe = via_probe,
35222 };
35223
35224-static const struct pci_device_id pci_ids[] __devinitdata = {
35225+static const struct pci_device_id pci_ids[] __devinitconst = {
35226 {
35227 .vendor = PCI_VENDOR_ID_RICOH,
35228 .device = PCI_DEVICE_ID_RICOH_R5C822,
35229diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
35230index 87a431c..4959b43 100644
35231--- a/drivers/mtd/devices/doc2000.c
35232+++ b/drivers/mtd/devices/doc2000.c
35233@@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
35234
35235 /* The ECC will not be calculated correctly if less than 512 is written */
35236 /* DBB-
35237- if (len != 0x200 && eccbuf)
35238+ if (len != 0x200)
35239 printk(KERN_WARNING
35240 "ECC needs a full sector write (adr: %lx size %lx)\n",
35241 (long) to, (long) len);
35242diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
35243index 9eacf67..4534b5b 100644
35244--- a/drivers/mtd/devices/doc2001.c
35245+++ b/drivers/mtd/devices/doc2001.c
35246@@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
35247 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
35248
35249 /* Don't allow read past end of device */
35250- if (from >= this->totlen)
35251+ if (from >= this->totlen || !len)
35252 return -EINVAL;
35253
35254 /* Don't allow a single read to cross a 512-byte block boundary */
35255diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
35256index 3984d48..28aa897 100644
35257--- a/drivers/mtd/nand/denali.c
35258+++ b/drivers/mtd/nand/denali.c
35259@@ -26,6 +26,7 @@
35260 #include <linux/pci.h>
35261 #include <linux/mtd/mtd.h>
35262 #include <linux/module.h>
35263+#include <linux/slab.h>
35264
35265 #include "denali.h"
35266
35267diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
35268index 51b9d6a..52af9a7 100644
35269--- a/drivers/mtd/nftlmount.c
35270+++ b/drivers/mtd/nftlmount.c
35271@@ -24,6 +24,7 @@
35272 #include <asm/errno.h>
35273 #include <linux/delay.h>
35274 #include <linux/slab.h>
35275+#include <linux/sched.h>
35276 #include <linux/mtd/mtd.h>
35277 #include <linux/mtd/nand.h>
35278 #include <linux/mtd/nftl.h>
35279diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
35280index e2cdebf..d48183a 100644
35281--- a/drivers/mtd/ubi/debug.c
35282+++ b/drivers/mtd/ubi/debug.c
35283@@ -338,6 +338,8 @@ out:
35284
35285 /* Write an UBI debugfs file */
35286 static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
35287+ size_t count, loff_t *ppos) __size_overflow(3);
35288+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
35289 size_t count, loff_t *ppos)
35290 {
35291 unsigned long ubi_num = (unsigned long)file->private_data;
35292diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
35293index 071f4c8..440862e 100644
35294--- a/drivers/net/ethernet/atheros/atlx/atl2.c
35295+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
35296@@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
35297 */
35298
35299 #define ATL2_PARAM(X, desc) \
35300- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
35301+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
35302 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
35303 MODULE_PARM_DESC(X, desc);
35304 #else
35305diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35306index 66da39f..5dc436d 100644
35307--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35308+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35309@@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
35310
35311 int (*wait_comp)(struct bnx2x *bp,
35312 struct bnx2x_rx_mode_ramrod_params *p);
35313-};
35314+} __no_const;
35315
35316 /********************** Set multicast group ***********************************/
35317
35318diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
35319index aea8f72..fcebf75 100644
35320--- a/drivers/net/ethernet/broadcom/tg3.h
35321+++ b/drivers/net/ethernet/broadcom/tg3.h
35322@@ -140,6 +140,7 @@
35323 #define CHIPREV_ID_5750_A0 0x4000
35324 #define CHIPREV_ID_5750_A1 0x4001
35325 #define CHIPREV_ID_5750_A3 0x4003
35326+#define CHIPREV_ID_5750_C1 0x4201
35327 #define CHIPREV_ID_5750_C2 0x4202
35328 #define CHIPREV_ID_5752_A0_HW 0x5000
35329 #define CHIPREV_ID_5752_A0 0x6000
35330diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
35331index 47a8435..248e4b3 100644
35332--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
35333+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
35334@@ -1052,6 +1052,8 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
35335 * be copied but there is no memory for the copy.
35336 */
35337 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
35338+ struct freelQ *fl, unsigned int len) __size_overflow(3);
35339+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
35340 struct freelQ *fl, unsigned int len)
35341 {
35342 struct sk_buff *skb;
35343diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35344index c4e8643..0979484 100644
35345--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35346+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35347@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
35348 */
35349 struct l2t_skb_cb {
35350 arp_failure_handler_func arp_failure_handler;
35351-};
35352+} __no_const;
35353
35354 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
35355
35356diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
35357index cfb60e1..94af340 100644
35358--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
35359+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
35360@@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
35361 * of the SW ring.
35362 */
35363 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35364+ size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4);
35365+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35366 size_t sw_size, dma_addr_t * phys, void *metadata)
35367 {
35368 size_t len = nelem * elem_size;
35369@@ -777,6 +779,8 @@ static inline unsigned int flits_to_desc(unsigned int n)
35370 * be copied but there is no memory for the copy.
35371 */
35372 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35373+ unsigned int len, unsigned int drop_thres) __size_overflow(3);
35374+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35375 unsigned int len, unsigned int drop_thres)
35376 {
35377 struct sk_buff *skb = NULL;
35378diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
35379index 2dae795..73037d2 100644
35380--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
35381+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
35382@@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
35383 */
35384 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35385 size_t sw_size, dma_addr_t *phys, void *metadata,
35386+ size_t stat_size, int node) __size_overflow(2,4);
35387+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35388+ size_t sw_size, dma_addr_t *phys, void *metadata,
35389 size_t stat_size, int node)
35390 {
35391 size_t len = nelem * elem_size + stat_size;
35392diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35393index 0bd585b..d954ca5 100644
35394--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35395+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35396@@ -729,6 +729,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
35397 */
35398 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35399 size_t swsize, dma_addr_t *busaddrp, void *swringp,
35400+ size_t stat_size) __size_overflow(2,4);
35401+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35402+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
35403 size_t stat_size)
35404 {
35405 /*
35406diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
35407index 4d71f5a..8004440 100644
35408--- a/drivers/net/ethernet/dec/tulip/de4x5.c
35409+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
35410@@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35411 for (i=0; i<ETH_ALEN; i++) {
35412 tmp.addr[i] = dev->dev_addr[i];
35413 }
35414- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35415+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35416 break;
35417
35418 case DE4X5_SET_HWADDR: /* Set the hardware address */
35419@@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35420 spin_lock_irqsave(&lp->lock, flags);
35421 memcpy(&statbuf, &lp->pktStats, ioc->len);
35422 spin_unlock_irqrestore(&lp->lock, flags);
35423- if (copy_to_user(ioc->data, &statbuf, ioc->len))
35424+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35425 return -EFAULT;
35426 break;
35427 }
35428diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
35429index 14d5b61..1398636 100644
35430--- a/drivers/net/ethernet/dec/tulip/eeprom.c
35431+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
35432@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
35433 {NULL}};
35434
35435
35436-static const char *block_name[] __devinitdata = {
35437+static const char *block_name[] __devinitconst = {
35438 "21140 non-MII",
35439 "21140 MII PHY",
35440 "21142 Serial PHY",
35441diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
35442index 52da7b2..4ddfe1c 100644
35443--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
35444+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
35445@@ -236,7 +236,7 @@ struct pci_id_info {
35446 int drv_flags; /* Driver use, intended as capability flags. */
35447 };
35448
35449-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35450+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35451 { /* Sometime a Level-One switch card. */
35452 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
35453 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
35454diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
35455index b2dc2c8..2e09edb 100644
35456--- a/drivers/net/ethernet/dlink/dl2k.c
35457+++ b/drivers/net/ethernet/dlink/dl2k.c
35458@@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
35459 {
35460 int phy_addr;
35461 struct netdev_private *np = netdev_priv(dev);
35462- struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
35463-
35464- struct netdev_desc *desc;
35465- int i;
35466+ struct mii_ioctl_data *miidata = if_mii(rq);
35467
35468 phy_addr = np->phy_addr;
35469 switch (cmd) {
35470- case SIOCDEVPRIVATE:
35471+ case SIOCGMIIPHY:
35472+ miidata->phy_id = phy_addr;
35473 break;
35474-
35475- case SIOCDEVPRIVATE + 1:
35476- miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
35477+ case SIOCGMIIREG:
35478+ miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
35479 break;
35480- case SIOCDEVPRIVATE + 2:
35481- mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
35482+ case SIOCSMIIREG:
35483+ if (!capable(CAP_NET_ADMIN))
35484+ return -EPERM;
35485+ mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
35486 break;
35487- case SIOCDEVPRIVATE + 3:
35488- break;
35489- case SIOCDEVPRIVATE + 4:
35490- break;
35491- case SIOCDEVPRIVATE + 5:
35492- netif_stop_queue (dev);
35493- break;
35494- case SIOCDEVPRIVATE + 6:
35495- netif_wake_queue (dev);
35496- break;
35497- case SIOCDEVPRIVATE + 7:
35498- printk
35499- ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
35500- netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
35501- np->old_rx);
35502- break;
35503- case SIOCDEVPRIVATE + 8:
35504- printk("TX ring:\n");
35505- for (i = 0; i < TX_RING_SIZE; i++) {
35506- desc = &np->tx_ring[i];
35507- printk
35508- ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
35509- i,
35510- (u32) (np->tx_ring_dma + i * sizeof (*desc)),
35511- (u32)le64_to_cpu(desc->next_desc),
35512- (u32)le64_to_cpu(desc->status),
35513- (u32)(le64_to_cpu(desc->fraginfo) >> 32),
35514- (u32)le64_to_cpu(desc->fraginfo));
35515- printk ("\n");
35516- }
35517- printk ("\n");
35518- break;
35519-
35520 default:
35521 return -EOPNOTSUPP;
35522 }
35523diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
35524index ba0adca..30c2da3 100644
35525--- a/drivers/net/ethernet/dlink/dl2k.h
35526+++ b/drivers/net/ethernet/dlink/dl2k.h
35527@@ -365,13 +365,6 @@ struct ioctl_data {
35528 char *data;
35529 };
35530
35531-struct mii_data {
35532- __u16 reserved;
35533- __u16 reg_num;
35534- __u16 in_value;
35535- __u16 out_value;
35536-};
35537-
35538 /* The Rx and Tx buffer descriptors. */
35539 struct netdev_desc {
35540 __le64 next_desc;
35541diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
35542index 28a3a9b..d96cb63 100644
35543--- a/drivers/net/ethernet/dlink/sundance.c
35544+++ b/drivers/net/ethernet/dlink/sundance.c
35545@@ -218,7 +218,7 @@ enum {
35546 struct pci_id_info {
35547 const char *name;
35548 };
35549-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35550+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35551 {"D-Link DFE-550TX FAST Ethernet Adapter"},
35552 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
35553 {"D-Link DFE-580TX 4 port Server Adapter"},
35554diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
35555index e703d64..d62ecf9 100644
35556--- a/drivers/net/ethernet/emulex/benet/be_main.c
35557+++ b/drivers/net/ethernet/emulex/benet/be_main.c
35558@@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
35559
35560 if (wrapped)
35561 newacc += 65536;
35562- ACCESS_ONCE(*acc) = newacc;
35563+ ACCESS_ONCE_RW(*acc) = newacc;
35564 }
35565
35566 void be_parse_stats(struct be_adapter *adapter)
35567diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
35568index 47f85c3..82ab6c4 100644
35569--- a/drivers/net/ethernet/faraday/ftgmac100.c
35570+++ b/drivers/net/ethernet/faraday/ftgmac100.c
35571@@ -31,6 +31,8 @@
35572 #include <linux/netdevice.h>
35573 #include <linux/phy.h>
35574 #include <linux/platform_device.h>
35575+#include <linux/interrupt.h>
35576+#include <linux/irqreturn.h>
35577 #include <net/ip.h>
35578
35579 #include "ftgmac100.h"
35580diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
35581index bb336a0..4b472da 100644
35582--- a/drivers/net/ethernet/faraday/ftmac100.c
35583+++ b/drivers/net/ethernet/faraday/ftmac100.c
35584@@ -31,6 +31,8 @@
35585 #include <linux/module.h>
35586 #include <linux/netdevice.h>
35587 #include <linux/platform_device.h>
35588+#include <linux/interrupt.h>
35589+#include <linux/irqreturn.h>
35590
35591 #include "ftmac100.h"
35592
35593diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
35594index c82d444..0007fb4 100644
35595--- a/drivers/net/ethernet/fealnx.c
35596+++ b/drivers/net/ethernet/fealnx.c
35597@@ -150,7 +150,7 @@ struct chip_info {
35598 int flags;
35599 };
35600
35601-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
35602+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
35603 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35604 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
35605 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35606diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35607index e1159e5..e18684d 100644
35608--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35609+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35610@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
35611 {
35612 struct e1000_hw *hw = &adapter->hw;
35613 struct e1000_mac_info *mac = &hw->mac;
35614- struct e1000_mac_operations *func = &mac->ops;
35615+ e1000_mac_operations_no_const *func = &mac->ops;
35616
35617 /* Set media type */
35618 switch (adapter->pdev->device) {
35619diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
35620index a3e65fd..f451444 100644
35621--- a/drivers/net/ethernet/intel/e1000e/82571.c
35622+++ b/drivers/net/ethernet/intel/e1000e/82571.c
35623@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
35624 {
35625 struct e1000_hw *hw = &adapter->hw;
35626 struct e1000_mac_info *mac = &hw->mac;
35627- struct e1000_mac_operations *func = &mac->ops;
35628+ e1000_mac_operations_no_const *func = &mac->ops;
35629 u32 swsm = 0;
35630 u32 swsm2 = 0;
35631 bool force_clear_smbi = false;
35632diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
35633index 2967039..ca8c40c 100644
35634--- a/drivers/net/ethernet/intel/e1000e/hw.h
35635+++ b/drivers/net/ethernet/intel/e1000e/hw.h
35636@@ -778,6 +778,7 @@ struct e1000_mac_operations {
35637 void (*write_vfta)(struct e1000_hw *, u32, u32);
35638 s32 (*read_mac_addr)(struct e1000_hw *);
35639 };
35640+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35641
35642 /*
35643 * When to use various PHY register access functions:
35644@@ -818,6 +819,7 @@ struct e1000_phy_operations {
35645 void (*power_up)(struct e1000_hw *);
35646 void (*power_down)(struct e1000_hw *);
35647 };
35648+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35649
35650 /* Function pointers for the NVM. */
35651 struct e1000_nvm_operations {
35652@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
35653 s32 (*validate)(struct e1000_hw *);
35654 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
35655 };
35656+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35657
35658 struct e1000_mac_info {
35659- struct e1000_mac_operations ops;
35660+ e1000_mac_operations_no_const ops;
35661 u8 addr[ETH_ALEN];
35662 u8 perm_addr[ETH_ALEN];
35663
35664@@ -872,7 +875,7 @@ struct e1000_mac_info {
35665 };
35666
35667 struct e1000_phy_info {
35668- struct e1000_phy_operations ops;
35669+ e1000_phy_operations_no_const ops;
35670
35671 enum e1000_phy_type type;
35672
35673@@ -906,7 +909,7 @@ struct e1000_phy_info {
35674 };
35675
35676 struct e1000_nvm_info {
35677- struct e1000_nvm_operations ops;
35678+ e1000_nvm_operations_no_const ops;
35679
35680 enum e1000_nvm_type type;
35681 enum e1000_nvm_override override;
35682diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
35683index f67cbd3..cef9e3d 100644
35684--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
35685+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
35686@@ -314,6 +314,7 @@ struct e1000_mac_operations {
35687 s32 (*read_mac_addr)(struct e1000_hw *);
35688 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
35689 };
35690+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35691
35692 struct e1000_phy_operations {
35693 s32 (*acquire)(struct e1000_hw *);
35694@@ -330,6 +331,7 @@ struct e1000_phy_operations {
35695 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
35696 s32 (*write_reg)(struct e1000_hw *, u32, u16);
35697 };
35698+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35699
35700 struct e1000_nvm_operations {
35701 s32 (*acquire)(struct e1000_hw *);
35702@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
35703 s32 (*update)(struct e1000_hw *);
35704 s32 (*validate)(struct e1000_hw *);
35705 };
35706+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35707
35708 struct e1000_info {
35709 s32 (*get_invariants)(struct e1000_hw *);
35710@@ -350,7 +353,7 @@ struct e1000_info {
35711 extern const struct e1000_info e1000_82575_info;
35712
35713 struct e1000_mac_info {
35714- struct e1000_mac_operations ops;
35715+ e1000_mac_operations_no_const ops;
35716
35717 u8 addr[6];
35718 u8 perm_addr[6];
35719@@ -388,7 +391,7 @@ struct e1000_mac_info {
35720 };
35721
35722 struct e1000_phy_info {
35723- struct e1000_phy_operations ops;
35724+ e1000_phy_operations_no_const ops;
35725
35726 enum e1000_phy_type type;
35727
35728@@ -423,7 +426,7 @@ struct e1000_phy_info {
35729 };
35730
35731 struct e1000_nvm_info {
35732- struct e1000_nvm_operations ops;
35733+ e1000_nvm_operations_no_const ops;
35734 enum e1000_nvm_type type;
35735 enum e1000_nvm_override override;
35736
35737@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
35738 s32 (*check_for_ack)(struct e1000_hw *, u16);
35739 s32 (*check_for_rst)(struct e1000_hw *, u16);
35740 };
35741+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35742
35743 struct e1000_mbx_stats {
35744 u32 msgs_tx;
35745@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
35746 };
35747
35748 struct e1000_mbx_info {
35749- struct e1000_mbx_operations ops;
35750+ e1000_mbx_operations_no_const ops;
35751 struct e1000_mbx_stats stats;
35752 u32 timeout;
35753 u32 usec_delay;
35754diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
35755index 57db3c6..aa825fc 100644
35756--- a/drivers/net/ethernet/intel/igbvf/vf.h
35757+++ b/drivers/net/ethernet/intel/igbvf/vf.h
35758@@ -189,9 +189,10 @@ struct e1000_mac_operations {
35759 s32 (*read_mac_addr)(struct e1000_hw *);
35760 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35761 };
35762+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35763
35764 struct e1000_mac_info {
35765- struct e1000_mac_operations ops;
35766+ e1000_mac_operations_no_const ops;
35767 u8 addr[6];
35768 u8 perm_addr[6];
35769
35770@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35771 s32 (*check_for_ack)(struct e1000_hw *);
35772 s32 (*check_for_rst)(struct e1000_hw *);
35773 };
35774+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35775
35776 struct e1000_mbx_stats {
35777 u32 msgs_tx;
35778@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35779 };
35780
35781 struct e1000_mbx_info {
35782- struct e1000_mbx_operations ops;
35783+ e1000_mbx_operations_no_const ops;
35784 struct e1000_mbx_stats stats;
35785 u32 timeout;
35786 u32 usec_delay;
35787diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35788index 9b95bef..7e254ee 100644
35789--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35790+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35791@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
35792 s32 (*update_checksum)(struct ixgbe_hw *);
35793 u16 (*calc_checksum)(struct ixgbe_hw *);
35794 };
35795+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35796
35797 struct ixgbe_mac_operations {
35798 s32 (*init_hw)(struct ixgbe_hw *);
35799@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
35800 /* Manageability interface */
35801 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
35802 };
35803+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35804
35805 struct ixgbe_phy_operations {
35806 s32 (*identify)(struct ixgbe_hw *);
35807@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35808 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35809 s32 (*check_overtemp)(struct ixgbe_hw *);
35810 };
35811+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35812
35813 struct ixgbe_eeprom_info {
35814- struct ixgbe_eeprom_operations ops;
35815+ ixgbe_eeprom_operations_no_const ops;
35816 enum ixgbe_eeprom_type type;
35817 u32 semaphore_delay;
35818 u16 word_size;
35819@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35820
35821 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35822 struct ixgbe_mac_info {
35823- struct ixgbe_mac_operations ops;
35824+ ixgbe_mac_operations_no_const ops;
35825 enum ixgbe_mac_type type;
35826 u8 addr[ETH_ALEN];
35827 u8 perm_addr[ETH_ALEN];
35828@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35829 };
35830
35831 struct ixgbe_phy_info {
35832- struct ixgbe_phy_operations ops;
35833+ ixgbe_phy_operations_no_const ops;
35834 struct mdio_if_info mdio;
35835 enum ixgbe_phy_type type;
35836 u32 id;
35837@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35838 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35839 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35840 };
35841+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35842
35843 struct ixgbe_mbx_stats {
35844 u32 msgs_tx;
35845@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
35846 };
35847
35848 struct ixgbe_mbx_info {
35849- struct ixgbe_mbx_operations ops;
35850+ ixgbe_mbx_operations_no_const ops;
35851 struct ixgbe_mbx_stats stats;
35852 u32 timeout;
35853 u32 usec_delay;
35854diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35855index 25c951d..cc7cf33 100644
35856--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35857+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35858@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35859 s32 (*clear_vfta)(struct ixgbe_hw *);
35860 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35861 };
35862+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35863
35864 enum ixgbe_mac_type {
35865 ixgbe_mac_unknown = 0,
35866@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35867 };
35868
35869 struct ixgbe_mac_info {
35870- struct ixgbe_mac_operations ops;
35871+ ixgbe_mac_operations_no_const ops;
35872 u8 addr[6];
35873 u8 perm_addr[6];
35874
35875@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35876 s32 (*check_for_ack)(struct ixgbe_hw *);
35877 s32 (*check_for_rst)(struct ixgbe_hw *);
35878 };
35879+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35880
35881 struct ixgbe_mbx_stats {
35882 u32 msgs_tx;
35883@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35884 };
35885
35886 struct ixgbe_mbx_info {
35887- struct ixgbe_mbx_operations ops;
35888+ ixgbe_mbx_operations_no_const ops;
35889 struct ixgbe_mbx_stats stats;
35890 u32 timeout;
35891 u32 udelay;
35892diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35893index 8bf22b6..7f5baaa 100644
35894--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35895+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35896@@ -41,6 +41,7 @@
35897 #include <linux/slab.h>
35898 #include <linux/io-mapping.h>
35899 #include <linux/delay.h>
35900+#include <linux/sched.h>
35901
35902 #include <linux/mlx4/device.h>
35903 #include <linux/mlx4/doorbell.h>
35904diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35905index 5046a64..71ca936 100644
35906--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35907+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35908@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35909 void (*link_down)(struct __vxge_hw_device *devh);
35910 void (*crit_err)(struct __vxge_hw_device *devh,
35911 enum vxge_hw_event type, u64 ext_data);
35912-};
35913+} __no_const;
35914
35915 /*
35916 * struct __vxge_hw_blockpool_entry - Block private data structure
35917diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35918index 4a518a3..936b334 100644
35919--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35920+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35921@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35922 struct vxge_hw_mempool_dma *dma_object,
35923 u32 index,
35924 u32 is_last);
35925-};
35926+} __no_const;
35927
35928 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35929 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35930diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35931index bbacb37..d60887d 100644
35932--- a/drivers/net/ethernet/realtek/r8169.c
35933+++ b/drivers/net/ethernet/realtek/r8169.c
35934@@ -695,17 +695,17 @@ struct rtl8169_private {
35935 struct mdio_ops {
35936 void (*write)(void __iomem *, int, int);
35937 int (*read)(void __iomem *, int);
35938- } mdio_ops;
35939+ } __no_const mdio_ops;
35940
35941 struct pll_power_ops {
35942 void (*down)(struct rtl8169_private *);
35943 void (*up)(struct rtl8169_private *);
35944- } pll_power_ops;
35945+ } __no_const pll_power_ops;
35946
35947 struct jumbo_ops {
35948 void (*enable)(struct rtl8169_private *);
35949 void (*disable)(struct rtl8169_private *);
35950- } jumbo_ops;
35951+ } __no_const jumbo_ops;
35952
35953 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35954 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35955diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35956index 5b118cd..858b523 100644
35957--- a/drivers/net/ethernet/sis/sis190.c
35958+++ b/drivers/net/ethernet/sis/sis190.c
35959@@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35960 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35961 struct net_device *dev)
35962 {
35963- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35964+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35965 struct sis190_private *tp = netdev_priv(dev);
35966 struct pci_dev *isa_bridge;
35967 u8 reg, tmp8;
35968diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35969index c07cfe9..81cbf7e 100644
35970--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35971+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35972@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35973
35974 writel(value, ioaddr + MMC_CNTRL);
35975
35976- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35977- MMC_CNTRL, value);
35978+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35979+// MMC_CNTRL, value);
35980 }
35981
35982 /* To mask all all interrupts.*/
35983diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35984index dec5836..6d4db7d 100644
35985--- a/drivers/net/hyperv/hyperv_net.h
35986+++ b/drivers/net/hyperv/hyperv_net.h
35987@@ -97,7 +97,7 @@ struct rndis_device {
35988
35989 enum rndis_device_state state;
35990 bool link_state;
35991- atomic_t new_req_id;
35992+ atomic_unchecked_t new_req_id;
35993
35994 spinlock_t request_lock;
35995 struct list_head req_list;
35996diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35997index 133b7fb..d58c559 100644
35998--- a/drivers/net/hyperv/rndis_filter.c
35999+++ b/drivers/net/hyperv/rndis_filter.c
36000@@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36001 * template
36002 */
36003 set = &rndis_msg->msg.set_req;
36004- set->req_id = atomic_inc_return(&dev->new_req_id);
36005+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36006
36007 /* Add to the request list */
36008 spin_lock_irqsave(&dev->request_lock, flags);
36009@@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36010
36011 /* Setup the rndis set */
36012 halt = &request->request_msg.msg.halt_req;
36013- halt->req_id = atomic_inc_return(&dev->new_req_id);
36014+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36015
36016 /* Ignore return since this msg is optional. */
36017 rndis_filter_send_request(dev, request);
36018diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36019index 58dc117..f140c77 100644
36020--- a/drivers/net/macvtap.c
36021+++ b/drivers/net/macvtap.c
36022@@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
36023 }
36024 base = (unsigned long)from->iov_base + offset1;
36025 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
36026+ if (i + size >= MAX_SKB_FRAGS)
36027+ return -EFAULT;
36028 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
36029 if ((num_pages != size) ||
36030 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
36031diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
36032index 3ed983c..a1bb418 100644
36033--- a/drivers/net/ppp/ppp_generic.c
36034+++ b/drivers/net/ppp/ppp_generic.c
36035@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36036 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36037 struct ppp_stats stats;
36038 struct ppp_comp_stats cstats;
36039- char *vers;
36040
36041 switch (cmd) {
36042 case SIOCGPPPSTATS:
36043@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36044 break;
36045
36046 case SIOCGPPPVER:
36047- vers = PPP_VERSION;
36048- if (copy_to_user(addr, vers, strlen(vers) + 1))
36049+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36050 break;
36051 err = 0;
36052 break;
36053diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
36054index 515f122..41dd273 100644
36055--- a/drivers/net/tokenring/abyss.c
36056+++ b/drivers/net/tokenring/abyss.c
36057@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
36058
36059 static int __init abyss_init (void)
36060 {
36061- abyss_netdev_ops = tms380tr_netdev_ops;
36062+ pax_open_kernel();
36063+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36064
36065- abyss_netdev_ops.ndo_open = abyss_open;
36066- abyss_netdev_ops.ndo_stop = abyss_close;
36067+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
36068+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
36069+ pax_close_kernel();
36070
36071 return pci_register_driver(&abyss_driver);
36072 }
36073diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
36074index 6153cfd..cf69c1c 100644
36075--- a/drivers/net/tokenring/madgemc.c
36076+++ b/drivers/net/tokenring/madgemc.c
36077@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
36078
36079 static int __init madgemc_init (void)
36080 {
36081- madgemc_netdev_ops = tms380tr_netdev_ops;
36082- madgemc_netdev_ops.ndo_open = madgemc_open;
36083- madgemc_netdev_ops.ndo_stop = madgemc_close;
36084+ pax_open_kernel();
36085+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36086+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
36087+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
36088+ pax_close_kernel();
36089
36090 return mca_register_driver (&madgemc_driver);
36091 }
36092diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
36093index 8d362e6..f91cc52 100644
36094--- a/drivers/net/tokenring/proteon.c
36095+++ b/drivers/net/tokenring/proteon.c
36096@@ -353,9 +353,11 @@ static int __init proteon_init(void)
36097 struct platform_device *pdev;
36098 int i, num = 0, err = 0;
36099
36100- proteon_netdev_ops = tms380tr_netdev_ops;
36101- proteon_netdev_ops.ndo_open = proteon_open;
36102- proteon_netdev_ops.ndo_stop = tms380tr_close;
36103+ pax_open_kernel();
36104+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36105+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
36106+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
36107+ pax_close_kernel();
36108
36109 err = platform_driver_register(&proteon_driver);
36110 if (err)
36111diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
36112index 46db5c5..37c1536 100644
36113--- a/drivers/net/tokenring/skisa.c
36114+++ b/drivers/net/tokenring/skisa.c
36115@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
36116 struct platform_device *pdev;
36117 int i, num = 0, err = 0;
36118
36119- sk_isa_netdev_ops = tms380tr_netdev_ops;
36120- sk_isa_netdev_ops.ndo_open = sk_isa_open;
36121- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
36122+ pax_open_kernel();
36123+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
36124+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
36125+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
36126+ pax_close_kernel();
36127
36128 err = platform_driver_register(&sk_isa_driver);
36129 if (err)
36130diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
36131index e1324b4..e1b0041 100644
36132--- a/drivers/net/usb/hso.c
36133+++ b/drivers/net/usb/hso.c
36134@@ -71,7 +71,7 @@
36135 #include <asm/byteorder.h>
36136 #include <linux/serial_core.h>
36137 #include <linux/serial.h>
36138-
36139+#include <asm/local.h>
36140
36141 #define MOD_AUTHOR "Option Wireless"
36142 #define MOD_DESCRIPTION "USB High Speed Option driver"
36143@@ -257,7 +257,7 @@ struct hso_serial {
36144
36145 /* from usb_serial_port */
36146 struct tty_struct *tty;
36147- int open_count;
36148+ local_t open_count;
36149 spinlock_t serial_lock;
36150
36151 int (*write_data) (struct hso_serial *serial);
36152@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36153 struct urb *urb;
36154
36155 urb = serial->rx_urb[0];
36156- if (serial->open_count > 0) {
36157+ if (local_read(&serial->open_count) > 0) {
36158 count = put_rxbuf_data(urb, serial);
36159 if (count == -1)
36160 return;
36161@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36162 DUMP1(urb->transfer_buffer, urb->actual_length);
36163
36164 /* Anyone listening? */
36165- if (serial->open_count == 0)
36166+ if (local_read(&serial->open_count) == 0)
36167 return;
36168
36169 if (status == 0) {
36170@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36171 spin_unlock_irq(&serial->serial_lock);
36172
36173 /* check for port already opened, if not set the termios */
36174- serial->open_count++;
36175- if (serial->open_count == 1) {
36176+ if (local_inc_return(&serial->open_count) == 1) {
36177 serial->rx_state = RX_IDLE;
36178 /* Force default termio settings */
36179 _hso_serial_set_termios(tty, NULL);
36180@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36181 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36182 if (result) {
36183 hso_stop_serial_device(serial->parent);
36184- serial->open_count--;
36185+ local_dec(&serial->open_count);
36186 kref_put(&serial->parent->ref, hso_serial_ref_free);
36187 }
36188 } else {
36189@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36190
36191 /* reset the rts and dtr */
36192 /* do the actual close */
36193- serial->open_count--;
36194+ local_dec(&serial->open_count);
36195
36196- if (serial->open_count <= 0) {
36197- serial->open_count = 0;
36198+ if (local_read(&serial->open_count) <= 0) {
36199+ local_set(&serial->open_count, 0);
36200 spin_lock_irq(&serial->serial_lock);
36201 if (serial->tty == tty) {
36202 serial->tty->driver_data = NULL;
36203@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36204
36205 /* the actual setup */
36206 spin_lock_irqsave(&serial->serial_lock, flags);
36207- if (serial->open_count)
36208+ if (local_read(&serial->open_count))
36209 _hso_serial_set_termios(tty, old);
36210 else
36211 tty->termios = old;
36212@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
36213 D1("Pending read interrupt on port %d\n", i);
36214 spin_lock(&serial->serial_lock);
36215 if (serial->rx_state == RX_IDLE &&
36216- serial->open_count > 0) {
36217+ local_read(&serial->open_count) > 0) {
36218 /* Setup and send a ctrl req read on
36219 * port i */
36220 if (!serial->rx_urb_filled[0]) {
36221@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
36222 /* Start all serial ports */
36223 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36224 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36225- if (dev2ser(serial_table[i])->open_count) {
36226+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
36227 result =
36228 hso_start_serial_device(serial_table[i], GFP_NOIO);
36229 hso_kick_transmit(dev2ser(serial_table[i]));
36230diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
36231index efc0111..79c8f5b 100644
36232--- a/drivers/net/wireless/ath/ath.h
36233+++ b/drivers/net/wireless/ath/ath.h
36234@@ -119,6 +119,7 @@ struct ath_ops {
36235 void (*write_flush) (void *);
36236 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
36237 };
36238+typedef struct ath_ops __no_const ath_ops_no_const;
36239
36240 struct ath_common;
36241 struct ath_bus_ops;
36242diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
36243index 8c5ce8b..abf101b 100644
36244--- a/drivers/net/wireless/ath/ath5k/debug.c
36245+++ b/drivers/net/wireless/ath/ath5k/debug.c
36246@@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36247
36248 static ssize_t write_file_debug(struct file *file,
36249 const char __user *userbuf,
36250+ size_t count, loff_t *ppos) __size_overflow(3);
36251+static ssize_t write_file_debug(struct file *file,
36252+ const char __user *userbuf,
36253 size_t count, loff_t *ppos)
36254 {
36255 struct ath5k_hw *ah = file->private_data;
36256diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36257index 7b6417b..ab5db98 100644
36258--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36259+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36260@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36261 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36262 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36263
36264- ACCESS_ONCE(ads->ds_link) = i->link;
36265- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36266+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
36267+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36268
36269 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36270 ctl6 = SM(i->keytype, AR_EncrType);
36271@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36272
36273 if ((i->is_first || i->is_last) &&
36274 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36275- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36276+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36277 | set11nTries(i->rates, 1)
36278 | set11nTries(i->rates, 2)
36279 | set11nTries(i->rates, 3)
36280 | (i->dur_update ? AR_DurUpdateEna : 0)
36281 | SM(0, AR_BurstDur);
36282
36283- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36284+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36285 | set11nRate(i->rates, 1)
36286 | set11nRate(i->rates, 2)
36287 | set11nRate(i->rates, 3);
36288 } else {
36289- ACCESS_ONCE(ads->ds_ctl2) = 0;
36290- ACCESS_ONCE(ads->ds_ctl3) = 0;
36291+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36292+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36293 }
36294
36295 if (!i->is_first) {
36296- ACCESS_ONCE(ads->ds_ctl0) = 0;
36297- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36298- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36299+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36300+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36301+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36302 return;
36303 }
36304
36305@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36306 break;
36307 }
36308
36309- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36310+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36311 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36312 | SM(i->txpower, AR_XmitPower)
36313 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36314@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36315 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36316 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
36317
36318- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36319- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36320+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36321+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36322
36323 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36324 return;
36325
36326- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36327+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36328 | set11nPktDurRTSCTS(i->rates, 1);
36329
36330- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36331+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36332 | set11nPktDurRTSCTS(i->rates, 3);
36333
36334- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36335+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36336 | set11nRateFlags(i->rates, 1)
36337 | set11nRateFlags(i->rates, 2)
36338 | set11nRateFlags(i->rates, 3)
36339diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36340index 09b8c9d..905339e 100644
36341--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36342+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36343@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36344 (i->qcu << AR_TxQcuNum_S) | 0x17;
36345
36346 checksum += val;
36347- ACCESS_ONCE(ads->info) = val;
36348+ ACCESS_ONCE_RW(ads->info) = val;
36349
36350 checksum += i->link;
36351- ACCESS_ONCE(ads->link) = i->link;
36352+ ACCESS_ONCE_RW(ads->link) = i->link;
36353
36354 checksum += i->buf_addr[0];
36355- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
36356+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
36357 checksum += i->buf_addr[1];
36358- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
36359+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
36360 checksum += i->buf_addr[2];
36361- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36362+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36363 checksum += i->buf_addr[3];
36364- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36365+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36366
36367 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36368- ACCESS_ONCE(ads->ctl3) = val;
36369+ ACCESS_ONCE_RW(ads->ctl3) = val;
36370 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36371- ACCESS_ONCE(ads->ctl5) = val;
36372+ ACCESS_ONCE_RW(ads->ctl5) = val;
36373 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36374- ACCESS_ONCE(ads->ctl7) = val;
36375+ ACCESS_ONCE_RW(ads->ctl7) = val;
36376 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36377- ACCESS_ONCE(ads->ctl9) = val;
36378+ ACCESS_ONCE_RW(ads->ctl9) = val;
36379
36380 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36381- ACCESS_ONCE(ads->ctl10) = checksum;
36382+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
36383
36384 if (i->is_first || i->is_last) {
36385- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36386+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36387 | set11nTries(i->rates, 1)
36388 | set11nTries(i->rates, 2)
36389 | set11nTries(i->rates, 3)
36390 | (i->dur_update ? AR_DurUpdateEna : 0)
36391 | SM(0, AR_BurstDur);
36392
36393- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36394+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36395 | set11nRate(i->rates, 1)
36396 | set11nRate(i->rates, 2)
36397 | set11nRate(i->rates, 3);
36398 } else {
36399- ACCESS_ONCE(ads->ctl13) = 0;
36400- ACCESS_ONCE(ads->ctl14) = 0;
36401+ ACCESS_ONCE_RW(ads->ctl13) = 0;
36402+ ACCESS_ONCE_RW(ads->ctl14) = 0;
36403 }
36404
36405 ads->ctl20 = 0;
36406@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36407
36408 ctl17 = SM(i->keytype, AR_EncrType);
36409 if (!i->is_first) {
36410- ACCESS_ONCE(ads->ctl11) = 0;
36411- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36412- ACCESS_ONCE(ads->ctl15) = 0;
36413- ACCESS_ONCE(ads->ctl16) = 0;
36414- ACCESS_ONCE(ads->ctl17) = ctl17;
36415- ACCESS_ONCE(ads->ctl18) = 0;
36416- ACCESS_ONCE(ads->ctl19) = 0;
36417+ ACCESS_ONCE_RW(ads->ctl11) = 0;
36418+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36419+ ACCESS_ONCE_RW(ads->ctl15) = 0;
36420+ ACCESS_ONCE_RW(ads->ctl16) = 0;
36421+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36422+ ACCESS_ONCE_RW(ads->ctl18) = 0;
36423+ ACCESS_ONCE_RW(ads->ctl19) = 0;
36424 return;
36425 }
36426
36427- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36428+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36429 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36430 | SM(i->txpower, AR_XmitPower)
36431 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36432@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36433 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36434 ctl12 |= SM(val, AR_PAPRDChainMask);
36435
36436- ACCESS_ONCE(ads->ctl12) = ctl12;
36437- ACCESS_ONCE(ads->ctl17) = ctl17;
36438+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36439+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36440
36441- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36442+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36443 | set11nPktDurRTSCTS(i->rates, 1);
36444
36445- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36446+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36447 | set11nPktDurRTSCTS(i->rates, 3);
36448
36449- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36450+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36451 | set11nRateFlags(i->rates, 1)
36452 | set11nRateFlags(i->rates, 2)
36453 | set11nRateFlags(i->rates, 3)
36454 | SM(i->rtscts_rate, AR_RTSCTSRate);
36455
36456- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36457+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36458 }
36459
36460 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
36461diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
36462index 68d972b..1d9205b 100644
36463--- a/drivers/net/wireless/ath/ath9k/debug.c
36464+++ b/drivers/net/wireless/ath/ath9k/debug.c
36465@@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36466 }
36467
36468 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36469+ size_t count, loff_t *ppos) __size_overflow(3);
36470+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36471 size_t count, loff_t *ppos)
36472 {
36473 struct ath_softc *sc = file->private_data;
36474diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36475index d3ff33c..c98bcda 100644
36476--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36477+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36478@@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36479 }
36480
36481 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36482+ size_t count, loff_t *ppos) __size_overflow(3);
36483+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36484 size_t count, loff_t *ppos)
36485 {
36486 struct ath9k_htc_priv *priv = file->private_data;
36487diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
36488index c8261d4..8d88929 100644
36489--- a/drivers/net/wireless/ath/ath9k/hw.h
36490+++ b/drivers/net/wireless/ath/ath9k/hw.h
36491@@ -773,7 +773,7 @@ struct ath_hw_private_ops {
36492
36493 /* ANI */
36494 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36495-};
36496+} __no_const;
36497
36498 /**
36499 * struct ath_hw_ops - callbacks used by hardware code and driver code
36500@@ -803,7 +803,7 @@ struct ath_hw_ops {
36501 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36502 struct ath_hw_antcomb_conf *antconf);
36503
36504-};
36505+} __no_const;
36506
36507 struct ath_nf_limits {
36508 s16 max;
36509@@ -823,7 +823,7 @@ enum ath_cal_list {
36510 #define AH_FASTCC 0x4
36511
36512 struct ath_hw {
36513- struct ath_ops reg_ops;
36514+ ath_ops_no_const reg_ops;
36515
36516 struct ieee80211_hw *hw;
36517 struct ath_common common;
36518diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36519index af00e2c..ab04d34 100644
36520--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36521+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36522@@ -545,7 +545,7 @@ struct phy_func_ptr {
36523 void (*carrsuppr)(struct brcms_phy *);
36524 s32 (*rxsigpwr)(struct brcms_phy *, s32);
36525 void (*detach)(struct brcms_phy *);
36526-};
36527+} __no_const;
36528
36529 struct brcms_phy {
36530 struct brcms_phy_pub pubpi_ro;
36531diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
36532index a2ec369..36fdf14 100644
36533--- a/drivers/net/wireless/iwlegacy/3945-mac.c
36534+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
36535@@ -3646,7 +3646,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
36536 */
36537 if (il3945_mod_params.disable_hw_scan) {
36538 D_INFO("Disabling hw_scan\n");
36539- il3945_hw_ops.hw_scan = NULL;
36540+ pax_open_kernel();
36541+ *(void **)&il3945_hw_ops.hw_scan = NULL;
36542+ pax_close_kernel();
36543 }
36544
36545 D_INFO("*** LOAD DRIVER ***\n");
36546diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
36547index f8fc239..8cade22 100644
36548--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
36549+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
36550@@ -86,8 +86,8 @@ do { \
36551 } while (0)
36552
36553 #else
36554-#define IWL_DEBUG(m, level, fmt, args...)
36555-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
36556+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
36557+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
36558 #define iwl_print_hex_dump(m, level, p, len)
36559 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
36560 do { \
36561diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
36562index 4b9e730..7603659 100644
36563--- a/drivers/net/wireless/mac80211_hwsim.c
36564+++ b/drivers/net/wireless/mac80211_hwsim.c
36565@@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
36566 return -EINVAL;
36567
36568 if (fake_hw_scan) {
36569- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36570- mac80211_hwsim_ops.sw_scan_start = NULL;
36571- mac80211_hwsim_ops.sw_scan_complete = NULL;
36572+ pax_open_kernel();
36573+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36574+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
36575+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
36576+ pax_close_kernel();
36577 }
36578
36579 spin_lock_init(&hwsim_radio_lock);
36580diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
36581index 3186aa4..b35b09f 100644
36582--- a/drivers/net/wireless/mwifiex/main.h
36583+++ b/drivers/net/wireless/mwifiex/main.h
36584@@ -536,7 +536,7 @@ struct mwifiex_if_ops {
36585 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
36586 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
36587 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
36588-};
36589+} __no_const;
36590
36591 struct mwifiex_adapter {
36592 u8 iface_type;
36593diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
36594index a330c69..a81540f 100644
36595--- a/drivers/net/wireless/rndis_wlan.c
36596+++ b/drivers/net/wireless/rndis_wlan.c
36597@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
36598
36599 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
36600
36601- if (rts_threshold < 0 || rts_threshold > 2347)
36602+ if (rts_threshold > 2347)
36603 rts_threshold = 2347;
36604
36605 tmp = cpu_to_le32(rts_threshold);
36606diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
36607index a77f1bb..c608b2b 100644
36608--- a/drivers/net/wireless/wl1251/wl1251.h
36609+++ b/drivers/net/wireless/wl1251/wl1251.h
36610@@ -266,7 +266,7 @@ struct wl1251_if_operations {
36611 void (*reset)(struct wl1251 *wl);
36612 void (*enable_irq)(struct wl1251 *wl);
36613 void (*disable_irq)(struct wl1251 *wl);
36614-};
36615+} __no_const;
36616
36617 struct wl1251 {
36618 struct ieee80211_hw *hw;
36619diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
36620index f34b5b2..b5abb9f 100644
36621--- a/drivers/oprofile/buffer_sync.c
36622+++ b/drivers/oprofile/buffer_sync.c
36623@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
36624 if (cookie == NO_COOKIE)
36625 offset = pc;
36626 if (cookie == INVALID_COOKIE) {
36627- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36628+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36629 offset = pc;
36630 }
36631 if (cookie != last_cookie) {
36632@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
36633 /* add userspace sample */
36634
36635 if (!mm) {
36636- atomic_inc(&oprofile_stats.sample_lost_no_mm);
36637+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36638 return 0;
36639 }
36640
36641 cookie = lookup_dcookie(mm, s->eip, &offset);
36642
36643 if (cookie == INVALID_COOKIE) {
36644- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36645+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36646 return 0;
36647 }
36648
36649@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
36650 /* ignore backtraces if failed to add a sample */
36651 if (state == sb_bt_start) {
36652 state = sb_bt_ignore;
36653- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36654+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36655 }
36656 }
36657 release_mm(mm);
36658diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
36659index c0cc4e7..44d4e54 100644
36660--- a/drivers/oprofile/event_buffer.c
36661+++ b/drivers/oprofile/event_buffer.c
36662@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
36663 }
36664
36665 if (buffer_pos == buffer_size) {
36666- atomic_inc(&oprofile_stats.event_lost_overflow);
36667+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36668 return;
36669 }
36670
36671diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
36672index ed2c3ec..deda85a 100644
36673--- a/drivers/oprofile/oprof.c
36674+++ b/drivers/oprofile/oprof.c
36675@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
36676 if (oprofile_ops.switch_events())
36677 return;
36678
36679- atomic_inc(&oprofile_stats.multiplex_counter);
36680+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36681 start_switch_worker();
36682 }
36683
36684diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
36685index 84a208d..f07d177 100644
36686--- a/drivers/oprofile/oprofile_files.c
36687+++ b/drivers/oprofile/oprofile_files.c
36688@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
36689
36690
36691 static ssize_t timeout_write(struct file *file, char const __user *buf,
36692+ size_t count, loff_t *offset) __size_overflow(3);
36693+static ssize_t timeout_write(struct file *file, char const __user *buf,
36694 size_t count, loff_t *offset)
36695 {
36696 unsigned long val;
36697@@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
36698 }
36699
36700
36701+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36702 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36703 {
36704 unsigned long val;
36705@@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = {
36706 };
36707
36708
36709+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36710 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
36711 {
36712 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
36713 }
36714
36715
36716+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36717 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36718 {
36719 unsigned long val;
36720diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
36721index 917d28e..d62d981 100644
36722--- a/drivers/oprofile/oprofile_stats.c
36723+++ b/drivers/oprofile/oprofile_stats.c
36724@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36725 cpu_buf->sample_invalid_eip = 0;
36726 }
36727
36728- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36729- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36730- atomic_set(&oprofile_stats.event_lost_overflow, 0);
36731- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36732- atomic_set(&oprofile_stats.multiplex_counter, 0);
36733+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36734+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36735+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36736+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36737+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36738 }
36739
36740
36741diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
36742index 38b6fc0..b5cbfce 100644
36743--- a/drivers/oprofile/oprofile_stats.h
36744+++ b/drivers/oprofile/oprofile_stats.h
36745@@ -13,11 +13,11 @@
36746 #include <linux/atomic.h>
36747
36748 struct oprofile_stat_struct {
36749- atomic_t sample_lost_no_mm;
36750- atomic_t sample_lost_no_mapping;
36751- atomic_t bt_lost_no_mapping;
36752- atomic_t event_lost_overflow;
36753- atomic_t multiplex_counter;
36754+ atomic_unchecked_t sample_lost_no_mm;
36755+ atomic_unchecked_t sample_lost_no_mapping;
36756+ atomic_unchecked_t bt_lost_no_mapping;
36757+ atomic_unchecked_t event_lost_overflow;
36758+ atomic_unchecked_t multiplex_counter;
36759 };
36760
36761 extern struct oprofile_stat_struct oprofile_stats;
36762diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
36763index 2f0aa0f..d5246c3 100644
36764--- a/drivers/oprofile/oprofilefs.c
36765+++ b/drivers/oprofile/oprofilefs.c
36766@@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
36767 }
36768
36769
36770+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36771 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36772 {
36773 unsigned long value;
36774@@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = {
36775
36776
36777 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36778- char const *name, atomic_t *val)
36779+ char const *name, atomic_unchecked_t *val)
36780 {
36781 return __oprofilefs_create_file(sb, root, name,
36782 &atomic_ro_fops, 0444, val);
36783diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36784index 3f56bc0..707d642 100644
36785--- a/drivers/parport/procfs.c
36786+++ b/drivers/parport/procfs.c
36787@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36788
36789 *ppos += len;
36790
36791- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36792+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36793 }
36794
36795 #ifdef CONFIG_PARPORT_1284
36796@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36797
36798 *ppos += len;
36799
36800- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36801+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36802 }
36803 #endif /* IEEE1284.3 support. */
36804
36805diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36806index 9fff878..ad0ad53 100644
36807--- a/drivers/pci/hotplug/cpci_hotplug.h
36808+++ b/drivers/pci/hotplug/cpci_hotplug.h
36809@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36810 int (*hardware_test) (struct slot* slot, u32 value);
36811 u8 (*get_power) (struct slot* slot);
36812 int (*set_power) (struct slot* slot, int value);
36813-};
36814+} __no_const;
36815
36816 struct cpci_hp_controller {
36817 unsigned int irq;
36818diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36819index 76ba8a1..20ca857 100644
36820--- a/drivers/pci/hotplug/cpqphp_nvram.c
36821+++ b/drivers/pci/hotplug/cpqphp_nvram.c
36822@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36823
36824 void compaq_nvram_init (void __iomem *rom_start)
36825 {
36826+
36827+#ifndef CONFIG_PAX_KERNEXEC
36828 if (rom_start) {
36829 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36830 }
36831+#endif
36832+
36833 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36834
36835 /* initialize our int15 lock */
36836diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36837index 2275162..95f1a92 100644
36838--- a/drivers/pci/pcie/aspm.c
36839+++ b/drivers/pci/pcie/aspm.c
36840@@ -27,9 +27,9 @@
36841 #define MODULE_PARAM_PREFIX "pcie_aspm."
36842
36843 /* Note: those are not register definitions */
36844-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36845-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36846-#define ASPM_STATE_L1 (4) /* L1 state */
36847+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36848+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36849+#define ASPM_STATE_L1 (4U) /* L1 state */
36850 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36851 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36852
36853diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36854index 71eac9c..2de27ef 100644
36855--- a/drivers/pci/probe.c
36856+++ b/drivers/pci/probe.c
36857@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36858 u32 l, sz, mask;
36859 u16 orig_cmd;
36860
36861- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36862+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36863
36864 if (!dev->mmio_always_on) {
36865 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36866diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36867index 27911b5..5b6db88 100644
36868--- a/drivers/pci/proc.c
36869+++ b/drivers/pci/proc.c
36870@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36871 static int __init pci_proc_init(void)
36872 {
36873 struct pci_dev *dev = NULL;
36874+
36875+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36876+#ifdef CONFIG_GRKERNSEC_PROC_USER
36877+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36878+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36879+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36880+#endif
36881+#else
36882 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36883+#endif
36884 proc_create("devices", 0, proc_bus_pci_dir,
36885 &proc_bus_pci_dev_operations);
36886 proc_initialized = 1;
36887diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
36888index 6f966d6..68e18ed 100644
36889--- a/drivers/platform/x86/asus_acpi.c
36890+++ b/drivers/platform/x86/asus_acpi.c
36891@@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
36892 }
36893
36894 static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36895+ size_t count, loff_t *pos) __size_overflow(3);
36896+static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36897 size_t count, loff_t *pos)
36898 {
36899 int rv, value;
36900diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36901index ea0c607..58c4628 100644
36902--- a/drivers/platform/x86/thinkpad_acpi.c
36903+++ b/drivers/platform/x86/thinkpad_acpi.c
36904@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36905 return 0;
36906 }
36907
36908-void static hotkey_mask_warn_incomplete_mask(void)
36909+static void hotkey_mask_warn_incomplete_mask(void)
36910 {
36911 /* log only what the user can fix... */
36912 const u32 wantedmask = hotkey_driver_mask &
36913@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36914 }
36915 }
36916
36917-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36918- struct tp_nvram_state *newn,
36919- const u32 event_mask)
36920-{
36921-
36922 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36923 do { \
36924 if ((event_mask & (1 << __scancode)) && \
36925@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36926 tpacpi_hotkey_send_key(__scancode); \
36927 } while (0)
36928
36929- void issue_volchange(const unsigned int oldvol,
36930- const unsigned int newvol)
36931- {
36932- unsigned int i = oldvol;
36933+static void issue_volchange(const unsigned int oldvol,
36934+ const unsigned int newvol,
36935+ const u32 event_mask)
36936+{
36937+ unsigned int i = oldvol;
36938
36939- while (i > newvol) {
36940- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36941- i--;
36942- }
36943- while (i < newvol) {
36944- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36945- i++;
36946- }
36947+ while (i > newvol) {
36948+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36949+ i--;
36950 }
36951+ while (i < newvol) {
36952+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36953+ i++;
36954+ }
36955+}
36956
36957- void issue_brightnesschange(const unsigned int oldbrt,
36958- const unsigned int newbrt)
36959- {
36960- unsigned int i = oldbrt;
36961+static void issue_brightnesschange(const unsigned int oldbrt,
36962+ const unsigned int newbrt,
36963+ const u32 event_mask)
36964+{
36965+ unsigned int i = oldbrt;
36966
36967- while (i > newbrt) {
36968- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36969- i--;
36970- }
36971- while (i < newbrt) {
36972- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36973- i++;
36974- }
36975+ while (i > newbrt) {
36976+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36977+ i--;
36978+ }
36979+ while (i < newbrt) {
36980+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36981+ i++;
36982 }
36983+}
36984
36985+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36986+ struct tp_nvram_state *newn,
36987+ const u32 event_mask)
36988+{
36989 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36990 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36991 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36992@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36993 oldn->volume_level != newn->volume_level) {
36994 /* recently muted, or repeated mute keypress, or
36995 * multiple presses ending in mute */
36996- issue_volchange(oldn->volume_level, newn->volume_level);
36997+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36998 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36999 }
37000 } else {
37001@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37002 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37003 }
37004 if (oldn->volume_level != newn->volume_level) {
37005- issue_volchange(oldn->volume_level, newn->volume_level);
37006+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37007 } else if (oldn->volume_toggle != newn->volume_toggle) {
37008 /* repeated vol up/down keypress at end of scale ? */
37009 if (newn->volume_level == 0)
37010@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37011 /* handle brightness */
37012 if (oldn->brightness_level != newn->brightness_level) {
37013 issue_brightnesschange(oldn->brightness_level,
37014- newn->brightness_level);
37015+ newn->brightness_level,
37016+ event_mask);
37017 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37018 /* repeated key presses that didn't change state */
37019 if (newn->brightness_level == 0)
37020@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37021 && !tp_features.bright_unkfw)
37022 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37023 }
37024+}
37025
37026 #undef TPACPI_COMPARE_KEY
37027 #undef TPACPI_MAY_SEND_KEY
37028-}
37029
37030 /*
37031 * Polling driver
37032diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
37033index dcdc1f4..85cee16 100644
37034--- a/drivers/platform/x86/toshiba_acpi.c
37035+++ b/drivers/platform/x86/toshiba_acpi.c
37036@@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd)
37037 }
37038
37039 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
37040+ size_t count, loff_t *pos) __size_overflow(3);
37041+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
37042 size_t count, loff_t *pos)
37043 {
37044 struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
37045diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37046index b859d16..5cc6b1a 100644
37047--- a/drivers/pnp/pnpbios/bioscalls.c
37048+++ b/drivers/pnp/pnpbios/bioscalls.c
37049@@ -59,7 +59,7 @@ do { \
37050 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37051 } while(0)
37052
37053-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37054+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37055 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37056
37057 /*
37058@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37059
37060 cpu = get_cpu();
37061 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37062+
37063+ pax_open_kernel();
37064 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
37065+ pax_close_kernel();
37066
37067 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37068 spin_lock_irqsave(&pnp_bios_lock, flags);
37069@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37070 :"memory");
37071 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37072
37073+ pax_open_kernel();
37074 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
37075+ pax_close_kernel();
37076+
37077 put_cpu();
37078
37079 /* If we get here and this is set then the PnP BIOS faulted on us. */
37080@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37081 return status;
37082 }
37083
37084-void pnpbios_calls_init(union pnp_bios_install_struct *header)
37085+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37086 {
37087 int i;
37088
37089@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37090 pnp_bios_callpoint.offset = header->fields.pm16offset;
37091 pnp_bios_callpoint.segment = PNP_CS16;
37092
37093+ pax_open_kernel();
37094+
37095 for_each_possible_cpu(i) {
37096 struct desc_struct *gdt = get_cpu_gdt_table(i);
37097 if (!gdt)
37098@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37099 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37100 (unsigned long)__va(header->fields.pm16dseg));
37101 }
37102+
37103+ pax_close_kernel();
37104 }
37105diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37106index b0ecacb..7c9da2e 100644
37107--- a/drivers/pnp/resource.c
37108+++ b/drivers/pnp/resource.c
37109@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37110 return 1;
37111
37112 /* check if the resource is valid */
37113- if (*irq < 0 || *irq > 15)
37114+ if (*irq > 15)
37115 return 0;
37116
37117 /* check if the resource is reserved */
37118@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37119 return 1;
37120
37121 /* check if the resource is valid */
37122- if (*dma < 0 || *dma == 4 || *dma > 7)
37123+ if (*dma == 4 || *dma > 7)
37124 return 0;
37125
37126 /* check if the resource is reserved */
37127diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
37128index 1ed6ea0..77c0bd2 100644
37129--- a/drivers/power/bq27x00_battery.c
37130+++ b/drivers/power/bq27x00_battery.c
37131@@ -72,7 +72,7 @@
37132 struct bq27x00_device_info;
37133 struct bq27x00_access_methods {
37134 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
37135-};
37136+} __no_const;
37137
37138 enum bq27x00_chip { BQ27000, BQ27500 };
37139
37140diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
37141index a838e66..a9e1665 100644
37142--- a/drivers/regulator/max8660.c
37143+++ b/drivers/regulator/max8660.c
37144@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37145 max8660->shadow_regs[MAX8660_OVER1] = 5;
37146 } else {
37147 /* Otherwise devices can be toggled via software */
37148- max8660_dcdc_ops.enable = max8660_dcdc_enable;
37149- max8660_dcdc_ops.disable = max8660_dcdc_disable;
37150+ pax_open_kernel();
37151+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37152+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37153+ pax_close_kernel();
37154 }
37155
37156 /*
37157diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
37158index e8cfc99..072aee2 100644
37159--- a/drivers/regulator/mc13892-regulator.c
37160+++ b/drivers/regulator/mc13892-regulator.c
37161@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37162 }
37163 mc13xxx_unlock(mc13892);
37164
37165- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37166+ pax_open_kernel();
37167+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37168 = mc13892_vcam_set_mode;
37169- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37170+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37171 = mc13892_vcam_get_mode;
37172+ pax_close_kernel();
37173
37174 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
37175 ARRAY_SIZE(mc13892_regulators));
37176diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
37177index cace6d3..f623fda 100644
37178--- a/drivers/rtc/rtc-dev.c
37179+++ b/drivers/rtc/rtc-dev.c
37180@@ -14,6 +14,7 @@
37181 #include <linux/module.h>
37182 #include <linux/rtc.h>
37183 #include <linux/sched.h>
37184+#include <linux/grsecurity.h>
37185 #include "rtc-core.h"
37186
37187 static dev_t rtc_devt;
37188@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
37189 if (copy_from_user(&tm, uarg, sizeof(tm)))
37190 return -EFAULT;
37191
37192+ gr_log_timechange();
37193+
37194 return rtc_set_time(rtc, &tm);
37195
37196 case RTC_PIE_ON:
37197diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
37198index ffb5878..e6d785c 100644
37199--- a/drivers/scsi/aacraid/aacraid.h
37200+++ b/drivers/scsi/aacraid/aacraid.h
37201@@ -492,7 +492,7 @@ struct adapter_ops
37202 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
37203 /* Administrative operations */
37204 int (*adapter_comm)(struct aac_dev * dev, int comm);
37205-};
37206+} __no_const;
37207
37208 /*
37209 * Define which interrupt handler needs to be installed
37210diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
37211index 705e13e..91c873c 100644
37212--- a/drivers/scsi/aacraid/linit.c
37213+++ b/drivers/scsi/aacraid/linit.c
37214@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
37215 #elif defined(__devinitconst)
37216 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
37217 #else
37218-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
37219+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
37220 #endif
37221 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
37222 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
37223diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
37224index d5ff142..49c0ebb 100644
37225--- a/drivers/scsi/aic94xx/aic94xx_init.c
37226+++ b/drivers/scsi/aic94xx/aic94xx_init.c
37227@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
37228 .lldd_control_phy = asd_control_phy,
37229 };
37230
37231-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
37232+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
37233 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
37234 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
37235 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
37236diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
37237index a796de9..1ef20e1 100644
37238--- a/drivers/scsi/bfa/bfa.h
37239+++ b/drivers/scsi/bfa/bfa.h
37240@@ -196,7 +196,7 @@ struct bfa_hwif_s {
37241 u32 *end);
37242 int cpe_vec_q0;
37243 int rme_vec_q0;
37244-};
37245+} __no_const;
37246 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
37247
37248 struct bfa_faa_cbfn_s {
37249diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
37250index f0f80e2..8ec946b 100644
37251--- a/drivers/scsi/bfa/bfa_fcpim.c
37252+++ b/drivers/scsi/bfa/bfa_fcpim.c
37253@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
37254
37255 bfa_iotag_attach(fcp);
37256
37257- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
37258+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
37259 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
37260 (fcp->num_itns * sizeof(struct bfa_itn_s));
37261 memset(fcp->itn_arr, 0,
37262@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37263 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
37264 {
37265 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
37266- struct bfa_itn_s *itn;
37267+ bfa_itn_s_no_const *itn;
37268
37269 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
37270 itn->isr = isr;
37271diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
37272index 36f26da..38a34a8 100644
37273--- a/drivers/scsi/bfa/bfa_fcpim.h
37274+++ b/drivers/scsi/bfa/bfa_fcpim.h
37275@@ -37,6 +37,7 @@ struct bfa_iotag_s {
37276 struct bfa_itn_s {
37277 bfa_isr_func_t isr;
37278 };
37279+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
37280
37281 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37282 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
37283@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
37284 struct list_head iotag_tio_free_q; /* free IO resources */
37285 struct list_head iotag_unused_q; /* unused IO resources*/
37286 struct bfa_iotag_s *iotag_arr;
37287- struct bfa_itn_s *itn_arr;
37288+ bfa_itn_s_no_const *itn_arr;
37289 int num_ioim_reqs;
37290 int num_fwtio_reqs;
37291 int num_itns;
37292diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
37293index 546d46b..642fa5b 100644
37294--- a/drivers/scsi/bfa/bfa_ioc.h
37295+++ b/drivers/scsi/bfa/bfa_ioc.h
37296@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
37297 bfa_ioc_disable_cbfn_t disable_cbfn;
37298 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37299 bfa_ioc_reset_cbfn_t reset_cbfn;
37300-};
37301+} __no_const;
37302
37303 /*
37304 * IOC event notification mechanism.
37305@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
37306 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
37307 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
37308 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
37309-};
37310+} __no_const;
37311
37312 /*
37313 * Queue element to wait for room in request queue. FIFO order is
37314diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
37315index 351dc0b..951dc32 100644
37316--- a/drivers/scsi/hosts.c
37317+++ b/drivers/scsi/hosts.c
37318@@ -42,7 +42,7 @@
37319 #include "scsi_logging.h"
37320
37321
37322-static atomic_t scsi_host_next_hn; /* host_no for next new host */
37323+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
37324
37325
37326 static void scsi_host_cls_release(struct device *dev)
37327@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
37328 * subtract one because we increment first then return, but we need to
37329 * know what the next host number was before increment
37330 */
37331- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37332+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37333 shost->dma_channel = 0xff;
37334
37335 /* These three are default values which can be overridden */
37336diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
37337index b96962c..0c82ec2 100644
37338--- a/drivers/scsi/hpsa.c
37339+++ b/drivers/scsi/hpsa.c
37340@@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
37341 u32 a;
37342
37343 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37344- return h->access.command_completed(h);
37345+ return h->access->command_completed(h);
37346
37347 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37348 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37349@@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
37350 while (!list_empty(&h->reqQ)) {
37351 c = list_entry(h->reqQ.next, struct CommandList, list);
37352 /* can't do anything if fifo is full */
37353- if ((h->access.fifo_full(h))) {
37354+ if ((h->access->fifo_full(h))) {
37355 dev_warn(&h->pdev->dev, "fifo full\n");
37356 break;
37357 }
37358@@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
37359 h->Qdepth--;
37360
37361 /* Tell the controller execute command */
37362- h->access.submit_command(h, c);
37363+ h->access->submit_command(h, c);
37364
37365 /* Put job onto the completed Q */
37366 addQ(&h->cmpQ, c);
37367@@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
37368
37369 static inline unsigned long get_next_completion(struct ctlr_info *h)
37370 {
37371- return h->access.command_completed(h);
37372+ return h->access->command_completed(h);
37373 }
37374
37375 static inline bool interrupt_pending(struct ctlr_info *h)
37376 {
37377- return h->access.intr_pending(h);
37378+ return h->access->intr_pending(h);
37379 }
37380
37381 static inline long interrupt_not_for_us(struct ctlr_info *h)
37382 {
37383- return (h->access.intr_pending(h) == 0) ||
37384+ return (h->access->intr_pending(h) == 0) ||
37385 (h->interrupts_enabled == 0);
37386 }
37387
37388@@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
37389 if (prod_index < 0)
37390 return -ENODEV;
37391 h->product_name = products[prod_index].product_name;
37392- h->access = *(products[prod_index].access);
37393+ h->access = products[prod_index].access;
37394
37395 if (hpsa_board_disabled(h->pdev)) {
37396 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37397@@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
37398
37399 assert_spin_locked(&lockup_detector_lock);
37400 remove_ctlr_from_lockup_detector_list(h);
37401- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37402+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37403 spin_lock_irqsave(&h->lock, flags);
37404 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
37405 spin_unlock_irqrestore(&h->lock, flags);
37406@@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
37407 }
37408
37409 /* make sure the board interrupts are off */
37410- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37411+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37412
37413 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
37414 goto clean2;
37415@@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
37416 * fake ones to scoop up any residual completions.
37417 */
37418 spin_lock_irqsave(&h->lock, flags);
37419- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37420+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37421 spin_unlock_irqrestore(&h->lock, flags);
37422 free_irq(h->intr[h->intr_mode], h);
37423 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
37424@@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
37425 dev_info(&h->pdev->dev, "Board READY.\n");
37426 dev_info(&h->pdev->dev,
37427 "Waiting for stale completions to drain.\n");
37428- h->access.set_intr_mask(h, HPSA_INTR_ON);
37429+ h->access->set_intr_mask(h, HPSA_INTR_ON);
37430 msleep(10000);
37431- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37432+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37433
37434 rc = controller_reset_failed(h->cfgtable);
37435 if (rc)
37436@@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
37437 }
37438
37439 /* Turn the interrupts on so we can service requests */
37440- h->access.set_intr_mask(h, HPSA_INTR_ON);
37441+ h->access->set_intr_mask(h, HPSA_INTR_ON);
37442
37443 hpsa_hba_inquiry(h);
37444 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
37445@@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
37446 * To write all data in the battery backed cache to disks
37447 */
37448 hpsa_flush_cache(h);
37449- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37450+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37451 free_irq(h->intr[h->intr_mode], h);
37452 #ifdef CONFIG_PCI_MSI
37453 if (h->msix_vector)
37454@@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
37455 return;
37456 }
37457 /* Change the access methods to the performant access methods */
37458- h->access = SA5_performant_access;
37459+ h->access = &SA5_performant_access;
37460 h->transMethod = CFGTBL_Trans_Performant;
37461 }
37462
37463diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
37464index 91edafb..a9b88ec 100644
37465--- a/drivers/scsi/hpsa.h
37466+++ b/drivers/scsi/hpsa.h
37467@@ -73,7 +73,7 @@ struct ctlr_info {
37468 unsigned int msix_vector;
37469 unsigned int msi_vector;
37470 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
37471- struct access_method access;
37472+ struct access_method *access;
37473
37474 /* queue and queue Info */
37475 struct list_head reqQ;
37476diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
37477index f2df059..a3a9930 100644
37478--- a/drivers/scsi/ips.h
37479+++ b/drivers/scsi/ips.h
37480@@ -1027,7 +1027,7 @@ typedef struct {
37481 int (*intr)(struct ips_ha *);
37482 void (*enableint)(struct ips_ha *);
37483 uint32_t (*statupd)(struct ips_ha *);
37484-} ips_hw_func_t;
37485+} __no_const ips_hw_func_t;
37486
37487 typedef struct ips_ha {
37488 uint8_t ha_id[IPS_MAX_CHANNELS+1];
37489diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
37490index 4d70d96..84d0573 100644
37491--- a/drivers/scsi/libfc/fc_exch.c
37492+++ b/drivers/scsi/libfc/fc_exch.c
37493@@ -105,12 +105,12 @@ struct fc_exch_mgr {
37494 * all together if not used XXX
37495 */
37496 struct {
37497- atomic_t no_free_exch;
37498- atomic_t no_free_exch_xid;
37499- atomic_t xid_not_found;
37500- atomic_t xid_busy;
37501- atomic_t seq_not_found;
37502- atomic_t non_bls_resp;
37503+ atomic_unchecked_t no_free_exch;
37504+ atomic_unchecked_t no_free_exch_xid;
37505+ atomic_unchecked_t xid_not_found;
37506+ atomic_unchecked_t xid_busy;
37507+ atomic_unchecked_t seq_not_found;
37508+ atomic_unchecked_t non_bls_resp;
37509 } stats;
37510 };
37511
37512@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
37513 /* allocate memory for exchange */
37514 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37515 if (!ep) {
37516- atomic_inc(&mp->stats.no_free_exch);
37517+ atomic_inc_unchecked(&mp->stats.no_free_exch);
37518 goto out;
37519 }
37520 memset(ep, 0, sizeof(*ep));
37521@@ -780,7 +780,7 @@ out:
37522 return ep;
37523 err:
37524 spin_unlock_bh(&pool->lock);
37525- atomic_inc(&mp->stats.no_free_exch_xid);
37526+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37527 mempool_free(ep, mp->ep_pool);
37528 return NULL;
37529 }
37530@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37531 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37532 ep = fc_exch_find(mp, xid);
37533 if (!ep) {
37534- atomic_inc(&mp->stats.xid_not_found);
37535+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37536 reject = FC_RJT_OX_ID;
37537 goto out;
37538 }
37539@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37540 ep = fc_exch_find(mp, xid);
37541 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37542 if (ep) {
37543- atomic_inc(&mp->stats.xid_busy);
37544+ atomic_inc_unchecked(&mp->stats.xid_busy);
37545 reject = FC_RJT_RX_ID;
37546 goto rel;
37547 }
37548@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37549 }
37550 xid = ep->xid; /* get our XID */
37551 } else if (!ep) {
37552- atomic_inc(&mp->stats.xid_not_found);
37553+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37554 reject = FC_RJT_RX_ID; /* XID not found */
37555 goto out;
37556 }
37557@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37558 } else {
37559 sp = &ep->seq;
37560 if (sp->id != fh->fh_seq_id) {
37561- atomic_inc(&mp->stats.seq_not_found);
37562+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37563 if (f_ctl & FC_FC_END_SEQ) {
37564 /*
37565 * Update sequence_id based on incoming last
37566@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37567
37568 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37569 if (!ep) {
37570- atomic_inc(&mp->stats.xid_not_found);
37571+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37572 goto out;
37573 }
37574 if (ep->esb_stat & ESB_ST_COMPLETE) {
37575- atomic_inc(&mp->stats.xid_not_found);
37576+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37577 goto rel;
37578 }
37579 if (ep->rxid == FC_XID_UNKNOWN)
37580 ep->rxid = ntohs(fh->fh_rx_id);
37581 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37582- atomic_inc(&mp->stats.xid_not_found);
37583+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37584 goto rel;
37585 }
37586 if (ep->did != ntoh24(fh->fh_s_id) &&
37587 ep->did != FC_FID_FLOGI) {
37588- atomic_inc(&mp->stats.xid_not_found);
37589+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37590 goto rel;
37591 }
37592 sof = fr_sof(fp);
37593@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37594 sp->ssb_stat |= SSB_ST_RESP;
37595 sp->id = fh->fh_seq_id;
37596 } else if (sp->id != fh->fh_seq_id) {
37597- atomic_inc(&mp->stats.seq_not_found);
37598+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37599 goto rel;
37600 }
37601
37602@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37603 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37604
37605 if (!sp)
37606- atomic_inc(&mp->stats.xid_not_found);
37607+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37608 else
37609- atomic_inc(&mp->stats.non_bls_resp);
37610+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
37611
37612 fc_frame_free(fp);
37613 }
37614diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
37615index db9238f..4378ed2 100644
37616--- a/drivers/scsi/libsas/sas_ata.c
37617+++ b/drivers/scsi/libsas/sas_ata.c
37618@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
37619 .postreset = ata_std_postreset,
37620 .error_handler = ata_std_error_handler,
37621 .post_internal_cmd = sas_ata_post_internal,
37622- .qc_defer = ata_std_qc_defer,
37623+ .qc_defer = ata_std_qc_defer,
37624 .qc_prep = ata_noop_qc_prep,
37625 .qc_issue = sas_ata_qc_issue,
37626 .qc_fill_rtf = sas_ata_qc_fill_rtf,
37627diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
37628index 825f930..ce42672 100644
37629--- a/drivers/scsi/lpfc/lpfc.h
37630+++ b/drivers/scsi/lpfc/lpfc.h
37631@@ -413,7 +413,7 @@ struct lpfc_vport {
37632 struct dentry *debug_nodelist;
37633 struct dentry *vport_debugfs_root;
37634 struct lpfc_debugfs_trc *disc_trc;
37635- atomic_t disc_trc_cnt;
37636+ atomic_unchecked_t disc_trc_cnt;
37637 #endif
37638 uint8_t stat_data_enabled;
37639 uint8_t stat_data_blocked;
37640@@ -821,8 +821,8 @@ struct lpfc_hba {
37641 struct timer_list fabric_block_timer;
37642 unsigned long bit_flags;
37643 #define FABRIC_COMANDS_BLOCKED 0
37644- atomic_t num_rsrc_err;
37645- atomic_t num_cmd_success;
37646+ atomic_unchecked_t num_rsrc_err;
37647+ atomic_unchecked_t num_cmd_success;
37648 unsigned long last_rsrc_error_time;
37649 unsigned long last_ramp_down_time;
37650 unsigned long last_ramp_up_time;
37651@@ -852,7 +852,7 @@ struct lpfc_hba {
37652
37653 struct dentry *debug_slow_ring_trc;
37654 struct lpfc_debugfs_trc *slow_ring_trc;
37655- atomic_t slow_ring_trc_cnt;
37656+ atomic_unchecked_t slow_ring_trc_cnt;
37657 /* iDiag debugfs sub-directory */
37658 struct dentry *idiag_root;
37659 struct dentry *idiag_pci_cfg;
37660diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
37661index 3587a3f..d45b81b 100644
37662--- a/drivers/scsi/lpfc/lpfc_debugfs.c
37663+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
37664@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
37665
37666 #include <linux/debugfs.h>
37667
37668-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37669+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37670 static unsigned long lpfc_debugfs_start_time = 0L;
37671
37672 /* iDiag */
37673@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
37674 lpfc_debugfs_enable = 0;
37675
37676 len = 0;
37677- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37678+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37679 (lpfc_debugfs_max_disc_trc - 1);
37680 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37681 dtp = vport->disc_trc + i;
37682@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
37683 lpfc_debugfs_enable = 0;
37684
37685 len = 0;
37686- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37687+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37688 (lpfc_debugfs_max_slow_ring_trc - 1);
37689 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37690 dtp = phba->slow_ring_trc + i;
37691@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
37692 !vport || !vport->disc_trc)
37693 return;
37694
37695- index = atomic_inc_return(&vport->disc_trc_cnt) &
37696+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37697 (lpfc_debugfs_max_disc_trc - 1);
37698 dtp = vport->disc_trc + index;
37699 dtp->fmt = fmt;
37700 dtp->data1 = data1;
37701 dtp->data2 = data2;
37702 dtp->data3 = data3;
37703- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37704+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37705 dtp->jif = jiffies;
37706 #endif
37707 return;
37708@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
37709 !phba || !phba->slow_ring_trc)
37710 return;
37711
37712- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37713+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37714 (lpfc_debugfs_max_slow_ring_trc - 1);
37715 dtp = phba->slow_ring_trc + index;
37716 dtp->fmt = fmt;
37717 dtp->data1 = data1;
37718 dtp->data2 = data2;
37719 dtp->data3 = data3;
37720- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37721+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37722 dtp->jif = jiffies;
37723 #endif
37724 return;
37725@@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37726 "slow_ring buffer\n");
37727 goto debug_failed;
37728 }
37729- atomic_set(&phba->slow_ring_trc_cnt, 0);
37730+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37731 memset(phba->slow_ring_trc, 0,
37732 (sizeof(struct lpfc_debugfs_trc) *
37733 lpfc_debugfs_max_slow_ring_trc));
37734@@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37735 "buffer\n");
37736 goto debug_failed;
37737 }
37738- atomic_set(&vport->disc_trc_cnt, 0);
37739+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37740
37741 snprintf(name, sizeof(name), "discovery_trace");
37742 vport->debug_disc_trc =
37743diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
37744index dfea2da..8e17227 100644
37745--- a/drivers/scsi/lpfc/lpfc_init.c
37746+++ b/drivers/scsi/lpfc/lpfc_init.c
37747@@ -10145,8 +10145,10 @@ lpfc_init(void)
37748 printk(LPFC_COPYRIGHT "\n");
37749
37750 if (lpfc_enable_npiv) {
37751- lpfc_transport_functions.vport_create = lpfc_vport_create;
37752- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37753+ pax_open_kernel();
37754+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37755+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37756+ pax_close_kernel();
37757 }
37758 lpfc_transport_template =
37759 fc_attach_transport(&lpfc_transport_functions);
37760diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
37761index c60f5d0..751535c 100644
37762--- a/drivers/scsi/lpfc/lpfc_scsi.c
37763+++ b/drivers/scsi/lpfc/lpfc_scsi.c
37764@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
37765 uint32_t evt_posted;
37766
37767 spin_lock_irqsave(&phba->hbalock, flags);
37768- atomic_inc(&phba->num_rsrc_err);
37769+ atomic_inc_unchecked(&phba->num_rsrc_err);
37770 phba->last_rsrc_error_time = jiffies;
37771
37772 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37773@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
37774 unsigned long flags;
37775 struct lpfc_hba *phba = vport->phba;
37776 uint32_t evt_posted;
37777- atomic_inc(&phba->num_cmd_success);
37778+ atomic_inc_unchecked(&phba->num_cmd_success);
37779
37780 if (vport->cfg_lun_queue_depth <= queue_depth)
37781 return;
37782@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37783 unsigned long num_rsrc_err, num_cmd_success;
37784 int i;
37785
37786- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37787- num_cmd_success = atomic_read(&phba->num_cmd_success);
37788+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37789+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37790
37791 vports = lpfc_create_vport_work_array(phba);
37792 if (vports != NULL)
37793@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37794 }
37795 }
37796 lpfc_destroy_vport_work_array(phba, vports);
37797- atomic_set(&phba->num_rsrc_err, 0);
37798- atomic_set(&phba->num_cmd_success, 0);
37799+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37800+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37801 }
37802
37803 /**
37804@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37805 }
37806 }
37807 lpfc_destroy_vport_work_array(phba, vports);
37808- atomic_set(&phba->num_rsrc_err, 0);
37809- atomic_set(&phba->num_cmd_success, 0);
37810+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37811+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37812 }
37813
37814 /**
37815diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37816index ea8a0b4..812a124 100644
37817--- a/drivers/scsi/pmcraid.c
37818+++ b/drivers/scsi/pmcraid.c
37819@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37820 res->scsi_dev = scsi_dev;
37821 scsi_dev->hostdata = res;
37822 res->change_detected = 0;
37823- atomic_set(&res->read_failures, 0);
37824- atomic_set(&res->write_failures, 0);
37825+ atomic_set_unchecked(&res->read_failures, 0);
37826+ atomic_set_unchecked(&res->write_failures, 0);
37827 rc = 0;
37828 }
37829 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37830@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37831
37832 /* If this was a SCSI read/write command keep count of errors */
37833 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37834- atomic_inc(&res->read_failures);
37835+ atomic_inc_unchecked(&res->read_failures);
37836 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37837- atomic_inc(&res->write_failures);
37838+ atomic_inc_unchecked(&res->write_failures);
37839
37840 if (!RES_IS_GSCSI(res->cfg_entry) &&
37841 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37842@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37843 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37844 * hrrq_id assigned here in queuecommand
37845 */
37846- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37847+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37848 pinstance->num_hrrq;
37849 cmd->cmd_done = pmcraid_io_done;
37850
37851@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37852 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37853 * hrrq_id assigned here in queuecommand
37854 */
37855- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37856+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37857 pinstance->num_hrrq;
37858
37859 if (request_size) {
37860@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37861
37862 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37863 /* add resources only after host is added into system */
37864- if (!atomic_read(&pinstance->expose_resources))
37865+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37866 return;
37867
37868 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37869@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37870 init_waitqueue_head(&pinstance->reset_wait_q);
37871
37872 atomic_set(&pinstance->outstanding_cmds, 0);
37873- atomic_set(&pinstance->last_message_id, 0);
37874- atomic_set(&pinstance->expose_resources, 0);
37875+ atomic_set_unchecked(&pinstance->last_message_id, 0);
37876+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37877
37878 INIT_LIST_HEAD(&pinstance->free_res_q);
37879 INIT_LIST_HEAD(&pinstance->used_res_q);
37880@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37881 /* Schedule worker thread to handle CCN and take care of adding and
37882 * removing devices to OS
37883 */
37884- atomic_set(&pinstance->expose_resources, 1);
37885+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37886 schedule_work(&pinstance->worker_q);
37887 return rc;
37888
37889diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37890index ca496c7..9c791d5 100644
37891--- a/drivers/scsi/pmcraid.h
37892+++ b/drivers/scsi/pmcraid.h
37893@@ -748,7 +748,7 @@ struct pmcraid_instance {
37894 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37895
37896 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37897- atomic_t last_message_id;
37898+ atomic_unchecked_t last_message_id;
37899
37900 /* configuration table */
37901 struct pmcraid_config_table *cfg_table;
37902@@ -777,7 +777,7 @@ struct pmcraid_instance {
37903 atomic_t outstanding_cmds;
37904
37905 /* should add/delete resources to mid-layer now ?*/
37906- atomic_t expose_resources;
37907+ atomic_unchecked_t expose_resources;
37908
37909
37910
37911@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37912 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37913 };
37914 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37915- atomic_t read_failures; /* count of failed READ commands */
37916- atomic_t write_failures; /* count of failed WRITE commands */
37917+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37918+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37919
37920 /* To indicate add/delete/modify during CCN */
37921 u8 change_detected;
37922diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37923index af1003f..be55a75 100644
37924--- a/drivers/scsi/qla2xxx/qla_def.h
37925+++ b/drivers/scsi/qla2xxx/qla_def.h
37926@@ -2247,7 +2247,7 @@ struct isp_operations {
37927 int (*start_scsi) (srb_t *);
37928 int (*abort_isp) (struct scsi_qla_host *);
37929 int (*iospace_config)(struct qla_hw_data*);
37930-};
37931+} __no_const;
37932
37933 /* MSI-X Support *************************************************************/
37934
37935diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37936index bfe6854..ceac088 100644
37937--- a/drivers/scsi/qla4xxx/ql4_def.h
37938+++ b/drivers/scsi/qla4xxx/ql4_def.h
37939@@ -261,7 +261,7 @@ struct ddb_entry {
37940 * (4000 only) */
37941 atomic_t relogin_timer; /* Max Time to wait for
37942 * relogin to complete */
37943- atomic_t relogin_retry_count; /* Num of times relogin has been
37944+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37945 * retried */
37946 uint32_t default_time2wait; /* Default Min time between
37947 * relogins (+aens) */
37948diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37949index ce6d3b7..73fac54 100644
37950--- a/drivers/scsi/qla4xxx/ql4_os.c
37951+++ b/drivers/scsi/qla4xxx/ql4_os.c
37952@@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37953 */
37954 if (!iscsi_is_session_online(cls_sess)) {
37955 /* Reset retry relogin timer */
37956- atomic_inc(&ddb_entry->relogin_retry_count);
37957+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37958 DEBUG2(ql4_printk(KERN_INFO, ha,
37959 "%s: index[%d] relogin timed out-retrying"
37960 " relogin (%d), retry (%d)\n", __func__,
37961 ddb_entry->fw_ddb_index,
37962- atomic_read(&ddb_entry->relogin_retry_count),
37963+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37964 ddb_entry->default_time2wait + 4));
37965 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37966 atomic_set(&ddb_entry->retry_relogin_timer,
37967@@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37968
37969 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37970 atomic_set(&ddb_entry->relogin_timer, 0);
37971- atomic_set(&ddb_entry->relogin_retry_count, 0);
37972+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37973 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37974 ddb_entry->default_relogin_timeout =
37975 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37976diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37977index 2aeb2e9..46e3925 100644
37978--- a/drivers/scsi/scsi.c
37979+++ b/drivers/scsi/scsi.c
37980@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37981 unsigned long timeout;
37982 int rtn = 0;
37983
37984- atomic_inc(&cmd->device->iorequest_cnt);
37985+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37986
37987 /* check if the device is still usable */
37988 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37989diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37990index b2c95db..227d74e 100644
37991--- a/drivers/scsi/scsi_lib.c
37992+++ b/drivers/scsi/scsi_lib.c
37993@@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37994 shost = sdev->host;
37995 scsi_init_cmd_errh(cmd);
37996 cmd->result = DID_NO_CONNECT << 16;
37997- atomic_inc(&cmd->device->iorequest_cnt);
37998+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37999
38000 /*
38001 * SCSI request completion path will do scsi_device_unbusy(),
38002@@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
38003
38004 INIT_LIST_HEAD(&cmd->eh_entry);
38005
38006- atomic_inc(&cmd->device->iodone_cnt);
38007+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
38008 if (cmd->result)
38009- atomic_inc(&cmd->device->ioerr_cnt);
38010+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38011
38012 disposition = scsi_decide_disposition(cmd);
38013 if (disposition != SUCCESS &&
38014diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
38015index 04c2a27..9d8bd66 100644
38016--- a/drivers/scsi/scsi_sysfs.c
38017+++ b/drivers/scsi/scsi_sysfs.c
38018@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
38019 char *buf) \
38020 { \
38021 struct scsi_device *sdev = to_scsi_device(dev); \
38022- unsigned long long count = atomic_read(&sdev->field); \
38023+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
38024 return snprintf(buf, 20, "0x%llx\n", count); \
38025 } \
38026 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
38027diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38028index 84a1fdf..693b0d6 100644
38029--- a/drivers/scsi/scsi_tgt_lib.c
38030+++ b/drivers/scsi/scsi_tgt_lib.c
38031@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
38032 int err;
38033
38034 dprintk("%lx %u\n", uaddr, len);
38035- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38036+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38037 if (err) {
38038 /*
38039 * TODO: need to fixup sg_tablesize, max_segment_size,
38040diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
38041index f59d4a0..1d89407 100644
38042--- a/drivers/scsi/scsi_transport_fc.c
38043+++ b/drivers/scsi/scsi_transport_fc.c
38044@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
38045 * Netlink Infrastructure
38046 */
38047
38048-static atomic_t fc_event_seq;
38049+static atomic_unchecked_t fc_event_seq;
38050
38051 /**
38052 * fc_get_event_number - Obtain the next sequential FC event number
38053@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
38054 u32
38055 fc_get_event_number(void)
38056 {
38057- return atomic_add_return(1, &fc_event_seq);
38058+ return atomic_add_return_unchecked(1, &fc_event_seq);
38059 }
38060 EXPORT_SYMBOL(fc_get_event_number);
38061
38062@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
38063 {
38064 int error;
38065
38066- atomic_set(&fc_event_seq, 0);
38067+ atomic_set_unchecked(&fc_event_seq, 0);
38068
38069 error = transport_class_register(&fc_host_class);
38070 if (error)
38071@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
38072 char *cp;
38073
38074 *val = simple_strtoul(buf, &cp, 0);
38075- if ((*cp && (*cp != '\n')) || (*val < 0))
38076+ if (*cp && (*cp != '\n'))
38077 return -EINVAL;
38078 /*
38079 * Check for overflow; dev_loss_tmo is u32
38080diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
38081index e3e3c7d..ebdab62 100644
38082--- a/drivers/scsi/scsi_transport_iscsi.c
38083+++ b/drivers/scsi/scsi_transport_iscsi.c
38084@@ -79,7 +79,7 @@ struct iscsi_internal {
38085 struct transport_container session_cont;
38086 };
38087
38088-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38089+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38090 static struct workqueue_struct *iscsi_eh_timer_workq;
38091
38092 static DEFINE_IDA(iscsi_sess_ida);
38093@@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
38094 int err;
38095
38096 ihost = shost->shost_data;
38097- session->sid = atomic_add_return(1, &iscsi_session_nr);
38098+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38099
38100 if (target_id == ISCSI_MAX_TARGET) {
38101 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
38102@@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
38103 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38104 ISCSI_TRANSPORT_VERSION);
38105
38106- atomic_set(&iscsi_session_nr, 0);
38107+ atomic_set_unchecked(&iscsi_session_nr, 0);
38108
38109 err = class_register(&iscsi_transport_class);
38110 if (err)
38111diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38112index 21a045e..ec89e03 100644
38113--- a/drivers/scsi/scsi_transport_srp.c
38114+++ b/drivers/scsi/scsi_transport_srp.c
38115@@ -33,7 +33,7 @@
38116 #include "scsi_transport_srp_internal.h"
38117
38118 struct srp_host_attrs {
38119- atomic_t next_port_id;
38120+ atomic_unchecked_t next_port_id;
38121 };
38122 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38123
38124@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
38125 struct Scsi_Host *shost = dev_to_shost(dev);
38126 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38127
38128- atomic_set(&srp_host->next_port_id, 0);
38129+ atomic_set_unchecked(&srp_host->next_port_id, 0);
38130 return 0;
38131 }
38132
38133@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
38134 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38135 rport->roles = ids->roles;
38136
38137- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38138+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38139 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38140
38141 transport_setup_device(&rport->dev);
38142diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
38143index eacd46b..e3f4d62 100644
38144--- a/drivers/scsi/sg.c
38145+++ b/drivers/scsi/sg.c
38146@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
38147 sdp->disk->disk_name,
38148 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38149 NULL,
38150- (char *)arg);
38151+ (char __user *)arg);
38152 case BLKTRACESTART:
38153 return blk_trace_startstop(sdp->device->request_queue, 1);
38154 case BLKTRACESTOP:
38155@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
38156 const struct file_operations * fops;
38157 };
38158
38159-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
38160+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
38161 {"allow_dio", &adio_fops},
38162 {"debug", &debug_fops},
38163 {"def_reserved_size", &dressz_fops},
38164@@ -2332,7 +2332,7 @@ sg_proc_init(void)
38165 if (!sg_proc_sgp)
38166 return 1;
38167 for (k = 0; k < num_leaves; ++k) {
38168- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
38169+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
38170 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
38171 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
38172 }
38173diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
38174index f64250e..1ee3049 100644
38175--- a/drivers/spi/spi-dw-pci.c
38176+++ b/drivers/spi/spi-dw-pci.c
38177@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
38178 #define spi_resume NULL
38179 #endif
38180
38181-static const struct pci_device_id pci_ids[] __devinitdata = {
38182+static const struct pci_device_id pci_ids[] __devinitconst = {
38183 /* Intel MID platform SPI controller 0 */
38184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
38185 {},
38186diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
38187index b2ccdea..84cde75 100644
38188--- a/drivers/spi/spi.c
38189+++ b/drivers/spi/spi.c
38190@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
38191 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38192
38193 /* portable code must never pass more than 32 bytes */
38194-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38195+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38196
38197 static u8 *buf;
38198
38199diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
38200index 400df8c..065d4f4 100644
38201--- a/drivers/staging/octeon/ethernet-rx.c
38202+++ b/drivers/staging/octeon/ethernet-rx.c
38203@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38204 /* Increment RX stats for virtual ports */
38205 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38206 #ifdef CONFIG_64BIT
38207- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38208- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38209+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38210+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38211 #else
38212- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38213- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38214+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38215+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38216 #endif
38217 }
38218 netif_receive_skb(skb);
38219@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38220 dev->name);
38221 */
38222 #ifdef CONFIG_64BIT
38223- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38224+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38225 #else
38226- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38227+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38228 #endif
38229 dev_kfree_skb_irq(skb);
38230 }
38231diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
38232index 9112cd8..92f8d51 100644
38233--- a/drivers/staging/octeon/ethernet.c
38234+++ b/drivers/staging/octeon/ethernet.c
38235@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
38236 * since the RX tasklet also increments it.
38237 */
38238 #ifdef CONFIG_64BIT
38239- atomic64_add(rx_status.dropped_packets,
38240- (atomic64_t *)&priv->stats.rx_dropped);
38241+ atomic64_add_unchecked(rx_status.dropped_packets,
38242+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38243 #else
38244- atomic_add(rx_status.dropped_packets,
38245- (atomic_t *)&priv->stats.rx_dropped);
38246+ atomic_add_unchecked(rx_status.dropped_packets,
38247+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38248 #endif
38249 }
38250
38251diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
38252index f9dae95..ff48901 100644
38253--- a/drivers/staging/rtl8192e/rtllib_module.c
38254+++ b/drivers/staging/rtl8192e/rtllib_module.c
38255@@ -215,6 +215,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
38256 }
38257
38258 static int store_debug_level(struct file *file, const char __user *buffer,
38259+ unsigned long count, void *data) __size_overflow(3);
38260+static int store_debug_level(struct file *file, const char __user *buffer,
38261 unsigned long count, void *data)
38262 {
38263 char buf[] = "0x00000000";
38264diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38265index e3d47bc..85f4d0d 100644
38266--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38267+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
38268@@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
38269 }
38270
38271 static int store_debug_level(struct file *file, const char *buffer,
38272+ unsigned long count, void *data) __size_overflow(3);
38273+static int store_debug_level(struct file *file, const char *buffer,
38274 unsigned long count, void *data)
38275 {
38276 char buf[] = "0x00000000";
38277diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
38278index 86308a0..feaa925 100644
38279--- a/drivers/staging/rtl8712/rtl871x_io.h
38280+++ b/drivers/staging/rtl8712/rtl871x_io.h
38281@@ -108,7 +108,7 @@ struct _io_ops {
38282 u8 *pmem);
38283 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
38284 u8 *pmem);
38285-};
38286+} __no_const;
38287
38288 struct io_req {
38289 struct list_head list;
38290diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
38291index c7b5e8b..783d6cb 100644
38292--- a/drivers/staging/sbe-2t3e3/netdev.c
38293+++ b/drivers/staging/sbe-2t3e3/netdev.c
38294@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38295 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
38296
38297 if (rlen)
38298- if (copy_to_user(data, &resp, rlen))
38299+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
38300 return -EFAULT;
38301
38302 return 0;
38303diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
38304index 42cdafe..2769103 100644
38305--- a/drivers/staging/speakup/speakup_soft.c
38306+++ b/drivers/staging/speakup/speakup_soft.c
38307@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
38308 break;
38309 } else if (!initialized) {
38310 if (*init) {
38311- ch = *init;
38312 init++;
38313 } else {
38314 initialized = 1;
38315 }
38316+ ch = *init;
38317 } else {
38318 ch = synth_buffer_getc();
38319 }
38320diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
38321index b8f8c48..1fc5025 100644
38322--- a/drivers/staging/usbip/usbip_common.h
38323+++ b/drivers/staging/usbip/usbip_common.h
38324@@ -289,7 +289,7 @@ struct usbip_device {
38325 void (*shutdown)(struct usbip_device *);
38326 void (*reset)(struct usbip_device *);
38327 void (*unusable)(struct usbip_device *);
38328- } eh_ops;
38329+ } __no_const eh_ops;
38330 };
38331
38332 /* usbip_common.c */
38333diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
38334index 88b3298..3783eee 100644
38335--- a/drivers/staging/usbip/vhci.h
38336+++ b/drivers/staging/usbip/vhci.h
38337@@ -88,7 +88,7 @@ struct vhci_hcd {
38338 unsigned resuming:1;
38339 unsigned long re_timeout;
38340
38341- atomic_t seqnum;
38342+ atomic_unchecked_t seqnum;
38343
38344 /*
38345 * NOTE:
38346diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
38347index 2ee97e2..0420b86 100644
38348--- a/drivers/staging/usbip/vhci_hcd.c
38349+++ b/drivers/staging/usbip/vhci_hcd.c
38350@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
38351 return;
38352 }
38353
38354- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38355+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38356 if (priv->seqnum == 0xffff)
38357 dev_info(&urb->dev->dev, "seqnum max\n");
38358
38359@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
38360 return -ENOMEM;
38361 }
38362
38363- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38364+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38365 if (unlink->seqnum == 0xffff)
38366 pr_info("seqnum max\n");
38367
38368@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
38369 vdev->rhport = rhport;
38370 }
38371
38372- atomic_set(&vhci->seqnum, 0);
38373+ atomic_set_unchecked(&vhci->seqnum, 0);
38374 spin_lock_init(&vhci->lock);
38375
38376 hcd->power_budget = 0; /* no limit */
38377diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
38378index 3f511b4..d3dbc1e 100644
38379--- a/drivers/staging/usbip/vhci_rx.c
38380+++ b/drivers/staging/usbip/vhci_rx.c
38381@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
38382 if (!urb) {
38383 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
38384 pr_info("max seqnum %d\n",
38385- atomic_read(&the_controller->seqnum));
38386+ atomic_read_unchecked(&the_controller->seqnum));
38387 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38388 return;
38389 }
38390diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
38391index 7735027..30eed13 100644
38392--- a/drivers/staging/vt6655/hostap.c
38393+++ b/drivers/staging/vt6655/hostap.c
38394@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
38395 *
38396 */
38397
38398+static net_device_ops_no_const apdev_netdev_ops;
38399+
38400 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38401 {
38402 PSDevice apdev_priv;
38403 struct net_device *dev = pDevice->dev;
38404 int ret;
38405- const struct net_device_ops apdev_netdev_ops = {
38406- .ndo_start_xmit = pDevice->tx_80211,
38407- };
38408
38409 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
38410
38411@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38412 *apdev_priv = *pDevice;
38413 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38414
38415+ /* only half broken now */
38416+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38417 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38418
38419 pDevice->apdev->type = ARPHRD_IEEE80211;
38420diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
38421index 51b5adf..098e320 100644
38422--- a/drivers/staging/vt6656/hostap.c
38423+++ b/drivers/staging/vt6656/hostap.c
38424@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
38425 *
38426 */
38427
38428+static net_device_ops_no_const apdev_netdev_ops;
38429+
38430 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38431 {
38432 PSDevice apdev_priv;
38433 struct net_device *dev = pDevice->dev;
38434 int ret;
38435- const struct net_device_ops apdev_netdev_ops = {
38436- .ndo_start_xmit = pDevice->tx_80211,
38437- };
38438
38439 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
38440
38441@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38442 *apdev_priv = *pDevice;
38443 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38444
38445+ /* only half broken now */
38446+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38447 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38448
38449 pDevice->apdev->type = ARPHRD_IEEE80211;
38450diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
38451index 7843dfd..3db105f 100644
38452--- a/drivers/staging/wlan-ng/hfa384x_usb.c
38453+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
38454@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
38455
38456 struct usbctlx_completor {
38457 int (*complete) (struct usbctlx_completor *);
38458-};
38459+} __no_const;
38460
38461 static int
38462 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
38463diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
38464index 1ca66ea..76f1343 100644
38465--- a/drivers/staging/zcache/tmem.c
38466+++ b/drivers/staging/zcache/tmem.c
38467@@ -39,7 +39,7 @@
38468 * A tmem host implementation must use this function to register callbacks
38469 * for memory allocation.
38470 */
38471-static struct tmem_hostops tmem_hostops;
38472+static tmem_hostops_no_const tmem_hostops;
38473
38474 static void tmem_objnode_tree_init(void);
38475
38476@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
38477 * A tmem host implementation must use this function to register
38478 * callbacks for a page-accessible memory (PAM) implementation
38479 */
38480-static struct tmem_pamops tmem_pamops;
38481+static tmem_pamops_no_const tmem_pamops;
38482
38483 void tmem_register_pamops(struct tmem_pamops *m)
38484 {
38485diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
38486index ed147c4..94fc3c6 100644
38487--- a/drivers/staging/zcache/tmem.h
38488+++ b/drivers/staging/zcache/tmem.h
38489@@ -180,6 +180,7 @@ struct tmem_pamops {
38490 void (*new_obj)(struct tmem_obj *);
38491 int (*replace_in_obj)(void *, struct tmem_obj *);
38492 };
38493+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
38494 extern void tmem_register_pamops(struct tmem_pamops *m);
38495
38496 /* memory allocation methods provided by the host implementation */
38497@@ -189,6 +190,7 @@ struct tmem_hostops {
38498 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
38499 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
38500 };
38501+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
38502 extern void tmem_register_hostops(struct tmem_hostops *m);
38503
38504 /* core tmem accessor functions */
38505diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
38506index 97c74ee..7f6d77d 100644
38507--- a/drivers/target/iscsi/iscsi_target.c
38508+++ b/drivers/target/iscsi/iscsi_target.c
38509@@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
38510 * outstanding_r2ts reaches zero, go ahead and send the delayed
38511 * TASK_ABORTED status.
38512 */
38513- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
38514+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
38515 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
38516 if (--cmd->outstanding_r2ts < 1) {
38517 iscsit_stop_dataout_timer(cmd);
38518diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
38519index dcb0618..97e3d85 100644
38520--- a/drivers/target/target_core_tmr.c
38521+++ b/drivers/target/target_core_tmr.c
38522@@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
38523 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
38524 cmd->t_task_list_num,
38525 atomic_read(&cmd->t_task_cdbs_left),
38526- atomic_read(&cmd->t_task_cdbs_sent),
38527+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38528 atomic_read(&cmd->t_transport_active),
38529 atomic_read(&cmd->t_transport_stop),
38530 atomic_read(&cmd->t_transport_sent));
38531@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
38532 pr_debug("LUN_RESET: got t_transport_active = 1 for"
38533 " task: %p, t_fe_count: %d dev: %p\n", task,
38534 fe_count, dev);
38535- atomic_set(&cmd->t_transport_aborted, 1);
38536+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38537 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38538
38539 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
38540@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
38541 }
38542 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
38543 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
38544- atomic_set(&cmd->t_transport_aborted, 1);
38545+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38546 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38547
38548 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
38549diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
38550index cd5cd95..5249d30 100644
38551--- a/drivers/target/target_core_transport.c
38552+++ b/drivers/target/target_core_transport.c
38553@@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
38554 spin_lock_init(&dev->se_port_lock);
38555 spin_lock_init(&dev->se_tmr_lock);
38556 spin_lock_init(&dev->qf_cmd_lock);
38557- atomic_set(&dev->dev_ordered_id, 0);
38558+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
38559
38560 se_dev_set_default_attribs(dev, dev_limits);
38561
38562@@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
38563 * Used to determine when ORDERED commands should go from
38564 * Dormant to Active status.
38565 */
38566- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
38567+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
38568 smp_mb__after_atomic_inc();
38569 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
38570 cmd->se_ordered_id, cmd->sam_task_attr,
38571@@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
38572 " t_transport_active: %d t_transport_stop: %d"
38573 " t_transport_sent: %d\n", cmd->t_task_list_num,
38574 atomic_read(&cmd->t_task_cdbs_left),
38575- atomic_read(&cmd->t_task_cdbs_sent),
38576+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38577 atomic_read(&cmd->t_task_cdbs_ex_left),
38578 atomic_read(&cmd->t_transport_active),
38579 atomic_read(&cmd->t_transport_stop),
38580@@ -2121,9 +2121,9 @@ check_depth:
38581 cmd = task->task_se_cmd;
38582 spin_lock_irqsave(&cmd->t_state_lock, flags);
38583 task->task_flags |= (TF_ACTIVE | TF_SENT);
38584- atomic_inc(&cmd->t_task_cdbs_sent);
38585+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
38586
38587- if (atomic_read(&cmd->t_task_cdbs_sent) ==
38588+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
38589 cmd->t_task_list_num)
38590 atomic_set(&cmd->t_transport_sent, 1);
38591
38592@@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
38593 atomic_set(&cmd->transport_lun_stop, 0);
38594 }
38595 if (!atomic_read(&cmd->t_transport_active) ||
38596- atomic_read(&cmd->t_transport_aborted)) {
38597+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
38598 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38599 return false;
38600 }
38601@@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
38602 {
38603 int ret = 0;
38604
38605- if (atomic_read(&cmd->t_transport_aborted) != 0) {
38606+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
38607 if (!send_status ||
38608 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
38609 return 1;
38610@@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
38611 */
38612 if (cmd->data_direction == DMA_TO_DEVICE) {
38613 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
38614- atomic_inc(&cmd->t_transport_aborted);
38615+ atomic_inc_unchecked(&cmd->t_transport_aborted);
38616 smp_mb__after_atomic_inc();
38617 }
38618 }
38619diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
38620index b9040be..e3f5aab 100644
38621--- a/drivers/tty/hvc/hvcs.c
38622+++ b/drivers/tty/hvc/hvcs.c
38623@@ -83,6 +83,7 @@
38624 #include <asm/hvcserver.h>
38625 #include <asm/uaccess.h>
38626 #include <asm/vio.h>
38627+#include <asm/local.h>
38628
38629 /*
38630 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
38631@@ -270,7 +271,7 @@ struct hvcs_struct {
38632 unsigned int index;
38633
38634 struct tty_struct *tty;
38635- int open_count;
38636+ local_t open_count;
38637
38638 /*
38639 * Used to tell the driver kernel_thread what operations need to take
38640@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
38641
38642 spin_lock_irqsave(&hvcsd->lock, flags);
38643
38644- if (hvcsd->open_count > 0) {
38645+ if (local_read(&hvcsd->open_count) > 0) {
38646 spin_unlock_irqrestore(&hvcsd->lock, flags);
38647 printk(KERN_INFO "HVCS: vterm state unchanged. "
38648 "The hvcs device node is still in use.\n");
38649@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
38650 if ((retval = hvcs_partner_connect(hvcsd)))
38651 goto error_release;
38652
38653- hvcsd->open_count = 1;
38654+ local_set(&hvcsd->open_count, 1);
38655 hvcsd->tty = tty;
38656 tty->driver_data = hvcsd;
38657
38658@@ -1179,7 +1180,7 @@ fast_open:
38659
38660 spin_lock_irqsave(&hvcsd->lock, flags);
38661 kref_get(&hvcsd->kref);
38662- hvcsd->open_count++;
38663+ local_inc(&hvcsd->open_count);
38664 hvcsd->todo_mask |= HVCS_SCHED_READ;
38665 spin_unlock_irqrestore(&hvcsd->lock, flags);
38666
38667@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38668 hvcsd = tty->driver_data;
38669
38670 spin_lock_irqsave(&hvcsd->lock, flags);
38671- if (--hvcsd->open_count == 0) {
38672+ if (local_dec_and_test(&hvcsd->open_count)) {
38673
38674 vio_disable_interrupts(hvcsd->vdev);
38675
38676@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38677 free_irq(irq, hvcsd);
38678 kref_put(&hvcsd->kref, destroy_hvcs_struct);
38679 return;
38680- } else if (hvcsd->open_count < 0) {
38681+ } else if (local_read(&hvcsd->open_count) < 0) {
38682 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
38683 " is missmanaged.\n",
38684- hvcsd->vdev->unit_address, hvcsd->open_count);
38685+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
38686 }
38687
38688 spin_unlock_irqrestore(&hvcsd->lock, flags);
38689@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38690
38691 spin_lock_irqsave(&hvcsd->lock, flags);
38692 /* Preserve this so that we know how many kref refs to put */
38693- temp_open_count = hvcsd->open_count;
38694+ temp_open_count = local_read(&hvcsd->open_count);
38695
38696 /*
38697 * Don't kref put inside the spinlock because the destruction
38698@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38699 hvcsd->tty->driver_data = NULL;
38700 hvcsd->tty = NULL;
38701
38702- hvcsd->open_count = 0;
38703+ local_set(&hvcsd->open_count, 0);
38704
38705 /* This will drop any buffered data on the floor which is OK in a hangup
38706 * scenario. */
38707@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
38708 * the middle of a write operation? This is a crummy place to do this
38709 * but we want to keep it all in the spinlock.
38710 */
38711- if (hvcsd->open_count <= 0) {
38712+ if (local_read(&hvcsd->open_count) <= 0) {
38713 spin_unlock_irqrestore(&hvcsd->lock, flags);
38714 return -ENODEV;
38715 }
38716@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
38717 {
38718 struct hvcs_struct *hvcsd = tty->driver_data;
38719
38720- if (!hvcsd || hvcsd->open_count <= 0)
38721+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
38722 return 0;
38723
38724 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
38725diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
38726index ef92869..f4ebd88 100644
38727--- a/drivers/tty/ipwireless/tty.c
38728+++ b/drivers/tty/ipwireless/tty.c
38729@@ -29,6 +29,7 @@
38730 #include <linux/tty_driver.h>
38731 #include <linux/tty_flip.h>
38732 #include <linux/uaccess.h>
38733+#include <asm/local.h>
38734
38735 #include "tty.h"
38736 #include "network.h"
38737@@ -51,7 +52,7 @@ struct ipw_tty {
38738 int tty_type;
38739 struct ipw_network *network;
38740 struct tty_struct *linux_tty;
38741- int open_count;
38742+ local_t open_count;
38743 unsigned int control_lines;
38744 struct mutex ipw_tty_mutex;
38745 int tx_bytes_queued;
38746@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38747 mutex_unlock(&tty->ipw_tty_mutex);
38748 return -ENODEV;
38749 }
38750- if (tty->open_count == 0)
38751+ if (local_read(&tty->open_count) == 0)
38752 tty->tx_bytes_queued = 0;
38753
38754- tty->open_count++;
38755+ local_inc(&tty->open_count);
38756
38757 tty->linux_tty = linux_tty;
38758 linux_tty->driver_data = tty;
38759@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38760
38761 static void do_ipw_close(struct ipw_tty *tty)
38762 {
38763- tty->open_count--;
38764-
38765- if (tty->open_count == 0) {
38766+ if (local_dec_return(&tty->open_count) == 0) {
38767 struct tty_struct *linux_tty = tty->linux_tty;
38768
38769 if (linux_tty != NULL) {
38770@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
38771 return;
38772
38773 mutex_lock(&tty->ipw_tty_mutex);
38774- if (tty->open_count == 0) {
38775+ if (local_read(&tty->open_count) == 0) {
38776 mutex_unlock(&tty->ipw_tty_mutex);
38777 return;
38778 }
38779@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38780 return;
38781 }
38782
38783- if (!tty->open_count) {
38784+ if (!local_read(&tty->open_count)) {
38785 mutex_unlock(&tty->ipw_tty_mutex);
38786 return;
38787 }
38788@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38789 return -ENODEV;
38790
38791 mutex_lock(&tty->ipw_tty_mutex);
38792- if (!tty->open_count) {
38793+ if (!local_read(&tty->open_count)) {
38794 mutex_unlock(&tty->ipw_tty_mutex);
38795 return -EINVAL;
38796 }
38797@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38798 if (!tty)
38799 return -ENODEV;
38800
38801- if (!tty->open_count)
38802+ if (!local_read(&tty->open_count))
38803 return -EINVAL;
38804
38805 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38806@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38807 if (!tty)
38808 return 0;
38809
38810- if (!tty->open_count)
38811+ if (!local_read(&tty->open_count))
38812 return 0;
38813
38814 return tty->tx_bytes_queued;
38815@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38816 if (!tty)
38817 return -ENODEV;
38818
38819- if (!tty->open_count)
38820+ if (!local_read(&tty->open_count))
38821 return -EINVAL;
38822
38823 return get_control_lines(tty);
38824@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38825 if (!tty)
38826 return -ENODEV;
38827
38828- if (!tty->open_count)
38829+ if (!local_read(&tty->open_count))
38830 return -EINVAL;
38831
38832 return set_control_lines(tty, set, clear);
38833@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38834 if (!tty)
38835 return -ENODEV;
38836
38837- if (!tty->open_count)
38838+ if (!local_read(&tty->open_count))
38839 return -EINVAL;
38840
38841 /* FIXME: Exactly how is the tty object locked here .. */
38842@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38843 against a parallel ioctl etc */
38844 mutex_lock(&ttyj->ipw_tty_mutex);
38845 }
38846- while (ttyj->open_count)
38847+ while (local_read(&ttyj->open_count))
38848 do_ipw_close(ttyj);
38849 ipwireless_disassociate_network_ttys(network,
38850 ttyj->channel_idx);
38851diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38852index fc7bbba..9527e93 100644
38853--- a/drivers/tty/n_gsm.c
38854+++ b/drivers/tty/n_gsm.c
38855@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38856 kref_init(&dlci->ref);
38857 mutex_init(&dlci->mutex);
38858 dlci->fifo = &dlci->_fifo;
38859- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38860+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38861 kfree(dlci);
38862 return NULL;
38863 }
38864diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38865index d2256d0..97476fa 100644
38866--- a/drivers/tty/n_tty.c
38867+++ b/drivers/tty/n_tty.c
38868@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38869 {
38870 *ops = tty_ldisc_N_TTY;
38871 ops->owner = NULL;
38872- ops->refcount = ops->flags = 0;
38873+ atomic_set(&ops->refcount, 0);
38874+ ops->flags = 0;
38875 }
38876 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38877diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38878index d8653ab..f8afd9d 100644
38879--- a/drivers/tty/pty.c
38880+++ b/drivers/tty/pty.c
38881@@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
38882 register_sysctl_table(pty_root_table);
38883
38884 /* Now create the /dev/ptmx special device */
38885+ pax_open_kernel();
38886 tty_default_fops(&ptmx_fops);
38887- ptmx_fops.open = ptmx_open;
38888+ *(void **)&ptmx_fops.open = ptmx_open;
38889+ pax_close_kernel();
38890
38891 cdev_init(&ptmx_cdev, &ptmx_fops);
38892 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38893diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38894index 2b42a01..32a2ed3 100644
38895--- a/drivers/tty/serial/kgdboc.c
38896+++ b/drivers/tty/serial/kgdboc.c
38897@@ -24,8 +24,9 @@
38898 #define MAX_CONFIG_LEN 40
38899
38900 static struct kgdb_io kgdboc_io_ops;
38901+static struct kgdb_io kgdboc_io_ops_console;
38902
38903-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38904+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38905 static int configured = -1;
38906
38907 static char config[MAX_CONFIG_LEN];
38908@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38909 kgdboc_unregister_kbd();
38910 if (configured == 1)
38911 kgdb_unregister_io_module(&kgdboc_io_ops);
38912+ else if (configured == 2)
38913+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
38914 }
38915
38916 static int configure_kgdboc(void)
38917@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38918 int err;
38919 char *cptr = config;
38920 struct console *cons;
38921+ int is_console = 0;
38922
38923 err = kgdboc_option_setup(config);
38924 if (err || !strlen(config) || isspace(config[0]))
38925 goto noconfig;
38926
38927 err = -ENODEV;
38928- kgdboc_io_ops.is_console = 0;
38929 kgdb_tty_driver = NULL;
38930
38931 kgdboc_use_kms = 0;
38932@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38933 int idx;
38934 if (cons->device && cons->device(cons, &idx) == p &&
38935 idx == tty_line) {
38936- kgdboc_io_ops.is_console = 1;
38937+ is_console = 1;
38938 break;
38939 }
38940 cons = cons->next;
38941@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38942 kgdb_tty_line = tty_line;
38943
38944 do_register:
38945- err = kgdb_register_io_module(&kgdboc_io_ops);
38946+ if (is_console) {
38947+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38948+ configured = 2;
38949+ } else {
38950+ err = kgdb_register_io_module(&kgdboc_io_ops);
38951+ configured = 1;
38952+ }
38953 if (err)
38954 goto noconfig;
38955
38956- configured = 1;
38957-
38958 return 0;
38959
38960 noconfig:
38961@@ -213,7 +220,7 @@ noconfig:
38962 static int __init init_kgdboc(void)
38963 {
38964 /* Already configured? */
38965- if (configured == 1)
38966+ if (configured >= 1)
38967 return 0;
38968
38969 return configure_kgdboc();
38970@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38971 if (config[len - 1] == '\n')
38972 config[len - 1] = '\0';
38973
38974- if (configured == 1)
38975+ if (configured >= 1)
38976 cleanup_kgdboc();
38977
38978 /* Go and configure with the new params. */
38979@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38980 .post_exception = kgdboc_post_exp_handler,
38981 };
38982
38983+static struct kgdb_io kgdboc_io_ops_console = {
38984+ .name = "kgdboc",
38985+ .read_char = kgdboc_get_char,
38986+ .write_char = kgdboc_put_char,
38987+ .pre_exception = kgdboc_pre_exp_handler,
38988+ .post_exception = kgdboc_post_exp_handler,
38989+ .is_console = 1
38990+};
38991+
38992 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38993 /* This is only available if kgdboc is a built in for early debugging */
38994 static int __init kgdboc_early_init(char *opt)
38995diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38996index 7867b7c..b3c119d 100644
38997--- a/drivers/tty/sysrq.c
38998+++ b/drivers/tty/sysrq.c
38999@@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
39000 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
39001 size_t count, loff_t *ppos)
39002 {
39003- if (count) {
39004+ if (count && capable(CAP_SYS_ADMIN)) {
39005 char c;
39006
39007 if (get_user(c, buf))
39008diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
39009index e41b9bb..84002fb 100644
39010--- a/drivers/tty/tty_io.c
39011+++ b/drivers/tty/tty_io.c
39012@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
39013
39014 void tty_default_fops(struct file_operations *fops)
39015 {
39016- *fops = tty_fops;
39017+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
39018 }
39019
39020 /*
39021diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
39022index 24b95db..9c078d0 100644
39023--- a/drivers/tty/tty_ldisc.c
39024+++ b/drivers/tty/tty_ldisc.c
39025@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
39026 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
39027 struct tty_ldisc_ops *ldo = ld->ops;
39028
39029- ldo->refcount--;
39030+ atomic_dec(&ldo->refcount);
39031 module_put(ldo->owner);
39032 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39033
39034@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
39035 spin_lock_irqsave(&tty_ldisc_lock, flags);
39036 tty_ldiscs[disc] = new_ldisc;
39037 new_ldisc->num = disc;
39038- new_ldisc->refcount = 0;
39039+ atomic_set(&new_ldisc->refcount, 0);
39040 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39041
39042 return ret;
39043@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
39044 return -EINVAL;
39045
39046 spin_lock_irqsave(&tty_ldisc_lock, flags);
39047- if (tty_ldiscs[disc]->refcount)
39048+ if (atomic_read(&tty_ldiscs[disc]->refcount))
39049 ret = -EBUSY;
39050 else
39051 tty_ldiscs[disc] = NULL;
39052@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
39053 if (ldops) {
39054 ret = ERR_PTR(-EAGAIN);
39055 if (try_module_get(ldops->owner)) {
39056- ldops->refcount++;
39057+ atomic_inc(&ldops->refcount);
39058 ret = ldops;
39059 }
39060 }
39061@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
39062 unsigned long flags;
39063
39064 spin_lock_irqsave(&tty_ldisc_lock, flags);
39065- ldops->refcount--;
39066+ atomic_dec(&ldops->refcount);
39067 module_put(ldops->owner);
39068 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
39069 }
39070diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
39071index a605549..6bd3c96 100644
39072--- a/drivers/tty/vt/keyboard.c
39073+++ b/drivers/tty/vt/keyboard.c
39074@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
39075 kbd->kbdmode == VC_OFF) &&
39076 value != KVAL(K_SAK))
39077 return; /* SAK is allowed even in raw mode */
39078+
39079+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39080+ {
39081+ void *func = fn_handler[value];
39082+ if (func == fn_show_state || func == fn_show_ptregs ||
39083+ func == fn_show_mem)
39084+ return;
39085+ }
39086+#endif
39087+
39088 fn_handler[value](vc);
39089 }
39090
39091diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
39092index 65447c5..0526f0a 100644
39093--- a/drivers/tty/vt/vt_ioctl.c
39094+++ b/drivers/tty/vt/vt_ioctl.c
39095@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
39096 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
39097 return -EFAULT;
39098
39099- if (!capable(CAP_SYS_TTY_CONFIG))
39100- perm = 0;
39101-
39102 switch (cmd) {
39103 case KDGKBENT:
39104 key_map = key_maps[s];
39105@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
39106 val = (i ? K_HOLE : K_NOSUCHMAP);
39107 return put_user(val, &user_kbe->kb_value);
39108 case KDSKBENT:
39109+ if (!capable(CAP_SYS_TTY_CONFIG))
39110+ perm = 0;
39111+
39112 if (!perm)
39113 return -EPERM;
39114 if (!i && v == K_NOSUCHMAP) {
39115@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
39116 int i, j, k;
39117 int ret;
39118
39119- if (!capable(CAP_SYS_TTY_CONFIG))
39120- perm = 0;
39121-
39122 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
39123 if (!kbs) {
39124 ret = -ENOMEM;
39125@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
39126 kfree(kbs);
39127 return ((p && *p) ? -EOVERFLOW : 0);
39128 case KDSKBSENT:
39129+ if (!capable(CAP_SYS_TTY_CONFIG))
39130+ perm = 0;
39131+
39132 if (!perm) {
39133 ret = -EPERM;
39134 goto reterr;
39135diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
39136index a783d53..cb30d94 100644
39137--- a/drivers/uio/uio.c
39138+++ b/drivers/uio/uio.c
39139@@ -25,6 +25,7 @@
39140 #include <linux/kobject.h>
39141 #include <linux/cdev.h>
39142 #include <linux/uio_driver.h>
39143+#include <asm/local.h>
39144
39145 #define UIO_MAX_DEVICES (1U << MINORBITS)
39146
39147@@ -32,10 +33,10 @@ struct uio_device {
39148 struct module *owner;
39149 struct device *dev;
39150 int minor;
39151- atomic_t event;
39152+ atomic_unchecked_t event;
39153 struct fasync_struct *async_queue;
39154 wait_queue_head_t wait;
39155- int vma_count;
39156+ local_t vma_count;
39157 struct uio_info *info;
39158 struct kobject *map_dir;
39159 struct kobject *portio_dir;
39160@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
39161 struct device_attribute *attr, char *buf)
39162 {
39163 struct uio_device *idev = dev_get_drvdata(dev);
39164- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
39165+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
39166 }
39167
39168 static struct device_attribute uio_class_attributes[] = {
39169@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
39170 {
39171 struct uio_device *idev = info->uio_dev;
39172
39173- atomic_inc(&idev->event);
39174+ atomic_inc_unchecked(&idev->event);
39175 wake_up_interruptible(&idev->wait);
39176 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
39177 }
39178@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
39179 }
39180
39181 listener->dev = idev;
39182- listener->event_count = atomic_read(&idev->event);
39183+ listener->event_count = atomic_read_unchecked(&idev->event);
39184 filep->private_data = listener;
39185
39186 if (idev->info->open) {
39187@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
39188 return -EIO;
39189
39190 poll_wait(filep, &idev->wait, wait);
39191- if (listener->event_count != atomic_read(&idev->event))
39192+ if (listener->event_count != atomic_read_unchecked(&idev->event))
39193 return POLLIN | POLLRDNORM;
39194 return 0;
39195 }
39196@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
39197 do {
39198 set_current_state(TASK_INTERRUPTIBLE);
39199
39200- event_count = atomic_read(&idev->event);
39201+ event_count = atomic_read_unchecked(&idev->event);
39202 if (event_count != listener->event_count) {
39203 if (copy_to_user(buf, &event_count, count))
39204 retval = -EFAULT;
39205@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
39206 static void uio_vma_open(struct vm_area_struct *vma)
39207 {
39208 struct uio_device *idev = vma->vm_private_data;
39209- idev->vma_count++;
39210+ local_inc(&idev->vma_count);
39211 }
39212
39213 static void uio_vma_close(struct vm_area_struct *vma)
39214 {
39215 struct uio_device *idev = vma->vm_private_data;
39216- idev->vma_count--;
39217+ local_dec(&idev->vma_count);
39218 }
39219
39220 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39221@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
39222 idev->owner = owner;
39223 idev->info = info;
39224 init_waitqueue_head(&idev->wait);
39225- atomic_set(&idev->event, 0);
39226+ atomic_set_unchecked(&idev->event, 0);
39227
39228 ret = uio_get_minor(idev);
39229 if (ret)
39230diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
39231index 98b89fe..aff824e 100644
39232--- a/drivers/usb/atm/cxacru.c
39233+++ b/drivers/usb/atm/cxacru.c
39234@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
39235 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
39236 if (ret < 2)
39237 return -EINVAL;
39238- if (index < 0 || index > 0x7f)
39239+ if (index > 0x7f)
39240 return -EINVAL;
39241 pos += tmp;
39242
39243diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
39244index d3448ca..d2864ca 100644
39245--- a/drivers/usb/atm/usbatm.c
39246+++ b/drivers/usb/atm/usbatm.c
39247@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39248 if (printk_ratelimit())
39249 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
39250 __func__, vpi, vci);
39251- atomic_inc(&vcc->stats->rx_err);
39252+ atomic_inc_unchecked(&vcc->stats->rx_err);
39253 return;
39254 }
39255
39256@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39257 if (length > ATM_MAX_AAL5_PDU) {
39258 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
39259 __func__, length, vcc);
39260- atomic_inc(&vcc->stats->rx_err);
39261+ atomic_inc_unchecked(&vcc->stats->rx_err);
39262 goto out;
39263 }
39264
39265@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39266 if (sarb->len < pdu_length) {
39267 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
39268 __func__, pdu_length, sarb->len, vcc);
39269- atomic_inc(&vcc->stats->rx_err);
39270+ atomic_inc_unchecked(&vcc->stats->rx_err);
39271 goto out;
39272 }
39273
39274 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
39275 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
39276 __func__, vcc);
39277- atomic_inc(&vcc->stats->rx_err);
39278+ atomic_inc_unchecked(&vcc->stats->rx_err);
39279 goto out;
39280 }
39281
39282@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39283 if (printk_ratelimit())
39284 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
39285 __func__, length);
39286- atomic_inc(&vcc->stats->rx_drop);
39287+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39288 goto out;
39289 }
39290
39291@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
39292
39293 vcc->push(vcc, skb);
39294
39295- atomic_inc(&vcc->stats->rx);
39296+ atomic_inc_unchecked(&vcc->stats->rx);
39297 out:
39298 skb_trim(sarb, 0);
39299 }
39300@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
39301 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
39302
39303 usbatm_pop(vcc, skb);
39304- atomic_inc(&vcc->stats->tx);
39305+ atomic_inc_unchecked(&vcc->stats->tx);
39306
39307 skb = skb_dequeue(&instance->sndqueue);
39308 }
39309@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
39310 if (!left--)
39311 return sprintf(page,
39312 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
39313- atomic_read(&atm_dev->stats.aal5.tx),
39314- atomic_read(&atm_dev->stats.aal5.tx_err),
39315- atomic_read(&atm_dev->stats.aal5.rx),
39316- atomic_read(&atm_dev->stats.aal5.rx_err),
39317- atomic_read(&atm_dev->stats.aal5.rx_drop));
39318+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
39319+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
39320+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
39321+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
39322+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
39323
39324 if (!left--) {
39325 if (instance->disconnected)
39326diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
39327index d956965..4179a77 100644
39328--- a/drivers/usb/core/devices.c
39329+++ b/drivers/usb/core/devices.c
39330@@ -126,7 +126,7 @@ static const char format_endpt[] =
39331 * time it gets called.
39332 */
39333 static struct device_connect_event {
39334- atomic_t count;
39335+ atomic_unchecked_t count;
39336 wait_queue_head_t wait;
39337 } device_event = {
39338 .count = ATOMIC_INIT(1),
39339@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
39340
39341 void usbfs_conn_disc_event(void)
39342 {
39343- atomic_add(2, &device_event.count);
39344+ atomic_add_unchecked(2, &device_event.count);
39345 wake_up(&device_event.wait);
39346 }
39347
39348@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
39349
39350 poll_wait(file, &device_event.wait, wait);
39351
39352- event_count = atomic_read(&device_event.count);
39353+ event_count = atomic_read_unchecked(&device_event.count);
39354 if (file->f_version != event_count) {
39355 file->f_version = event_count;
39356 return POLLIN | POLLRDNORM;
39357diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
39358index 1fc8f12..20647c1 100644
39359--- a/drivers/usb/early/ehci-dbgp.c
39360+++ b/drivers/usb/early/ehci-dbgp.c
39361@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
39362
39363 #ifdef CONFIG_KGDB
39364 static struct kgdb_io kgdbdbgp_io_ops;
39365-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
39366+static struct kgdb_io kgdbdbgp_io_ops_console;
39367+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
39368 #else
39369 #define dbgp_kgdb_mode (0)
39370 #endif
39371@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
39372 .write_char = kgdbdbgp_write_char,
39373 };
39374
39375+static struct kgdb_io kgdbdbgp_io_ops_console = {
39376+ .name = "kgdbdbgp",
39377+ .read_char = kgdbdbgp_read_char,
39378+ .write_char = kgdbdbgp_write_char,
39379+ .is_console = 1
39380+};
39381+
39382 static int kgdbdbgp_wait_time;
39383
39384 static int __init kgdbdbgp_parse_config(char *str)
39385@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
39386 ptr++;
39387 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
39388 }
39389- kgdb_register_io_module(&kgdbdbgp_io_ops);
39390- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
39391+ if (early_dbgp_console.index != -1)
39392+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
39393+ else
39394+ kgdb_register_io_module(&kgdbdbgp_io_ops);
39395
39396 return 0;
39397 }
39398diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39399index d6bea3e..60b250e 100644
39400--- a/drivers/usb/wusbcore/wa-hc.h
39401+++ b/drivers/usb/wusbcore/wa-hc.h
39402@@ -192,7 +192,7 @@ struct wahc {
39403 struct list_head xfer_delayed_list;
39404 spinlock_t xfer_list_lock;
39405 struct work_struct xfer_work;
39406- atomic_t xfer_id_count;
39407+ atomic_unchecked_t xfer_id_count;
39408 };
39409
39410
39411@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
39412 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39413 spin_lock_init(&wa->xfer_list_lock);
39414 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39415- atomic_set(&wa->xfer_id_count, 1);
39416+ atomic_set_unchecked(&wa->xfer_id_count, 1);
39417 }
39418
39419 /**
39420diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
39421index 57c01ab..8a05959 100644
39422--- a/drivers/usb/wusbcore/wa-xfer.c
39423+++ b/drivers/usb/wusbcore/wa-xfer.c
39424@@ -296,7 +296,7 @@ out:
39425 */
39426 static void wa_xfer_id_init(struct wa_xfer *xfer)
39427 {
39428- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39429+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39430 }
39431
39432 /*
39433diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39434index c14c42b..f955cc2 100644
39435--- a/drivers/vhost/vhost.c
39436+++ b/drivers/vhost/vhost.c
39437@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
39438 return 0;
39439 }
39440
39441-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39442+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39443 {
39444 struct file *eventfp, *filep = NULL,
39445 *pollstart = NULL, *pollstop = NULL;
39446diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39447index b0b2ac3..89a4399 100644
39448--- a/drivers/video/aty/aty128fb.c
39449+++ b/drivers/video/aty/aty128fb.c
39450@@ -148,7 +148,7 @@ enum {
39451 };
39452
39453 /* Must match above enum */
39454-static const char *r128_family[] __devinitdata = {
39455+static const char *r128_family[] __devinitconst = {
39456 "AGP",
39457 "PCI",
39458 "PRO AGP",
39459diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39460index 5c3960d..15cf8fc 100644
39461--- a/drivers/video/fbcmap.c
39462+++ b/drivers/video/fbcmap.c
39463@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
39464 rc = -ENODEV;
39465 goto out;
39466 }
39467- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39468- !info->fbops->fb_setcmap)) {
39469+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39470 rc = -EINVAL;
39471 goto out1;
39472 }
39473diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
39474index c6ce416..3b9b642 100644
39475--- a/drivers/video/fbmem.c
39476+++ b/drivers/video/fbmem.c
39477@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39478 image->dx += image->width + 8;
39479 }
39480 } else if (rotate == FB_ROTATE_UD) {
39481- for (x = 0; x < num && image->dx >= 0; x++) {
39482+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39483 info->fbops->fb_imageblit(info, image);
39484 image->dx -= image->width + 8;
39485 }
39486@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39487 image->dy += image->height + 8;
39488 }
39489 } else if (rotate == FB_ROTATE_CCW) {
39490- for (x = 0; x < num && image->dy >= 0; x++) {
39491+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39492 info->fbops->fb_imageblit(info, image);
39493 image->dy -= image->height + 8;
39494 }
39495@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
39496 return -EFAULT;
39497 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39498 return -EINVAL;
39499- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39500+ if (con2fb.framebuffer >= FB_MAX)
39501 return -EINVAL;
39502 if (!registered_fb[con2fb.framebuffer])
39503 request_module("fb%d", con2fb.framebuffer);
39504diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39505index 5a5d092..265c5ed 100644
39506--- a/drivers/video/geode/gx1fb_core.c
39507+++ b/drivers/video/geode/gx1fb_core.c
39508@@ -29,7 +29,7 @@ static int crt_option = 1;
39509 static char panel_option[32] = "";
39510
39511 /* Modes relevant to the GX1 (taken from modedb.c) */
39512-static const struct fb_videomode __devinitdata gx1_modedb[] = {
39513+static const struct fb_videomode __devinitconst gx1_modedb[] = {
39514 /* 640x480-60 VESA */
39515 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39516 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
39517diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
39518index 0fad23f..0e9afa4 100644
39519--- a/drivers/video/gxt4500.c
39520+++ b/drivers/video/gxt4500.c
39521@@ -156,7 +156,7 @@ struct gxt4500_par {
39522 static char *mode_option;
39523
39524 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39525-static const struct fb_videomode defaultmode __devinitdata = {
39526+static const struct fb_videomode defaultmode __devinitconst = {
39527 .refresh = 60,
39528 .xres = 1280,
39529 .yres = 1024,
39530@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
39531 return 0;
39532 }
39533
39534-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39535+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39536 .id = "IBM GXT4500P",
39537 .type = FB_TYPE_PACKED_PIXELS,
39538 .visual = FB_VISUAL_PSEUDOCOLOR,
39539diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39540index 7672d2e..b56437f 100644
39541--- a/drivers/video/i810/i810_accel.c
39542+++ b/drivers/video/i810/i810_accel.c
39543@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
39544 }
39545 }
39546 printk("ringbuffer lockup!!!\n");
39547+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39548 i810_report_error(mmio);
39549 par->dev_flags |= LOCKUP;
39550 info->pixmap.scan_align = 1;
39551diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
39552index b83f361..2b05a91 100644
39553--- a/drivers/video/i810/i810_main.c
39554+++ b/drivers/video/i810/i810_main.c
39555@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
39556 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39557
39558 /* PCI */
39559-static const char *i810_pci_list[] __devinitdata = {
39560+static const char *i810_pci_list[] __devinitconst = {
39561 "Intel(R) 810 Framebuffer Device" ,
39562 "Intel(R) 810-DC100 Framebuffer Device" ,
39563 "Intel(R) 810E Framebuffer Device" ,
39564diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39565index de36693..3c63fc2 100644
39566--- a/drivers/video/jz4740_fb.c
39567+++ b/drivers/video/jz4740_fb.c
39568@@ -136,7 +136,7 @@ struct jzfb {
39569 uint32_t pseudo_palette[16];
39570 };
39571
39572-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39573+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39574 .id = "JZ4740 FB",
39575 .type = FB_TYPE_PACKED_PIXELS,
39576 .visual = FB_VISUAL_TRUECOLOR,
39577diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39578index 3c14e43..eafa544 100644
39579--- a/drivers/video/logo/logo_linux_clut224.ppm
39580+++ b/drivers/video/logo/logo_linux_clut224.ppm
39581@@ -1,1604 +1,1123 @@
39582 P3
39583-# Standard 224-color Linux logo
39584 80 80
39585 255
39586- 0 0 0 0 0 0 0 0 0 0 0 0
39587- 0 0 0 0 0 0 0 0 0 0 0 0
39588- 0 0 0 0 0 0 0 0 0 0 0 0
39589- 0 0 0 0 0 0 0 0 0 0 0 0
39590- 0 0 0 0 0 0 0 0 0 0 0 0
39591- 0 0 0 0 0 0 0 0 0 0 0 0
39592- 0 0 0 0 0 0 0 0 0 0 0 0
39593- 0 0 0 0 0 0 0 0 0 0 0 0
39594- 0 0 0 0 0 0 0 0 0 0 0 0
39595- 6 6 6 6 6 6 10 10 10 10 10 10
39596- 10 10 10 6 6 6 6 6 6 6 6 6
39597- 0 0 0 0 0 0 0 0 0 0 0 0
39598- 0 0 0 0 0 0 0 0 0 0 0 0
39599- 0 0 0 0 0 0 0 0 0 0 0 0
39600- 0 0 0 0 0 0 0 0 0 0 0 0
39601- 0 0 0 0 0 0 0 0 0 0 0 0
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 0 0 0 0 0 0 0 0 0 0 0 0
39607- 0 0 0 0 0 0 0 0 0 0 0 0
39608- 0 0 0 0 0 0 0 0 0 0 0 0
39609- 0 0 0 0 0 0 0 0 0 0 0 0
39610- 0 0 0 0 0 0 0 0 0 0 0 0
39611- 0 0 0 0 0 0 0 0 0 0 0 0
39612- 0 0 0 0 0 0 0 0 0 0 0 0
39613- 0 0 0 0 0 0 0 0 0 0 0 0
39614- 0 0 0 6 6 6 10 10 10 14 14 14
39615- 22 22 22 26 26 26 30 30 30 34 34 34
39616- 30 30 30 30 30 30 26 26 26 18 18 18
39617- 14 14 14 10 10 10 6 6 6 0 0 0
39618- 0 0 0 0 0 0 0 0 0 0 0 0
39619- 0 0 0 0 0 0 0 0 0 0 0 0
39620- 0 0 0 0 0 0 0 0 0 0 0 0
39621- 0 0 0 0 0 0 0 0 0 0 0 0
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 0 0 0
39626- 0 0 0 0 0 0 0 0 0 0 0 0
39627- 0 0 0 0 0 1 0 0 1 0 0 0
39628- 0 0 0 0 0 0 0 0 0 0 0 0
39629- 0 0 0 0 0 0 0 0 0 0 0 0
39630- 0 0 0 0 0 0 0 0 0 0 0 0
39631- 0 0 0 0 0 0 0 0 0 0 0 0
39632- 0 0 0 0 0 0 0 0 0 0 0 0
39633- 0 0 0 0 0 0 0 0 0 0 0 0
39634- 6 6 6 14 14 14 26 26 26 42 42 42
39635- 54 54 54 66 66 66 78 78 78 78 78 78
39636- 78 78 78 74 74 74 66 66 66 54 54 54
39637- 42 42 42 26 26 26 18 18 18 10 10 10
39638- 6 6 6 0 0 0 0 0 0 0 0 0
39639- 0 0 0 0 0 0 0 0 0 0 0 0
39640- 0 0 0 0 0 0 0 0 0 0 0 0
39641- 0 0 0 0 0 0 0 0 0 0 0 0
39642- 0 0 0 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 0 0 0 0 0 0
39647- 0 0 1 0 0 0 0 0 0 0 0 0
39648- 0 0 0 0 0 0 0 0 0 0 0 0
39649- 0 0 0 0 0 0 0 0 0 0 0 0
39650- 0 0 0 0 0 0 0 0 0 0 0 0
39651- 0 0 0 0 0 0 0 0 0 0 0 0
39652- 0 0 0 0 0 0 0 0 0 0 0 0
39653- 0 0 0 0 0 0 0 0 0 10 10 10
39654- 22 22 22 42 42 42 66 66 66 86 86 86
39655- 66 66 66 38 38 38 38 38 38 22 22 22
39656- 26 26 26 34 34 34 54 54 54 66 66 66
39657- 86 86 86 70 70 70 46 46 46 26 26 26
39658- 14 14 14 6 6 6 0 0 0 0 0 0
39659- 0 0 0 0 0 0 0 0 0 0 0 0
39660- 0 0 0 0 0 0 0 0 0 0 0 0
39661- 0 0 0 0 0 0 0 0 0 0 0 0
39662- 0 0 0 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 0 0 0 0 0 0 0 0 0
39667- 0 0 1 0 0 1 0 0 1 0 0 0
39668- 0 0 0 0 0 0 0 0 0 0 0 0
39669- 0 0 0 0 0 0 0 0 0 0 0 0
39670- 0 0 0 0 0 0 0 0 0 0 0 0
39671- 0 0 0 0 0 0 0 0 0 0 0 0
39672- 0 0 0 0 0 0 0 0 0 0 0 0
39673- 0 0 0 0 0 0 10 10 10 26 26 26
39674- 50 50 50 82 82 82 58 58 58 6 6 6
39675- 2 2 6 2 2 6 2 2 6 2 2 6
39676- 2 2 6 2 2 6 2 2 6 2 2 6
39677- 6 6 6 54 54 54 86 86 86 66 66 66
39678- 38 38 38 18 18 18 6 6 6 0 0 0
39679- 0 0 0 0 0 0 0 0 0 0 0 0
39680- 0 0 0 0 0 0 0 0 0 0 0 0
39681- 0 0 0 0 0 0 0 0 0 0 0 0
39682- 0 0 0 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 0 0 0 0 0 0 0 0 0 0 0 0
39687- 0 0 0 0 0 0 0 0 0 0 0 0
39688- 0 0 0 0 0 0 0 0 0 0 0 0
39689- 0 0 0 0 0 0 0 0 0 0 0 0
39690- 0 0 0 0 0 0 0 0 0 0 0 0
39691- 0 0 0 0 0 0 0 0 0 0 0 0
39692- 0 0 0 0 0 0 0 0 0 0 0 0
39693- 0 0 0 6 6 6 22 22 22 50 50 50
39694- 78 78 78 34 34 34 2 2 6 2 2 6
39695- 2 2 6 2 2 6 2 2 6 2 2 6
39696- 2 2 6 2 2 6 2 2 6 2 2 6
39697- 2 2 6 2 2 6 6 6 6 70 70 70
39698- 78 78 78 46 46 46 22 22 22 6 6 6
39699- 0 0 0 0 0 0 0 0 0 0 0 0
39700- 0 0 0 0 0 0 0 0 0 0 0 0
39701- 0 0 0 0 0 0 0 0 0 0 0 0
39702- 0 0 0 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 0 0 0 0 0 0 0 0 0 0 0 0
39707- 0 0 1 0 0 1 0 0 1 0 0 0
39708- 0 0 0 0 0 0 0 0 0 0 0 0
39709- 0 0 0 0 0 0 0 0 0 0 0 0
39710- 0 0 0 0 0 0 0 0 0 0 0 0
39711- 0 0 0 0 0 0 0 0 0 0 0 0
39712- 0 0 0 0 0 0 0 0 0 0 0 0
39713- 6 6 6 18 18 18 42 42 42 82 82 82
39714- 26 26 26 2 2 6 2 2 6 2 2 6
39715- 2 2 6 2 2 6 2 2 6 2 2 6
39716- 2 2 6 2 2 6 2 2 6 14 14 14
39717- 46 46 46 34 34 34 6 6 6 2 2 6
39718- 42 42 42 78 78 78 42 42 42 18 18 18
39719- 6 6 6 0 0 0 0 0 0 0 0 0
39720- 0 0 0 0 0 0 0 0 0 0 0 0
39721- 0 0 0 0 0 0 0 0 0 0 0 0
39722- 0 0 0 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 0 0 0 0 0 0 0 0 0 0 0 0
39727- 0 0 1 0 0 0 0 0 1 0 0 0
39728- 0 0 0 0 0 0 0 0 0 0 0 0
39729- 0 0 0 0 0 0 0 0 0 0 0 0
39730- 0 0 0 0 0 0 0 0 0 0 0 0
39731- 0 0 0 0 0 0 0 0 0 0 0 0
39732- 0 0 0 0 0 0 0 0 0 0 0 0
39733- 10 10 10 30 30 30 66 66 66 58 58 58
39734- 2 2 6 2 2 6 2 2 6 2 2 6
39735- 2 2 6 2 2 6 2 2 6 2 2 6
39736- 2 2 6 2 2 6 2 2 6 26 26 26
39737- 86 86 86 101 101 101 46 46 46 10 10 10
39738- 2 2 6 58 58 58 70 70 70 34 34 34
39739- 10 10 10 0 0 0 0 0 0 0 0 0
39740- 0 0 0 0 0 0 0 0 0 0 0 0
39741- 0 0 0 0 0 0 0 0 0 0 0 0
39742- 0 0 0 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 0 0 0
39746- 0 0 0 0 0 0 0 0 0 0 0 0
39747- 0 0 1 0 0 1 0 0 1 0 0 0
39748- 0 0 0 0 0 0 0 0 0 0 0 0
39749- 0 0 0 0 0 0 0 0 0 0 0 0
39750- 0 0 0 0 0 0 0 0 0 0 0 0
39751- 0 0 0 0 0 0 0 0 0 0 0 0
39752- 0 0 0 0 0 0 0 0 0 0 0 0
39753- 14 14 14 42 42 42 86 86 86 10 10 10
39754- 2 2 6 2 2 6 2 2 6 2 2 6
39755- 2 2 6 2 2 6 2 2 6 2 2 6
39756- 2 2 6 2 2 6 2 2 6 30 30 30
39757- 94 94 94 94 94 94 58 58 58 26 26 26
39758- 2 2 6 6 6 6 78 78 78 54 54 54
39759- 22 22 22 6 6 6 0 0 0 0 0 0
39760- 0 0 0 0 0 0 0 0 0 0 0 0
39761- 0 0 0 0 0 0 0 0 0 0 0 0
39762- 0 0 0 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 0 0 0
39766- 0 0 0 0 0 0 0 0 0 0 0 0
39767- 0 0 0 0 0 0 0 0 0 0 0 0
39768- 0 0 0 0 0 0 0 0 0 0 0 0
39769- 0 0 0 0 0 0 0 0 0 0 0 0
39770- 0 0 0 0 0 0 0 0 0 0 0 0
39771- 0 0 0 0 0 0 0 0 0 0 0 0
39772- 0 0 0 0 0 0 0 0 0 6 6 6
39773- 22 22 22 62 62 62 62 62 62 2 2 6
39774- 2 2 6 2 2 6 2 2 6 2 2 6
39775- 2 2 6 2 2 6 2 2 6 2 2 6
39776- 2 2 6 2 2 6 2 2 6 26 26 26
39777- 54 54 54 38 38 38 18 18 18 10 10 10
39778- 2 2 6 2 2 6 34 34 34 82 82 82
39779- 38 38 38 14 14 14 0 0 0 0 0 0
39780- 0 0 0 0 0 0 0 0 0 0 0 0
39781- 0 0 0 0 0 0 0 0 0 0 0 0
39782- 0 0 0 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 0 0 0
39786- 0 0 0 0 0 0 0 0 0 0 0 0
39787- 0 0 0 0 0 1 0 0 1 0 0 0
39788- 0 0 0 0 0 0 0 0 0 0 0 0
39789- 0 0 0 0 0 0 0 0 0 0 0 0
39790- 0 0 0 0 0 0 0 0 0 0 0 0
39791- 0 0 0 0 0 0 0 0 0 0 0 0
39792- 0 0 0 0 0 0 0 0 0 6 6 6
39793- 30 30 30 78 78 78 30 30 30 2 2 6
39794- 2 2 6 2 2 6 2 2 6 2 2 6
39795- 2 2 6 2 2 6 2 2 6 2 2 6
39796- 2 2 6 2 2 6 2 2 6 10 10 10
39797- 10 10 10 2 2 6 2 2 6 2 2 6
39798- 2 2 6 2 2 6 2 2 6 78 78 78
39799- 50 50 50 18 18 18 6 6 6 0 0 0
39800- 0 0 0 0 0 0 0 0 0 0 0 0
39801- 0 0 0 0 0 0 0 0 0 0 0 0
39802- 0 0 0 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 0 0 0
39806- 0 0 0 0 0 0 0 0 0 0 0 0
39807- 0 0 1 0 0 0 0 0 0 0 0 0
39808- 0 0 0 0 0 0 0 0 0 0 0 0
39809- 0 0 0 0 0 0 0 0 0 0 0 0
39810- 0 0 0 0 0 0 0 0 0 0 0 0
39811- 0 0 0 0 0 0 0 0 0 0 0 0
39812- 0 0 0 0 0 0 0 0 0 10 10 10
39813- 38 38 38 86 86 86 14 14 14 2 2 6
39814- 2 2 6 2 2 6 2 2 6 2 2 6
39815- 2 2 6 2 2 6 2 2 6 2 2 6
39816- 2 2 6 2 2 6 2 2 6 2 2 6
39817- 2 2 6 2 2 6 2 2 6 2 2 6
39818- 2 2 6 2 2 6 2 2 6 54 54 54
39819- 66 66 66 26 26 26 6 6 6 0 0 0
39820- 0 0 0 0 0 0 0 0 0 0 0 0
39821- 0 0 0 0 0 0 0 0 0 0 0 0
39822- 0 0 0 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 0 0 0 0 0 0 0 0 0
39827- 0 0 0 0 0 1 0 0 1 0 0 0
39828- 0 0 0 0 0 0 0 0 0 0 0 0
39829- 0 0 0 0 0 0 0 0 0 0 0 0
39830- 0 0 0 0 0 0 0 0 0 0 0 0
39831- 0 0 0 0 0 0 0 0 0 0 0 0
39832- 0 0 0 0 0 0 0 0 0 14 14 14
39833- 42 42 42 82 82 82 2 2 6 2 2 6
39834- 2 2 6 6 6 6 10 10 10 2 2 6
39835- 2 2 6 2 2 6 2 2 6 2 2 6
39836- 2 2 6 2 2 6 2 2 6 6 6 6
39837- 14 14 14 10 10 10 2 2 6 2 2 6
39838- 2 2 6 2 2 6 2 2 6 18 18 18
39839- 82 82 82 34 34 34 10 10 10 0 0 0
39840- 0 0 0 0 0 0 0 0 0 0 0 0
39841- 0 0 0 0 0 0 0 0 0 0 0 0
39842- 0 0 0 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 0 0 0
39847- 0 0 1 0 0 0 0 0 0 0 0 0
39848- 0 0 0 0 0 0 0 0 0 0 0 0
39849- 0 0 0 0 0 0 0 0 0 0 0 0
39850- 0 0 0 0 0 0 0 0 0 0 0 0
39851- 0 0 0 0 0 0 0 0 0 0 0 0
39852- 0 0 0 0 0 0 0 0 0 14 14 14
39853- 46 46 46 86 86 86 2 2 6 2 2 6
39854- 6 6 6 6 6 6 22 22 22 34 34 34
39855- 6 6 6 2 2 6 2 2 6 2 2 6
39856- 2 2 6 2 2 6 18 18 18 34 34 34
39857- 10 10 10 50 50 50 22 22 22 2 2 6
39858- 2 2 6 2 2 6 2 2 6 10 10 10
39859- 86 86 86 42 42 42 14 14 14 0 0 0
39860- 0 0 0 0 0 0 0 0 0 0 0 0
39861- 0 0 0 0 0 0 0 0 0 0 0 0
39862- 0 0 0 0 0 0 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 0 0 0
39867- 0 0 1 0 0 1 0 0 1 0 0 0
39868- 0 0 0 0 0 0 0 0 0 0 0 0
39869- 0 0 0 0 0 0 0 0 0 0 0 0
39870- 0 0 0 0 0 0 0 0 0 0 0 0
39871- 0 0 0 0 0 0 0 0 0 0 0 0
39872- 0 0 0 0 0 0 0 0 0 14 14 14
39873- 46 46 46 86 86 86 2 2 6 2 2 6
39874- 38 38 38 116 116 116 94 94 94 22 22 22
39875- 22 22 22 2 2 6 2 2 6 2 2 6
39876- 14 14 14 86 86 86 138 138 138 162 162 162
39877-154 154 154 38 38 38 26 26 26 6 6 6
39878- 2 2 6 2 2 6 2 2 6 2 2 6
39879- 86 86 86 46 46 46 14 14 14 0 0 0
39880- 0 0 0 0 0 0 0 0 0 0 0 0
39881- 0 0 0 0 0 0 0 0 0 0 0 0
39882- 0 0 0 0 0 0 0 0 0 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 0 0 0
39887- 0 0 0 0 0 0 0 0 0 0 0 0
39888- 0 0 0 0 0 0 0 0 0 0 0 0
39889- 0 0 0 0 0 0 0 0 0 0 0 0
39890- 0 0 0 0 0 0 0 0 0 0 0 0
39891- 0 0 0 0 0 0 0 0 0 0 0 0
39892- 0 0 0 0 0 0 0 0 0 14 14 14
39893- 46 46 46 86 86 86 2 2 6 14 14 14
39894-134 134 134 198 198 198 195 195 195 116 116 116
39895- 10 10 10 2 2 6 2 2 6 6 6 6
39896-101 98 89 187 187 187 210 210 210 218 218 218
39897-214 214 214 134 134 134 14 14 14 6 6 6
39898- 2 2 6 2 2 6 2 2 6 2 2 6
39899- 86 86 86 50 50 50 18 18 18 6 6 6
39900- 0 0 0 0 0 0 0 0 0 0 0 0
39901- 0 0 0 0 0 0 0 0 0 0 0 0
39902- 0 0 0 0 0 0 0 0 0 0 0 0
39903- 0 0 0 0 0 0 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 0 0 0 0 0 0 0 0 0 0 0 0
39906- 0 0 0 0 0 0 0 0 1 0 0 0
39907- 0 0 1 0 0 1 0 0 1 0 0 0
39908- 0 0 0 0 0 0 0 0 0 0 0 0
39909- 0 0 0 0 0 0 0 0 0 0 0 0
39910- 0 0 0 0 0 0 0 0 0 0 0 0
39911- 0 0 0 0 0 0 0 0 0 0 0 0
39912- 0 0 0 0 0 0 0 0 0 14 14 14
39913- 46 46 46 86 86 86 2 2 6 54 54 54
39914-218 218 218 195 195 195 226 226 226 246 246 246
39915- 58 58 58 2 2 6 2 2 6 30 30 30
39916-210 210 210 253 253 253 174 174 174 123 123 123
39917-221 221 221 234 234 234 74 74 74 2 2 6
39918- 2 2 6 2 2 6 2 2 6 2 2 6
39919- 70 70 70 58 58 58 22 22 22 6 6 6
39920- 0 0 0 0 0 0 0 0 0 0 0 0
39921- 0 0 0 0 0 0 0 0 0 0 0 0
39922- 0 0 0 0 0 0 0 0 0 0 0 0
39923- 0 0 0 0 0 0 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 0 0 0
39925- 0 0 0 0 0 0 0 0 0 0 0 0
39926- 0 0 0 0 0 0 0 0 0 0 0 0
39927- 0 0 0 0 0 0 0 0 0 0 0 0
39928- 0 0 0 0 0 0 0 0 0 0 0 0
39929- 0 0 0 0 0 0 0 0 0 0 0 0
39930- 0 0 0 0 0 0 0 0 0 0 0 0
39931- 0 0 0 0 0 0 0 0 0 0 0 0
39932- 0 0 0 0 0 0 0 0 0 14 14 14
39933- 46 46 46 82 82 82 2 2 6 106 106 106
39934-170 170 170 26 26 26 86 86 86 226 226 226
39935-123 123 123 10 10 10 14 14 14 46 46 46
39936-231 231 231 190 190 190 6 6 6 70 70 70
39937- 90 90 90 238 238 238 158 158 158 2 2 6
39938- 2 2 6 2 2 6 2 2 6 2 2 6
39939- 70 70 70 58 58 58 22 22 22 6 6 6
39940- 0 0 0 0 0 0 0 0 0 0 0 0
39941- 0 0 0 0 0 0 0 0 0 0 0 0
39942- 0 0 0 0 0 0 0 0 0 0 0 0
39943- 0 0 0 0 0 0 0 0 0 0 0 0
39944- 0 0 0 0 0 0 0 0 0 0 0 0
39945- 0 0 0 0 0 0 0 0 0 0 0 0
39946- 0 0 0 0 0 0 0 0 1 0 0 0
39947- 0 0 1 0 0 1 0 0 1 0 0 0
39948- 0 0 0 0 0 0 0 0 0 0 0 0
39949- 0 0 0 0 0 0 0 0 0 0 0 0
39950- 0 0 0 0 0 0 0 0 0 0 0 0
39951- 0 0 0 0 0 0 0 0 0 0 0 0
39952- 0 0 0 0 0 0 0 0 0 14 14 14
39953- 42 42 42 86 86 86 6 6 6 116 116 116
39954-106 106 106 6 6 6 70 70 70 149 149 149
39955-128 128 128 18 18 18 38 38 38 54 54 54
39956-221 221 221 106 106 106 2 2 6 14 14 14
39957- 46 46 46 190 190 190 198 198 198 2 2 6
39958- 2 2 6 2 2 6 2 2 6 2 2 6
39959- 74 74 74 62 62 62 22 22 22 6 6 6
39960- 0 0 0 0 0 0 0 0 0 0 0 0
39961- 0 0 0 0 0 0 0 0 0 0 0 0
39962- 0 0 0 0 0 0 0 0 0 0 0 0
39963- 0 0 0 0 0 0 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 0 0 0
39965- 0 0 0 0 0 0 0 0 0 0 0 0
39966- 0 0 0 0 0 0 0 0 1 0 0 0
39967- 0 0 1 0 0 0 0 0 1 0 0 0
39968- 0 0 0 0 0 0 0 0 0 0 0 0
39969- 0 0 0 0 0 0 0 0 0 0 0 0
39970- 0 0 0 0 0 0 0 0 0 0 0 0
39971- 0 0 0 0 0 0 0 0 0 0 0 0
39972- 0 0 0 0 0 0 0 0 0 14 14 14
39973- 42 42 42 94 94 94 14 14 14 101 101 101
39974-128 128 128 2 2 6 18 18 18 116 116 116
39975-118 98 46 121 92 8 121 92 8 98 78 10
39976-162 162 162 106 106 106 2 2 6 2 2 6
39977- 2 2 6 195 195 195 195 195 195 6 6 6
39978- 2 2 6 2 2 6 2 2 6 2 2 6
39979- 74 74 74 62 62 62 22 22 22 6 6 6
39980- 0 0 0 0 0 0 0 0 0 0 0 0
39981- 0 0 0 0 0 0 0 0 0 0 0 0
39982- 0 0 0 0 0 0 0 0 0 0 0 0
39983- 0 0 0 0 0 0 0 0 0 0 0 0
39984- 0 0 0 0 0 0 0 0 0 0 0 0
39985- 0 0 0 0 0 0 0 0 0 0 0 0
39986- 0 0 0 0 0 0 0 0 1 0 0 1
39987- 0 0 1 0 0 0 0 0 1 0 0 0
39988- 0 0 0 0 0 0 0 0 0 0 0 0
39989- 0 0 0 0 0 0 0 0 0 0 0 0
39990- 0 0 0 0 0 0 0 0 0 0 0 0
39991- 0 0 0 0 0 0 0 0 0 0 0 0
39992- 0 0 0 0 0 0 0 0 0 10 10 10
39993- 38 38 38 90 90 90 14 14 14 58 58 58
39994-210 210 210 26 26 26 54 38 6 154 114 10
39995-226 170 11 236 186 11 225 175 15 184 144 12
39996-215 174 15 175 146 61 37 26 9 2 2 6
39997- 70 70 70 246 246 246 138 138 138 2 2 6
39998- 2 2 6 2 2 6 2 2 6 2 2 6
39999- 70 70 70 66 66 66 26 26 26 6 6 6
40000- 0 0 0 0 0 0 0 0 0 0 0 0
40001- 0 0 0 0 0 0 0 0 0 0 0 0
40002- 0 0 0 0 0 0 0 0 0 0 0 0
40003- 0 0 0 0 0 0 0 0 0 0 0 0
40004- 0 0 0 0 0 0 0 0 0 0 0 0
40005- 0 0 0 0 0 0 0 0 0 0 0 0
40006- 0 0 0 0 0 0 0 0 0 0 0 0
40007- 0 0 0 0 0 0 0 0 0 0 0 0
40008- 0 0 0 0 0 0 0 0 0 0 0 0
40009- 0 0 0 0 0 0 0 0 0 0 0 0
40010- 0 0 0 0 0 0 0 0 0 0 0 0
40011- 0 0 0 0 0 0 0 0 0 0 0 0
40012- 0 0 0 0 0 0 0 0 0 10 10 10
40013- 38 38 38 86 86 86 14 14 14 10 10 10
40014-195 195 195 188 164 115 192 133 9 225 175 15
40015-239 182 13 234 190 10 232 195 16 232 200 30
40016-245 207 45 241 208 19 232 195 16 184 144 12
40017-218 194 134 211 206 186 42 42 42 2 2 6
40018- 2 2 6 2 2 6 2 2 6 2 2 6
40019- 50 50 50 74 74 74 30 30 30 6 6 6
40020- 0 0 0 0 0 0 0 0 0 0 0 0
40021- 0 0 0 0 0 0 0 0 0 0 0 0
40022- 0 0 0 0 0 0 0 0 0 0 0 0
40023- 0 0 0 0 0 0 0 0 0 0 0 0
40024- 0 0 0 0 0 0 0 0 0 0 0 0
40025- 0 0 0 0 0 0 0 0 0 0 0 0
40026- 0 0 0 0 0 0 0 0 0 0 0 0
40027- 0 0 0 0 0 0 0 0 0 0 0 0
40028- 0 0 0 0 0 0 0 0 0 0 0 0
40029- 0 0 0 0 0 0 0 0 0 0 0 0
40030- 0 0 0 0 0 0 0 0 0 0 0 0
40031- 0 0 0 0 0 0 0 0 0 0 0 0
40032- 0 0 0 0 0 0 0 0 0 10 10 10
40033- 34 34 34 86 86 86 14 14 14 2 2 6
40034-121 87 25 192 133 9 219 162 10 239 182 13
40035-236 186 11 232 195 16 241 208 19 244 214 54
40036-246 218 60 246 218 38 246 215 20 241 208 19
40037-241 208 19 226 184 13 121 87 25 2 2 6
40038- 2 2 6 2 2 6 2 2 6 2 2 6
40039- 50 50 50 82 82 82 34 34 34 10 10 10
40040- 0 0 0 0 0 0 0 0 0 0 0 0
40041- 0 0 0 0 0 0 0 0 0 0 0 0
40042- 0 0 0 0 0 0 0 0 0 0 0 0
40043- 0 0 0 0 0 0 0 0 0 0 0 0
40044- 0 0 0 0 0 0 0 0 0 0 0 0
40045- 0 0 0 0 0 0 0 0 0 0 0 0
40046- 0 0 0 0 0 0 0 0 0 0 0 0
40047- 0 0 0 0 0 0 0 0 0 0 0 0
40048- 0 0 0 0 0 0 0 0 0 0 0 0
40049- 0 0 0 0 0 0 0 0 0 0 0 0
40050- 0 0 0 0 0 0 0 0 0 0 0 0
40051- 0 0 0 0 0 0 0 0 0 0 0 0
40052- 0 0 0 0 0 0 0 0 0 10 10 10
40053- 34 34 34 82 82 82 30 30 30 61 42 6
40054-180 123 7 206 145 10 230 174 11 239 182 13
40055-234 190 10 238 202 15 241 208 19 246 218 74
40056-246 218 38 246 215 20 246 215 20 246 215 20
40057-226 184 13 215 174 15 184 144 12 6 6 6
40058- 2 2 6 2 2 6 2 2 6 2 2 6
40059- 26 26 26 94 94 94 42 42 42 14 14 14
40060- 0 0 0 0 0 0 0 0 0 0 0 0
40061- 0 0 0 0 0 0 0 0 0 0 0 0
40062- 0 0 0 0 0 0 0 0 0 0 0 0
40063- 0 0 0 0 0 0 0 0 0 0 0 0
40064- 0 0 0 0 0 0 0 0 0 0 0 0
40065- 0 0 0 0 0 0 0 0 0 0 0 0
40066- 0 0 0 0 0 0 0 0 0 0 0 0
40067- 0 0 0 0 0 0 0 0 0 0 0 0
40068- 0 0 0 0 0 0 0 0 0 0 0 0
40069- 0 0 0 0 0 0 0 0 0 0 0 0
40070- 0 0 0 0 0 0 0 0 0 0 0 0
40071- 0 0 0 0 0 0 0 0 0 0 0 0
40072- 0 0 0 0 0 0 0 0 0 10 10 10
40073- 30 30 30 78 78 78 50 50 50 104 69 6
40074-192 133 9 216 158 10 236 178 12 236 186 11
40075-232 195 16 241 208 19 244 214 54 245 215 43
40076-246 215 20 246 215 20 241 208 19 198 155 10
40077-200 144 11 216 158 10 156 118 10 2 2 6
40078- 2 2 6 2 2 6 2 2 6 2 2 6
40079- 6 6 6 90 90 90 54 54 54 18 18 18
40080- 6 6 6 0 0 0 0 0 0 0 0 0
40081- 0 0 0 0 0 0 0 0 0 0 0 0
40082- 0 0 0 0 0 0 0 0 0 0 0 0
40083- 0 0 0 0 0 0 0 0 0 0 0 0
40084- 0 0 0 0 0 0 0 0 0 0 0 0
40085- 0 0 0 0 0 0 0 0 0 0 0 0
40086- 0 0 0 0 0 0 0 0 0 0 0 0
40087- 0 0 0 0 0 0 0 0 0 0 0 0
40088- 0 0 0 0 0 0 0 0 0 0 0 0
40089- 0 0 0 0 0 0 0 0 0 0 0 0
40090- 0 0 0 0 0 0 0 0 0 0 0 0
40091- 0 0 0 0 0 0 0 0 0 0 0 0
40092- 0 0 0 0 0 0 0 0 0 10 10 10
40093- 30 30 30 78 78 78 46 46 46 22 22 22
40094-137 92 6 210 162 10 239 182 13 238 190 10
40095-238 202 15 241 208 19 246 215 20 246 215 20
40096-241 208 19 203 166 17 185 133 11 210 150 10
40097-216 158 10 210 150 10 102 78 10 2 2 6
40098- 6 6 6 54 54 54 14 14 14 2 2 6
40099- 2 2 6 62 62 62 74 74 74 30 30 30
40100- 10 10 10 0 0 0 0 0 0 0 0 0
40101- 0 0 0 0 0 0 0 0 0 0 0 0
40102- 0 0 0 0 0 0 0 0 0 0 0 0
40103- 0 0 0 0 0 0 0 0 0 0 0 0
40104- 0 0 0 0 0 0 0 0 0 0 0 0
40105- 0 0 0 0 0 0 0 0 0 0 0 0
40106- 0 0 0 0 0 0 0 0 0 0 0 0
40107- 0 0 0 0 0 0 0 0 0 0 0 0
40108- 0 0 0 0 0 0 0 0 0 0 0 0
40109- 0 0 0 0 0 0 0 0 0 0 0 0
40110- 0 0 0 0 0 0 0 0 0 0 0 0
40111- 0 0 0 0 0 0 0 0 0 0 0 0
40112- 0 0 0 0 0 0 0 0 0 10 10 10
40113- 34 34 34 78 78 78 50 50 50 6 6 6
40114- 94 70 30 139 102 15 190 146 13 226 184 13
40115-232 200 30 232 195 16 215 174 15 190 146 13
40116-168 122 10 192 133 9 210 150 10 213 154 11
40117-202 150 34 182 157 106 101 98 89 2 2 6
40118- 2 2 6 78 78 78 116 116 116 58 58 58
40119- 2 2 6 22 22 22 90 90 90 46 46 46
40120- 18 18 18 6 6 6 0 0 0 0 0 0
40121- 0 0 0 0 0 0 0 0 0 0 0 0
40122- 0 0 0 0 0 0 0 0 0 0 0 0
40123- 0 0 0 0 0 0 0 0 0 0 0 0
40124- 0 0 0 0 0 0 0 0 0 0 0 0
40125- 0 0 0 0 0 0 0 0 0 0 0 0
40126- 0 0 0 0 0 0 0 0 0 0 0 0
40127- 0 0 0 0 0 0 0 0 0 0 0 0
40128- 0 0 0 0 0 0 0 0 0 0 0 0
40129- 0 0 0 0 0 0 0 0 0 0 0 0
40130- 0 0 0 0 0 0 0 0 0 0 0 0
40131- 0 0 0 0 0 0 0 0 0 0 0 0
40132- 0 0 0 0 0 0 0 0 0 10 10 10
40133- 38 38 38 86 86 86 50 50 50 6 6 6
40134-128 128 128 174 154 114 156 107 11 168 122 10
40135-198 155 10 184 144 12 197 138 11 200 144 11
40136-206 145 10 206 145 10 197 138 11 188 164 115
40137-195 195 195 198 198 198 174 174 174 14 14 14
40138- 2 2 6 22 22 22 116 116 116 116 116 116
40139- 22 22 22 2 2 6 74 74 74 70 70 70
40140- 30 30 30 10 10 10 0 0 0 0 0 0
40141- 0 0 0 0 0 0 0 0 0 0 0 0
40142- 0 0 0 0 0 0 0 0 0 0 0 0
40143- 0 0 0 0 0 0 0 0 0 0 0 0
40144- 0 0 0 0 0 0 0 0 0 0 0 0
40145- 0 0 0 0 0 0 0 0 0 0 0 0
40146- 0 0 0 0 0 0 0 0 0 0 0 0
40147- 0 0 0 0 0 0 0 0 0 0 0 0
40148- 0 0 0 0 0 0 0 0 0 0 0 0
40149- 0 0 0 0 0 0 0 0 0 0 0 0
40150- 0 0 0 0 0 0 0 0 0 0 0 0
40151- 0 0 0 0 0 0 0 0 0 0 0 0
40152- 0 0 0 0 0 0 6 6 6 18 18 18
40153- 50 50 50 101 101 101 26 26 26 10 10 10
40154-138 138 138 190 190 190 174 154 114 156 107 11
40155-197 138 11 200 144 11 197 138 11 192 133 9
40156-180 123 7 190 142 34 190 178 144 187 187 187
40157-202 202 202 221 221 221 214 214 214 66 66 66
40158- 2 2 6 2 2 6 50 50 50 62 62 62
40159- 6 6 6 2 2 6 10 10 10 90 90 90
40160- 50 50 50 18 18 18 6 6 6 0 0 0
40161- 0 0 0 0 0 0 0 0 0 0 0 0
40162- 0 0 0 0 0 0 0 0 0 0 0 0
40163- 0 0 0 0 0 0 0 0 0 0 0 0
40164- 0 0 0 0 0 0 0 0 0 0 0 0
40165- 0 0 0 0 0 0 0 0 0 0 0 0
40166- 0 0 0 0 0 0 0 0 0 0 0 0
40167- 0 0 0 0 0 0 0 0 0 0 0 0
40168- 0 0 0 0 0 0 0 0 0 0 0 0
40169- 0 0 0 0 0 0 0 0 0 0 0 0
40170- 0 0 0 0 0 0 0 0 0 0 0 0
40171- 0 0 0 0 0 0 0 0 0 0 0 0
40172- 0 0 0 0 0 0 10 10 10 34 34 34
40173- 74 74 74 74 74 74 2 2 6 6 6 6
40174-144 144 144 198 198 198 190 190 190 178 166 146
40175-154 121 60 156 107 11 156 107 11 168 124 44
40176-174 154 114 187 187 187 190 190 190 210 210 210
40177-246 246 246 253 253 253 253 253 253 182 182 182
40178- 6 6 6 2 2 6 2 2 6 2 2 6
40179- 2 2 6 2 2 6 2 2 6 62 62 62
40180- 74 74 74 34 34 34 14 14 14 0 0 0
40181- 0 0 0 0 0 0 0 0 0 0 0 0
40182- 0 0 0 0 0 0 0 0 0 0 0 0
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 0 0 0 0 0 0 0 0 0 0 0 0
40185- 0 0 0 0 0 0 0 0 0 0 0 0
40186- 0 0 0 0 0 0 0 0 0 0 0 0
40187- 0 0 0 0 0 0 0 0 0 0 0 0
40188- 0 0 0 0 0 0 0 0 0 0 0 0
40189- 0 0 0 0 0 0 0 0 0 0 0 0
40190- 0 0 0 0 0 0 0 0 0 0 0 0
40191- 0 0 0 0 0 0 0 0 0 0 0 0
40192- 0 0 0 10 10 10 22 22 22 54 54 54
40193- 94 94 94 18 18 18 2 2 6 46 46 46
40194-234 234 234 221 221 221 190 190 190 190 190 190
40195-190 190 190 187 187 187 187 187 187 190 190 190
40196-190 190 190 195 195 195 214 214 214 242 242 242
40197-253 253 253 253 253 253 253 253 253 253 253 253
40198- 82 82 82 2 2 6 2 2 6 2 2 6
40199- 2 2 6 2 2 6 2 2 6 14 14 14
40200- 86 86 86 54 54 54 22 22 22 6 6 6
40201- 0 0 0 0 0 0 0 0 0 0 0 0
40202- 0 0 0 0 0 0 0 0 0 0 0 0
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 0 0 0 0 0 0 0 0 0 0 0 0
40205- 0 0 0 0 0 0 0 0 0 0 0 0
40206- 0 0 0 0 0 0 0 0 0 0 0 0
40207- 0 0 0 0 0 0 0 0 0 0 0 0
40208- 0 0 0 0 0 0 0 0 0 0 0 0
40209- 0 0 0 0 0 0 0 0 0 0 0 0
40210- 0 0 0 0 0 0 0 0 0 0 0 0
40211- 0 0 0 0 0 0 0 0 0 0 0 0
40212- 6 6 6 18 18 18 46 46 46 90 90 90
40213- 46 46 46 18 18 18 6 6 6 182 182 182
40214-253 253 253 246 246 246 206 206 206 190 190 190
40215-190 190 190 190 190 190 190 190 190 190 190 190
40216-206 206 206 231 231 231 250 250 250 253 253 253
40217-253 253 253 253 253 253 253 253 253 253 253 253
40218-202 202 202 14 14 14 2 2 6 2 2 6
40219- 2 2 6 2 2 6 2 2 6 2 2 6
40220- 42 42 42 86 86 86 42 42 42 18 18 18
40221- 6 6 6 0 0 0 0 0 0 0 0 0
40222- 0 0 0 0 0 0 0 0 0 0 0 0
40223- 0 0 0 0 0 0 0 0 0 0 0 0
40224- 0 0 0 0 0 0 0 0 0 0 0 0
40225- 0 0 0 0 0 0 0 0 0 0 0 0
40226- 0 0 0 0 0 0 0 0 0 0 0 0
40227- 0 0 0 0 0 0 0 0 0 0 0 0
40228- 0 0 0 0 0 0 0 0 0 0 0 0
40229- 0 0 0 0 0 0 0 0 0 0 0 0
40230- 0 0 0 0 0 0 0 0 0 0 0 0
40231- 0 0 0 0 0 0 0 0 0 6 6 6
40232- 14 14 14 38 38 38 74 74 74 66 66 66
40233- 2 2 6 6 6 6 90 90 90 250 250 250
40234-253 253 253 253 253 253 238 238 238 198 198 198
40235-190 190 190 190 190 190 195 195 195 221 221 221
40236-246 246 246 253 253 253 253 253 253 253 253 253
40237-253 253 253 253 253 253 253 253 253 253 253 253
40238-253 253 253 82 82 82 2 2 6 2 2 6
40239- 2 2 6 2 2 6 2 2 6 2 2 6
40240- 2 2 6 78 78 78 70 70 70 34 34 34
40241- 14 14 14 6 6 6 0 0 0 0 0 0
40242- 0 0 0 0 0 0 0 0 0 0 0 0
40243- 0 0 0 0 0 0 0 0 0 0 0 0
40244- 0 0 0 0 0 0 0 0 0 0 0 0
40245- 0 0 0 0 0 0 0 0 0 0 0 0
40246- 0 0 0 0 0 0 0 0 0 0 0 0
40247- 0 0 0 0 0 0 0 0 0 0 0 0
40248- 0 0 0 0 0 0 0 0 0 0 0 0
40249- 0 0 0 0 0 0 0 0 0 0 0 0
40250- 0 0 0 0 0 0 0 0 0 0 0 0
40251- 0 0 0 0 0 0 0 0 0 14 14 14
40252- 34 34 34 66 66 66 78 78 78 6 6 6
40253- 2 2 6 18 18 18 218 218 218 253 253 253
40254-253 253 253 253 253 253 253 253 253 246 246 246
40255-226 226 226 231 231 231 246 246 246 253 253 253
40256-253 253 253 253 253 253 253 253 253 253 253 253
40257-253 253 253 253 253 253 253 253 253 253 253 253
40258-253 253 253 178 178 178 2 2 6 2 2 6
40259- 2 2 6 2 2 6 2 2 6 2 2 6
40260- 2 2 6 18 18 18 90 90 90 62 62 62
40261- 30 30 30 10 10 10 0 0 0 0 0 0
40262- 0 0 0 0 0 0 0 0 0 0 0 0
40263- 0 0 0 0 0 0 0 0 0 0 0 0
40264- 0 0 0 0 0 0 0 0 0 0 0 0
40265- 0 0 0 0 0 0 0 0 0 0 0 0
40266- 0 0 0 0 0 0 0 0 0 0 0 0
40267- 0 0 0 0 0 0 0 0 0 0 0 0
40268- 0 0 0 0 0 0 0 0 0 0 0 0
40269- 0 0 0 0 0 0 0 0 0 0 0 0
40270- 0 0 0 0 0 0 0 0 0 0 0 0
40271- 0 0 0 0 0 0 10 10 10 26 26 26
40272- 58 58 58 90 90 90 18 18 18 2 2 6
40273- 2 2 6 110 110 110 253 253 253 253 253 253
40274-253 253 253 253 253 253 253 253 253 253 253 253
40275-250 250 250 253 253 253 253 253 253 253 253 253
40276-253 253 253 253 253 253 253 253 253 253 253 253
40277-253 253 253 253 253 253 253 253 253 253 253 253
40278-253 253 253 231 231 231 18 18 18 2 2 6
40279- 2 2 6 2 2 6 2 2 6 2 2 6
40280- 2 2 6 2 2 6 18 18 18 94 94 94
40281- 54 54 54 26 26 26 10 10 10 0 0 0
40282- 0 0 0 0 0 0 0 0 0 0 0 0
40283- 0 0 0 0 0 0 0 0 0 0 0 0
40284- 0 0 0 0 0 0 0 0 0 0 0 0
40285- 0 0 0 0 0 0 0 0 0 0 0 0
40286- 0 0 0 0 0 0 0 0 0 0 0 0
40287- 0 0 0 0 0 0 0 0 0 0 0 0
40288- 0 0 0 0 0 0 0 0 0 0 0 0
40289- 0 0 0 0 0 0 0 0 0 0 0 0
40290- 0 0 0 0 0 0 0 0 0 0 0 0
40291- 0 0 0 6 6 6 22 22 22 50 50 50
40292- 90 90 90 26 26 26 2 2 6 2 2 6
40293- 14 14 14 195 195 195 250 250 250 253 253 253
40294-253 253 253 253 253 253 253 253 253 253 253 253
40295-253 253 253 253 253 253 253 253 253 253 253 253
40296-253 253 253 253 253 253 253 253 253 253 253 253
40297-253 253 253 253 253 253 253 253 253 253 253 253
40298-250 250 250 242 242 242 54 54 54 2 2 6
40299- 2 2 6 2 2 6 2 2 6 2 2 6
40300- 2 2 6 2 2 6 2 2 6 38 38 38
40301- 86 86 86 50 50 50 22 22 22 6 6 6
40302- 0 0 0 0 0 0 0 0 0 0 0 0
40303- 0 0 0 0 0 0 0 0 0 0 0 0
40304- 0 0 0 0 0 0 0 0 0 0 0 0
40305- 0 0 0 0 0 0 0 0 0 0 0 0
40306- 0 0 0 0 0 0 0 0 0 0 0 0
40307- 0 0 0 0 0 0 0 0 0 0 0 0
40308- 0 0 0 0 0 0 0 0 0 0 0 0
40309- 0 0 0 0 0 0 0 0 0 0 0 0
40310- 0 0 0 0 0 0 0 0 0 0 0 0
40311- 6 6 6 14 14 14 38 38 38 82 82 82
40312- 34 34 34 2 2 6 2 2 6 2 2 6
40313- 42 42 42 195 195 195 246 246 246 253 253 253
40314-253 253 253 253 253 253 253 253 253 250 250 250
40315-242 242 242 242 242 242 250 250 250 253 253 253
40316-253 253 253 253 253 253 253 253 253 253 253 253
40317-253 253 253 250 250 250 246 246 246 238 238 238
40318-226 226 226 231 231 231 101 101 101 6 6 6
40319- 2 2 6 2 2 6 2 2 6 2 2 6
40320- 2 2 6 2 2 6 2 2 6 2 2 6
40321- 38 38 38 82 82 82 42 42 42 14 14 14
40322- 6 6 6 0 0 0 0 0 0 0 0 0
40323- 0 0 0 0 0 0 0 0 0 0 0 0
40324- 0 0 0 0 0 0 0 0 0 0 0 0
40325- 0 0 0 0 0 0 0 0 0 0 0 0
40326- 0 0 0 0 0 0 0 0 0 0 0 0
40327- 0 0 0 0 0 0 0 0 0 0 0 0
40328- 0 0 0 0 0 0 0 0 0 0 0 0
40329- 0 0 0 0 0 0 0 0 0 0 0 0
40330- 0 0 0 0 0 0 0 0 0 0 0 0
40331- 10 10 10 26 26 26 62 62 62 66 66 66
40332- 2 2 6 2 2 6 2 2 6 6 6 6
40333- 70 70 70 170 170 170 206 206 206 234 234 234
40334-246 246 246 250 250 250 250 250 250 238 238 238
40335-226 226 226 231 231 231 238 238 238 250 250 250
40336-250 250 250 250 250 250 246 246 246 231 231 231
40337-214 214 214 206 206 206 202 202 202 202 202 202
40338-198 198 198 202 202 202 182 182 182 18 18 18
40339- 2 2 6 2 2 6 2 2 6 2 2 6
40340- 2 2 6 2 2 6 2 2 6 2 2 6
40341- 2 2 6 62 62 62 66 66 66 30 30 30
40342- 10 10 10 0 0 0 0 0 0 0 0 0
40343- 0 0 0 0 0 0 0 0 0 0 0 0
40344- 0 0 0 0 0 0 0 0 0 0 0 0
40345- 0 0 0 0 0 0 0 0 0 0 0 0
40346- 0 0 0 0 0 0 0 0 0 0 0 0
40347- 0 0 0 0 0 0 0 0 0 0 0 0
40348- 0 0 0 0 0 0 0 0 0 0 0 0
40349- 0 0 0 0 0 0 0 0 0 0 0 0
40350- 0 0 0 0 0 0 0 0 0 0 0 0
40351- 14 14 14 42 42 42 82 82 82 18 18 18
40352- 2 2 6 2 2 6 2 2 6 10 10 10
40353- 94 94 94 182 182 182 218 218 218 242 242 242
40354-250 250 250 253 253 253 253 253 253 250 250 250
40355-234 234 234 253 253 253 253 253 253 253 253 253
40356-253 253 253 253 253 253 253 253 253 246 246 246
40357-238 238 238 226 226 226 210 210 210 202 202 202
40358-195 195 195 195 195 195 210 210 210 158 158 158
40359- 6 6 6 14 14 14 50 50 50 14 14 14
40360- 2 2 6 2 2 6 2 2 6 2 2 6
40361- 2 2 6 6 6 6 86 86 86 46 46 46
40362- 18 18 18 6 6 6 0 0 0 0 0 0
40363- 0 0 0 0 0 0 0 0 0 0 0 0
40364- 0 0 0 0 0 0 0 0 0 0 0 0
40365- 0 0 0 0 0 0 0 0 0 0 0 0
40366- 0 0 0 0 0 0 0 0 0 0 0 0
40367- 0 0 0 0 0 0 0 0 0 0 0 0
40368- 0 0 0 0 0 0 0 0 0 0 0 0
40369- 0 0 0 0 0 0 0 0 0 0 0 0
40370- 0 0 0 0 0 0 0 0 0 6 6 6
40371- 22 22 22 54 54 54 70 70 70 2 2 6
40372- 2 2 6 10 10 10 2 2 6 22 22 22
40373-166 166 166 231 231 231 250 250 250 253 253 253
40374-253 253 253 253 253 253 253 253 253 250 250 250
40375-242 242 242 253 253 253 253 253 253 253 253 253
40376-253 253 253 253 253 253 253 253 253 253 253 253
40377-253 253 253 253 253 253 253 253 253 246 246 246
40378-231 231 231 206 206 206 198 198 198 226 226 226
40379- 94 94 94 2 2 6 6 6 6 38 38 38
40380- 30 30 30 2 2 6 2 2 6 2 2 6
40381- 2 2 6 2 2 6 62 62 62 66 66 66
40382- 26 26 26 10 10 10 0 0 0 0 0 0
40383- 0 0 0 0 0 0 0 0 0 0 0 0
40384- 0 0 0 0 0 0 0 0 0 0 0 0
40385- 0 0 0 0 0 0 0 0 0 0 0 0
40386- 0 0 0 0 0 0 0 0 0 0 0 0
40387- 0 0 0 0 0 0 0 0 0 0 0 0
40388- 0 0 0 0 0 0 0 0 0 0 0 0
40389- 0 0 0 0 0 0 0 0 0 0 0 0
40390- 0 0 0 0 0 0 0 0 0 10 10 10
40391- 30 30 30 74 74 74 50 50 50 2 2 6
40392- 26 26 26 26 26 26 2 2 6 106 106 106
40393-238 238 238 253 253 253 253 253 253 253 253 253
40394-253 253 253 253 253 253 253 253 253 253 253 253
40395-253 253 253 253 253 253 253 253 253 253 253 253
40396-253 253 253 253 253 253 253 253 253 253 253 253
40397-253 253 253 253 253 253 253 253 253 253 253 253
40398-253 253 253 246 246 246 218 218 218 202 202 202
40399-210 210 210 14 14 14 2 2 6 2 2 6
40400- 30 30 30 22 22 22 2 2 6 2 2 6
40401- 2 2 6 2 2 6 18 18 18 86 86 86
40402- 42 42 42 14 14 14 0 0 0 0 0 0
40403- 0 0 0 0 0 0 0 0 0 0 0 0
40404- 0 0 0 0 0 0 0 0 0 0 0 0
40405- 0 0 0 0 0 0 0 0 0 0 0 0
40406- 0 0 0 0 0 0 0 0 0 0 0 0
40407- 0 0 0 0 0 0 0 0 0 0 0 0
40408- 0 0 0 0 0 0 0 0 0 0 0 0
40409- 0 0 0 0 0 0 0 0 0 0 0 0
40410- 0 0 0 0 0 0 0 0 0 14 14 14
40411- 42 42 42 90 90 90 22 22 22 2 2 6
40412- 42 42 42 2 2 6 18 18 18 218 218 218
40413-253 253 253 253 253 253 253 253 253 253 253 253
40414-253 253 253 253 253 253 253 253 253 253 253 253
40415-253 253 253 253 253 253 253 253 253 253 253 253
40416-253 253 253 253 253 253 253 253 253 253 253 253
40417-253 253 253 253 253 253 253 253 253 253 253 253
40418-253 253 253 253 253 253 250 250 250 221 221 221
40419-218 218 218 101 101 101 2 2 6 14 14 14
40420- 18 18 18 38 38 38 10 10 10 2 2 6
40421- 2 2 6 2 2 6 2 2 6 78 78 78
40422- 58 58 58 22 22 22 6 6 6 0 0 0
40423- 0 0 0 0 0 0 0 0 0 0 0 0
40424- 0 0 0 0 0 0 0 0 0 0 0 0
40425- 0 0 0 0 0 0 0 0 0 0 0 0
40426- 0 0 0 0 0 0 0 0 0 0 0 0
40427- 0 0 0 0 0 0 0 0 0 0 0 0
40428- 0 0 0 0 0 0 0 0 0 0 0 0
40429- 0 0 0 0 0 0 0 0 0 0 0 0
40430- 0 0 0 0 0 0 6 6 6 18 18 18
40431- 54 54 54 82 82 82 2 2 6 26 26 26
40432- 22 22 22 2 2 6 123 123 123 253 253 253
40433-253 253 253 253 253 253 253 253 253 253 253 253
40434-253 253 253 253 253 253 253 253 253 253 253 253
40435-253 253 253 253 253 253 253 253 253 253 253 253
40436-253 253 253 253 253 253 253 253 253 253 253 253
40437-253 253 253 253 253 253 253 253 253 253 253 253
40438-253 253 253 253 253 253 253 253 253 250 250 250
40439-238 238 238 198 198 198 6 6 6 38 38 38
40440- 58 58 58 26 26 26 38 38 38 2 2 6
40441- 2 2 6 2 2 6 2 2 6 46 46 46
40442- 78 78 78 30 30 30 10 10 10 0 0 0
40443- 0 0 0 0 0 0 0 0 0 0 0 0
40444- 0 0 0 0 0 0 0 0 0 0 0 0
40445- 0 0 0 0 0 0 0 0 0 0 0 0
40446- 0 0 0 0 0 0 0 0 0 0 0 0
40447- 0 0 0 0 0 0 0 0 0 0 0 0
40448- 0 0 0 0 0 0 0 0 0 0 0 0
40449- 0 0 0 0 0 0 0 0 0 0 0 0
40450- 0 0 0 0 0 0 10 10 10 30 30 30
40451- 74 74 74 58 58 58 2 2 6 42 42 42
40452- 2 2 6 22 22 22 231 231 231 253 253 253
40453-253 253 253 253 253 253 253 253 253 253 253 253
40454-253 253 253 253 253 253 253 253 253 250 250 250
40455-253 253 253 253 253 253 253 253 253 253 253 253
40456-253 253 253 253 253 253 253 253 253 253 253 253
40457-253 253 253 253 253 253 253 253 253 253 253 253
40458-253 253 253 253 253 253 253 253 253 253 253 253
40459-253 253 253 246 246 246 46 46 46 38 38 38
40460- 42 42 42 14 14 14 38 38 38 14 14 14
40461- 2 2 6 2 2 6 2 2 6 6 6 6
40462- 86 86 86 46 46 46 14 14 14 0 0 0
40463- 0 0 0 0 0 0 0 0 0 0 0 0
40464- 0 0 0 0 0 0 0 0 0 0 0 0
40465- 0 0 0 0 0 0 0 0 0 0 0 0
40466- 0 0 0 0 0 0 0 0 0 0 0 0
40467- 0 0 0 0 0 0 0 0 0 0 0 0
40468- 0 0 0 0 0 0 0 0 0 0 0 0
40469- 0 0 0 0 0 0 0 0 0 0 0 0
40470- 0 0 0 6 6 6 14 14 14 42 42 42
40471- 90 90 90 18 18 18 18 18 18 26 26 26
40472- 2 2 6 116 116 116 253 253 253 253 253 253
40473-253 253 253 253 253 253 253 253 253 253 253 253
40474-253 253 253 253 253 253 250 250 250 238 238 238
40475-253 253 253 253 253 253 253 253 253 253 253 253
40476-253 253 253 253 253 253 253 253 253 253 253 253
40477-253 253 253 253 253 253 253 253 253 253 253 253
40478-253 253 253 253 253 253 253 253 253 253 253 253
40479-253 253 253 253 253 253 94 94 94 6 6 6
40480- 2 2 6 2 2 6 10 10 10 34 34 34
40481- 2 2 6 2 2 6 2 2 6 2 2 6
40482- 74 74 74 58 58 58 22 22 22 6 6 6
40483- 0 0 0 0 0 0 0 0 0 0 0 0
40484- 0 0 0 0 0 0 0 0 0 0 0 0
40485- 0 0 0 0 0 0 0 0 0 0 0 0
40486- 0 0 0 0 0 0 0 0 0 0 0 0
40487- 0 0 0 0 0 0 0 0 0 0 0 0
40488- 0 0 0 0 0 0 0 0 0 0 0 0
40489- 0 0 0 0 0 0 0 0 0 0 0 0
40490- 0 0 0 10 10 10 26 26 26 66 66 66
40491- 82 82 82 2 2 6 38 38 38 6 6 6
40492- 14 14 14 210 210 210 253 253 253 253 253 253
40493-253 253 253 253 253 253 253 253 253 253 253 253
40494-253 253 253 253 253 253 246 246 246 242 242 242
40495-253 253 253 253 253 253 253 253 253 253 253 253
40496-253 253 253 253 253 253 253 253 253 253 253 253
40497-253 253 253 253 253 253 253 253 253 253 253 253
40498-253 253 253 253 253 253 253 253 253 253 253 253
40499-253 253 253 253 253 253 144 144 144 2 2 6
40500- 2 2 6 2 2 6 2 2 6 46 46 46
40501- 2 2 6 2 2 6 2 2 6 2 2 6
40502- 42 42 42 74 74 74 30 30 30 10 10 10
40503- 0 0 0 0 0 0 0 0 0 0 0 0
40504- 0 0 0 0 0 0 0 0 0 0 0 0
40505- 0 0 0 0 0 0 0 0 0 0 0 0
40506- 0 0 0 0 0 0 0 0 0 0 0 0
40507- 0 0 0 0 0 0 0 0 0 0 0 0
40508- 0 0 0 0 0 0 0 0 0 0 0 0
40509- 0 0 0 0 0 0 0 0 0 0 0 0
40510- 6 6 6 14 14 14 42 42 42 90 90 90
40511- 26 26 26 6 6 6 42 42 42 2 2 6
40512- 74 74 74 250 250 250 253 253 253 253 253 253
40513-253 253 253 253 253 253 253 253 253 253 253 253
40514-253 253 253 253 253 253 242 242 242 242 242 242
40515-253 253 253 253 253 253 253 253 253 253 253 253
40516-253 253 253 253 253 253 253 253 253 253 253 253
40517-253 253 253 253 253 253 253 253 253 253 253 253
40518-253 253 253 253 253 253 253 253 253 253 253 253
40519-253 253 253 253 253 253 182 182 182 2 2 6
40520- 2 2 6 2 2 6 2 2 6 46 46 46
40521- 2 2 6 2 2 6 2 2 6 2 2 6
40522- 10 10 10 86 86 86 38 38 38 10 10 10
40523- 0 0 0 0 0 0 0 0 0 0 0 0
40524- 0 0 0 0 0 0 0 0 0 0 0 0
40525- 0 0 0 0 0 0 0 0 0 0 0 0
40526- 0 0 0 0 0 0 0 0 0 0 0 0
40527- 0 0 0 0 0 0 0 0 0 0 0 0
40528- 0 0 0 0 0 0 0 0 0 0 0 0
40529- 0 0 0 0 0 0 0 0 0 0 0 0
40530- 10 10 10 26 26 26 66 66 66 82 82 82
40531- 2 2 6 22 22 22 18 18 18 2 2 6
40532-149 149 149 253 253 253 253 253 253 253 253 253
40533-253 253 253 253 253 253 253 253 253 253 253 253
40534-253 253 253 253 253 253 234 234 234 242 242 242
40535-253 253 253 253 253 253 253 253 253 253 253 253
40536-253 253 253 253 253 253 253 253 253 253 253 253
40537-253 253 253 253 253 253 253 253 253 253 253 253
40538-253 253 253 253 253 253 253 253 253 253 253 253
40539-253 253 253 253 253 253 206 206 206 2 2 6
40540- 2 2 6 2 2 6 2 2 6 38 38 38
40541- 2 2 6 2 2 6 2 2 6 2 2 6
40542- 6 6 6 86 86 86 46 46 46 14 14 14
40543- 0 0 0 0 0 0 0 0 0 0 0 0
40544- 0 0 0 0 0 0 0 0 0 0 0 0
40545- 0 0 0 0 0 0 0 0 0 0 0 0
40546- 0 0 0 0 0 0 0 0 0 0 0 0
40547- 0 0 0 0 0 0 0 0 0 0 0 0
40548- 0 0 0 0 0 0 0 0 0 0 0 0
40549- 0 0 0 0 0 0 0 0 0 6 6 6
40550- 18 18 18 46 46 46 86 86 86 18 18 18
40551- 2 2 6 34 34 34 10 10 10 6 6 6
40552-210 210 210 253 253 253 253 253 253 253 253 253
40553-253 253 253 253 253 253 253 253 253 253 253 253
40554-253 253 253 253 253 253 234 234 234 242 242 242
40555-253 253 253 253 253 253 253 253 253 253 253 253
40556-253 253 253 253 253 253 253 253 253 253 253 253
40557-253 253 253 253 253 253 253 253 253 253 253 253
40558-253 253 253 253 253 253 253 253 253 253 253 253
40559-253 253 253 253 253 253 221 221 221 6 6 6
40560- 2 2 6 2 2 6 6 6 6 30 30 30
40561- 2 2 6 2 2 6 2 2 6 2 2 6
40562- 2 2 6 82 82 82 54 54 54 18 18 18
40563- 6 6 6 0 0 0 0 0 0 0 0 0
40564- 0 0 0 0 0 0 0 0 0 0 0 0
40565- 0 0 0 0 0 0 0 0 0 0 0 0
40566- 0 0 0 0 0 0 0 0 0 0 0 0
40567- 0 0 0 0 0 0 0 0 0 0 0 0
40568- 0 0 0 0 0 0 0 0 0 0 0 0
40569- 0 0 0 0 0 0 0 0 0 10 10 10
40570- 26 26 26 66 66 66 62 62 62 2 2 6
40571- 2 2 6 38 38 38 10 10 10 26 26 26
40572-238 238 238 253 253 253 253 253 253 253 253 253
40573-253 253 253 253 253 253 253 253 253 253 253 253
40574-253 253 253 253 253 253 231 231 231 238 238 238
40575-253 253 253 253 253 253 253 253 253 253 253 253
40576-253 253 253 253 253 253 253 253 253 253 253 253
40577-253 253 253 253 253 253 253 253 253 253 253 253
40578-253 253 253 253 253 253 253 253 253 253 253 253
40579-253 253 253 253 253 253 231 231 231 6 6 6
40580- 2 2 6 2 2 6 10 10 10 30 30 30
40581- 2 2 6 2 2 6 2 2 6 2 2 6
40582- 2 2 6 66 66 66 58 58 58 22 22 22
40583- 6 6 6 0 0 0 0 0 0 0 0 0
40584- 0 0 0 0 0 0 0 0 0 0 0 0
40585- 0 0 0 0 0 0 0 0 0 0 0 0
40586- 0 0 0 0 0 0 0 0 0 0 0 0
40587- 0 0 0 0 0 0 0 0 0 0 0 0
40588- 0 0 0 0 0 0 0 0 0 0 0 0
40589- 0 0 0 0 0 0 0 0 0 10 10 10
40590- 38 38 38 78 78 78 6 6 6 2 2 6
40591- 2 2 6 46 46 46 14 14 14 42 42 42
40592-246 246 246 253 253 253 253 253 253 253 253 253
40593-253 253 253 253 253 253 253 253 253 253 253 253
40594-253 253 253 253 253 253 231 231 231 242 242 242
40595-253 253 253 253 253 253 253 253 253 253 253 253
40596-253 253 253 253 253 253 253 253 253 253 253 253
40597-253 253 253 253 253 253 253 253 253 253 253 253
40598-253 253 253 253 253 253 253 253 253 253 253 253
40599-253 253 253 253 253 253 234 234 234 10 10 10
40600- 2 2 6 2 2 6 22 22 22 14 14 14
40601- 2 2 6 2 2 6 2 2 6 2 2 6
40602- 2 2 6 66 66 66 62 62 62 22 22 22
40603- 6 6 6 0 0 0 0 0 0 0 0 0
40604- 0 0 0 0 0 0 0 0 0 0 0 0
40605- 0 0 0 0 0 0 0 0 0 0 0 0
40606- 0 0 0 0 0 0 0 0 0 0 0 0
40607- 0 0 0 0 0 0 0 0 0 0 0 0
40608- 0 0 0 0 0 0 0 0 0 0 0 0
40609- 0 0 0 0 0 0 6 6 6 18 18 18
40610- 50 50 50 74 74 74 2 2 6 2 2 6
40611- 14 14 14 70 70 70 34 34 34 62 62 62
40612-250 250 250 253 253 253 253 253 253 253 253 253
40613-253 253 253 253 253 253 253 253 253 253 253 253
40614-253 253 253 253 253 253 231 231 231 246 246 246
40615-253 253 253 253 253 253 253 253 253 253 253 253
40616-253 253 253 253 253 253 253 253 253 253 253 253
40617-253 253 253 253 253 253 253 253 253 253 253 253
40618-253 253 253 253 253 253 253 253 253 253 253 253
40619-253 253 253 253 253 253 234 234 234 14 14 14
40620- 2 2 6 2 2 6 30 30 30 2 2 6
40621- 2 2 6 2 2 6 2 2 6 2 2 6
40622- 2 2 6 66 66 66 62 62 62 22 22 22
40623- 6 6 6 0 0 0 0 0 0 0 0 0
40624- 0 0 0 0 0 0 0 0 0 0 0 0
40625- 0 0 0 0 0 0 0 0 0 0 0 0
40626- 0 0 0 0 0 0 0 0 0 0 0 0
40627- 0 0 0 0 0 0 0 0 0 0 0 0
40628- 0 0 0 0 0 0 0 0 0 0 0 0
40629- 0 0 0 0 0 0 6 6 6 18 18 18
40630- 54 54 54 62 62 62 2 2 6 2 2 6
40631- 2 2 6 30 30 30 46 46 46 70 70 70
40632-250 250 250 253 253 253 253 253 253 253 253 253
40633-253 253 253 253 253 253 253 253 253 253 253 253
40634-253 253 253 253 253 253 231 231 231 246 246 246
40635-253 253 253 253 253 253 253 253 253 253 253 253
40636-253 253 253 253 253 253 253 253 253 253 253 253
40637-253 253 253 253 253 253 253 253 253 253 253 253
40638-253 253 253 253 253 253 253 253 253 253 253 253
40639-253 253 253 253 253 253 226 226 226 10 10 10
40640- 2 2 6 6 6 6 30 30 30 2 2 6
40641- 2 2 6 2 2 6 2 2 6 2 2 6
40642- 2 2 6 66 66 66 58 58 58 22 22 22
40643- 6 6 6 0 0 0 0 0 0 0 0 0
40644- 0 0 0 0 0 0 0 0 0 0 0 0
40645- 0 0 0 0 0 0 0 0 0 0 0 0
40646- 0 0 0 0 0 0 0 0 0 0 0 0
40647- 0 0 0 0 0 0 0 0 0 0 0 0
40648- 0 0 0 0 0 0 0 0 0 0 0 0
40649- 0 0 0 0 0 0 6 6 6 22 22 22
40650- 58 58 58 62 62 62 2 2 6 2 2 6
40651- 2 2 6 2 2 6 30 30 30 78 78 78
40652-250 250 250 253 253 253 253 253 253 253 253 253
40653-253 253 253 253 253 253 253 253 253 253 253 253
40654-253 253 253 253 253 253 231 231 231 246 246 246
40655-253 253 253 253 253 253 253 253 253 253 253 253
40656-253 253 253 253 253 253 253 253 253 253 253 253
40657-253 253 253 253 253 253 253 253 253 253 253 253
40658-253 253 253 253 253 253 253 253 253 253 253 253
40659-253 253 253 253 253 253 206 206 206 2 2 6
40660- 22 22 22 34 34 34 18 14 6 22 22 22
40661- 26 26 26 18 18 18 6 6 6 2 2 6
40662- 2 2 6 82 82 82 54 54 54 18 18 18
40663- 6 6 6 0 0 0 0 0 0 0 0 0
40664- 0 0 0 0 0 0 0 0 0 0 0 0
40665- 0 0 0 0 0 0 0 0 0 0 0 0
40666- 0 0 0 0 0 0 0 0 0 0 0 0
40667- 0 0 0 0 0 0 0 0 0 0 0 0
40668- 0 0 0 0 0 0 0 0 0 0 0 0
40669- 0 0 0 0 0 0 6 6 6 26 26 26
40670- 62 62 62 106 106 106 74 54 14 185 133 11
40671-210 162 10 121 92 8 6 6 6 62 62 62
40672-238 238 238 253 253 253 253 253 253 253 253 253
40673-253 253 253 253 253 253 253 253 253 253 253 253
40674-253 253 253 253 253 253 231 231 231 246 246 246
40675-253 253 253 253 253 253 253 253 253 253 253 253
40676-253 253 253 253 253 253 253 253 253 253 253 253
40677-253 253 253 253 253 253 253 253 253 253 253 253
40678-253 253 253 253 253 253 253 253 253 253 253 253
40679-253 253 253 253 253 253 158 158 158 18 18 18
40680- 14 14 14 2 2 6 2 2 6 2 2 6
40681- 6 6 6 18 18 18 66 66 66 38 38 38
40682- 6 6 6 94 94 94 50 50 50 18 18 18
40683- 6 6 6 0 0 0 0 0 0 0 0 0
40684- 0 0 0 0 0 0 0 0 0 0 0 0
40685- 0 0 0 0 0 0 0 0 0 0 0 0
40686- 0 0 0 0 0 0 0 0 0 0 0 0
40687- 0 0 0 0 0 0 0 0 0 0 0 0
40688- 0 0 0 0 0 0 0 0 0 6 6 6
40689- 10 10 10 10 10 10 18 18 18 38 38 38
40690- 78 78 78 142 134 106 216 158 10 242 186 14
40691-246 190 14 246 190 14 156 118 10 10 10 10
40692- 90 90 90 238 238 238 253 253 253 253 253 253
40693-253 253 253 253 253 253 253 253 253 253 253 253
40694-253 253 253 253 253 253 231 231 231 250 250 250
40695-253 253 253 253 253 253 253 253 253 253 253 253
40696-253 253 253 253 253 253 253 253 253 253 253 253
40697-253 253 253 253 253 253 253 253 253 253 253 253
40698-253 253 253 253 253 253 253 253 253 246 230 190
40699-238 204 91 238 204 91 181 142 44 37 26 9
40700- 2 2 6 2 2 6 2 2 6 2 2 6
40701- 2 2 6 2 2 6 38 38 38 46 46 46
40702- 26 26 26 106 106 106 54 54 54 18 18 18
40703- 6 6 6 0 0 0 0 0 0 0 0 0
40704- 0 0 0 0 0 0 0 0 0 0 0 0
40705- 0 0 0 0 0 0 0 0 0 0 0 0
40706- 0 0 0 0 0 0 0 0 0 0 0 0
40707- 0 0 0 0 0 0 0 0 0 0 0 0
40708- 0 0 0 6 6 6 14 14 14 22 22 22
40709- 30 30 30 38 38 38 50 50 50 70 70 70
40710-106 106 106 190 142 34 226 170 11 242 186 14
40711-246 190 14 246 190 14 246 190 14 154 114 10
40712- 6 6 6 74 74 74 226 226 226 253 253 253
40713-253 253 253 253 253 253 253 253 253 253 253 253
40714-253 253 253 253 253 253 231 231 231 250 250 250
40715-253 253 253 253 253 253 253 253 253 253 253 253
40716-253 253 253 253 253 253 253 253 253 253 253 253
40717-253 253 253 253 253 253 253 253 253 253 253 253
40718-253 253 253 253 253 253 253 253 253 228 184 62
40719-241 196 14 241 208 19 232 195 16 38 30 10
40720- 2 2 6 2 2 6 2 2 6 2 2 6
40721- 2 2 6 6 6 6 30 30 30 26 26 26
40722-203 166 17 154 142 90 66 66 66 26 26 26
40723- 6 6 6 0 0 0 0 0 0 0 0 0
40724- 0 0 0 0 0 0 0 0 0 0 0 0
40725- 0 0 0 0 0 0 0 0 0 0 0 0
40726- 0 0 0 0 0 0 0 0 0 0 0 0
40727- 0 0 0 0 0 0 0 0 0 0 0 0
40728- 6 6 6 18 18 18 38 38 38 58 58 58
40729- 78 78 78 86 86 86 101 101 101 123 123 123
40730-175 146 61 210 150 10 234 174 13 246 186 14
40731-246 190 14 246 190 14 246 190 14 238 190 10
40732-102 78 10 2 2 6 46 46 46 198 198 198
40733-253 253 253 253 253 253 253 253 253 253 253 253
40734-253 253 253 253 253 253 234 234 234 242 242 242
40735-253 253 253 253 253 253 253 253 253 253 253 253
40736-253 253 253 253 253 253 253 253 253 253 253 253
40737-253 253 253 253 253 253 253 253 253 253 253 253
40738-253 253 253 253 253 253 253 253 253 224 178 62
40739-242 186 14 241 196 14 210 166 10 22 18 6
40740- 2 2 6 2 2 6 2 2 6 2 2 6
40741- 2 2 6 2 2 6 6 6 6 121 92 8
40742-238 202 15 232 195 16 82 82 82 34 34 34
40743- 10 10 10 0 0 0 0 0 0 0 0 0
40744- 0 0 0 0 0 0 0 0 0 0 0 0
40745- 0 0 0 0 0 0 0 0 0 0 0 0
40746- 0 0 0 0 0 0 0 0 0 0 0 0
40747- 0 0 0 0 0 0 0 0 0 0 0 0
40748- 14 14 14 38 38 38 70 70 70 154 122 46
40749-190 142 34 200 144 11 197 138 11 197 138 11
40750-213 154 11 226 170 11 242 186 14 246 190 14
40751-246 190 14 246 190 14 246 190 14 246 190 14
40752-225 175 15 46 32 6 2 2 6 22 22 22
40753-158 158 158 250 250 250 253 253 253 253 253 253
40754-253 253 253 253 253 253 253 253 253 253 253 253
40755-253 253 253 253 253 253 253 253 253 253 253 253
40756-253 253 253 253 253 253 253 253 253 253 253 253
40757-253 253 253 253 253 253 253 253 253 253 253 253
40758-253 253 253 250 250 250 242 242 242 224 178 62
40759-239 182 13 236 186 11 213 154 11 46 32 6
40760- 2 2 6 2 2 6 2 2 6 2 2 6
40761- 2 2 6 2 2 6 61 42 6 225 175 15
40762-238 190 10 236 186 11 112 100 78 42 42 42
40763- 14 14 14 0 0 0 0 0 0 0 0 0
40764- 0 0 0 0 0 0 0 0 0 0 0 0
40765- 0 0 0 0 0 0 0 0 0 0 0 0
40766- 0 0 0 0 0 0 0 0 0 0 0 0
40767- 0 0 0 0 0 0 0 0 0 6 6 6
40768- 22 22 22 54 54 54 154 122 46 213 154 11
40769-226 170 11 230 174 11 226 170 11 226 170 11
40770-236 178 12 242 186 14 246 190 14 246 190 14
40771-246 190 14 246 190 14 246 190 14 246 190 14
40772-241 196 14 184 144 12 10 10 10 2 2 6
40773- 6 6 6 116 116 116 242 242 242 253 253 253
40774-253 253 253 253 253 253 253 253 253 253 253 253
40775-253 253 253 253 253 253 253 253 253 253 253 253
40776-253 253 253 253 253 253 253 253 253 253 253 253
40777-253 253 253 253 253 253 253 253 253 253 253 253
40778-253 253 253 231 231 231 198 198 198 214 170 54
40779-236 178 12 236 178 12 210 150 10 137 92 6
40780- 18 14 6 2 2 6 2 2 6 2 2 6
40781- 6 6 6 70 47 6 200 144 11 236 178 12
40782-239 182 13 239 182 13 124 112 88 58 58 58
40783- 22 22 22 6 6 6 0 0 0 0 0 0
40784- 0 0 0 0 0 0 0 0 0 0 0 0
40785- 0 0 0 0 0 0 0 0 0 0 0 0
40786- 0 0 0 0 0 0 0 0 0 0 0 0
40787- 0 0 0 0 0 0 0 0 0 10 10 10
40788- 30 30 30 70 70 70 180 133 36 226 170 11
40789-239 182 13 242 186 14 242 186 14 246 186 14
40790-246 190 14 246 190 14 246 190 14 246 190 14
40791-246 190 14 246 190 14 246 190 14 246 190 14
40792-246 190 14 232 195 16 98 70 6 2 2 6
40793- 2 2 6 2 2 6 66 66 66 221 221 221
40794-253 253 253 253 253 253 253 253 253 253 253 253
40795-253 253 253 253 253 253 253 253 253 253 253 253
40796-253 253 253 253 253 253 253 253 253 253 253 253
40797-253 253 253 253 253 253 253 253 253 253 253 253
40798-253 253 253 206 206 206 198 198 198 214 166 58
40799-230 174 11 230 174 11 216 158 10 192 133 9
40800-163 110 8 116 81 8 102 78 10 116 81 8
40801-167 114 7 197 138 11 226 170 11 239 182 13
40802-242 186 14 242 186 14 162 146 94 78 78 78
40803- 34 34 34 14 14 14 6 6 6 0 0 0
40804- 0 0 0 0 0 0 0 0 0 0 0 0
40805- 0 0 0 0 0 0 0 0 0 0 0 0
40806- 0 0 0 0 0 0 0 0 0 0 0 0
40807- 0 0 0 0 0 0 0 0 0 6 6 6
40808- 30 30 30 78 78 78 190 142 34 226 170 11
40809-239 182 13 246 190 14 246 190 14 246 190 14
40810-246 190 14 246 190 14 246 190 14 246 190 14
40811-246 190 14 246 190 14 246 190 14 246 190 14
40812-246 190 14 241 196 14 203 166 17 22 18 6
40813- 2 2 6 2 2 6 2 2 6 38 38 38
40814-218 218 218 253 253 253 253 253 253 253 253 253
40815-253 253 253 253 253 253 253 253 253 253 253 253
40816-253 253 253 253 253 253 253 253 253 253 253 253
40817-253 253 253 253 253 253 253 253 253 253 253 253
40818-250 250 250 206 206 206 198 198 198 202 162 69
40819-226 170 11 236 178 12 224 166 10 210 150 10
40820-200 144 11 197 138 11 192 133 9 197 138 11
40821-210 150 10 226 170 11 242 186 14 246 190 14
40822-246 190 14 246 186 14 225 175 15 124 112 88
40823- 62 62 62 30 30 30 14 14 14 6 6 6
40824- 0 0 0 0 0 0 0 0 0 0 0 0
40825- 0 0 0 0 0 0 0 0 0 0 0 0
40826- 0 0 0 0 0 0 0 0 0 0 0 0
40827- 0 0 0 0 0 0 0 0 0 10 10 10
40828- 30 30 30 78 78 78 174 135 50 224 166 10
40829-239 182 13 246 190 14 246 190 14 246 190 14
40830-246 190 14 246 190 14 246 190 14 246 190 14
40831-246 190 14 246 190 14 246 190 14 246 190 14
40832-246 190 14 246 190 14 241 196 14 139 102 15
40833- 2 2 6 2 2 6 2 2 6 2 2 6
40834- 78 78 78 250 250 250 253 253 253 253 253 253
40835-253 253 253 253 253 253 253 253 253 253 253 253
40836-253 253 253 253 253 253 253 253 253 253 253 253
40837-253 253 253 253 253 253 253 253 253 253 253 253
40838-250 250 250 214 214 214 198 198 198 190 150 46
40839-219 162 10 236 178 12 234 174 13 224 166 10
40840-216 158 10 213 154 11 213 154 11 216 158 10
40841-226 170 11 239 182 13 246 190 14 246 190 14
40842-246 190 14 246 190 14 242 186 14 206 162 42
40843-101 101 101 58 58 58 30 30 30 14 14 14
40844- 6 6 6 0 0 0 0 0 0 0 0 0
40845- 0 0 0 0 0 0 0 0 0 0 0 0
40846- 0 0 0 0 0 0 0 0 0 0 0 0
40847- 0 0 0 0 0 0 0 0 0 10 10 10
40848- 30 30 30 74 74 74 174 135 50 216 158 10
40849-236 178 12 246 190 14 246 190 14 246 190 14
40850-246 190 14 246 190 14 246 190 14 246 190 14
40851-246 190 14 246 190 14 246 190 14 246 190 14
40852-246 190 14 246 190 14 241 196 14 226 184 13
40853- 61 42 6 2 2 6 2 2 6 2 2 6
40854- 22 22 22 238 238 238 253 253 253 253 253 253
40855-253 253 253 253 253 253 253 253 253 253 253 253
40856-253 253 253 253 253 253 253 253 253 253 253 253
40857-253 253 253 253 253 253 253 253 253 253 253 253
40858-253 253 253 226 226 226 187 187 187 180 133 36
40859-216 158 10 236 178 12 239 182 13 236 178 12
40860-230 174 11 226 170 11 226 170 11 230 174 11
40861-236 178 12 242 186 14 246 190 14 246 190 14
40862-246 190 14 246 190 14 246 186 14 239 182 13
40863-206 162 42 106 106 106 66 66 66 34 34 34
40864- 14 14 14 6 6 6 0 0 0 0 0 0
40865- 0 0 0 0 0 0 0 0 0 0 0 0
40866- 0 0 0 0 0 0 0 0 0 0 0 0
40867- 0 0 0 0 0 0 0 0 0 6 6 6
40868- 26 26 26 70 70 70 163 133 67 213 154 11
40869-236 178 12 246 190 14 246 190 14 246 190 14
40870-246 190 14 246 190 14 246 190 14 246 190 14
40871-246 190 14 246 190 14 246 190 14 246 190 14
40872-246 190 14 246 190 14 246 190 14 241 196 14
40873-190 146 13 18 14 6 2 2 6 2 2 6
40874- 46 46 46 246 246 246 253 253 253 253 253 253
40875-253 253 253 253 253 253 253 253 253 253 253 253
40876-253 253 253 253 253 253 253 253 253 253 253 253
40877-253 253 253 253 253 253 253 253 253 253 253 253
40878-253 253 253 221 221 221 86 86 86 156 107 11
40879-216 158 10 236 178 12 242 186 14 246 186 14
40880-242 186 14 239 182 13 239 182 13 242 186 14
40881-242 186 14 246 186 14 246 190 14 246 190 14
40882-246 190 14 246 190 14 246 190 14 246 190 14
40883-242 186 14 225 175 15 142 122 72 66 66 66
40884- 30 30 30 10 10 10 0 0 0 0 0 0
40885- 0 0 0 0 0 0 0 0 0 0 0 0
40886- 0 0 0 0 0 0 0 0 0 0 0 0
40887- 0 0 0 0 0 0 0 0 0 6 6 6
40888- 26 26 26 70 70 70 163 133 67 210 150 10
40889-236 178 12 246 190 14 246 190 14 246 190 14
40890-246 190 14 246 190 14 246 190 14 246 190 14
40891-246 190 14 246 190 14 246 190 14 246 190 14
40892-246 190 14 246 190 14 246 190 14 246 190 14
40893-232 195 16 121 92 8 34 34 34 106 106 106
40894-221 221 221 253 253 253 253 253 253 253 253 253
40895-253 253 253 253 253 253 253 253 253 253 253 253
40896-253 253 253 253 253 253 253 253 253 253 253 253
40897-253 253 253 253 253 253 253 253 253 253 253 253
40898-242 242 242 82 82 82 18 14 6 163 110 8
40899-216 158 10 236 178 12 242 186 14 246 190 14
40900-246 190 14 246 190 14 246 190 14 246 190 14
40901-246 190 14 246 190 14 246 190 14 246 190 14
40902-246 190 14 246 190 14 246 190 14 246 190 14
40903-246 190 14 246 190 14 242 186 14 163 133 67
40904- 46 46 46 18 18 18 6 6 6 0 0 0
40905- 0 0 0 0 0 0 0 0 0 0 0 0
40906- 0 0 0 0 0 0 0 0 0 0 0 0
40907- 0 0 0 0 0 0 0 0 0 10 10 10
40908- 30 30 30 78 78 78 163 133 67 210 150 10
40909-236 178 12 246 186 14 246 190 14 246 190 14
40910-246 190 14 246 190 14 246 190 14 246 190 14
40911-246 190 14 246 190 14 246 190 14 246 190 14
40912-246 190 14 246 190 14 246 190 14 246 190 14
40913-241 196 14 215 174 15 190 178 144 253 253 253
40914-253 253 253 253 253 253 253 253 253 253 253 253
40915-253 253 253 253 253 253 253 253 253 253 253 253
40916-253 253 253 253 253 253 253 253 253 253 253 253
40917-253 253 253 253 253 253 253 253 253 218 218 218
40918- 58 58 58 2 2 6 22 18 6 167 114 7
40919-216 158 10 236 178 12 246 186 14 246 190 14
40920-246 190 14 246 190 14 246 190 14 246 190 14
40921-246 190 14 246 190 14 246 190 14 246 190 14
40922-246 190 14 246 190 14 246 190 14 246 190 14
40923-246 190 14 246 186 14 242 186 14 190 150 46
40924- 54 54 54 22 22 22 6 6 6 0 0 0
40925- 0 0 0 0 0 0 0 0 0 0 0 0
40926- 0 0 0 0 0 0 0 0 0 0 0 0
40927- 0 0 0 0 0 0 0 0 0 14 14 14
40928- 38 38 38 86 86 86 180 133 36 213 154 11
40929-236 178 12 246 186 14 246 190 14 246 190 14
40930-246 190 14 246 190 14 246 190 14 246 190 14
40931-246 190 14 246 190 14 246 190 14 246 190 14
40932-246 190 14 246 190 14 246 190 14 246 190 14
40933-246 190 14 232 195 16 190 146 13 214 214 214
40934-253 253 253 253 253 253 253 253 253 253 253 253
40935-253 253 253 253 253 253 253 253 253 253 253 253
40936-253 253 253 253 253 253 253 253 253 253 253 253
40937-253 253 253 250 250 250 170 170 170 26 26 26
40938- 2 2 6 2 2 6 37 26 9 163 110 8
40939-219 162 10 239 182 13 246 186 14 246 190 14
40940-246 190 14 246 190 14 246 190 14 246 190 14
40941-246 190 14 246 190 14 246 190 14 246 190 14
40942-246 190 14 246 190 14 246 190 14 246 190 14
40943-246 186 14 236 178 12 224 166 10 142 122 72
40944- 46 46 46 18 18 18 6 6 6 0 0 0
40945- 0 0 0 0 0 0 0 0 0 0 0 0
40946- 0 0 0 0 0 0 0 0 0 0 0 0
40947- 0 0 0 0 0 0 6 6 6 18 18 18
40948- 50 50 50 109 106 95 192 133 9 224 166 10
40949-242 186 14 246 190 14 246 190 14 246 190 14
40950-246 190 14 246 190 14 246 190 14 246 190 14
40951-246 190 14 246 190 14 246 190 14 246 190 14
40952-246 190 14 246 190 14 246 190 14 246 190 14
40953-242 186 14 226 184 13 210 162 10 142 110 46
40954-226 226 226 253 253 253 253 253 253 253 253 253
40955-253 253 253 253 253 253 253 253 253 253 253 253
40956-253 253 253 253 253 253 253 253 253 253 253 253
40957-198 198 198 66 66 66 2 2 6 2 2 6
40958- 2 2 6 2 2 6 50 34 6 156 107 11
40959-219 162 10 239 182 13 246 186 14 246 190 14
40960-246 190 14 246 190 14 246 190 14 246 190 14
40961-246 190 14 246 190 14 246 190 14 246 190 14
40962-246 190 14 246 190 14 246 190 14 242 186 14
40963-234 174 13 213 154 11 154 122 46 66 66 66
40964- 30 30 30 10 10 10 0 0 0 0 0 0
40965- 0 0 0 0 0 0 0 0 0 0 0 0
40966- 0 0 0 0 0 0 0 0 0 0 0 0
40967- 0 0 0 0 0 0 6 6 6 22 22 22
40968- 58 58 58 154 121 60 206 145 10 234 174 13
40969-242 186 14 246 186 14 246 190 14 246 190 14
40970-246 190 14 246 190 14 246 190 14 246 190 14
40971-246 190 14 246 190 14 246 190 14 246 190 14
40972-246 190 14 246 190 14 246 190 14 246 190 14
40973-246 186 14 236 178 12 210 162 10 163 110 8
40974- 61 42 6 138 138 138 218 218 218 250 250 250
40975-253 253 253 253 253 253 253 253 253 250 250 250
40976-242 242 242 210 210 210 144 144 144 66 66 66
40977- 6 6 6 2 2 6 2 2 6 2 2 6
40978- 2 2 6 2 2 6 61 42 6 163 110 8
40979-216 158 10 236 178 12 246 190 14 246 190 14
40980-246 190 14 246 190 14 246 190 14 246 190 14
40981-246 190 14 246 190 14 246 190 14 246 190 14
40982-246 190 14 239 182 13 230 174 11 216 158 10
40983-190 142 34 124 112 88 70 70 70 38 38 38
40984- 18 18 18 6 6 6 0 0 0 0 0 0
40985- 0 0 0 0 0 0 0 0 0 0 0 0
40986- 0 0 0 0 0 0 0 0 0 0 0 0
40987- 0 0 0 0 0 0 6 6 6 22 22 22
40988- 62 62 62 168 124 44 206 145 10 224 166 10
40989-236 178 12 239 182 13 242 186 14 242 186 14
40990-246 186 14 246 190 14 246 190 14 246 190 14
40991-246 190 14 246 190 14 246 190 14 246 190 14
40992-246 190 14 246 190 14 246 190 14 246 190 14
40993-246 190 14 236 178 12 216 158 10 175 118 6
40994- 80 54 7 2 2 6 6 6 6 30 30 30
40995- 54 54 54 62 62 62 50 50 50 38 38 38
40996- 14 14 14 2 2 6 2 2 6 2 2 6
40997- 2 2 6 2 2 6 2 2 6 2 2 6
40998- 2 2 6 6 6 6 80 54 7 167 114 7
40999-213 154 11 236 178 12 246 190 14 246 190 14
41000-246 190 14 246 190 14 246 190 14 246 190 14
41001-246 190 14 242 186 14 239 182 13 239 182 13
41002-230 174 11 210 150 10 174 135 50 124 112 88
41003- 82 82 82 54 54 54 34 34 34 18 18 18
41004- 6 6 6 0 0 0 0 0 0 0 0 0
41005- 0 0 0 0 0 0 0 0 0 0 0 0
41006- 0 0 0 0 0 0 0 0 0 0 0 0
41007- 0 0 0 0 0 0 6 6 6 18 18 18
41008- 50 50 50 158 118 36 192 133 9 200 144 11
41009-216 158 10 219 162 10 224 166 10 226 170 11
41010-230 174 11 236 178 12 239 182 13 239 182 13
41011-242 186 14 246 186 14 246 190 14 246 190 14
41012-246 190 14 246 190 14 246 190 14 246 190 14
41013-246 186 14 230 174 11 210 150 10 163 110 8
41014-104 69 6 10 10 10 2 2 6 2 2 6
41015- 2 2 6 2 2 6 2 2 6 2 2 6
41016- 2 2 6 2 2 6 2 2 6 2 2 6
41017- 2 2 6 2 2 6 2 2 6 2 2 6
41018- 2 2 6 6 6 6 91 60 6 167 114 7
41019-206 145 10 230 174 11 242 186 14 246 190 14
41020-246 190 14 246 190 14 246 186 14 242 186 14
41021-239 182 13 230 174 11 224 166 10 213 154 11
41022-180 133 36 124 112 88 86 86 86 58 58 58
41023- 38 38 38 22 22 22 10 10 10 6 6 6
41024- 0 0 0 0 0 0 0 0 0 0 0 0
41025- 0 0 0 0 0 0 0 0 0 0 0 0
41026- 0 0 0 0 0 0 0 0 0 0 0 0
41027- 0 0 0 0 0 0 0 0 0 14 14 14
41028- 34 34 34 70 70 70 138 110 50 158 118 36
41029-167 114 7 180 123 7 192 133 9 197 138 11
41030-200 144 11 206 145 10 213 154 11 219 162 10
41031-224 166 10 230 174 11 239 182 13 242 186 14
41032-246 186 14 246 186 14 246 186 14 246 186 14
41033-239 182 13 216 158 10 185 133 11 152 99 6
41034-104 69 6 18 14 6 2 2 6 2 2 6
41035- 2 2 6 2 2 6 2 2 6 2 2 6
41036- 2 2 6 2 2 6 2 2 6 2 2 6
41037- 2 2 6 2 2 6 2 2 6 2 2 6
41038- 2 2 6 6 6 6 80 54 7 152 99 6
41039-192 133 9 219 162 10 236 178 12 239 182 13
41040-246 186 14 242 186 14 239 182 13 236 178 12
41041-224 166 10 206 145 10 192 133 9 154 121 60
41042- 94 94 94 62 62 62 42 42 42 22 22 22
41043- 14 14 14 6 6 6 0 0 0 0 0 0
41044- 0 0 0 0 0 0 0 0 0 0 0 0
41045- 0 0 0 0 0 0 0 0 0 0 0 0
41046- 0 0 0 0 0 0 0 0 0 0 0 0
41047- 0 0 0 0 0 0 0 0 0 6 6 6
41048- 18 18 18 34 34 34 58 58 58 78 78 78
41049-101 98 89 124 112 88 142 110 46 156 107 11
41050-163 110 8 167 114 7 175 118 6 180 123 7
41051-185 133 11 197 138 11 210 150 10 219 162 10
41052-226 170 11 236 178 12 236 178 12 234 174 13
41053-219 162 10 197 138 11 163 110 8 130 83 6
41054- 91 60 6 10 10 10 2 2 6 2 2 6
41055- 18 18 18 38 38 38 38 38 38 38 38 38
41056- 38 38 38 38 38 38 38 38 38 38 38 38
41057- 38 38 38 38 38 38 26 26 26 2 2 6
41058- 2 2 6 6 6 6 70 47 6 137 92 6
41059-175 118 6 200 144 11 219 162 10 230 174 11
41060-234 174 13 230 174 11 219 162 10 210 150 10
41061-192 133 9 163 110 8 124 112 88 82 82 82
41062- 50 50 50 30 30 30 14 14 14 6 6 6
41063- 0 0 0 0 0 0 0 0 0 0 0 0
41064- 0 0 0 0 0 0 0 0 0 0 0 0
41065- 0 0 0 0 0 0 0 0 0 0 0 0
41066- 0 0 0 0 0 0 0 0 0 0 0 0
41067- 0 0 0 0 0 0 0 0 0 0 0 0
41068- 6 6 6 14 14 14 22 22 22 34 34 34
41069- 42 42 42 58 58 58 74 74 74 86 86 86
41070-101 98 89 122 102 70 130 98 46 121 87 25
41071-137 92 6 152 99 6 163 110 8 180 123 7
41072-185 133 11 197 138 11 206 145 10 200 144 11
41073-180 123 7 156 107 11 130 83 6 104 69 6
41074- 50 34 6 54 54 54 110 110 110 101 98 89
41075- 86 86 86 82 82 82 78 78 78 78 78 78
41076- 78 78 78 78 78 78 78 78 78 78 78 78
41077- 78 78 78 82 82 82 86 86 86 94 94 94
41078-106 106 106 101 101 101 86 66 34 124 80 6
41079-156 107 11 180 123 7 192 133 9 200 144 11
41080-206 145 10 200 144 11 192 133 9 175 118 6
41081-139 102 15 109 106 95 70 70 70 42 42 42
41082- 22 22 22 10 10 10 0 0 0 0 0 0
41083- 0 0 0 0 0 0 0 0 0 0 0 0
41084- 0 0 0 0 0 0 0 0 0 0 0 0
41085- 0 0 0 0 0 0 0 0 0 0 0 0
41086- 0 0 0 0 0 0 0 0 0 0 0 0
41087- 0 0 0 0 0 0 0 0 0 0 0 0
41088- 0 0 0 0 0 0 6 6 6 10 10 10
41089- 14 14 14 22 22 22 30 30 30 38 38 38
41090- 50 50 50 62 62 62 74 74 74 90 90 90
41091-101 98 89 112 100 78 121 87 25 124 80 6
41092-137 92 6 152 99 6 152 99 6 152 99 6
41093-138 86 6 124 80 6 98 70 6 86 66 30
41094-101 98 89 82 82 82 58 58 58 46 46 46
41095- 38 38 38 34 34 34 34 34 34 34 34 34
41096- 34 34 34 34 34 34 34 34 34 34 34 34
41097- 34 34 34 34 34 34 38 38 38 42 42 42
41098- 54 54 54 82 82 82 94 86 76 91 60 6
41099-134 86 6 156 107 11 167 114 7 175 118 6
41100-175 118 6 167 114 7 152 99 6 121 87 25
41101-101 98 89 62 62 62 34 34 34 18 18 18
41102- 6 6 6 0 0 0 0 0 0 0 0 0
41103- 0 0 0 0 0 0 0 0 0 0 0 0
41104- 0 0 0 0 0 0 0 0 0 0 0 0
41105- 0 0 0 0 0 0 0 0 0 0 0 0
41106- 0 0 0 0 0 0 0 0 0 0 0 0
41107- 0 0 0 0 0 0 0 0 0 0 0 0
41108- 0 0 0 0 0 0 0 0 0 0 0 0
41109- 0 0 0 6 6 6 6 6 6 10 10 10
41110- 18 18 18 22 22 22 30 30 30 42 42 42
41111- 50 50 50 66 66 66 86 86 86 101 98 89
41112-106 86 58 98 70 6 104 69 6 104 69 6
41113-104 69 6 91 60 6 82 62 34 90 90 90
41114- 62 62 62 38 38 38 22 22 22 14 14 14
41115- 10 10 10 10 10 10 10 10 10 10 10 10
41116- 10 10 10 10 10 10 6 6 6 10 10 10
41117- 10 10 10 10 10 10 10 10 10 14 14 14
41118- 22 22 22 42 42 42 70 70 70 89 81 66
41119- 80 54 7 104 69 6 124 80 6 137 92 6
41120-134 86 6 116 81 8 100 82 52 86 86 86
41121- 58 58 58 30 30 30 14 14 14 6 6 6
41122- 0 0 0 0 0 0 0 0 0 0 0 0
41123- 0 0 0 0 0 0 0 0 0 0 0 0
41124- 0 0 0 0 0 0 0 0 0 0 0 0
41125- 0 0 0 0 0 0 0 0 0 0 0 0
41126- 0 0 0 0 0 0 0 0 0 0 0 0
41127- 0 0 0 0 0 0 0 0 0 0 0 0
41128- 0 0 0 0 0 0 0 0 0 0 0 0
41129- 0 0 0 0 0 0 0 0 0 0 0 0
41130- 0 0 0 6 6 6 10 10 10 14 14 14
41131- 18 18 18 26 26 26 38 38 38 54 54 54
41132- 70 70 70 86 86 86 94 86 76 89 81 66
41133- 89 81 66 86 86 86 74 74 74 50 50 50
41134- 30 30 30 14 14 14 6 6 6 0 0 0
41135- 0 0 0 0 0 0 0 0 0 0 0 0
41136- 0 0 0 0 0 0 0 0 0 0 0 0
41137- 0 0 0 0 0 0 0 0 0 0 0 0
41138- 6 6 6 18 18 18 34 34 34 58 58 58
41139- 82 82 82 89 81 66 89 81 66 89 81 66
41140- 94 86 66 94 86 76 74 74 74 50 50 50
41141- 26 26 26 14 14 14 6 6 6 0 0 0
41142- 0 0 0 0 0 0 0 0 0 0 0 0
41143- 0 0 0 0 0 0 0 0 0 0 0 0
41144- 0 0 0 0 0 0 0 0 0 0 0 0
41145- 0 0 0 0 0 0 0 0 0 0 0 0
41146- 0 0 0 0 0 0 0 0 0 0 0 0
41147- 0 0 0 0 0 0 0 0 0 0 0 0
41148- 0 0 0 0 0 0 0 0 0 0 0 0
41149- 0 0 0 0 0 0 0 0 0 0 0 0
41150- 0 0 0 0 0 0 0 0 0 0 0 0
41151- 6 6 6 6 6 6 14 14 14 18 18 18
41152- 30 30 30 38 38 38 46 46 46 54 54 54
41153- 50 50 50 42 42 42 30 30 30 18 18 18
41154- 10 10 10 0 0 0 0 0 0 0 0 0
41155- 0 0 0 0 0 0 0 0 0 0 0 0
41156- 0 0 0 0 0 0 0 0 0 0 0 0
41157- 0 0 0 0 0 0 0 0 0 0 0 0
41158- 0 0 0 6 6 6 14 14 14 26 26 26
41159- 38 38 38 50 50 50 58 58 58 58 58 58
41160- 54 54 54 42 42 42 30 30 30 18 18 18
41161- 10 10 10 0 0 0 0 0 0 0 0 0
41162- 0 0 0 0 0 0 0 0 0 0 0 0
41163- 0 0 0 0 0 0 0 0 0 0 0 0
41164- 0 0 0 0 0 0 0 0 0 0 0 0
41165- 0 0 0 0 0 0 0 0 0 0 0 0
41166- 0 0 0 0 0 0 0 0 0 0 0 0
41167- 0 0 0 0 0 0 0 0 0 0 0 0
41168- 0 0 0 0 0 0 0 0 0 0 0 0
41169- 0 0 0 0 0 0 0 0 0 0 0 0
41170- 0 0 0 0 0 0 0 0 0 0 0 0
41171- 0 0 0 0 0 0 0 0 0 6 6 6
41172- 6 6 6 10 10 10 14 14 14 18 18 18
41173- 18 18 18 14 14 14 10 10 10 6 6 6
41174- 0 0 0 0 0 0 0 0 0 0 0 0
41175- 0 0 0 0 0 0 0 0 0 0 0 0
41176- 0 0 0 0 0 0 0 0 0 0 0 0
41177- 0 0 0 0 0 0 0 0 0 0 0 0
41178- 0 0 0 0 0 0 0 0 0 6 6 6
41179- 14 14 14 18 18 18 22 22 22 22 22 22
41180- 18 18 18 14 14 14 10 10 10 6 6 6
41181- 0 0 0 0 0 0 0 0 0 0 0 0
41182- 0 0 0 0 0 0 0 0 0 0 0 0
41183- 0 0 0 0 0 0 0 0 0 0 0 0
41184- 0 0 0 0 0 0 0 0 0 0 0 0
41185- 0 0 0 0 0 0 0 0 0 0 0 0
41186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41199+4 4 4 4 4 4
41200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41213+4 4 4 4 4 4
41214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41227+4 4 4 4 4 4
41228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41241+4 4 4 4 4 4
41242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41255+4 4 4 4 4 4
41256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41269+4 4 4 4 4 4
41270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41274+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
41275+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
41276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41280+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41281+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41283+4 4 4 4 4 4
41284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41289+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41290+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41294+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41295+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41296+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41297+4 4 4 4 4 4
41298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41302+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41303+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41304+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41308+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41309+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41310+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41311+4 4 4 4 4 4
41312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41316+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41317+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41318+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41321+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41322+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41323+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41324+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41325+4 4 4 4 4 4
41326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41330+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41331+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41332+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41333+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41334+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41335+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41336+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41337+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41338+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41339+4 4 4 4 4 4
41340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41343+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41344+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41345+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41346+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41347+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41348+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41349+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41350+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41351+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41352+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41353+4 4 4 4 4 4
41354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41357+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41358+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41359+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41360+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41361+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41362+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41363+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41364+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41365+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41366+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41367+4 4 4 4 4 4
41368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41370+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41371+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41372+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41373+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41374+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41375+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41376+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41377+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41378+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41379+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41380+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41381+4 4 4 4 4 4
41382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41384+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41385+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41386+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41387+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41388+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41389+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41390+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41391+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41392+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41393+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41394+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41395+4 4 4 4 4 4
41396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41398+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41399+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41400+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41401+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41402+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41403+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41404+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41405+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41406+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41407+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41408+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41409+4 4 4 4 4 4
41410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41411+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41412+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41413+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41414+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41415+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41416+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41417+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41418+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41419+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41420+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41421+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41422+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41423+4 4 4 4 4 4
41424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41425+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41426+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41427+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41428+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41429+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41430+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41431+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41432+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41433+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41434+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41435+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41436+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41437+0 0 0 4 4 4
41438+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41439+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41440+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41441+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41442+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41443+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41444+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41445+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41446+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41447+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41448+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41449+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41450+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41451+2 0 0 0 0 0
41452+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41453+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41454+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41455+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41456+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41457+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41458+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41459+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41460+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41461+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41462+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41463+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41464+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41465+37 38 37 0 0 0
41466+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41467+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41468+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41469+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41470+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41471+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41472+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41473+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41474+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41475+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41476+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41477+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41478+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41479+85 115 134 4 0 0
41480+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41481+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41482+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41483+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41484+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41485+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41486+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41487+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41488+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41489+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41490+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41491+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41492+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41493+60 73 81 4 0 0
41494+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41495+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41496+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41497+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41498+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41499+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41500+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41501+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41502+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41503+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41504+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41505+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41506+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41507+16 19 21 4 0 0
41508+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41509+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41510+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41511+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41512+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41513+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41514+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41515+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41516+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41517+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41518+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41519+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41520+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41521+4 0 0 4 3 3
41522+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41523+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41524+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41526+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41527+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41528+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41529+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41530+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41531+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41532+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41533+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41534+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41535+3 2 2 4 4 4
41536+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41537+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41538+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41539+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41540+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41541+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41542+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41543+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41544+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41545+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41546+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41547+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41548+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41549+4 4 4 4 4 4
41550+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41551+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41552+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41553+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41554+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41555+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41556+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41557+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41558+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41559+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41560+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41561+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41562+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41563+4 4 4 4 4 4
41564+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41565+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41566+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41567+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41568+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41569+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41570+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41571+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41572+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41573+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41574+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41575+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41576+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41577+5 5 5 5 5 5
41578+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41579+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41580+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41581+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41582+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41583+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41584+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41585+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41586+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41587+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41588+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41589+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41590+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41591+5 5 5 4 4 4
41592+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41593+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41594+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41595+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41596+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41597+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41598+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41599+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41600+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41601+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41602+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41603+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41605+4 4 4 4 4 4
41606+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41607+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41608+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41609+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41610+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41611+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41612+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41613+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41614+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41615+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41616+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41617+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41619+4 4 4 4 4 4
41620+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41621+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41622+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41623+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41624+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41625+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41626+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41627+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41628+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41629+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41630+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41633+4 4 4 4 4 4
41634+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41635+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41636+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41637+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41638+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41639+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41640+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41641+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41642+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41643+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41644+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41647+4 4 4 4 4 4
41648+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41649+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41650+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41651+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41652+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41653+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41654+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41655+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41656+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41657+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41658+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41661+4 4 4 4 4 4
41662+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41663+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41664+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41665+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41666+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41667+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41668+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41669+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41670+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41671+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41672+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41675+4 4 4 4 4 4
41676+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41677+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41678+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41679+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41680+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41681+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41682+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41683+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41684+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41685+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41686+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41689+4 4 4 4 4 4
41690+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41691+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41692+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41693+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41694+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41695+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41696+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41697+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41698+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41699+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41700+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41703+4 4 4 4 4 4
41704+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41705+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41706+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41707+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41708+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41709+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41710+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41711+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41712+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41713+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41714+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41717+4 4 4 4 4 4
41718+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41719+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41720+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41721+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41722+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41723+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41724+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41725+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41726+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41727+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41728+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41731+4 4 4 4 4 4
41732+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41733+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41734+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41735+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41736+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41737+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41738+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41739+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41740+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41741+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41742+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41745+4 4 4 4 4 4
41746+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41747+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41748+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41749+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41750+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41751+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41752+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41753+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41754+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41755+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41756+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41759+4 4 4 4 4 4
41760+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41761+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41762+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41763+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41764+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41765+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41766+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41767+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41768+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41769+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41770+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41773+4 4 4 4 4 4
41774+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41775+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41776+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41777+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41778+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41779+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41780+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41781+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41782+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41783+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41784+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41787+4 4 4 4 4 4
41788+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41789+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41790+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41791+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41792+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41793+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41794+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41795+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41796+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41797+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41798+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41801+4 4 4 4 4 4
41802+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41803+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41804+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41805+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41806+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41807+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41808+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41809+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41810+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41811+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41812+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41815+4 4 4 4 4 4
41816+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41817+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41818+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41819+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41820+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41821+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41822+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41823+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41824+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41825+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41826+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41829+4 4 4 4 4 4
41830+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41831+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41832+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41833+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41834+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41835+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41836+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41837+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41838+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41839+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41840+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41843+4 4 4 4 4 4
41844+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41845+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41846+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41847+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41848+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41849+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41850+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41851+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41852+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41853+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41854+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41857+4 4 4 4 4 4
41858+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41859+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41860+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41861+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41862+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41863+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41864+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41865+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41866+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41867+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41868+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41871+4 4 4 4 4 4
41872+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41873+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41874+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41875+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41876+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41877+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41878+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41879+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41880+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41881+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41882+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41885+4 4 4 4 4 4
41886+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41887+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41888+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41889+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41890+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41891+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41892+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41893+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41894+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41895+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41896+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41899+4 4 4 4 4 4
41900+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41901+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41902+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41903+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41904+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41905+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41906+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41907+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41908+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41909+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41910+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41913+4 4 4 4 4 4
41914+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41915+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41916+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41917+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41918+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41919+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41920+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41921+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41922+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41923+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41924+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41927+4 4 4 4 4 4
41928+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41929+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41930+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41931+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41932+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41933+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41934+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41935+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41936+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41937+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41938+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41941+4 4 4 4 4 4
41942+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41943+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41944+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41945+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41946+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41947+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41948+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41949+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41950+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41951+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41952+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41955+4 4 4 4 4 4
41956+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41957+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41958+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41959+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41960+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41961+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41962+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41963+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41964+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41965+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41966+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41969+4 4 4 4 4 4
41970+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41971+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41972+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41973+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41974+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41975+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41976+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41977+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41978+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41979+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41980+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41983+4 4 4 4 4 4
41984+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41985+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41986+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41987+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41988+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41989+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41990+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41991+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41992+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41993+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41994+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41997+4 4 4 4 4 4
41998+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41999+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
42000+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
42001+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
42002+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
42003+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
42004+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
42005+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
42006+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
42007+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
42008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42011+4 4 4 4 4 4
42012+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42013+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
42014+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
42015+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
42016+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
42017+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
42018+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
42019+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
42020+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
42021+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
42022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42025+4 4 4 4 4 4
42026+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
42027+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
42028+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
42029+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
42030+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
42031+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
42032+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
42033+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
42034+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
42035+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
42036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42039+4 4 4 4 4 4
42040+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
42041+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
42042+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
42043+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
42044+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
42045+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
42046+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
42047+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
42048+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
42049+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42053+4 4 4 4 4 4
42054+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
42055+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42056+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
42057+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
42058+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
42059+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
42060+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
42061+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
42062+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
42063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42067+4 4 4 4 4 4
42068+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
42069+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
42070+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
42071+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
42072+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
42073+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
42074+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
42075+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
42076+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
42077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42081+4 4 4 4 4 4
42082+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42083+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
42084+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
42085+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
42086+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
42087+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
42088+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
42089+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
42090+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42095+4 4 4 4 4 4
42096+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
42097+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
42098+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42099+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
42100+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
42101+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
42102+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
42103+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
42104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42109+4 4 4 4 4 4
42110+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42111+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
42112+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
42113+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
42114+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
42115+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
42116+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
42117+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
42118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42123+4 4 4 4 4 4
42124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42125+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
42126+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42127+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
42128+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
42129+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
42130+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
42131+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
42132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42137+4 4 4 4 4 4
42138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42139+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
42140+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
42141+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
42142+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
42143+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
42144+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
42145+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
42146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42151+4 4 4 4 4 4
42152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42153+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
42154+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
42155+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42156+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
42157+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
42158+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
42159+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42165+4 4 4 4 4 4
42166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42168+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42169+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
42170+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
42171+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
42172+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
42173+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42179+4 4 4 4 4 4
42180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42183+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42184+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
42185+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
42186+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
42187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42193+4 4 4 4 4 4
42194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42197+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
42198+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
42199+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
42200+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
42201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42207+4 4 4 4 4 4
42208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42211+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
42212+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
42213+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
42214+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
42215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42221+4 4 4 4 4 4
42222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42225+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
42226+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
42227+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
42228+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
42229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42235+4 4 4 4 4 4
42236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42240+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
42241+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
42242+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42249+4 4 4 4 4 4
42250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42254+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
42255+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
42256+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42263+4 4 4 4 4 4
42264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42268+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
42269+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
42270+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42277+4 4 4 4 4 4
42278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42282+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
42283+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
42284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42291+4 4 4 4 4 4
42292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42296+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42297+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
42298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42305+4 4 4 4 4 4
42306diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
42307index a40c05e..785c583 100644
42308--- a/drivers/video/udlfb.c
42309+++ b/drivers/video/udlfb.c
42310@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
42311 dlfb_urb_completion(urb);
42312
42313 error:
42314- atomic_add(bytes_sent, &dev->bytes_sent);
42315- atomic_add(bytes_identical, &dev->bytes_identical);
42316- atomic_add(width*height*2, &dev->bytes_rendered);
42317+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42318+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42319+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
42320 end_cycles = get_cycles();
42321- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42322+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42323 >> 10)), /* Kcycles */
42324 &dev->cpu_kcycles_used);
42325
42326@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
42327 dlfb_urb_completion(urb);
42328
42329 error:
42330- atomic_add(bytes_sent, &dev->bytes_sent);
42331- atomic_add(bytes_identical, &dev->bytes_identical);
42332- atomic_add(bytes_rendered, &dev->bytes_rendered);
42333+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42334+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42335+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
42336 end_cycles = get_cycles();
42337- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42338+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42339 >> 10)), /* Kcycles */
42340 &dev->cpu_kcycles_used);
42341 }
42342@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
42343 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42344 struct dlfb_data *dev = fb_info->par;
42345 return snprintf(buf, PAGE_SIZE, "%u\n",
42346- atomic_read(&dev->bytes_rendered));
42347+ atomic_read_unchecked(&dev->bytes_rendered));
42348 }
42349
42350 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42351@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42352 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42353 struct dlfb_data *dev = fb_info->par;
42354 return snprintf(buf, PAGE_SIZE, "%u\n",
42355- atomic_read(&dev->bytes_identical));
42356+ atomic_read_unchecked(&dev->bytes_identical));
42357 }
42358
42359 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42360@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42361 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42362 struct dlfb_data *dev = fb_info->par;
42363 return snprintf(buf, PAGE_SIZE, "%u\n",
42364- atomic_read(&dev->bytes_sent));
42365+ atomic_read_unchecked(&dev->bytes_sent));
42366 }
42367
42368 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42369@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42370 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42371 struct dlfb_data *dev = fb_info->par;
42372 return snprintf(buf, PAGE_SIZE, "%u\n",
42373- atomic_read(&dev->cpu_kcycles_used));
42374+ atomic_read_unchecked(&dev->cpu_kcycles_used));
42375 }
42376
42377 static ssize_t edid_show(
42378@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
42379 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42380 struct dlfb_data *dev = fb_info->par;
42381
42382- atomic_set(&dev->bytes_rendered, 0);
42383- atomic_set(&dev->bytes_identical, 0);
42384- atomic_set(&dev->bytes_sent, 0);
42385- atomic_set(&dev->cpu_kcycles_used, 0);
42386+ atomic_set_unchecked(&dev->bytes_rendered, 0);
42387+ atomic_set_unchecked(&dev->bytes_identical, 0);
42388+ atomic_set_unchecked(&dev->bytes_sent, 0);
42389+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42390
42391 return count;
42392 }
42393diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
42394index 8408543..357841c 100644
42395--- a/drivers/video/uvesafb.c
42396+++ b/drivers/video/uvesafb.c
42397@@ -19,6 +19,7 @@
42398 #include <linux/io.h>
42399 #include <linux/mutex.h>
42400 #include <linux/slab.h>
42401+#include <linux/moduleloader.h>
42402 #include <video/edid.h>
42403 #include <video/uvesafb.h>
42404 #ifdef CONFIG_X86
42405@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
42406 NULL,
42407 };
42408
42409- return call_usermodehelper(v86d_path, argv, envp, 1);
42410+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
42411 }
42412
42413 /*
42414@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
42415 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42416 par->pmi_setpal = par->ypan = 0;
42417 } else {
42418+
42419+#ifdef CONFIG_PAX_KERNEXEC
42420+#ifdef CONFIG_MODULES
42421+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42422+#endif
42423+ if (!par->pmi_code) {
42424+ par->pmi_setpal = par->ypan = 0;
42425+ return 0;
42426+ }
42427+#endif
42428+
42429 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42430 + task->t.regs.edi);
42431+
42432+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42433+ pax_open_kernel();
42434+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42435+ pax_close_kernel();
42436+
42437+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42438+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42439+#else
42440 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42441 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42442+#endif
42443+
42444 printk(KERN_INFO "uvesafb: protected mode interface info at "
42445 "%04x:%04x\n",
42446 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42447@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42448 par->ypan = ypan;
42449
42450 if (par->pmi_setpal || par->ypan) {
42451+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42452 if (__supported_pte_mask & _PAGE_NX) {
42453 par->pmi_setpal = par->ypan = 0;
42454 printk(KERN_WARNING "uvesafb: NX protection is actively."
42455 "We have better not to use the PMI.\n");
42456- } else {
42457+ } else
42458+#endif
42459 uvesafb_vbe_getpmi(task, par);
42460- }
42461 }
42462 #else
42463 /* The protected mode interface is not available on non-x86. */
42464@@ -1828,6 +1852,11 @@ out:
42465 if (par->vbe_modes)
42466 kfree(par->vbe_modes);
42467
42468+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42469+ if (par->pmi_code)
42470+ module_free_exec(NULL, par->pmi_code);
42471+#endif
42472+
42473 framebuffer_release(info);
42474 return err;
42475 }
42476@@ -1854,6 +1883,12 @@ static int uvesafb_remove(struct platform_device *dev)
42477 kfree(par->vbe_state_orig);
42478 if (par->vbe_state_saved)
42479 kfree(par->vbe_state_saved);
42480+
42481+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42482+ if (par->pmi_code)
42483+ module_free_exec(NULL, par->pmi_code);
42484+#endif
42485+
42486 }
42487
42488 framebuffer_release(info);
42489diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42490index 501b340..86bd4cf 100644
42491--- a/drivers/video/vesafb.c
42492+++ b/drivers/video/vesafb.c
42493@@ -9,6 +9,7 @@
42494 */
42495
42496 #include <linux/module.h>
42497+#include <linux/moduleloader.h>
42498 #include <linux/kernel.h>
42499 #include <linux/errno.h>
42500 #include <linux/string.h>
42501@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
42502 static int vram_total __initdata; /* Set total amount of memory */
42503 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42504 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42505-static void (*pmi_start)(void) __read_mostly;
42506-static void (*pmi_pal) (void) __read_mostly;
42507+static void (*pmi_start)(void) __read_only;
42508+static void (*pmi_pal) (void) __read_only;
42509 static int depth __read_mostly;
42510 static int vga_compat __read_mostly;
42511 /* --------------------------------------------------------------------- */
42512@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
42513 unsigned int size_vmode;
42514 unsigned int size_remap;
42515 unsigned int size_total;
42516+ void *pmi_code = NULL;
42517
42518 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42519 return -ENODEV;
42520@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
42521 size_remap = size_total;
42522 vesafb_fix.smem_len = size_remap;
42523
42524-#ifndef __i386__
42525- screen_info.vesapm_seg = 0;
42526-#endif
42527-
42528 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42529 printk(KERN_WARNING
42530 "vesafb: cannot reserve video memory at 0x%lx\n",
42531@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
42532 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42533 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42534
42535+#ifdef __i386__
42536+
42537+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42538+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
42539+ if (!pmi_code)
42540+#elif !defined(CONFIG_PAX_KERNEXEC)
42541+ if (0)
42542+#endif
42543+
42544+#endif
42545+ screen_info.vesapm_seg = 0;
42546+
42547 if (screen_info.vesapm_seg) {
42548- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42549- screen_info.vesapm_seg,screen_info.vesapm_off);
42550+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42551+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42552 }
42553
42554 if (screen_info.vesapm_seg < 0xc000)
42555@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
42556
42557 if (ypan || pmi_setpal) {
42558 unsigned short *pmi_base;
42559+
42560 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42561- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42562- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42563+
42564+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42565+ pax_open_kernel();
42566+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42567+#else
42568+ pmi_code = pmi_base;
42569+#endif
42570+
42571+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42572+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42573+
42574+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42575+ pmi_start = ktva_ktla(pmi_start);
42576+ pmi_pal = ktva_ktla(pmi_pal);
42577+ pax_close_kernel();
42578+#endif
42579+
42580 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42581 if (pmi_base[3]) {
42582 printk(KERN_INFO "vesafb: pmi: ports = ");
42583@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
42584 info->node, info->fix.id);
42585 return 0;
42586 err:
42587+
42588+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42589+ module_free_exec(NULL, pmi_code);
42590+#endif
42591+
42592 if (info->screen_base)
42593 iounmap(info->screen_base);
42594 framebuffer_release(info);
42595diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42596index 88714ae..16c2e11 100644
42597--- a/drivers/video/via/via_clock.h
42598+++ b/drivers/video/via/via_clock.h
42599@@ -56,7 +56,7 @@ struct via_clock {
42600
42601 void (*set_engine_pll_state)(u8 state);
42602 void (*set_engine_pll)(struct via_pll_config config);
42603-};
42604+} __no_const;
42605
42606
42607 static inline u32 get_pll_internal_frequency(u32 ref_freq,
42608diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42609index e56c934..fc22f4b 100644
42610--- a/drivers/xen/xen-pciback/conf_space.h
42611+++ b/drivers/xen/xen-pciback/conf_space.h
42612@@ -44,15 +44,15 @@ struct config_field {
42613 struct {
42614 conf_dword_write write;
42615 conf_dword_read read;
42616- } dw;
42617+ } __no_const dw;
42618 struct {
42619 conf_word_write write;
42620 conf_word_read read;
42621- } w;
42622+ } __no_const w;
42623 struct {
42624 conf_byte_write write;
42625 conf_byte_read read;
42626- } b;
42627+ } __no_const b;
42628 } u;
42629 struct list_head list;
42630 };
42631diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42632index 014c8dd..6f3dfe6 100644
42633--- a/fs/9p/vfs_inode.c
42634+++ b/fs/9p/vfs_inode.c
42635@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42636 void
42637 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42638 {
42639- char *s = nd_get_link(nd);
42640+ const char *s = nd_get_link(nd);
42641
42642 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42643 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
42644diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42645index e95d1b6..3454244 100644
42646--- a/fs/Kconfig.binfmt
42647+++ b/fs/Kconfig.binfmt
42648@@ -89,7 +89,7 @@ config HAVE_AOUT
42649
42650 config BINFMT_AOUT
42651 tristate "Kernel support for a.out and ECOFF binaries"
42652- depends on HAVE_AOUT
42653+ depends on HAVE_AOUT && BROKEN
42654 ---help---
42655 A.out (Assembler.OUTput) is a set of formats for libraries and
42656 executables used in the earliest versions of UNIX. Linux used
42657diff --git a/fs/aio.c b/fs/aio.c
42658index b9d64d8..86cb1d5 100644
42659--- a/fs/aio.c
42660+++ b/fs/aio.c
42661@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42662 size += sizeof(struct io_event) * nr_events;
42663 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42664
42665- if (nr_pages < 0)
42666+ if (nr_pages <= 0)
42667 return -EINVAL;
42668
42669 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42670@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42671 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42672 {
42673 ssize_t ret;
42674+ struct iovec iovstack;
42675
42676 #ifdef CONFIG_COMPAT
42677 if (compat)
42678 ret = compat_rw_copy_check_uvector(type,
42679 (struct compat_iovec __user *)kiocb->ki_buf,
42680- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42681+ kiocb->ki_nbytes, 1, &iovstack,
42682 &kiocb->ki_iovec, 1);
42683 else
42684 #endif
42685 ret = rw_copy_check_uvector(type,
42686 (struct iovec __user *)kiocb->ki_buf,
42687- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42688+ kiocb->ki_nbytes, 1, &iovstack,
42689 &kiocb->ki_iovec, 1);
42690 if (ret < 0)
42691 goto out;
42692
42693+ if (kiocb->ki_iovec == &iovstack) {
42694+ kiocb->ki_inline_vec = iovstack;
42695+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
42696+ }
42697 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42698 kiocb->ki_cur_seg = 0;
42699 /* ki_nbytes/left now reflect bytes instead of segs */
42700diff --git a/fs/attr.c b/fs/attr.c
42701index 95053ad..2cc93ca 100644
42702--- a/fs/attr.c
42703+++ b/fs/attr.c
42704@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42705 unsigned long limit;
42706
42707 limit = rlimit(RLIMIT_FSIZE);
42708+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42709 if (limit != RLIM_INFINITY && offset > limit)
42710 goto out_sig;
42711 if (offset > inode->i_sb->s_maxbytes)
42712diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42713index f624cd0..3d9a559 100644
42714--- a/fs/autofs4/waitq.c
42715+++ b/fs/autofs4/waitq.c
42716@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
42717 {
42718 unsigned long sigpipe, flags;
42719 mm_segment_t fs;
42720- const char *data = (const char *)addr;
42721+ const char __user *data = (const char __force_user *)addr;
42722 ssize_t wr = 0;
42723
42724 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
42725diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42726index 6e6d536..457113a 100644
42727--- a/fs/befs/linuxvfs.c
42728+++ b/fs/befs/linuxvfs.c
42729@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42730 {
42731 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42732 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42733- char *link = nd_get_link(nd);
42734+ const char *link = nd_get_link(nd);
42735 if (!IS_ERR(link))
42736 kfree(link);
42737 }
42738diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42739index 1ff9405..f1e376a 100644
42740--- a/fs/binfmt_aout.c
42741+++ b/fs/binfmt_aout.c
42742@@ -16,6 +16,7 @@
42743 #include <linux/string.h>
42744 #include <linux/fs.h>
42745 #include <linux/file.h>
42746+#include <linux/security.h>
42747 #include <linux/stat.h>
42748 #include <linux/fcntl.h>
42749 #include <linux/ptrace.h>
42750@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42751 #endif
42752 # define START_STACK(u) ((void __user *)u.start_stack)
42753
42754+ memset(&dump, 0, sizeof(dump));
42755+
42756 fs = get_fs();
42757 set_fs(KERNEL_DS);
42758 has_dumped = 1;
42759@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42760
42761 /* If the size of the dump file exceeds the rlimit, then see what would happen
42762 if we wrote the stack, but not the data area. */
42763+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42764 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42765 dump.u_dsize = 0;
42766
42767 /* Make sure we have enough room to write the stack and data areas. */
42768+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42769 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42770 dump.u_ssize = 0;
42771
42772@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42773 rlim = rlimit(RLIMIT_DATA);
42774 if (rlim >= RLIM_INFINITY)
42775 rlim = ~0;
42776+
42777+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42778 if (ex.a_data + ex.a_bss > rlim)
42779 return -ENOMEM;
42780
42781@@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42782 install_exec_creds(bprm);
42783 current->flags &= ~PF_FORKNOEXEC;
42784
42785+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42786+ current->mm->pax_flags = 0UL;
42787+#endif
42788+
42789+#ifdef CONFIG_PAX_PAGEEXEC
42790+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42791+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42792+
42793+#ifdef CONFIG_PAX_EMUTRAMP
42794+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42795+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42796+#endif
42797+
42798+#ifdef CONFIG_PAX_MPROTECT
42799+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42800+ current->mm->pax_flags |= MF_PAX_MPROTECT;
42801+#endif
42802+
42803+ }
42804+#endif
42805+
42806 if (N_MAGIC(ex) == OMAGIC) {
42807 unsigned long text_addr, map_size;
42808 loff_t pos;
42809@@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42810
42811 down_write(&current->mm->mmap_sem);
42812 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42813- PROT_READ | PROT_WRITE | PROT_EXEC,
42814+ PROT_READ | PROT_WRITE,
42815 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42816 fd_offset + ex.a_text);
42817 up_write(&current->mm->mmap_sem);
42818diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42819index 07d096c..25762af 100644
42820--- a/fs/binfmt_elf.c
42821+++ b/fs/binfmt_elf.c
42822@@ -32,6 +32,7 @@
42823 #include <linux/elf.h>
42824 #include <linux/utsname.h>
42825 #include <linux/coredump.h>
42826+#include <linux/xattr.h>
42827 #include <asm/uaccess.h>
42828 #include <asm/param.h>
42829 #include <asm/page.h>
42830@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42831 #define elf_core_dump NULL
42832 #endif
42833
42834+#ifdef CONFIG_PAX_MPROTECT
42835+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42836+#endif
42837+
42838 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42839 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42840 #else
42841@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42842 .load_binary = load_elf_binary,
42843 .load_shlib = load_elf_library,
42844 .core_dump = elf_core_dump,
42845+
42846+#ifdef CONFIG_PAX_MPROTECT
42847+ .handle_mprotect= elf_handle_mprotect,
42848+#endif
42849+
42850 .min_coredump = ELF_EXEC_PAGESIZE,
42851 };
42852
42853@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42854
42855 static int set_brk(unsigned long start, unsigned long end)
42856 {
42857+ unsigned long e = end;
42858+
42859 start = ELF_PAGEALIGN(start);
42860 end = ELF_PAGEALIGN(end);
42861 if (end > start) {
42862@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42863 if (BAD_ADDR(addr))
42864 return addr;
42865 }
42866- current->mm->start_brk = current->mm->brk = end;
42867+ current->mm->start_brk = current->mm->brk = e;
42868 return 0;
42869 }
42870
42871@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42872 elf_addr_t __user *u_rand_bytes;
42873 const char *k_platform = ELF_PLATFORM;
42874 const char *k_base_platform = ELF_BASE_PLATFORM;
42875- unsigned char k_rand_bytes[16];
42876+ u32 k_rand_bytes[4];
42877 int items;
42878 elf_addr_t *elf_info;
42879 int ei_index = 0;
42880 const struct cred *cred = current_cred();
42881 struct vm_area_struct *vma;
42882+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42883
42884 /*
42885 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42886@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42887 * Generate 16 random bytes for userspace PRNG seeding.
42888 */
42889 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42890- u_rand_bytes = (elf_addr_t __user *)
42891- STACK_ALLOC(p, sizeof(k_rand_bytes));
42892+ srandom32(k_rand_bytes[0] ^ random32());
42893+ srandom32(k_rand_bytes[1] ^ random32());
42894+ srandom32(k_rand_bytes[2] ^ random32());
42895+ srandom32(k_rand_bytes[3] ^ random32());
42896+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42897+ u_rand_bytes = (elf_addr_t __user *) p;
42898 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42899 return -EFAULT;
42900
42901@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42902 return -EFAULT;
42903 current->mm->env_end = p;
42904
42905+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42906+
42907 /* Put the elf_info on the stack in the right place. */
42908 sp = (elf_addr_t __user *)envp + 1;
42909- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42910+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42911 return -EFAULT;
42912 return 0;
42913 }
42914@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42915 {
42916 struct elf_phdr *elf_phdata;
42917 struct elf_phdr *eppnt;
42918- unsigned long load_addr = 0;
42919+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42920 int load_addr_set = 0;
42921 unsigned long last_bss = 0, elf_bss = 0;
42922- unsigned long error = ~0UL;
42923+ unsigned long error = -EINVAL;
42924 unsigned long total_size;
42925 int retval, i, size;
42926
42927@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42928 goto out_close;
42929 }
42930
42931+#ifdef CONFIG_PAX_SEGMEXEC
42932+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42933+ pax_task_size = SEGMEXEC_TASK_SIZE;
42934+#endif
42935+
42936 eppnt = elf_phdata;
42937 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42938 if (eppnt->p_type == PT_LOAD) {
42939@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42940 k = load_addr + eppnt->p_vaddr;
42941 if (BAD_ADDR(k) ||
42942 eppnt->p_filesz > eppnt->p_memsz ||
42943- eppnt->p_memsz > TASK_SIZE ||
42944- TASK_SIZE - eppnt->p_memsz < k) {
42945+ eppnt->p_memsz > pax_task_size ||
42946+ pax_task_size - eppnt->p_memsz < k) {
42947 error = -ENOMEM;
42948 goto out_close;
42949 }
42950@@ -528,6 +552,351 @@ out:
42951 return error;
42952 }
42953
42954+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42955+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42956+{
42957+ unsigned long pax_flags = 0UL;
42958+
42959+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42960+
42961+#ifdef CONFIG_PAX_PAGEEXEC
42962+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42963+ pax_flags |= MF_PAX_PAGEEXEC;
42964+#endif
42965+
42966+#ifdef CONFIG_PAX_SEGMEXEC
42967+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42968+ pax_flags |= MF_PAX_SEGMEXEC;
42969+#endif
42970+
42971+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42972+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42973+ if ((__supported_pte_mask & _PAGE_NX))
42974+ pax_flags &= ~MF_PAX_SEGMEXEC;
42975+ else
42976+ pax_flags &= ~MF_PAX_PAGEEXEC;
42977+ }
42978+#endif
42979+
42980+#ifdef CONFIG_PAX_EMUTRAMP
42981+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42982+ pax_flags |= MF_PAX_EMUTRAMP;
42983+#endif
42984+
42985+#ifdef CONFIG_PAX_MPROTECT
42986+ if (elf_phdata->p_flags & PF_MPROTECT)
42987+ pax_flags |= MF_PAX_MPROTECT;
42988+#endif
42989+
42990+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42991+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42992+ pax_flags |= MF_PAX_RANDMMAP;
42993+#endif
42994+
42995+#endif
42996+
42997+ return pax_flags;
42998+}
42999+
43000+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
43001+{
43002+ unsigned long pax_flags = 0UL;
43003+
43004+#ifdef CONFIG_PAX_PT_PAX_FLAGS
43005+
43006+#ifdef CONFIG_PAX_PAGEEXEC
43007+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
43008+ pax_flags |= MF_PAX_PAGEEXEC;
43009+#endif
43010+
43011+#ifdef CONFIG_PAX_SEGMEXEC
43012+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
43013+ pax_flags |= MF_PAX_SEGMEXEC;
43014+#endif
43015+
43016+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43017+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43018+ if ((__supported_pte_mask & _PAGE_NX))
43019+ pax_flags &= ~MF_PAX_SEGMEXEC;
43020+ else
43021+ pax_flags &= ~MF_PAX_PAGEEXEC;
43022+ }
43023+#endif
43024+
43025+#ifdef CONFIG_PAX_EMUTRAMP
43026+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
43027+ pax_flags |= MF_PAX_EMUTRAMP;
43028+#endif
43029+
43030+#ifdef CONFIG_PAX_MPROTECT
43031+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
43032+ pax_flags |= MF_PAX_MPROTECT;
43033+#endif
43034+
43035+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43036+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
43037+ pax_flags |= MF_PAX_RANDMMAP;
43038+#endif
43039+
43040+#endif
43041+
43042+ return pax_flags;
43043+}
43044+
43045+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
43046+{
43047+ unsigned long pax_flags = 0UL;
43048+
43049+#ifdef CONFIG_PAX_EI_PAX
43050+
43051+#ifdef CONFIG_PAX_PAGEEXEC
43052+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
43053+ pax_flags |= MF_PAX_PAGEEXEC;
43054+#endif
43055+
43056+#ifdef CONFIG_PAX_SEGMEXEC
43057+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
43058+ pax_flags |= MF_PAX_SEGMEXEC;
43059+#endif
43060+
43061+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43062+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43063+ if ((__supported_pte_mask & _PAGE_NX))
43064+ pax_flags &= ~MF_PAX_SEGMEXEC;
43065+ else
43066+ pax_flags &= ~MF_PAX_PAGEEXEC;
43067+ }
43068+#endif
43069+
43070+#ifdef CONFIG_PAX_EMUTRAMP
43071+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
43072+ pax_flags |= MF_PAX_EMUTRAMP;
43073+#endif
43074+
43075+#ifdef CONFIG_PAX_MPROTECT
43076+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
43077+ pax_flags |= MF_PAX_MPROTECT;
43078+#endif
43079+
43080+#ifdef CONFIG_PAX_ASLR
43081+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
43082+ pax_flags |= MF_PAX_RANDMMAP;
43083+#endif
43084+
43085+#else
43086+
43087+#ifdef CONFIG_PAX_PAGEEXEC
43088+ pax_flags |= MF_PAX_PAGEEXEC;
43089+#endif
43090+
43091+#ifdef CONFIG_PAX_MPROTECT
43092+ pax_flags |= MF_PAX_MPROTECT;
43093+#endif
43094+
43095+#ifdef CONFIG_PAX_RANDMMAP
43096+ pax_flags |= MF_PAX_RANDMMAP;
43097+#endif
43098+
43099+#ifdef CONFIG_PAX_SEGMEXEC
43100+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
43101+ pax_flags &= ~MF_PAX_PAGEEXEC;
43102+ pax_flags |= MF_PAX_SEGMEXEC;
43103+ }
43104+#endif
43105+
43106+#endif
43107+
43108+ return pax_flags;
43109+}
43110+
43111+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
43112+{
43113+
43114+#ifdef CONFIG_PAX_PT_PAX_FLAGS
43115+ unsigned long i;
43116+
43117+ for (i = 0UL; i < elf_ex->e_phnum; i++)
43118+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
43119+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
43120+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
43121+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
43122+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
43123+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
43124+ return ~0UL;
43125+
43126+#ifdef CONFIG_PAX_SOFTMODE
43127+ if (pax_softmode)
43128+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
43129+ else
43130+#endif
43131+
43132+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
43133+ break;
43134+ }
43135+#endif
43136+
43137+ return ~0UL;
43138+}
43139+
43140+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43141+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
43142+{
43143+ unsigned long pax_flags = 0UL;
43144+
43145+#ifdef CONFIG_PAX_PAGEEXEC
43146+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
43147+ pax_flags |= MF_PAX_PAGEEXEC;
43148+#endif
43149+
43150+#ifdef CONFIG_PAX_SEGMEXEC
43151+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
43152+ pax_flags |= MF_PAX_SEGMEXEC;
43153+#endif
43154+
43155+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43156+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43157+ if ((__supported_pte_mask & _PAGE_NX))
43158+ pax_flags &= ~MF_PAX_SEGMEXEC;
43159+ else
43160+ pax_flags &= ~MF_PAX_PAGEEXEC;
43161+ }
43162+#endif
43163+
43164+#ifdef CONFIG_PAX_EMUTRAMP
43165+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
43166+ pax_flags |= MF_PAX_EMUTRAMP;
43167+#endif
43168+
43169+#ifdef CONFIG_PAX_MPROTECT
43170+ if (pax_flags_softmode & MF_PAX_MPROTECT)
43171+ pax_flags |= MF_PAX_MPROTECT;
43172+#endif
43173+
43174+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43175+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
43176+ pax_flags |= MF_PAX_RANDMMAP;
43177+#endif
43178+
43179+ return pax_flags;
43180+}
43181+
43182+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
43183+{
43184+ unsigned long pax_flags = 0UL;
43185+
43186+#ifdef CONFIG_PAX_PAGEEXEC
43187+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
43188+ pax_flags |= MF_PAX_PAGEEXEC;
43189+#endif
43190+
43191+#ifdef CONFIG_PAX_SEGMEXEC
43192+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
43193+ pax_flags |= MF_PAX_SEGMEXEC;
43194+#endif
43195+
43196+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
43197+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43198+ if ((__supported_pte_mask & _PAGE_NX))
43199+ pax_flags &= ~MF_PAX_SEGMEXEC;
43200+ else
43201+ pax_flags &= ~MF_PAX_PAGEEXEC;
43202+ }
43203+#endif
43204+
43205+#ifdef CONFIG_PAX_EMUTRAMP
43206+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
43207+ pax_flags |= MF_PAX_EMUTRAMP;
43208+#endif
43209+
43210+#ifdef CONFIG_PAX_MPROTECT
43211+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
43212+ pax_flags |= MF_PAX_MPROTECT;
43213+#endif
43214+
43215+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
43216+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
43217+ pax_flags |= MF_PAX_RANDMMAP;
43218+#endif
43219+
43220+ return pax_flags;
43221+}
43222+#endif
43223+
43224+static unsigned long pax_parse_xattr_pax(struct file * const file)
43225+{
43226+
43227+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
43228+ ssize_t xattr_size, i;
43229+ unsigned char xattr_value[5];
43230+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
43231+
43232+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
43233+ if (xattr_size <= 0)
43234+ return ~0UL;
43235+
43236+ for (i = 0; i < xattr_size; i++)
43237+ switch (xattr_value[i]) {
43238+ default:
43239+ return ~0UL;
43240+
43241+#define parse_flag(option1, option2, flag) \
43242+ case option1: \
43243+ pax_flags_hardmode |= MF_PAX_##flag; \
43244+ break; \
43245+ case option2: \
43246+ pax_flags_softmode |= MF_PAX_##flag; \
43247+ break;
43248+
43249+ parse_flag('p', 'P', PAGEEXEC);
43250+ parse_flag('e', 'E', EMUTRAMP);
43251+ parse_flag('m', 'M', MPROTECT);
43252+ parse_flag('r', 'R', RANDMMAP);
43253+ parse_flag('s', 'S', SEGMEXEC);
43254+
43255+#undef parse_flag
43256+ }
43257+
43258+ if (pax_flags_hardmode & pax_flags_softmode)
43259+ return ~0UL;
43260+
43261+#ifdef CONFIG_PAX_SOFTMODE
43262+ if (pax_softmode)
43263+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
43264+ else
43265+#endif
43266+
43267+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
43268+#else
43269+ return ~0UL;
43270+#endif
43271+
43272+}
43273+
43274+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
43275+{
43276+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
43277+
43278+ pax_flags = pax_parse_ei_pax(elf_ex);
43279+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
43280+ xattr_pax_flags = pax_parse_xattr_pax(file);
43281+
43282+ if (pt_pax_flags == ~0UL)
43283+ pt_pax_flags = xattr_pax_flags;
43284+ else if (xattr_pax_flags == ~0UL)
43285+ xattr_pax_flags = pt_pax_flags;
43286+ if (pt_pax_flags != xattr_pax_flags)
43287+ return -EINVAL;
43288+ if (pt_pax_flags != ~0UL)
43289+ pax_flags = pt_pax_flags;
43290+
43291+ if (0 > pax_check_flags(&pax_flags))
43292+ return -EINVAL;
43293+
43294+ current->mm->pax_flags = pax_flags;
43295+ return 0;
43296+}
43297+#endif
43298+
43299 /*
43300 * These are the functions used to load ELF style executables and shared
43301 * libraries. There is no binary dependent code anywhere else.
43302@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
43303 {
43304 unsigned int random_variable = 0;
43305
43306+#ifdef CONFIG_PAX_RANDUSTACK
43307+ if (randomize_va_space)
43308+ return stack_top - current->mm->delta_stack;
43309+#endif
43310+
43311 if ((current->flags & PF_RANDOMIZE) &&
43312 !(current->personality & ADDR_NO_RANDOMIZE)) {
43313 random_variable = get_random_int() & STACK_RND_MASK;
43314@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43315 unsigned long load_addr = 0, load_bias = 0;
43316 int load_addr_set = 0;
43317 char * elf_interpreter = NULL;
43318- unsigned long error;
43319+ unsigned long error = 0;
43320 struct elf_phdr *elf_ppnt, *elf_phdata;
43321 unsigned long elf_bss, elf_brk;
43322 int retval, i;
43323@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43324 unsigned long start_code, end_code, start_data, end_data;
43325 unsigned long reloc_func_desc __maybe_unused = 0;
43326 int executable_stack = EXSTACK_DEFAULT;
43327- unsigned long def_flags = 0;
43328 struct {
43329 struct elfhdr elf_ex;
43330 struct elfhdr interp_elf_ex;
43331 } *loc;
43332+ unsigned long pax_task_size = TASK_SIZE;
43333
43334 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
43335 if (!loc) {
43336@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43337
43338 /* OK, This is the point of no return */
43339 current->flags &= ~PF_FORKNOEXEC;
43340- current->mm->def_flags = def_flags;
43341+
43342+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43343+ current->mm->pax_flags = 0UL;
43344+#endif
43345+
43346+#ifdef CONFIG_PAX_DLRESOLVE
43347+ current->mm->call_dl_resolve = 0UL;
43348+#endif
43349+
43350+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
43351+ current->mm->call_syscall = 0UL;
43352+#endif
43353+
43354+#ifdef CONFIG_PAX_ASLR
43355+ current->mm->delta_mmap = 0UL;
43356+ current->mm->delta_stack = 0UL;
43357+#endif
43358+
43359+ current->mm->def_flags = 0;
43360+
43361+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
43362+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
43363+ send_sig(SIGKILL, current, 0);
43364+ goto out_free_dentry;
43365+ }
43366+#endif
43367+
43368+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43369+ pax_set_initial_flags(bprm);
43370+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43371+ if (pax_set_initial_flags_func)
43372+ (pax_set_initial_flags_func)(bprm);
43373+#endif
43374+
43375+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43376+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
43377+ current->mm->context.user_cs_limit = PAGE_SIZE;
43378+ current->mm->def_flags |= VM_PAGEEXEC;
43379+ }
43380+#endif
43381+
43382+#ifdef CONFIG_PAX_SEGMEXEC
43383+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43384+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43385+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43386+ pax_task_size = SEGMEXEC_TASK_SIZE;
43387+ current->mm->def_flags |= VM_NOHUGEPAGE;
43388+ }
43389+#endif
43390+
43391+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43392+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43393+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43394+ put_cpu();
43395+ }
43396+#endif
43397
43398 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43399 may depend on the personality. */
43400 SET_PERSONALITY(loc->elf_ex);
43401+
43402+#ifdef CONFIG_PAX_ASLR
43403+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43404+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43405+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43406+ }
43407+#endif
43408+
43409+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43410+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43411+ executable_stack = EXSTACK_DISABLE_X;
43412+ current->personality &= ~READ_IMPLIES_EXEC;
43413+ } else
43414+#endif
43415+
43416 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43417 current->personality |= READ_IMPLIES_EXEC;
43418
43419@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43420 #else
43421 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43422 #endif
43423+
43424+#ifdef CONFIG_PAX_RANDMMAP
43425+ /* PaX: randomize base address at the default exe base if requested */
43426+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43427+#ifdef CONFIG_SPARC64
43428+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43429+#else
43430+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43431+#endif
43432+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43433+ elf_flags |= MAP_FIXED;
43434+ }
43435+#endif
43436+
43437 }
43438
43439 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43440@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43441 * allowed task size. Note that p_filesz must always be
43442 * <= p_memsz so it is only necessary to check p_memsz.
43443 */
43444- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43445- elf_ppnt->p_memsz > TASK_SIZE ||
43446- TASK_SIZE - elf_ppnt->p_memsz < k) {
43447+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43448+ elf_ppnt->p_memsz > pax_task_size ||
43449+ pax_task_size - elf_ppnt->p_memsz < k) {
43450 /* set_brk can never work. Avoid overflows. */
43451 send_sig(SIGKILL, current, 0);
43452 retval = -EINVAL;
43453@@ -881,11 +1339,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43454 goto out_free_dentry;
43455 }
43456 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43457- send_sig(SIGSEGV, current, 0);
43458- retval = -EFAULT; /* Nobody gets to see this, but.. */
43459- goto out_free_dentry;
43460+ /*
43461+ * This bss-zeroing can fail if the ELF
43462+ * file specifies odd protections. So
43463+ * we don't check the return value
43464+ */
43465 }
43466
43467+#ifdef CONFIG_PAX_RANDMMAP
43468+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43469+ unsigned long start, size;
43470+
43471+ start = ELF_PAGEALIGN(elf_brk);
43472+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43473+ down_write(&current->mm->mmap_sem);
43474+ retval = -ENOMEM;
43475+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43476+ unsigned long prot = PROT_NONE;
43477+
43478+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43479+// if (current->personality & ADDR_NO_RANDOMIZE)
43480+// prot = PROT_READ;
43481+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43482+ retval = IS_ERR_VALUE(start) ? start : 0;
43483+ }
43484+ up_write(&current->mm->mmap_sem);
43485+ if (retval == 0)
43486+ retval = set_brk(start + size, start + size + PAGE_SIZE);
43487+ if (retval < 0) {
43488+ send_sig(SIGKILL, current, 0);
43489+ goto out_free_dentry;
43490+ }
43491+ }
43492+#endif
43493+
43494 if (elf_interpreter) {
43495 unsigned long uninitialized_var(interp_map_addr);
43496
43497@@ -1098,7 +1585,7 @@ out:
43498 * Decide what to dump of a segment, part, all or none.
43499 */
43500 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43501- unsigned long mm_flags)
43502+ unsigned long mm_flags, long signr)
43503 {
43504 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43505
43506@@ -1132,7 +1619,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
43507 if (vma->vm_file == NULL)
43508 return 0;
43509
43510- if (FILTER(MAPPED_PRIVATE))
43511+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43512 goto whole;
43513
43514 /*
43515@@ -1354,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
43516 {
43517 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43518 int i = 0;
43519- do
43520+ do {
43521 i += 2;
43522- while (auxv[i - 2] != AT_NULL);
43523+ } while (auxv[i - 2] != AT_NULL);
43524 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43525 }
43526
43527@@ -1862,14 +2349,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
43528 }
43529
43530 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43531- unsigned long mm_flags)
43532+ struct coredump_params *cprm)
43533 {
43534 struct vm_area_struct *vma;
43535 size_t size = 0;
43536
43537 for (vma = first_vma(current, gate_vma); vma != NULL;
43538 vma = next_vma(vma, gate_vma))
43539- size += vma_dump_size(vma, mm_flags);
43540+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43541 return size;
43542 }
43543
43544@@ -1963,7 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43545
43546 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43547
43548- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43549+ offset += elf_core_vma_data_size(gate_vma, cprm);
43550 offset += elf_core_extra_data_size();
43551 e_shoff = offset;
43552
43553@@ -1977,10 +2464,12 @@ static int elf_core_dump(struct coredump_params *cprm)
43554 offset = dataoff;
43555
43556 size += sizeof(*elf);
43557+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43558 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43559 goto end_coredump;
43560
43561 size += sizeof(*phdr4note);
43562+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43563 if (size > cprm->limit
43564 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43565 goto end_coredump;
43566@@ -1994,7 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43567 phdr.p_offset = offset;
43568 phdr.p_vaddr = vma->vm_start;
43569 phdr.p_paddr = 0;
43570- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43571+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43572 phdr.p_memsz = vma->vm_end - vma->vm_start;
43573 offset += phdr.p_filesz;
43574 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43575@@ -2005,6 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43576 phdr.p_align = ELF_EXEC_PAGESIZE;
43577
43578 size += sizeof(phdr);
43579+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43580 if (size > cprm->limit
43581 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43582 goto end_coredump;
43583@@ -2029,7 +2519,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43584 unsigned long addr;
43585 unsigned long end;
43586
43587- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43588+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43589
43590 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43591 struct page *page;
43592@@ -2038,6 +2528,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43593 page = get_dump_page(addr);
43594 if (page) {
43595 void *kaddr = kmap(page);
43596+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43597 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43598 !dump_write(cprm->file, kaddr,
43599 PAGE_SIZE);
43600@@ -2055,6 +2546,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43601
43602 if (e_phnum == PN_XNUM) {
43603 size += sizeof(*shdr4extnum);
43604+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43605 if (size > cprm->limit
43606 || !dump_write(cprm->file, shdr4extnum,
43607 sizeof(*shdr4extnum)))
43608@@ -2075,6 +2567,97 @@ out:
43609
43610 #endif /* CONFIG_ELF_CORE */
43611
43612+#ifdef CONFIG_PAX_MPROTECT
43613+/* PaX: non-PIC ELF libraries need relocations on their executable segments
43614+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43615+ * we'll remove VM_MAYWRITE for good on RELRO segments.
43616+ *
43617+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43618+ * basis because we want to allow the common case and not the special ones.
43619+ */
43620+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43621+{
43622+ struct elfhdr elf_h;
43623+ struct elf_phdr elf_p;
43624+ unsigned long i;
43625+ unsigned long oldflags;
43626+ bool is_textrel_rw, is_textrel_rx, is_relro;
43627+
43628+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43629+ return;
43630+
43631+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43632+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43633+
43634+#ifdef CONFIG_PAX_ELFRELOCS
43635+ /* possible TEXTREL */
43636+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43637+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43638+#else
43639+ is_textrel_rw = false;
43640+ is_textrel_rx = false;
43641+#endif
43642+
43643+ /* possible RELRO */
43644+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43645+
43646+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43647+ return;
43648+
43649+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43650+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43651+
43652+#ifdef CONFIG_PAX_ETEXECRELOCS
43653+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43654+#else
43655+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43656+#endif
43657+
43658+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43659+ !elf_check_arch(&elf_h) ||
43660+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43661+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43662+ return;
43663+
43664+ for (i = 0UL; i < elf_h.e_phnum; i++) {
43665+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43666+ return;
43667+ switch (elf_p.p_type) {
43668+ case PT_DYNAMIC:
43669+ if (!is_textrel_rw && !is_textrel_rx)
43670+ continue;
43671+ i = 0UL;
43672+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43673+ elf_dyn dyn;
43674+
43675+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43676+ return;
43677+ if (dyn.d_tag == DT_NULL)
43678+ return;
43679+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43680+ gr_log_textrel(vma);
43681+ if (is_textrel_rw)
43682+ vma->vm_flags |= VM_MAYWRITE;
43683+ else
43684+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43685+ vma->vm_flags &= ~VM_MAYWRITE;
43686+ return;
43687+ }
43688+ i++;
43689+ }
43690+ return;
43691+
43692+ case PT_GNU_RELRO:
43693+ if (!is_relro)
43694+ continue;
43695+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43696+ vma->vm_flags &= ~VM_MAYWRITE;
43697+ return;
43698+ }
43699+ }
43700+}
43701+#endif
43702+
43703 static int __init init_elf_binfmt(void)
43704 {
43705 return register_binfmt(&elf_format);
43706diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43707index 1bffbe0..c8c283e 100644
43708--- a/fs/binfmt_flat.c
43709+++ b/fs/binfmt_flat.c
43710@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43711 realdatastart = (unsigned long) -ENOMEM;
43712 printk("Unable to allocate RAM for process data, errno %d\n",
43713 (int)-realdatastart);
43714+ down_write(&current->mm->mmap_sem);
43715 do_munmap(current->mm, textpos, text_len);
43716+ up_write(&current->mm->mmap_sem);
43717 ret = realdatastart;
43718 goto err;
43719 }
43720@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43721 }
43722 if (IS_ERR_VALUE(result)) {
43723 printk("Unable to read data+bss, errno %d\n", (int)-result);
43724+ down_write(&current->mm->mmap_sem);
43725 do_munmap(current->mm, textpos, text_len);
43726 do_munmap(current->mm, realdatastart, len);
43727+ up_write(&current->mm->mmap_sem);
43728 ret = result;
43729 goto err;
43730 }
43731@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43732 }
43733 if (IS_ERR_VALUE(result)) {
43734 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43735+ down_write(&current->mm->mmap_sem);
43736 do_munmap(current->mm, textpos, text_len + data_len + extra +
43737 MAX_SHARED_LIBS * sizeof(unsigned long));
43738+ up_write(&current->mm->mmap_sem);
43739 ret = result;
43740 goto err;
43741 }
43742diff --git a/fs/bio.c b/fs/bio.c
43743index b980ecd..74800bf 100644
43744--- a/fs/bio.c
43745+++ b/fs/bio.c
43746@@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43747 /*
43748 * Overflow, abort
43749 */
43750- if (end < start)
43751+ if (end < start || end - start > INT_MAX - nr_pages)
43752 return ERR_PTR(-EINVAL);
43753
43754 nr_pages += end - start;
43755@@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43756 const int read = bio_data_dir(bio) == READ;
43757 struct bio_map_data *bmd = bio->bi_private;
43758 int i;
43759- char *p = bmd->sgvecs[0].iov_base;
43760+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43761
43762 __bio_for_each_segment(bvec, bio, i, 0) {
43763 char *addr = page_address(bvec->bv_page);
43764diff --git a/fs/block_dev.c b/fs/block_dev.c
43765index 5e9f198..6bf9b1c 100644
43766--- a/fs/block_dev.c
43767+++ b/fs/block_dev.c
43768@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43769 else if (bdev->bd_contains == bdev)
43770 return true; /* is a whole device which isn't held */
43771
43772- else if (whole->bd_holder == bd_may_claim)
43773+ else if (whole->bd_holder == (void *)bd_may_claim)
43774 return true; /* is a partition of a device that is being partitioned */
43775 else if (whole->bd_holder != NULL)
43776 return false; /* is a partition of a held device */
43777diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43778index d986824..af1befd 100644
43779--- a/fs/btrfs/check-integrity.c
43780+++ b/fs/btrfs/check-integrity.c
43781@@ -157,7 +157,7 @@ struct btrfsic_block {
43782 union {
43783 bio_end_io_t *bio;
43784 bh_end_io_t *bh;
43785- } orig_bio_bh_end_io;
43786+ } __no_const orig_bio_bh_end_io;
43787 int submit_bio_bh_rw;
43788 u64 flush_gen; /* only valid if !never_written */
43789 };
43790diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43791index 0639a55..7d9e07f 100644
43792--- a/fs/btrfs/ctree.c
43793+++ b/fs/btrfs/ctree.c
43794@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43795 free_extent_buffer(buf);
43796 add_root_to_dirty_list(root);
43797 } else {
43798- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43799- parent_start = parent->start;
43800- else
43801+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43802+ if (parent)
43803+ parent_start = parent->start;
43804+ else
43805+ parent_start = 0;
43806+ } else
43807 parent_start = 0;
43808
43809 WARN_ON(trans->transid != btrfs_header_generation(parent));
43810diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43811index 892b347..b3db246 100644
43812--- a/fs/btrfs/inode.c
43813+++ b/fs/btrfs/inode.c
43814@@ -6930,7 +6930,7 @@ fail:
43815 return -ENOMEM;
43816 }
43817
43818-static int btrfs_getattr(struct vfsmount *mnt,
43819+int btrfs_getattr(struct vfsmount *mnt,
43820 struct dentry *dentry, struct kstat *stat)
43821 {
43822 struct inode *inode = dentry->d_inode;
43823@@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43824 return 0;
43825 }
43826
43827+EXPORT_SYMBOL(btrfs_getattr);
43828+
43829+dev_t get_btrfs_dev_from_inode(struct inode *inode)
43830+{
43831+ return BTRFS_I(inode)->root->anon_dev;
43832+}
43833+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43834+
43835 /*
43836 * If a file is moved, it will inherit the cow and compression flags of the new
43837 * directory.
43838diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43839index 1b36f19..5ac7360 100644
43840--- a/fs/btrfs/ioctl.c
43841+++ b/fs/btrfs/ioctl.c
43842@@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43843 for (i = 0; i < num_types; i++) {
43844 struct btrfs_space_info *tmp;
43845
43846+ /* Don't copy in more than we allocated */
43847 if (!slot_count)
43848 break;
43849
43850+ slot_count--;
43851+
43852 info = NULL;
43853 rcu_read_lock();
43854 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43855@@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43856 memcpy(dest, &space, sizeof(space));
43857 dest++;
43858 space_args.total_spaces++;
43859- slot_count--;
43860 }
43861- if (!slot_count)
43862- break;
43863 }
43864 up_read(&info->groups_sem);
43865 }
43866
43867- user_dest = (struct btrfs_ioctl_space_info *)
43868+ user_dest = (struct btrfs_ioctl_space_info __user *)
43869 (arg + sizeof(struct btrfs_ioctl_space_args));
43870
43871 if (copy_to_user(user_dest, dest_orig, alloc_size))
43872diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43873index 8c1aae2..1e46446 100644
43874--- a/fs/btrfs/relocation.c
43875+++ b/fs/btrfs/relocation.c
43876@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43877 }
43878 spin_unlock(&rc->reloc_root_tree.lock);
43879
43880- BUG_ON((struct btrfs_root *)node->data != root);
43881+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43882
43883 if (!del) {
43884 spin_lock(&rc->reloc_root_tree.lock);
43885diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43886index 622f469..e8d2d55 100644
43887--- a/fs/cachefiles/bind.c
43888+++ b/fs/cachefiles/bind.c
43889@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43890 args);
43891
43892 /* start by checking things over */
43893- ASSERT(cache->fstop_percent >= 0 &&
43894- cache->fstop_percent < cache->fcull_percent &&
43895+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43896 cache->fcull_percent < cache->frun_percent &&
43897 cache->frun_percent < 100);
43898
43899- ASSERT(cache->bstop_percent >= 0 &&
43900- cache->bstop_percent < cache->bcull_percent &&
43901+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43902 cache->bcull_percent < cache->brun_percent &&
43903 cache->brun_percent < 100);
43904
43905diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43906index 0a1467b..6a53245 100644
43907--- a/fs/cachefiles/daemon.c
43908+++ b/fs/cachefiles/daemon.c
43909@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43910 if (n > buflen)
43911 return -EMSGSIZE;
43912
43913- if (copy_to_user(_buffer, buffer, n) != 0)
43914+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43915 return -EFAULT;
43916
43917 return n;
43918@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43919 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43920 return -EIO;
43921
43922- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43923+ if (datalen > PAGE_SIZE - 1)
43924 return -EOPNOTSUPP;
43925
43926 /* drag the command string into the kernel so we can parse it */
43927@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43928 if (args[0] != '%' || args[1] != '\0')
43929 return -EINVAL;
43930
43931- if (fstop < 0 || fstop >= cache->fcull_percent)
43932+ if (fstop >= cache->fcull_percent)
43933 return cachefiles_daemon_range_error(cache, args);
43934
43935 cache->fstop_percent = fstop;
43936@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43937 if (args[0] != '%' || args[1] != '\0')
43938 return -EINVAL;
43939
43940- if (bstop < 0 || bstop >= cache->bcull_percent)
43941+ if (bstop >= cache->bcull_percent)
43942 return cachefiles_daemon_range_error(cache, args);
43943
43944 cache->bstop_percent = bstop;
43945diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43946index bd6bc1b..b627b53 100644
43947--- a/fs/cachefiles/internal.h
43948+++ b/fs/cachefiles/internal.h
43949@@ -57,7 +57,7 @@ struct cachefiles_cache {
43950 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43951 struct rb_root active_nodes; /* active nodes (can't be culled) */
43952 rwlock_t active_lock; /* lock for active_nodes */
43953- atomic_t gravecounter; /* graveyard uniquifier */
43954+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43955 unsigned frun_percent; /* when to stop culling (% files) */
43956 unsigned fcull_percent; /* when to start culling (% files) */
43957 unsigned fstop_percent; /* when to stop allocating (% files) */
43958@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43959 * proc.c
43960 */
43961 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43962-extern atomic_t cachefiles_lookup_histogram[HZ];
43963-extern atomic_t cachefiles_mkdir_histogram[HZ];
43964-extern atomic_t cachefiles_create_histogram[HZ];
43965+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43966+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43967+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43968
43969 extern int __init cachefiles_proc_init(void);
43970 extern void cachefiles_proc_cleanup(void);
43971 static inline
43972-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43973+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43974 {
43975 unsigned long jif = jiffies - start_jif;
43976 if (jif >= HZ)
43977 jif = HZ - 1;
43978- atomic_inc(&histogram[jif]);
43979+ atomic_inc_unchecked(&histogram[jif]);
43980 }
43981
43982 #else
43983diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43984index a0358c2..d6137f2 100644
43985--- a/fs/cachefiles/namei.c
43986+++ b/fs/cachefiles/namei.c
43987@@ -318,7 +318,7 @@ try_again:
43988 /* first step is to make up a grave dentry in the graveyard */
43989 sprintf(nbuffer, "%08x%08x",
43990 (uint32_t) get_seconds(),
43991- (uint32_t) atomic_inc_return(&cache->gravecounter));
43992+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43993
43994 /* do the multiway lock magic */
43995 trap = lock_rename(cache->graveyard, dir);
43996diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43997index eccd339..4c1d995 100644
43998--- a/fs/cachefiles/proc.c
43999+++ b/fs/cachefiles/proc.c
44000@@ -14,9 +14,9 @@
44001 #include <linux/seq_file.h>
44002 #include "internal.h"
44003
44004-atomic_t cachefiles_lookup_histogram[HZ];
44005-atomic_t cachefiles_mkdir_histogram[HZ];
44006-atomic_t cachefiles_create_histogram[HZ];
44007+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
44008+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
44009+atomic_unchecked_t cachefiles_create_histogram[HZ];
44010
44011 /*
44012 * display the latency histogram
44013@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
44014 return 0;
44015 default:
44016 index = (unsigned long) v - 3;
44017- x = atomic_read(&cachefiles_lookup_histogram[index]);
44018- y = atomic_read(&cachefiles_mkdir_histogram[index]);
44019- z = atomic_read(&cachefiles_create_histogram[index]);
44020+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
44021+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
44022+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
44023 if (x == 0 && y == 0 && z == 0)
44024 return 0;
44025
44026diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
44027index 0e3c092..818480e 100644
44028--- a/fs/cachefiles/rdwr.c
44029+++ b/fs/cachefiles/rdwr.c
44030@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
44031 old_fs = get_fs();
44032 set_fs(KERNEL_DS);
44033 ret = file->f_op->write(
44034- file, (const void __user *) data, len, &pos);
44035+ file, (const void __force_user *) data, len, &pos);
44036 set_fs(old_fs);
44037 kunmap(page);
44038 if (ret != len)
44039diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
44040index 3e8094b..cb3ff3d 100644
44041--- a/fs/ceph/dir.c
44042+++ b/fs/ceph/dir.c
44043@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
44044 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
44045 struct ceph_mds_client *mdsc = fsc->mdsc;
44046 unsigned frag = fpos_frag(filp->f_pos);
44047- int off = fpos_off(filp->f_pos);
44048+ unsigned int off = fpos_off(filp->f_pos);
44049 int err;
44050 u32 ftype;
44051 struct ceph_mds_reply_info_parsed *rinfo;
44052@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
44053 if (nd &&
44054 (nd->flags & LOOKUP_OPEN) &&
44055 !(nd->intent.open.flags & O_CREAT)) {
44056- int mode = nd->intent.open.create_mode & ~current->fs->umask;
44057+ int mode = nd->intent.open.create_mode & ~current_umask();
44058 return ceph_lookup_open(dir, dentry, nd, mode, 1);
44059 }
44060
44061diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
44062index cfd1ce3..6b13a74 100644
44063--- a/fs/cifs/asn1.c
44064+++ b/fs/cifs/asn1.c
44065@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
44066
44067 static int
44068 asn1_oid_decode(struct asn1_ctx *ctx,
44069+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
44070+static int
44071+asn1_oid_decode(struct asn1_ctx *ctx,
44072 unsigned char *eoc, unsigned long **oid, unsigned int *len)
44073 {
44074 unsigned long subid;
44075diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
44076index 24b3dfc..3cd5454 100644
44077--- a/fs/cifs/cifs_debug.c
44078+++ b/fs/cifs/cifs_debug.c
44079@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
44080
44081 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
44082 #ifdef CONFIG_CIFS_STATS2
44083- atomic_set(&totBufAllocCount, 0);
44084- atomic_set(&totSmBufAllocCount, 0);
44085+ atomic_set_unchecked(&totBufAllocCount, 0);
44086+ atomic_set_unchecked(&totSmBufAllocCount, 0);
44087 #endif /* CONFIG_CIFS_STATS2 */
44088 spin_lock(&cifs_tcp_ses_lock);
44089 list_for_each(tmp1, &cifs_tcp_ses_list) {
44090@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
44091 tcon = list_entry(tmp3,
44092 struct cifs_tcon,
44093 tcon_list);
44094- atomic_set(&tcon->num_smbs_sent, 0);
44095- atomic_set(&tcon->num_writes, 0);
44096- atomic_set(&tcon->num_reads, 0);
44097- atomic_set(&tcon->num_oplock_brks, 0);
44098- atomic_set(&tcon->num_opens, 0);
44099- atomic_set(&tcon->num_posixopens, 0);
44100- atomic_set(&tcon->num_posixmkdirs, 0);
44101- atomic_set(&tcon->num_closes, 0);
44102- atomic_set(&tcon->num_deletes, 0);
44103- atomic_set(&tcon->num_mkdirs, 0);
44104- atomic_set(&tcon->num_rmdirs, 0);
44105- atomic_set(&tcon->num_renames, 0);
44106- atomic_set(&tcon->num_t2renames, 0);
44107- atomic_set(&tcon->num_ffirst, 0);
44108- atomic_set(&tcon->num_fnext, 0);
44109- atomic_set(&tcon->num_fclose, 0);
44110- atomic_set(&tcon->num_hardlinks, 0);
44111- atomic_set(&tcon->num_symlinks, 0);
44112- atomic_set(&tcon->num_locks, 0);
44113+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
44114+ atomic_set_unchecked(&tcon->num_writes, 0);
44115+ atomic_set_unchecked(&tcon->num_reads, 0);
44116+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
44117+ atomic_set_unchecked(&tcon->num_opens, 0);
44118+ atomic_set_unchecked(&tcon->num_posixopens, 0);
44119+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
44120+ atomic_set_unchecked(&tcon->num_closes, 0);
44121+ atomic_set_unchecked(&tcon->num_deletes, 0);
44122+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
44123+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
44124+ atomic_set_unchecked(&tcon->num_renames, 0);
44125+ atomic_set_unchecked(&tcon->num_t2renames, 0);
44126+ atomic_set_unchecked(&tcon->num_ffirst, 0);
44127+ atomic_set_unchecked(&tcon->num_fnext, 0);
44128+ atomic_set_unchecked(&tcon->num_fclose, 0);
44129+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
44130+ atomic_set_unchecked(&tcon->num_symlinks, 0);
44131+ atomic_set_unchecked(&tcon->num_locks, 0);
44132 }
44133 }
44134 }
44135@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44136 smBufAllocCount.counter, cifs_min_small);
44137 #ifdef CONFIG_CIFS_STATS2
44138 seq_printf(m, "Total Large %d Small %d Allocations\n",
44139- atomic_read(&totBufAllocCount),
44140- atomic_read(&totSmBufAllocCount));
44141+ atomic_read_unchecked(&totBufAllocCount),
44142+ atomic_read_unchecked(&totSmBufAllocCount));
44143 #endif /* CONFIG_CIFS_STATS2 */
44144
44145 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
44146@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
44147 if (tcon->need_reconnect)
44148 seq_puts(m, "\tDISCONNECTED ");
44149 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
44150- atomic_read(&tcon->num_smbs_sent),
44151- atomic_read(&tcon->num_oplock_brks));
44152+ atomic_read_unchecked(&tcon->num_smbs_sent),
44153+ atomic_read_unchecked(&tcon->num_oplock_brks));
44154 seq_printf(m, "\nReads: %d Bytes: %lld",
44155- atomic_read(&tcon->num_reads),
44156+ atomic_read_unchecked(&tcon->num_reads),
44157 (long long)(tcon->bytes_read));
44158 seq_printf(m, "\nWrites: %d Bytes: %lld",
44159- atomic_read(&tcon->num_writes),
44160+ atomic_read_unchecked(&tcon->num_writes),
44161 (long long)(tcon->bytes_written));
44162 seq_printf(m, "\nFlushes: %d",
44163- atomic_read(&tcon->num_flushes));
44164+ atomic_read_unchecked(&tcon->num_flushes));
44165 seq_printf(m, "\nLocks: %d HardLinks: %d "
44166 "Symlinks: %d",
44167- atomic_read(&tcon->num_locks),
44168- atomic_read(&tcon->num_hardlinks),
44169- atomic_read(&tcon->num_symlinks));
44170+ atomic_read_unchecked(&tcon->num_locks),
44171+ atomic_read_unchecked(&tcon->num_hardlinks),
44172+ atomic_read_unchecked(&tcon->num_symlinks));
44173 seq_printf(m, "\nOpens: %d Closes: %d "
44174 "Deletes: %d",
44175- atomic_read(&tcon->num_opens),
44176- atomic_read(&tcon->num_closes),
44177- atomic_read(&tcon->num_deletes));
44178+ atomic_read_unchecked(&tcon->num_opens),
44179+ atomic_read_unchecked(&tcon->num_closes),
44180+ atomic_read_unchecked(&tcon->num_deletes));
44181 seq_printf(m, "\nPosix Opens: %d "
44182 "Posix Mkdirs: %d",
44183- atomic_read(&tcon->num_posixopens),
44184- atomic_read(&tcon->num_posixmkdirs));
44185+ atomic_read_unchecked(&tcon->num_posixopens),
44186+ atomic_read_unchecked(&tcon->num_posixmkdirs));
44187 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
44188- atomic_read(&tcon->num_mkdirs),
44189- atomic_read(&tcon->num_rmdirs));
44190+ atomic_read_unchecked(&tcon->num_mkdirs),
44191+ atomic_read_unchecked(&tcon->num_rmdirs));
44192 seq_printf(m, "\nRenames: %d T2 Renames %d",
44193- atomic_read(&tcon->num_renames),
44194- atomic_read(&tcon->num_t2renames));
44195+ atomic_read_unchecked(&tcon->num_renames),
44196+ atomic_read_unchecked(&tcon->num_t2renames));
44197 seq_printf(m, "\nFindFirst: %d FNext %d "
44198 "FClose %d",
44199- atomic_read(&tcon->num_ffirst),
44200- atomic_read(&tcon->num_fnext),
44201- atomic_read(&tcon->num_fclose));
44202+ atomic_read_unchecked(&tcon->num_ffirst),
44203+ atomic_read_unchecked(&tcon->num_fnext),
44204+ atomic_read_unchecked(&tcon->num_fclose));
44205 }
44206 }
44207 }
44208diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
44209index 6ee1cb4..8443157 100644
44210--- a/fs/cifs/cifsfs.c
44211+++ b/fs/cifs/cifsfs.c
44212@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
44213 cifs_req_cachep = kmem_cache_create("cifs_request",
44214 CIFSMaxBufSize +
44215 MAX_CIFS_HDR_SIZE, 0,
44216- SLAB_HWCACHE_ALIGN, NULL);
44217+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
44218 if (cifs_req_cachep == NULL)
44219 return -ENOMEM;
44220
44221@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
44222 efficient to alloc 1 per page off the slab compared to 17K (5page)
44223 alloc of large cifs buffers even when page debugging is on */
44224 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
44225- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
44226+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
44227 NULL);
44228 if (cifs_sm_req_cachep == NULL) {
44229 mempool_destroy(cifs_req_poolp);
44230@@ -1101,8 +1101,8 @@ init_cifs(void)
44231 atomic_set(&bufAllocCount, 0);
44232 atomic_set(&smBufAllocCount, 0);
44233 #ifdef CONFIG_CIFS_STATS2
44234- atomic_set(&totBufAllocCount, 0);
44235- atomic_set(&totSmBufAllocCount, 0);
44236+ atomic_set_unchecked(&totBufAllocCount, 0);
44237+ atomic_set_unchecked(&totSmBufAllocCount, 0);
44238 #endif /* CONFIG_CIFS_STATS2 */
44239
44240 atomic_set(&midCount, 0);
44241diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
44242index d47d20a..77e8b33 100644
44243--- a/fs/cifs/cifsglob.h
44244+++ b/fs/cifs/cifsglob.h
44245@@ -388,28 +388,28 @@ struct cifs_tcon {
44246 __u16 Flags; /* optional support bits */
44247 enum statusEnum tidStatus;
44248 #ifdef CONFIG_CIFS_STATS
44249- atomic_t num_smbs_sent;
44250- atomic_t num_writes;
44251- atomic_t num_reads;
44252- atomic_t num_flushes;
44253- atomic_t num_oplock_brks;
44254- atomic_t num_opens;
44255- atomic_t num_closes;
44256- atomic_t num_deletes;
44257- atomic_t num_mkdirs;
44258- atomic_t num_posixopens;
44259- atomic_t num_posixmkdirs;
44260- atomic_t num_rmdirs;
44261- atomic_t num_renames;
44262- atomic_t num_t2renames;
44263- atomic_t num_ffirst;
44264- atomic_t num_fnext;
44265- atomic_t num_fclose;
44266- atomic_t num_hardlinks;
44267- atomic_t num_symlinks;
44268- atomic_t num_locks;
44269- atomic_t num_acl_get;
44270- atomic_t num_acl_set;
44271+ atomic_unchecked_t num_smbs_sent;
44272+ atomic_unchecked_t num_writes;
44273+ atomic_unchecked_t num_reads;
44274+ atomic_unchecked_t num_flushes;
44275+ atomic_unchecked_t num_oplock_brks;
44276+ atomic_unchecked_t num_opens;
44277+ atomic_unchecked_t num_closes;
44278+ atomic_unchecked_t num_deletes;
44279+ atomic_unchecked_t num_mkdirs;
44280+ atomic_unchecked_t num_posixopens;
44281+ atomic_unchecked_t num_posixmkdirs;
44282+ atomic_unchecked_t num_rmdirs;
44283+ atomic_unchecked_t num_renames;
44284+ atomic_unchecked_t num_t2renames;
44285+ atomic_unchecked_t num_ffirst;
44286+ atomic_unchecked_t num_fnext;
44287+ atomic_unchecked_t num_fclose;
44288+ atomic_unchecked_t num_hardlinks;
44289+ atomic_unchecked_t num_symlinks;
44290+ atomic_unchecked_t num_locks;
44291+ atomic_unchecked_t num_acl_get;
44292+ atomic_unchecked_t num_acl_set;
44293 #ifdef CONFIG_CIFS_STATS2
44294 unsigned long long time_writes;
44295 unsigned long long time_reads;
44296@@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
44297 }
44298
44299 #ifdef CONFIG_CIFS_STATS
44300-#define cifs_stats_inc atomic_inc
44301+#define cifs_stats_inc atomic_inc_unchecked
44302
44303 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
44304 unsigned int bytes)
44305@@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
44306 /* Various Debug counters */
44307 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
44308 #ifdef CONFIG_CIFS_STATS2
44309-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
44310-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
44311+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
44312+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
44313 #endif
44314 GLOBAL_EXTERN atomic_t smBufAllocCount;
44315 GLOBAL_EXTERN atomic_t midCount;
44316diff --git a/fs/cifs/link.c b/fs/cifs/link.c
44317index 6b0e064..94e6c3c 100644
44318--- a/fs/cifs/link.c
44319+++ b/fs/cifs/link.c
44320@@ -600,7 +600,7 @@ symlink_exit:
44321
44322 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
44323 {
44324- char *p = nd_get_link(nd);
44325+ const char *p = nd_get_link(nd);
44326 if (!IS_ERR(p))
44327 kfree(p);
44328 }
44329diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
44330index 703ef5c..2a44ed5 100644
44331--- a/fs/cifs/misc.c
44332+++ b/fs/cifs/misc.c
44333@@ -156,7 +156,7 @@ cifs_buf_get(void)
44334 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
44335 atomic_inc(&bufAllocCount);
44336 #ifdef CONFIG_CIFS_STATS2
44337- atomic_inc(&totBufAllocCount);
44338+ atomic_inc_unchecked(&totBufAllocCount);
44339 #endif /* CONFIG_CIFS_STATS2 */
44340 }
44341
44342@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
44343 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
44344 atomic_inc(&smBufAllocCount);
44345 #ifdef CONFIG_CIFS_STATS2
44346- atomic_inc(&totSmBufAllocCount);
44347+ atomic_inc_unchecked(&totSmBufAllocCount);
44348 #endif /* CONFIG_CIFS_STATS2 */
44349
44350 }
44351diff --git a/fs/coda/cache.c b/fs/coda/cache.c
44352index 6901578..d402eb5 100644
44353--- a/fs/coda/cache.c
44354+++ b/fs/coda/cache.c
44355@@ -24,7 +24,7 @@
44356 #include "coda_linux.h"
44357 #include "coda_cache.h"
44358
44359-static atomic_t permission_epoch = ATOMIC_INIT(0);
44360+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44361
44362 /* replace or extend an acl cache hit */
44363 void coda_cache_enter(struct inode *inode, int mask)
44364@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
44365 struct coda_inode_info *cii = ITOC(inode);
44366
44367 spin_lock(&cii->c_lock);
44368- cii->c_cached_epoch = atomic_read(&permission_epoch);
44369+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44370 if (cii->c_uid != current_fsuid()) {
44371 cii->c_uid = current_fsuid();
44372 cii->c_cached_perm = mask;
44373@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
44374 {
44375 struct coda_inode_info *cii = ITOC(inode);
44376 spin_lock(&cii->c_lock);
44377- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44378+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44379 spin_unlock(&cii->c_lock);
44380 }
44381
44382 /* remove all acl caches */
44383 void coda_cache_clear_all(struct super_block *sb)
44384 {
44385- atomic_inc(&permission_epoch);
44386+ atomic_inc_unchecked(&permission_epoch);
44387 }
44388
44389
44390@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
44391 spin_lock(&cii->c_lock);
44392 hit = (mask & cii->c_cached_perm) == mask &&
44393 cii->c_uid == current_fsuid() &&
44394- cii->c_cached_epoch == atomic_read(&permission_epoch);
44395+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44396 spin_unlock(&cii->c_lock);
44397
44398 return hit;
44399diff --git a/fs/compat.c b/fs/compat.c
44400index 07880ba..3fb2862 100644
44401--- a/fs/compat.c
44402+++ b/fs/compat.c
44403@@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
44404
44405 set_fs(KERNEL_DS);
44406 /* The __user pointer cast is valid because of the set_fs() */
44407- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44408+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44409 set_fs(oldfs);
44410 /* truncating is ok because it's a user address */
44411 if (!ret)
44412@@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
44413 goto out;
44414
44415 ret = -EINVAL;
44416- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44417+ if (nr_segs > UIO_MAXIOV)
44418 goto out;
44419 if (nr_segs > fast_segs) {
44420 ret = -ENOMEM;
44421@@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
44422
44423 struct compat_readdir_callback {
44424 struct compat_old_linux_dirent __user *dirent;
44425+ struct file * file;
44426 int result;
44427 };
44428
44429@@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
44430 buf->result = -EOVERFLOW;
44431 return -EOVERFLOW;
44432 }
44433+
44434+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44435+ return 0;
44436+
44437 buf->result++;
44438 dirent = buf->dirent;
44439 if (!access_ok(VERIFY_WRITE, dirent,
44440@@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
44441
44442 buf.result = 0;
44443 buf.dirent = dirent;
44444+ buf.file = file;
44445
44446 error = vfs_readdir(file, compat_fillonedir, &buf);
44447 if (buf.result)
44448@@ -901,6 +907,7 @@ struct compat_linux_dirent {
44449 struct compat_getdents_callback {
44450 struct compat_linux_dirent __user *current_dir;
44451 struct compat_linux_dirent __user *previous;
44452+ struct file * file;
44453 int count;
44454 int error;
44455 };
44456@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
44457 buf->error = -EOVERFLOW;
44458 return -EOVERFLOW;
44459 }
44460+
44461+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44462+ return 0;
44463+
44464 dirent = buf->previous;
44465 if (dirent) {
44466 if (__put_user(offset, &dirent->d_off))
44467@@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44468 buf.previous = NULL;
44469 buf.count = count;
44470 buf.error = 0;
44471+ buf.file = file;
44472
44473 error = vfs_readdir(file, compat_filldir, &buf);
44474 if (error >= 0)
44475@@ -990,6 +1002,7 @@ out:
44476 struct compat_getdents_callback64 {
44477 struct linux_dirent64 __user *current_dir;
44478 struct linux_dirent64 __user *previous;
44479+ struct file * file;
44480 int count;
44481 int error;
44482 };
44483@@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44484 buf->error = -EINVAL; /* only used if we fail.. */
44485 if (reclen > buf->count)
44486 return -EINVAL;
44487+
44488+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44489+ return 0;
44490+
44491 dirent = buf->previous;
44492
44493 if (dirent) {
44494@@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44495 buf.previous = NULL;
44496 buf.count = count;
44497 buf.error = 0;
44498+ buf.file = file;
44499
44500 error = vfs_readdir(file, compat_filldir64, &buf);
44501 if (error >= 0)
44502 error = buf.error;
44503 lastdirent = buf.previous;
44504 if (lastdirent) {
44505- typeof(lastdirent->d_off) d_off = file->f_pos;
44506+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44507 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44508 error = -EFAULT;
44509 else
44510diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44511index 112e45a..b59845b 100644
44512--- a/fs/compat_binfmt_elf.c
44513+++ b/fs/compat_binfmt_elf.c
44514@@ -30,11 +30,13 @@
44515 #undef elf_phdr
44516 #undef elf_shdr
44517 #undef elf_note
44518+#undef elf_dyn
44519 #undef elf_addr_t
44520 #define elfhdr elf32_hdr
44521 #define elf_phdr elf32_phdr
44522 #define elf_shdr elf32_shdr
44523 #define elf_note elf32_note
44524+#define elf_dyn Elf32_Dyn
44525 #define elf_addr_t Elf32_Addr
44526
44527 /*
44528diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
44529index a26bea1..ae23e72 100644
44530--- a/fs/compat_ioctl.c
44531+++ b/fs/compat_ioctl.c
44532@@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44533
44534 err = get_user(palp, &up->palette);
44535 err |= get_user(length, &up->length);
44536+ if (err)
44537+ return -EFAULT;
44538
44539 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44540 err = put_user(compat_ptr(palp), &up_native->palette);
44541@@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44542 return -EFAULT;
44543 if (__get_user(udata, &ss32->iomem_base))
44544 return -EFAULT;
44545- ss.iomem_base = compat_ptr(udata);
44546+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44547 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44548 __get_user(ss.port_high, &ss32->port_high))
44549 return -EFAULT;
44550@@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
44551 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44552 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44553 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44554- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44555+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44556 return -EFAULT;
44557
44558 return ioctl_preallocate(file, p);
44559@@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44560 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44561 {
44562 unsigned int a, b;
44563- a = *(unsigned int *)p;
44564- b = *(unsigned int *)q;
44565+ a = *(const unsigned int *)p;
44566+ b = *(const unsigned int *)q;
44567 if (a > b)
44568 return 1;
44569 if (a < b)
44570diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
44571index 5ddd7eb..c18bf04 100644
44572--- a/fs/configfs/dir.c
44573+++ b/fs/configfs/dir.c
44574@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44575 }
44576 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44577 struct configfs_dirent *next;
44578- const char * name;
44579+ const unsigned char * name;
44580+ char d_name[sizeof(next->s_dentry->d_iname)];
44581 int len;
44582 struct inode *inode = NULL;
44583
44584@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44585 continue;
44586
44587 name = configfs_get_name(next);
44588- len = strlen(name);
44589+ if (next->s_dentry && name == next->s_dentry->d_iname) {
44590+ len = next->s_dentry->d_name.len;
44591+ memcpy(d_name, name, len);
44592+ name = d_name;
44593+ } else
44594+ len = strlen(name);
44595
44596 /*
44597 * We'll have a dentry and an inode for
44598diff --git a/fs/configfs/file.c b/fs/configfs/file.c
44599index 2b6cb23..d76e879 100644
44600--- a/fs/configfs/file.c
44601+++ b/fs/configfs/file.c
44602@@ -135,6 +135,8 @@ out:
44603 */
44604
44605 static int
44606+fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count) __size_overflow(3);
44607+static int
44608 fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
44609 {
44610 int error;
44611diff --git a/fs/dcache.c b/fs/dcache.c
44612index 2576d14..0cec38d 100644
44613--- a/fs/dcache.c
44614+++ b/fs/dcache.c
44615@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
44616 static struct hlist_bl_head *dentry_hashtable __read_mostly;
44617
44618 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
44619- unsigned long hash)
44620+ unsigned int hash)
44621 {
44622- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
44623- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
44624+ hash += (unsigned long) parent / L1_CACHE_BYTES;
44625+ hash = hash + (hash >> D_HASHBITS);
44626 return dentry_hashtable + (hash & D_HASHMASK);
44627 }
44628
44629@@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
44630 mempages -= reserve;
44631
44632 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44633- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44634+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44635
44636 dcache_init();
44637 inode_init();
44638diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
44639index 956d5dd..e755e04 100644
44640--- a/fs/debugfs/inode.c
44641+++ b/fs/debugfs/inode.c
44642@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44643 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44644 {
44645 return debugfs_create_file(name,
44646+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44647+ S_IFDIR | S_IRWXU,
44648+#else
44649 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44650+#endif
44651 parent, NULL, NULL);
44652 }
44653 EXPORT_SYMBOL_GPL(debugfs_create_dir);
44654diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
44655index ab35b11..b30af66 100644
44656--- a/fs/ecryptfs/inode.c
44657+++ b/fs/ecryptfs/inode.c
44658@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44659 old_fs = get_fs();
44660 set_fs(get_ds());
44661 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44662- (char __user *)lower_buf,
44663+ (char __force_user *)lower_buf,
44664 lower_bufsiz);
44665 set_fs(old_fs);
44666 if (rc < 0)
44667@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44668 }
44669 old_fs = get_fs();
44670 set_fs(get_ds());
44671- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
44672+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
44673 set_fs(old_fs);
44674 if (rc < 0) {
44675 kfree(buf);
44676@@ -733,7 +733,7 @@ out:
44677 static void
44678 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44679 {
44680- char *buf = nd_get_link(nd);
44681+ const char *buf = nd_get_link(nd);
44682 if (!IS_ERR(buf)) {
44683 /* Free the char* */
44684 kfree(buf);
44685diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
44686index 3a06f40..f7af544 100644
44687--- a/fs/ecryptfs/miscdev.c
44688+++ b/fs/ecryptfs/miscdev.c
44689@@ -345,7 +345,7 @@ check_list:
44690 goto out_unlock_msg_ctx;
44691 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44692 if (msg_ctx->msg) {
44693- if (copy_to_user(&buf[i], packet_length, packet_length_size))
44694+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44695 goto out_unlock_msg_ctx;
44696 i += packet_length_size;
44697 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
44698diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
44699index b2a34a1..162fa69 100644
44700--- a/fs/ecryptfs/read_write.c
44701+++ b/fs/ecryptfs/read_write.c
44702@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44703 return -EIO;
44704 fs_save = get_fs();
44705 set_fs(get_ds());
44706- rc = vfs_write(lower_file, data, size, &offset);
44707+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44708 set_fs(fs_save);
44709 mark_inode_dirty_sync(ecryptfs_inode);
44710 return rc;
44711@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44712 return -EIO;
44713 fs_save = get_fs();
44714 set_fs(get_ds());
44715- rc = vfs_read(lower_file, data, size, &offset);
44716+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44717 set_fs(fs_save);
44718 return rc;
44719 }
44720diff --git a/fs/exec.c b/fs/exec.c
44721index ae42277..32c9035 100644
44722--- a/fs/exec.c
44723+++ b/fs/exec.c
44724@@ -55,6 +55,13 @@
44725 #include <linux/pipe_fs_i.h>
44726 #include <linux/oom.h>
44727 #include <linux/compat.h>
44728+#include <linux/random.h>
44729+#include <linux/seq_file.h>
44730+
44731+#ifdef CONFIG_PAX_REFCOUNT
44732+#include <linux/kallsyms.h>
44733+#include <linux/kdebug.h>
44734+#endif
44735
44736 #include <asm/uaccess.h>
44737 #include <asm/mmu_context.h>
44738@@ -63,6 +70,15 @@
44739 #include <trace/events/task.h>
44740 #include "internal.h"
44741
44742+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
44743+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
44744+#endif
44745+
44746+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44747+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44748+EXPORT_SYMBOL(pax_set_initial_flags_func);
44749+#endif
44750+
44751 int core_uses_pid;
44752 char core_pattern[CORENAME_MAX_SIZE] = "core";
44753 unsigned int core_pipe_limit;
44754@@ -72,7 +88,7 @@ struct core_name {
44755 char *corename;
44756 int used, size;
44757 };
44758-static atomic_t call_count = ATOMIC_INIT(1);
44759+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44760
44761 /* The maximal length of core_pattern is also specified in sysctl.c */
44762
44763@@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44764 int write)
44765 {
44766 struct page *page;
44767- int ret;
44768
44769-#ifdef CONFIG_STACK_GROWSUP
44770- if (write) {
44771- ret = expand_downwards(bprm->vma, pos);
44772- if (ret < 0)
44773- return NULL;
44774- }
44775-#endif
44776- ret = get_user_pages(current, bprm->mm, pos,
44777- 1, write, 1, &page, NULL);
44778- if (ret <= 0)
44779+ if (0 > expand_downwards(bprm->vma, pos))
44780+ return NULL;
44781+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44782 return NULL;
44783
44784 if (write) {
44785@@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44786 if (size <= ARG_MAX)
44787 return page;
44788
44789+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44790+ // only allow 512KB for argv+env on suid/sgid binaries
44791+ // to prevent easy ASLR exhaustion
44792+ if (((bprm->cred->euid != current_euid()) ||
44793+ (bprm->cred->egid != current_egid())) &&
44794+ (size > (512 * 1024))) {
44795+ put_page(page);
44796+ return NULL;
44797+ }
44798+#endif
44799+
44800 /*
44801 * Limit to 1/4-th the stack size for the argv+env strings.
44802 * This ensures that:
44803@@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44804 vma->vm_end = STACK_TOP_MAX;
44805 vma->vm_start = vma->vm_end - PAGE_SIZE;
44806 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44807+
44808+#ifdef CONFIG_PAX_SEGMEXEC
44809+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44810+#endif
44811+
44812 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44813 INIT_LIST_HEAD(&vma->anon_vma_chain);
44814
44815@@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44816 mm->stack_vm = mm->total_vm = 1;
44817 up_write(&mm->mmap_sem);
44818 bprm->p = vma->vm_end - sizeof(void *);
44819+
44820+#ifdef CONFIG_PAX_RANDUSTACK
44821+ if (randomize_va_space)
44822+ bprm->p ^= random32() & ~PAGE_MASK;
44823+#endif
44824+
44825 return 0;
44826 err:
44827 up_write(&mm->mmap_sem);
44828@@ -398,19 +428,7 @@ err:
44829 return err;
44830 }
44831
44832-struct user_arg_ptr {
44833-#ifdef CONFIG_COMPAT
44834- bool is_compat;
44835-#endif
44836- union {
44837- const char __user *const __user *native;
44838-#ifdef CONFIG_COMPAT
44839- compat_uptr_t __user *compat;
44840-#endif
44841- } ptr;
44842-};
44843-
44844-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44845+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44846 {
44847 const char __user *native;
44848
44849@@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44850 compat_uptr_t compat;
44851
44852 if (get_user(compat, argv.ptr.compat + nr))
44853- return ERR_PTR(-EFAULT);
44854+ return (const char __force_user *)ERR_PTR(-EFAULT);
44855
44856 return compat_ptr(compat);
44857 }
44858 #endif
44859
44860 if (get_user(native, argv.ptr.native + nr))
44861- return ERR_PTR(-EFAULT);
44862+ return (const char __force_user *)ERR_PTR(-EFAULT);
44863
44864 return native;
44865 }
44866@@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44867 if (!p)
44868 break;
44869
44870- if (IS_ERR(p))
44871+ if (IS_ERR((const char __force_kernel *)p))
44872 return -EFAULT;
44873
44874 if (i++ >= max)
44875@@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44876
44877 ret = -EFAULT;
44878 str = get_user_arg_ptr(argv, argc);
44879- if (IS_ERR(str))
44880+ if (IS_ERR((const char __force_kernel *)str))
44881 goto out;
44882
44883 len = strnlen_user(str, MAX_ARG_STRLEN);
44884@@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44885 int r;
44886 mm_segment_t oldfs = get_fs();
44887 struct user_arg_ptr argv = {
44888- .ptr.native = (const char __user *const __user *)__argv,
44889+ .ptr.native = (const char __force_user *const __force_user *)__argv,
44890 };
44891
44892 set_fs(KERNEL_DS);
44893@@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44894 unsigned long new_end = old_end - shift;
44895 struct mmu_gather tlb;
44896
44897- BUG_ON(new_start > new_end);
44898+ if (new_start >= new_end || new_start < mmap_min_addr)
44899+ return -ENOMEM;
44900
44901 /*
44902 * ensure there are no vmas between where we want to go
44903@@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44904 if (vma != find_vma(mm, new_start))
44905 return -EFAULT;
44906
44907+#ifdef CONFIG_PAX_SEGMEXEC
44908+ BUG_ON(pax_find_mirror_vma(vma));
44909+#endif
44910+
44911 /*
44912 * cover the whole range: [new_start, old_end)
44913 */
44914@@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44915 stack_top = arch_align_stack(stack_top);
44916 stack_top = PAGE_ALIGN(stack_top);
44917
44918- if (unlikely(stack_top < mmap_min_addr) ||
44919- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44920- return -ENOMEM;
44921-
44922 stack_shift = vma->vm_end - stack_top;
44923
44924 bprm->p -= stack_shift;
44925@@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44926 bprm->exec -= stack_shift;
44927
44928 down_write(&mm->mmap_sem);
44929+
44930+ /* Move stack pages down in memory. */
44931+ if (stack_shift) {
44932+ ret = shift_arg_pages(vma, stack_shift);
44933+ if (ret)
44934+ goto out_unlock;
44935+ }
44936+
44937 vm_flags = VM_STACK_FLAGS;
44938
44939+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44940+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44941+ vm_flags &= ~VM_EXEC;
44942+
44943+#ifdef CONFIG_PAX_MPROTECT
44944+ if (mm->pax_flags & MF_PAX_MPROTECT)
44945+ vm_flags &= ~VM_MAYEXEC;
44946+#endif
44947+
44948+ }
44949+#endif
44950+
44951 /*
44952 * Adjust stack execute permissions; explicitly enable for
44953 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44954@@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44955 goto out_unlock;
44956 BUG_ON(prev != vma);
44957
44958- /* Move stack pages down in memory. */
44959- if (stack_shift) {
44960- ret = shift_arg_pages(vma, stack_shift);
44961- if (ret)
44962- goto out_unlock;
44963- }
44964-
44965 /* mprotect_fixup is overkill to remove the temporary stack flags */
44966 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44967
44968@@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
44969 old_fs = get_fs();
44970 set_fs(get_ds());
44971 /* The cast to a user pointer is valid due to the set_fs() */
44972- result = vfs_read(file, (void __user *)addr, count, &pos);
44973+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
44974 set_fs(old_fs);
44975 return result;
44976 }
44977@@ -1255,7 +1287,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44978 }
44979 rcu_read_unlock();
44980
44981- if (p->fs->users > n_fs) {
44982+ if (atomic_read(&p->fs->users) > n_fs) {
44983 bprm->unsafe |= LSM_UNSAFE_SHARE;
44984 } else {
44985 res = -EAGAIN;
44986@@ -1450,6 +1482,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44987
44988 EXPORT_SYMBOL(search_binary_handler);
44989
44990+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44991+static DEFINE_PER_CPU(u64, exec_counter);
44992+static int __init init_exec_counters(void)
44993+{
44994+ unsigned int cpu;
44995+
44996+ for_each_possible_cpu(cpu) {
44997+ per_cpu(exec_counter, cpu) = (u64)cpu;
44998+ }
44999+
45000+ return 0;
45001+}
45002+early_initcall(init_exec_counters);
45003+static inline void increment_exec_counter(void)
45004+{
45005+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
45006+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
45007+}
45008+#else
45009+static inline void increment_exec_counter(void) {}
45010+#endif
45011+
45012 /*
45013 * sys_execve() executes a new program.
45014 */
45015@@ -1458,6 +1512,11 @@ static int do_execve_common(const char *filename,
45016 struct user_arg_ptr envp,
45017 struct pt_regs *regs)
45018 {
45019+#ifdef CONFIG_GRKERNSEC
45020+ struct file *old_exec_file;
45021+ struct acl_subject_label *old_acl;
45022+ struct rlimit old_rlim[RLIM_NLIMITS];
45023+#endif
45024 struct linux_binprm *bprm;
45025 struct file *file;
45026 struct files_struct *displaced;
45027@@ -1465,6 +1524,8 @@ static int do_execve_common(const char *filename,
45028 int retval;
45029 const struct cred *cred = current_cred();
45030
45031+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
45032+
45033 /*
45034 * We move the actual failure in case of RLIMIT_NPROC excess from
45035 * set*uid() to execve() because too many poorly written programs
45036@@ -1505,12 +1566,27 @@ static int do_execve_common(const char *filename,
45037 if (IS_ERR(file))
45038 goto out_unmark;
45039
45040+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
45041+ retval = -EPERM;
45042+ goto out_file;
45043+ }
45044+
45045 sched_exec();
45046
45047 bprm->file = file;
45048 bprm->filename = filename;
45049 bprm->interp = filename;
45050
45051+ if (gr_process_user_ban()) {
45052+ retval = -EPERM;
45053+ goto out_file;
45054+ }
45055+
45056+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
45057+ retval = -EACCES;
45058+ goto out_file;
45059+ }
45060+
45061 retval = bprm_mm_init(bprm);
45062 if (retval)
45063 goto out_file;
45064@@ -1527,24 +1603,65 @@ static int do_execve_common(const char *filename,
45065 if (retval < 0)
45066 goto out;
45067
45068+#ifdef CONFIG_GRKERNSEC
45069+ old_acl = current->acl;
45070+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
45071+ old_exec_file = current->exec_file;
45072+ get_file(file);
45073+ current->exec_file = file;
45074+#endif
45075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45076+ /* limit suid stack to 8MB
45077+ we saved the old limits above and will restore them if this exec fails
45078+ */
45079+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
45080+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
45081+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
45082+#endif
45083+
45084+ if (!gr_tpe_allow(file)) {
45085+ retval = -EACCES;
45086+ goto out_fail;
45087+ }
45088+
45089+ if (gr_check_crash_exec(file)) {
45090+ retval = -EACCES;
45091+ goto out_fail;
45092+ }
45093+
45094+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
45095+ bprm->unsafe);
45096+ if (retval < 0)
45097+ goto out_fail;
45098+
45099 retval = copy_strings_kernel(1, &bprm->filename, bprm);
45100 if (retval < 0)
45101- goto out;
45102+ goto out_fail;
45103
45104 bprm->exec = bprm->p;
45105 retval = copy_strings(bprm->envc, envp, bprm);
45106 if (retval < 0)
45107- goto out;
45108+ goto out_fail;
45109
45110 retval = copy_strings(bprm->argc, argv, bprm);
45111 if (retval < 0)
45112- goto out;
45113+ goto out_fail;
45114+
45115+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
45116+
45117+ gr_handle_exec_args(bprm, argv);
45118
45119 retval = search_binary_handler(bprm,regs);
45120 if (retval < 0)
45121- goto out;
45122+ goto out_fail;
45123+#ifdef CONFIG_GRKERNSEC
45124+ if (old_exec_file)
45125+ fput(old_exec_file);
45126+#endif
45127
45128 /* execve succeeded */
45129+
45130+ increment_exec_counter();
45131 current->fs->in_exec = 0;
45132 current->in_execve = 0;
45133 acct_update_integrals(current);
45134@@ -1553,6 +1670,14 @@ static int do_execve_common(const char *filename,
45135 put_files_struct(displaced);
45136 return retval;
45137
45138+out_fail:
45139+#ifdef CONFIG_GRKERNSEC
45140+ current->acl = old_acl;
45141+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
45142+ fput(current->exec_file);
45143+ current->exec_file = old_exec_file;
45144+#endif
45145+
45146 out:
45147 if (bprm->mm) {
45148 acct_arg_size(bprm, 0);
45149@@ -1626,7 +1751,7 @@ static int expand_corename(struct core_name *cn)
45150 {
45151 char *old_corename = cn->corename;
45152
45153- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
45154+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
45155 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
45156
45157 if (!cn->corename) {
45158@@ -1723,7 +1848,7 @@ static int format_corename(struct core_name *cn, long signr)
45159 int pid_in_pattern = 0;
45160 int err = 0;
45161
45162- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
45163+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
45164 cn->corename = kmalloc(cn->size, GFP_KERNEL);
45165 cn->used = 0;
45166
45167@@ -1820,6 +1945,228 @@ out:
45168 return ispipe;
45169 }
45170
45171+int pax_check_flags(unsigned long *flags)
45172+{
45173+ int retval = 0;
45174+
45175+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
45176+ if (*flags & MF_PAX_SEGMEXEC)
45177+ {
45178+ *flags &= ~MF_PAX_SEGMEXEC;
45179+ retval = -EINVAL;
45180+ }
45181+#endif
45182+
45183+ if ((*flags & MF_PAX_PAGEEXEC)
45184+
45185+#ifdef CONFIG_PAX_PAGEEXEC
45186+ && (*flags & MF_PAX_SEGMEXEC)
45187+#endif
45188+
45189+ )
45190+ {
45191+ *flags &= ~MF_PAX_PAGEEXEC;
45192+ retval = -EINVAL;
45193+ }
45194+
45195+ if ((*flags & MF_PAX_MPROTECT)
45196+
45197+#ifdef CONFIG_PAX_MPROTECT
45198+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45199+#endif
45200+
45201+ )
45202+ {
45203+ *flags &= ~MF_PAX_MPROTECT;
45204+ retval = -EINVAL;
45205+ }
45206+
45207+ if ((*flags & MF_PAX_EMUTRAMP)
45208+
45209+#ifdef CONFIG_PAX_EMUTRAMP
45210+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
45211+#endif
45212+
45213+ )
45214+ {
45215+ *flags &= ~MF_PAX_EMUTRAMP;
45216+ retval = -EINVAL;
45217+ }
45218+
45219+ return retval;
45220+}
45221+
45222+EXPORT_SYMBOL(pax_check_flags);
45223+
45224+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45225+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
45226+{
45227+ struct task_struct *tsk = current;
45228+ struct mm_struct *mm = current->mm;
45229+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
45230+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
45231+ char *path_exec = NULL;
45232+ char *path_fault = NULL;
45233+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
45234+
45235+ if (buffer_exec && buffer_fault) {
45236+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
45237+
45238+ down_read(&mm->mmap_sem);
45239+ vma = mm->mmap;
45240+ while (vma && (!vma_exec || !vma_fault)) {
45241+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
45242+ vma_exec = vma;
45243+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
45244+ vma_fault = vma;
45245+ vma = vma->vm_next;
45246+ }
45247+ if (vma_exec) {
45248+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
45249+ if (IS_ERR(path_exec))
45250+ path_exec = "<path too long>";
45251+ else {
45252+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
45253+ if (path_exec) {
45254+ *path_exec = 0;
45255+ path_exec = buffer_exec;
45256+ } else
45257+ path_exec = "<path too long>";
45258+ }
45259+ }
45260+ if (vma_fault) {
45261+ start = vma_fault->vm_start;
45262+ end = vma_fault->vm_end;
45263+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
45264+ if (vma_fault->vm_file) {
45265+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
45266+ if (IS_ERR(path_fault))
45267+ path_fault = "<path too long>";
45268+ else {
45269+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
45270+ if (path_fault) {
45271+ *path_fault = 0;
45272+ path_fault = buffer_fault;
45273+ } else
45274+ path_fault = "<path too long>";
45275+ }
45276+ } else
45277+ path_fault = "<anonymous mapping>";
45278+ }
45279+ up_read(&mm->mmap_sem);
45280+ }
45281+ if (tsk->signal->curr_ip)
45282+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
45283+ else
45284+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
45285+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
45286+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
45287+ task_uid(tsk), task_euid(tsk), pc, sp);
45288+ free_page((unsigned long)buffer_exec);
45289+ free_page((unsigned long)buffer_fault);
45290+ pax_report_insns(regs, pc, sp);
45291+ do_coredump(SIGKILL, SIGKILL, regs);
45292+}
45293+#endif
45294+
45295+#ifdef CONFIG_PAX_REFCOUNT
45296+void pax_report_refcount_overflow(struct pt_regs *regs)
45297+{
45298+ if (current->signal->curr_ip)
45299+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45300+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
45301+ else
45302+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45303+ current->comm, task_pid_nr(current), current_uid(), current_euid());
45304+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
45305+ show_regs(regs);
45306+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
45307+}
45308+#endif
45309+
45310+#ifdef CONFIG_PAX_USERCOPY
45311+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
45312+int object_is_on_stack(const void *obj, unsigned long len)
45313+{
45314+ const void * const stack = task_stack_page(current);
45315+ const void * const stackend = stack + THREAD_SIZE;
45316+
45317+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45318+ const void *frame = NULL;
45319+ const void *oldframe;
45320+#endif
45321+
45322+ if (obj + len < obj)
45323+ return -1;
45324+
45325+ if (obj + len <= stack || stackend <= obj)
45326+ return 0;
45327+
45328+ if (obj < stack || stackend < obj + len)
45329+ return -1;
45330+
45331+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45332+ oldframe = __builtin_frame_address(1);
45333+ if (oldframe)
45334+ frame = __builtin_frame_address(2);
45335+ /*
45336+ low ----------------------------------------------> high
45337+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
45338+ ^----------------^
45339+ allow copies only within here
45340+ */
45341+ while (stack <= frame && frame < stackend) {
45342+ /* if obj + len extends past the last frame, this
45343+ check won't pass and the next frame will be 0,
45344+ causing us to bail out and correctly report
45345+ the copy as invalid
45346+ */
45347+ if (obj + len <= frame)
45348+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
45349+ oldframe = frame;
45350+ frame = *(const void * const *)frame;
45351+ }
45352+ return -1;
45353+#else
45354+ return 1;
45355+#endif
45356+}
45357+
45358+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
45359+{
45360+ if (current->signal->curr_ip)
45361+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45362+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45363+ else
45364+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45365+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45366+ dump_stack();
45367+ gr_handle_kernel_exploit();
45368+ do_group_exit(SIGKILL);
45369+}
45370+#endif
45371+
45372+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45373+void pax_track_stack(void)
45374+{
45375+ unsigned long sp = (unsigned long)&sp;
45376+ if (sp < current_thread_info()->lowest_stack &&
45377+ sp > (unsigned long)task_stack_page(current))
45378+ current_thread_info()->lowest_stack = sp;
45379+}
45380+EXPORT_SYMBOL(pax_track_stack);
45381+#endif
45382+
45383+#ifdef CONFIG_PAX_SIZE_OVERFLOW
45384+void report_size_overflow(const char *file, unsigned int line, const char *func)
45385+{
45386+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
45387+ dump_stack();
45388+ do_group_exit(SIGKILL);
45389+}
45390+EXPORT_SYMBOL(report_size_overflow);
45391+#endif
45392+
45393 static int zap_process(struct task_struct *start, int exit_code)
45394 {
45395 struct task_struct *t;
45396@@ -2017,17 +2364,17 @@ static void wait_for_dump_helpers(struct file *file)
45397 pipe = file->f_path.dentry->d_inode->i_pipe;
45398
45399 pipe_lock(pipe);
45400- pipe->readers++;
45401- pipe->writers--;
45402+ atomic_inc(&pipe->readers);
45403+ atomic_dec(&pipe->writers);
45404
45405- while ((pipe->readers > 1) && (!signal_pending(current))) {
45406+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45407 wake_up_interruptible_sync(&pipe->wait);
45408 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45409 pipe_wait(pipe);
45410 }
45411
45412- pipe->readers--;
45413- pipe->writers++;
45414+ atomic_dec(&pipe->readers);
45415+ atomic_inc(&pipe->writers);
45416 pipe_unlock(pipe);
45417
45418 }
45419@@ -2088,7 +2435,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45420 int retval = 0;
45421 int flag = 0;
45422 int ispipe;
45423- static atomic_t core_dump_count = ATOMIC_INIT(0);
45424+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45425 struct coredump_params cprm = {
45426 .signr = signr,
45427 .regs = regs,
45428@@ -2103,6 +2450,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45429
45430 audit_core_dumps(signr);
45431
45432+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45433+ gr_handle_brute_attach(current, cprm.mm_flags);
45434+
45435 binfmt = mm->binfmt;
45436 if (!binfmt || !binfmt->core_dump)
45437 goto fail;
45438@@ -2170,7 +2520,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45439 }
45440 cprm.limit = RLIM_INFINITY;
45441
45442- dump_count = atomic_inc_return(&core_dump_count);
45443+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
45444 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45445 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45446 task_tgid_vnr(current), current->comm);
45447@@ -2197,6 +2547,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45448 } else {
45449 struct inode *inode;
45450
45451+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45452+
45453 if (cprm.limit < binfmt->min_coredump)
45454 goto fail_unlock;
45455
45456@@ -2240,7 +2592,7 @@ close_fail:
45457 filp_close(cprm.file, NULL);
45458 fail_dropcount:
45459 if (ispipe)
45460- atomic_dec(&core_dump_count);
45461+ atomic_dec_unchecked(&core_dump_count);
45462 fail_unlock:
45463 kfree(cn.corename);
45464 fail_corename:
45465@@ -2259,7 +2611,7 @@ fail:
45466 */
45467 int dump_write(struct file *file, const void *addr, int nr)
45468 {
45469- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45470+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45471 }
45472 EXPORT_SYMBOL(dump_write);
45473
45474diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
45475index a8cbe1b..fed04cb 100644
45476--- a/fs/ext2/balloc.c
45477+++ b/fs/ext2/balloc.c
45478@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45479
45480 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45481 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45482- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45483+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
45484 sbi->s_resuid != current_fsuid() &&
45485 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45486 return 0;
45487diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
45488index a203892..4e64db5 100644
45489--- a/fs/ext3/balloc.c
45490+++ b/fs/ext3/balloc.c
45491@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45492
45493 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45494 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45495- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45496+ if (free_blocks < root_blocks + 1 &&
45497 !use_reservation && sbi->s_resuid != current_fsuid() &&
45498- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45499+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
45500+ !capable_nolog(CAP_SYS_RESOURCE)) {
45501 return 0;
45502 }
45503 return 1;
45504diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
45505index f9e2cd8..bfdc476 100644
45506--- a/fs/ext4/balloc.c
45507+++ b/fs/ext4/balloc.c
45508@@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
45509 /* Hm, nope. Are (enough) root reserved clusters available? */
45510 if (sbi->s_resuid == current_fsuid() ||
45511 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
45512- capable(CAP_SYS_RESOURCE) ||
45513- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45514+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45515+ capable_nolog(CAP_SYS_RESOURCE)) {
45516
45517 if (free_clusters >= (nclusters + dirty_clusters))
45518 return 1;
45519diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
45520index 9983ba8..2a5272c 100644
45521--- a/fs/ext4/ext4.h
45522+++ b/fs/ext4/ext4.h
45523@@ -1217,19 +1217,19 @@ struct ext4_sb_info {
45524 unsigned long s_mb_last_start;
45525
45526 /* stats for buddy allocator */
45527- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45528- atomic_t s_bal_success; /* we found long enough chunks */
45529- atomic_t s_bal_allocated; /* in blocks */
45530- atomic_t s_bal_ex_scanned; /* total extents scanned */
45531- atomic_t s_bal_goals; /* goal hits */
45532- atomic_t s_bal_breaks; /* too long searches */
45533- atomic_t s_bal_2orders; /* 2^order hits */
45534+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45535+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45536+ atomic_unchecked_t s_bal_allocated; /* in blocks */
45537+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45538+ atomic_unchecked_t s_bal_goals; /* goal hits */
45539+ atomic_unchecked_t s_bal_breaks; /* too long searches */
45540+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45541 spinlock_t s_bal_lock;
45542 unsigned long s_mb_buddies_generated;
45543 unsigned long long s_mb_generation_time;
45544- atomic_t s_mb_lost_chunks;
45545- atomic_t s_mb_preallocated;
45546- atomic_t s_mb_discarded;
45547+ atomic_unchecked_t s_mb_lost_chunks;
45548+ atomic_unchecked_t s_mb_preallocated;
45549+ atomic_unchecked_t s_mb_discarded;
45550 atomic_t s_lock_busy;
45551
45552 /* locality groups */
45553diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
45554index cb990b2..4820141 100644
45555--- a/fs/ext4/mballoc.c
45556+++ b/fs/ext4/mballoc.c
45557@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45558 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45559
45560 if (EXT4_SB(sb)->s_mb_stats)
45561- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45562+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45563
45564 break;
45565 }
45566@@ -2088,7 +2088,7 @@ repeat:
45567 ac->ac_status = AC_STATUS_CONTINUE;
45568 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45569 cr = 3;
45570- atomic_inc(&sbi->s_mb_lost_chunks);
45571+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45572 goto repeat;
45573 }
45574 }
45575@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
45576 if (sbi->s_mb_stats) {
45577 ext4_msg(sb, KERN_INFO,
45578 "mballoc: %u blocks %u reqs (%u success)",
45579- atomic_read(&sbi->s_bal_allocated),
45580- atomic_read(&sbi->s_bal_reqs),
45581- atomic_read(&sbi->s_bal_success));
45582+ atomic_read_unchecked(&sbi->s_bal_allocated),
45583+ atomic_read_unchecked(&sbi->s_bal_reqs),
45584+ atomic_read_unchecked(&sbi->s_bal_success));
45585 ext4_msg(sb, KERN_INFO,
45586 "mballoc: %u extents scanned, %u goal hits, "
45587 "%u 2^N hits, %u breaks, %u lost",
45588- atomic_read(&sbi->s_bal_ex_scanned),
45589- atomic_read(&sbi->s_bal_goals),
45590- atomic_read(&sbi->s_bal_2orders),
45591- atomic_read(&sbi->s_bal_breaks),
45592- atomic_read(&sbi->s_mb_lost_chunks));
45593+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45594+ atomic_read_unchecked(&sbi->s_bal_goals),
45595+ atomic_read_unchecked(&sbi->s_bal_2orders),
45596+ atomic_read_unchecked(&sbi->s_bal_breaks),
45597+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45598 ext4_msg(sb, KERN_INFO,
45599 "mballoc: %lu generated and it took %Lu",
45600 sbi->s_mb_buddies_generated,
45601 sbi->s_mb_generation_time);
45602 ext4_msg(sb, KERN_INFO,
45603 "mballoc: %u preallocated, %u discarded",
45604- atomic_read(&sbi->s_mb_preallocated),
45605- atomic_read(&sbi->s_mb_discarded));
45606+ atomic_read_unchecked(&sbi->s_mb_preallocated),
45607+ atomic_read_unchecked(&sbi->s_mb_discarded));
45608 }
45609
45610 free_percpu(sbi->s_locality_groups);
45611@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45612 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45613
45614 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45615- atomic_inc(&sbi->s_bal_reqs);
45616- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45617+ atomic_inc_unchecked(&sbi->s_bal_reqs);
45618+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45619 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45620- atomic_inc(&sbi->s_bal_success);
45621- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45622+ atomic_inc_unchecked(&sbi->s_bal_success);
45623+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45624 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45625 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45626- atomic_inc(&sbi->s_bal_goals);
45627+ atomic_inc_unchecked(&sbi->s_bal_goals);
45628 if (ac->ac_found > sbi->s_mb_max_to_scan)
45629- atomic_inc(&sbi->s_bal_breaks);
45630+ atomic_inc_unchecked(&sbi->s_bal_breaks);
45631 }
45632
45633 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45634@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45635 trace_ext4_mb_new_inode_pa(ac, pa);
45636
45637 ext4_mb_use_inode_pa(ac, pa);
45638- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45639+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45640
45641 ei = EXT4_I(ac->ac_inode);
45642 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45643@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45644 trace_ext4_mb_new_group_pa(ac, pa);
45645
45646 ext4_mb_use_group_pa(ac, pa);
45647- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45648+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45649
45650 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45651 lg = ac->ac_lg;
45652@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45653 * from the bitmap and continue.
45654 */
45655 }
45656- atomic_add(free, &sbi->s_mb_discarded);
45657+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
45658
45659 return err;
45660 }
45661@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45662 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45663 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45664 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45665- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45666+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45667 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45668
45669 return 0;
45670diff --git a/fs/fcntl.c b/fs/fcntl.c
45671index 22764c7..86372c9 100644
45672--- a/fs/fcntl.c
45673+++ b/fs/fcntl.c
45674@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45675 if (err)
45676 return err;
45677
45678+ if (gr_handle_chroot_fowner(pid, type))
45679+ return -ENOENT;
45680+ if (gr_check_protected_task_fowner(pid, type))
45681+ return -EACCES;
45682+
45683 f_modown(filp, pid, type, force);
45684 return 0;
45685 }
45686@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45687
45688 static int f_setown_ex(struct file *filp, unsigned long arg)
45689 {
45690- struct f_owner_ex * __user owner_p = (void * __user)arg;
45691+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45692 struct f_owner_ex owner;
45693 struct pid *pid;
45694 int type;
45695@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45696
45697 static int f_getown_ex(struct file *filp, unsigned long arg)
45698 {
45699- struct f_owner_ex * __user owner_p = (void * __user)arg;
45700+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45701 struct f_owner_ex owner;
45702 int ret = 0;
45703
45704@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45705 switch (cmd) {
45706 case F_DUPFD:
45707 case F_DUPFD_CLOEXEC:
45708+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45709 if (arg >= rlimit(RLIMIT_NOFILE))
45710 break;
45711 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45712diff --git a/fs/fifo.c b/fs/fifo.c
45713index b1a524d..4ee270e 100644
45714--- a/fs/fifo.c
45715+++ b/fs/fifo.c
45716@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45717 */
45718 filp->f_op = &read_pipefifo_fops;
45719 pipe->r_counter++;
45720- if (pipe->readers++ == 0)
45721+ if (atomic_inc_return(&pipe->readers) == 1)
45722 wake_up_partner(inode);
45723
45724- if (!pipe->writers) {
45725+ if (!atomic_read(&pipe->writers)) {
45726 if ((filp->f_flags & O_NONBLOCK)) {
45727 /* suppress POLLHUP until we have
45728 * seen a writer */
45729@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45730 * errno=ENXIO when there is no process reading the FIFO.
45731 */
45732 ret = -ENXIO;
45733- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45734+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45735 goto err;
45736
45737 filp->f_op = &write_pipefifo_fops;
45738 pipe->w_counter++;
45739- if (!pipe->writers++)
45740+ if (atomic_inc_return(&pipe->writers) == 1)
45741 wake_up_partner(inode);
45742
45743- if (!pipe->readers) {
45744+ if (!atomic_read(&pipe->readers)) {
45745 wait_for_partner(inode, &pipe->r_counter);
45746 if (signal_pending(current))
45747 goto err_wr;
45748@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45749 */
45750 filp->f_op = &rdwr_pipefifo_fops;
45751
45752- pipe->readers++;
45753- pipe->writers++;
45754+ atomic_inc(&pipe->readers);
45755+ atomic_inc(&pipe->writers);
45756 pipe->r_counter++;
45757 pipe->w_counter++;
45758- if (pipe->readers == 1 || pipe->writers == 1)
45759+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45760 wake_up_partner(inode);
45761 break;
45762
45763@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45764 return 0;
45765
45766 err_rd:
45767- if (!--pipe->readers)
45768+ if (atomic_dec_and_test(&pipe->readers))
45769 wake_up_interruptible(&pipe->wait);
45770 ret = -ERESTARTSYS;
45771 goto err;
45772
45773 err_wr:
45774- if (!--pipe->writers)
45775+ if (atomic_dec_and_test(&pipe->writers))
45776 wake_up_interruptible(&pipe->wait);
45777 ret = -ERESTARTSYS;
45778 goto err;
45779
45780 err:
45781- if (!pipe->readers && !pipe->writers)
45782+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45783 free_pipe_info(inode);
45784
45785 err_nocleanup:
45786diff --git a/fs/file.c b/fs/file.c
45787index 4c6992d..104cdea 100644
45788--- a/fs/file.c
45789+++ b/fs/file.c
45790@@ -15,6 +15,7 @@
45791 #include <linux/slab.h>
45792 #include <linux/vmalloc.h>
45793 #include <linux/file.h>
45794+#include <linux/security.h>
45795 #include <linux/fdtable.h>
45796 #include <linux/bitops.h>
45797 #include <linux/interrupt.h>
45798@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
45799 * N.B. For clone tasks sharing a files structure, this test
45800 * will limit the total number of files that can be opened.
45801 */
45802+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45803 if (nr >= rlimit(RLIMIT_NOFILE))
45804 return -EMFILE;
45805
45806diff --git a/fs/filesystems.c b/fs/filesystems.c
45807index 96f2428..f5eeb8e 100644
45808--- a/fs/filesystems.c
45809+++ b/fs/filesystems.c
45810@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45811 int len = dot ? dot - name : strlen(name);
45812
45813 fs = __get_fs_type(name, len);
45814+
45815+#ifdef CONFIG_GRKERNSEC_MODHARDEN
45816+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45817+#else
45818 if (!fs && (request_module("%.*s", len, name) == 0))
45819+#endif
45820 fs = __get_fs_type(name, len);
45821
45822 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45823diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45824index 78b519c..a8b4979 100644
45825--- a/fs/fs_struct.c
45826+++ b/fs/fs_struct.c
45827@@ -4,6 +4,7 @@
45828 #include <linux/path.h>
45829 #include <linux/slab.h>
45830 #include <linux/fs_struct.h>
45831+#include <linux/grsecurity.h>
45832 #include "internal.h"
45833
45834 static inline void path_get_longterm(struct path *path)
45835@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45836 old_root = fs->root;
45837 fs->root = *path;
45838 path_get_longterm(path);
45839+ gr_set_chroot_entries(current, path);
45840 write_seqcount_end(&fs->seq);
45841 spin_unlock(&fs->lock);
45842 if (old_root.dentry)
45843@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45844 && fs->root.mnt == old_root->mnt) {
45845 path_get_longterm(new_root);
45846 fs->root = *new_root;
45847+ gr_set_chroot_entries(p, new_root);
45848 count++;
45849 }
45850 if (fs->pwd.dentry == old_root->dentry
45851@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45852 spin_lock(&fs->lock);
45853 write_seqcount_begin(&fs->seq);
45854 tsk->fs = NULL;
45855- kill = !--fs->users;
45856+ gr_clear_chroot_entries(tsk);
45857+ kill = !atomic_dec_return(&fs->users);
45858 write_seqcount_end(&fs->seq);
45859 spin_unlock(&fs->lock);
45860 task_unlock(tsk);
45861@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45862 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45863 /* We don't need to lock fs - think why ;-) */
45864 if (fs) {
45865- fs->users = 1;
45866+ atomic_set(&fs->users, 1);
45867 fs->in_exec = 0;
45868 spin_lock_init(&fs->lock);
45869 seqcount_init(&fs->seq);
45870@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45871 spin_lock(&old->lock);
45872 fs->root = old->root;
45873 path_get_longterm(&fs->root);
45874+ /* instead of calling gr_set_chroot_entries here,
45875+ we call it from every caller of this function
45876+ */
45877 fs->pwd = old->pwd;
45878 path_get_longterm(&fs->pwd);
45879 spin_unlock(&old->lock);
45880@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45881
45882 task_lock(current);
45883 spin_lock(&fs->lock);
45884- kill = !--fs->users;
45885+ kill = !atomic_dec_return(&fs->users);
45886 current->fs = new_fs;
45887+ gr_set_chroot_entries(current, &new_fs->root);
45888 spin_unlock(&fs->lock);
45889 task_unlock(current);
45890
45891@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45892
45893 int current_umask(void)
45894 {
45895- return current->fs->umask;
45896+ return current->fs->umask | gr_acl_umask();
45897 }
45898 EXPORT_SYMBOL(current_umask);
45899
45900 /* to be mentioned only in INIT_TASK */
45901 struct fs_struct init_fs = {
45902- .users = 1,
45903+ .users = ATOMIC_INIT(1),
45904 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45905 .seq = SEQCNT_ZERO,
45906 .umask = 0022,
45907@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45908 task_lock(current);
45909
45910 spin_lock(&init_fs.lock);
45911- init_fs.users++;
45912+ atomic_inc(&init_fs.users);
45913 spin_unlock(&init_fs.lock);
45914
45915 spin_lock(&fs->lock);
45916 current->fs = &init_fs;
45917- kill = !--fs->users;
45918+ gr_set_chroot_entries(current, &current->fs->root);
45919+ kill = !atomic_dec_return(&fs->users);
45920 spin_unlock(&fs->lock);
45921
45922 task_unlock(current);
45923diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45924index 9905350..02eaec4 100644
45925--- a/fs/fscache/cookie.c
45926+++ b/fs/fscache/cookie.c
45927@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45928 parent ? (char *) parent->def->name : "<no-parent>",
45929 def->name, netfs_data);
45930
45931- fscache_stat(&fscache_n_acquires);
45932+ fscache_stat_unchecked(&fscache_n_acquires);
45933
45934 /* if there's no parent cookie, then we don't create one here either */
45935 if (!parent) {
45936- fscache_stat(&fscache_n_acquires_null);
45937+ fscache_stat_unchecked(&fscache_n_acquires_null);
45938 _leave(" [no parent]");
45939 return NULL;
45940 }
45941@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45942 /* allocate and initialise a cookie */
45943 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45944 if (!cookie) {
45945- fscache_stat(&fscache_n_acquires_oom);
45946+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45947 _leave(" [ENOMEM]");
45948 return NULL;
45949 }
45950@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45951
45952 switch (cookie->def->type) {
45953 case FSCACHE_COOKIE_TYPE_INDEX:
45954- fscache_stat(&fscache_n_cookie_index);
45955+ fscache_stat_unchecked(&fscache_n_cookie_index);
45956 break;
45957 case FSCACHE_COOKIE_TYPE_DATAFILE:
45958- fscache_stat(&fscache_n_cookie_data);
45959+ fscache_stat_unchecked(&fscache_n_cookie_data);
45960 break;
45961 default:
45962- fscache_stat(&fscache_n_cookie_special);
45963+ fscache_stat_unchecked(&fscache_n_cookie_special);
45964 break;
45965 }
45966
45967@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45968 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45969 atomic_dec(&parent->n_children);
45970 __fscache_cookie_put(cookie);
45971- fscache_stat(&fscache_n_acquires_nobufs);
45972+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45973 _leave(" = NULL");
45974 return NULL;
45975 }
45976 }
45977
45978- fscache_stat(&fscache_n_acquires_ok);
45979+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45980 _leave(" = %p", cookie);
45981 return cookie;
45982 }
45983@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45984 cache = fscache_select_cache_for_object(cookie->parent);
45985 if (!cache) {
45986 up_read(&fscache_addremove_sem);
45987- fscache_stat(&fscache_n_acquires_no_cache);
45988+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45989 _leave(" = -ENOMEDIUM [no cache]");
45990 return -ENOMEDIUM;
45991 }
45992@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45993 object = cache->ops->alloc_object(cache, cookie);
45994 fscache_stat_d(&fscache_n_cop_alloc_object);
45995 if (IS_ERR(object)) {
45996- fscache_stat(&fscache_n_object_no_alloc);
45997+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45998 ret = PTR_ERR(object);
45999 goto error;
46000 }
46001
46002- fscache_stat(&fscache_n_object_alloc);
46003+ fscache_stat_unchecked(&fscache_n_object_alloc);
46004
46005 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
46006
46007@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
46008 struct fscache_object *object;
46009 struct hlist_node *_p;
46010
46011- fscache_stat(&fscache_n_updates);
46012+ fscache_stat_unchecked(&fscache_n_updates);
46013
46014 if (!cookie) {
46015- fscache_stat(&fscache_n_updates_null);
46016+ fscache_stat_unchecked(&fscache_n_updates_null);
46017 _leave(" [no cookie]");
46018 return;
46019 }
46020@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46021 struct fscache_object *object;
46022 unsigned long event;
46023
46024- fscache_stat(&fscache_n_relinquishes);
46025+ fscache_stat_unchecked(&fscache_n_relinquishes);
46026 if (retire)
46027- fscache_stat(&fscache_n_relinquishes_retire);
46028+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
46029
46030 if (!cookie) {
46031- fscache_stat(&fscache_n_relinquishes_null);
46032+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
46033 _leave(" [no cookie]");
46034 return;
46035 }
46036@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
46037
46038 /* wait for the cookie to finish being instantiated (or to fail) */
46039 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
46040- fscache_stat(&fscache_n_relinquishes_waitcrt);
46041+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
46042 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
46043 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
46044 }
46045diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
46046index f6aad48..88dcf26 100644
46047--- a/fs/fscache/internal.h
46048+++ b/fs/fscache/internal.h
46049@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
46050 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
46051 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
46052
46053-extern atomic_t fscache_n_op_pend;
46054-extern atomic_t fscache_n_op_run;
46055-extern atomic_t fscache_n_op_enqueue;
46056-extern atomic_t fscache_n_op_deferred_release;
46057-extern atomic_t fscache_n_op_release;
46058-extern atomic_t fscache_n_op_gc;
46059-extern atomic_t fscache_n_op_cancelled;
46060-extern atomic_t fscache_n_op_rejected;
46061+extern atomic_unchecked_t fscache_n_op_pend;
46062+extern atomic_unchecked_t fscache_n_op_run;
46063+extern atomic_unchecked_t fscache_n_op_enqueue;
46064+extern atomic_unchecked_t fscache_n_op_deferred_release;
46065+extern atomic_unchecked_t fscache_n_op_release;
46066+extern atomic_unchecked_t fscache_n_op_gc;
46067+extern atomic_unchecked_t fscache_n_op_cancelled;
46068+extern atomic_unchecked_t fscache_n_op_rejected;
46069
46070-extern atomic_t fscache_n_attr_changed;
46071-extern atomic_t fscache_n_attr_changed_ok;
46072-extern atomic_t fscache_n_attr_changed_nobufs;
46073-extern atomic_t fscache_n_attr_changed_nomem;
46074-extern atomic_t fscache_n_attr_changed_calls;
46075+extern atomic_unchecked_t fscache_n_attr_changed;
46076+extern atomic_unchecked_t fscache_n_attr_changed_ok;
46077+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
46078+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
46079+extern atomic_unchecked_t fscache_n_attr_changed_calls;
46080
46081-extern atomic_t fscache_n_allocs;
46082-extern atomic_t fscache_n_allocs_ok;
46083-extern atomic_t fscache_n_allocs_wait;
46084-extern atomic_t fscache_n_allocs_nobufs;
46085-extern atomic_t fscache_n_allocs_intr;
46086-extern atomic_t fscache_n_allocs_object_dead;
46087-extern atomic_t fscache_n_alloc_ops;
46088-extern atomic_t fscache_n_alloc_op_waits;
46089+extern atomic_unchecked_t fscache_n_allocs;
46090+extern atomic_unchecked_t fscache_n_allocs_ok;
46091+extern atomic_unchecked_t fscache_n_allocs_wait;
46092+extern atomic_unchecked_t fscache_n_allocs_nobufs;
46093+extern atomic_unchecked_t fscache_n_allocs_intr;
46094+extern atomic_unchecked_t fscache_n_allocs_object_dead;
46095+extern atomic_unchecked_t fscache_n_alloc_ops;
46096+extern atomic_unchecked_t fscache_n_alloc_op_waits;
46097
46098-extern atomic_t fscache_n_retrievals;
46099-extern atomic_t fscache_n_retrievals_ok;
46100-extern atomic_t fscache_n_retrievals_wait;
46101-extern atomic_t fscache_n_retrievals_nodata;
46102-extern atomic_t fscache_n_retrievals_nobufs;
46103-extern atomic_t fscache_n_retrievals_intr;
46104-extern atomic_t fscache_n_retrievals_nomem;
46105-extern atomic_t fscache_n_retrievals_object_dead;
46106-extern atomic_t fscache_n_retrieval_ops;
46107-extern atomic_t fscache_n_retrieval_op_waits;
46108+extern atomic_unchecked_t fscache_n_retrievals;
46109+extern atomic_unchecked_t fscache_n_retrievals_ok;
46110+extern atomic_unchecked_t fscache_n_retrievals_wait;
46111+extern atomic_unchecked_t fscache_n_retrievals_nodata;
46112+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
46113+extern atomic_unchecked_t fscache_n_retrievals_intr;
46114+extern atomic_unchecked_t fscache_n_retrievals_nomem;
46115+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
46116+extern atomic_unchecked_t fscache_n_retrieval_ops;
46117+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
46118
46119-extern atomic_t fscache_n_stores;
46120-extern atomic_t fscache_n_stores_ok;
46121-extern atomic_t fscache_n_stores_again;
46122-extern atomic_t fscache_n_stores_nobufs;
46123-extern atomic_t fscache_n_stores_oom;
46124-extern atomic_t fscache_n_store_ops;
46125-extern atomic_t fscache_n_store_calls;
46126-extern atomic_t fscache_n_store_pages;
46127-extern atomic_t fscache_n_store_radix_deletes;
46128-extern atomic_t fscache_n_store_pages_over_limit;
46129+extern atomic_unchecked_t fscache_n_stores;
46130+extern atomic_unchecked_t fscache_n_stores_ok;
46131+extern atomic_unchecked_t fscache_n_stores_again;
46132+extern atomic_unchecked_t fscache_n_stores_nobufs;
46133+extern atomic_unchecked_t fscache_n_stores_oom;
46134+extern atomic_unchecked_t fscache_n_store_ops;
46135+extern atomic_unchecked_t fscache_n_store_calls;
46136+extern atomic_unchecked_t fscache_n_store_pages;
46137+extern atomic_unchecked_t fscache_n_store_radix_deletes;
46138+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
46139
46140-extern atomic_t fscache_n_store_vmscan_not_storing;
46141-extern atomic_t fscache_n_store_vmscan_gone;
46142-extern atomic_t fscache_n_store_vmscan_busy;
46143-extern atomic_t fscache_n_store_vmscan_cancelled;
46144+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46145+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
46146+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
46147+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46148
46149-extern atomic_t fscache_n_marks;
46150-extern atomic_t fscache_n_uncaches;
46151+extern atomic_unchecked_t fscache_n_marks;
46152+extern atomic_unchecked_t fscache_n_uncaches;
46153
46154-extern atomic_t fscache_n_acquires;
46155-extern atomic_t fscache_n_acquires_null;
46156-extern atomic_t fscache_n_acquires_no_cache;
46157-extern atomic_t fscache_n_acquires_ok;
46158-extern atomic_t fscache_n_acquires_nobufs;
46159-extern atomic_t fscache_n_acquires_oom;
46160+extern atomic_unchecked_t fscache_n_acquires;
46161+extern atomic_unchecked_t fscache_n_acquires_null;
46162+extern atomic_unchecked_t fscache_n_acquires_no_cache;
46163+extern atomic_unchecked_t fscache_n_acquires_ok;
46164+extern atomic_unchecked_t fscache_n_acquires_nobufs;
46165+extern atomic_unchecked_t fscache_n_acquires_oom;
46166
46167-extern atomic_t fscache_n_updates;
46168-extern atomic_t fscache_n_updates_null;
46169-extern atomic_t fscache_n_updates_run;
46170+extern atomic_unchecked_t fscache_n_updates;
46171+extern atomic_unchecked_t fscache_n_updates_null;
46172+extern atomic_unchecked_t fscache_n_updates_run;
46173
46174-extern atomic_t fscache_n_relinquishes;
46175-extern atomic_t fscache_n_relinquishes_null;
46176-extern atomic_t fscache_n_relinquishes_waitcrt;
46177-extern atomic_t fscache_n_relinquishes_retire;
46178+extern atomic_unchecked_t fscache_n_relinquishes;
46179+extern atomic_unchecked_t fscache_n_relinquishes_null;
46180+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46181+extern atomic_unchecked_t fscache_n_relinquishes_retire;
46182
46183-extern atomic_t fscache_n_cookie_index;
46184-extern atomic_t fscache_n_cookie_data;
46185-extern atomic_t fscache_n_cookie_special;
46186+extern atomic_unchecked_t fscache_n_cookie_index;
46187+extern atomic_unchecked_t fscache_n_cookie_data;
46188+extern atomic_unchecked_t fscache_n_cookie_special;
46189
46190-extern atomic_t fscache_n_object_alloc;
46191-extern atomic_t fscache_n_object_no_alloc;
46192-extern atomic_t fscache_n_object_lookups;
46193-extern atomic_t fscache_n_object_lookups_negative;
46194-extern atomic_t fscache_n_object_lookups_positive;
46195-extern atomic_t fscache_n_object_lookups_timed_out;
46196-extern atomic_t fscache_n_object_created;
46197-extern atomic_t fscache_n_object_avail;
46198-extern atomic_t fscache_n_object_dead;
46199+extern atomic_unchecked_t fscache_n_object_alloc;
46200+extern atomic_unchecked_t fscache_n_object_no_alloc;
46201+extern atomic_unchecked_t fscache_n_object_lookups;
46202+extern atomic_unchecked_t fscache_n_object_lookups_negative;
46203+extern atomic_unchecked_t fscache_n_object_lookups_positive;
46204+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
46205+extern atomic_unchecked_t fscache_n_object_created;
46206+extern atomic_unchecked_t fscache_n_object_avail;
46207+extern atomic_unchecked_t fscache_n_object_dead;
46208
46209-extern atomic_t fscache_n_checkaux_none;
46210-extern atomic_t fscache_n_checkaux_okay;
46211-extern atomic_t fscache_n_checkaux_update;
46212-extern atomic_t fscache_n_checkaux_obsolete;
46213+extern atomic_unchecked_t fscache_n_checkaux_none;
46214+extern atomic_unchecked_t fscache_n_checkaux_okay;
46215+extern atomic_unchecked_t fscache_n_checkaux_update;
46216+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
46217
46218 extern atomic_t fscache_n_cop_alloc_object;
46219 extern atomic_t fscache_n_cop_lookup_object;
46220@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
46221 atomic_inc(stat);
46222 }
46223
46224+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
46225+{
46226+ atomic_inc_unchecked(stat);
46227+}
46228+
46229 static inline void fscache_stat_d(atomic_t *stat)
46230 {
46231 atomic_dec(stat);
46232@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
46233
46234 #define __fscache_stat(stat) (NULL)
46235 #define fscache_stat(stat) do {} while (0)
46236+#define fscache_stat_unchecked(stat) do {} while (0)
46237 #define fscache_stat_d(stat) do {} while (0)
46238 #endif
46239
46240diff --git a/fs/fscache/object.c b/fs/fscache/object.c
46241index b6b897c..0ffff9c 100644
46242--- a/fs/fscache/object.c
46243+++ b/fs/fscache/object.c
46244@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46245 /* update the object metadata on disk */
46246 case FSCACHE_OBJECT_UPDATING:
46247 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
46248- fscache_stat(&fscache_n_updates_run);
46249+ fscache_stat_unchecked(&fscache_n_updates_run);
46250 fscache_stat(&fscache_n_cop_update_object);
46251 object->cache->ops->update_object(object);
46252 fscache_stat_d(&fscache_n_cop_update_object);
46253@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46254 spin_lock(&object->lock);
46255 object->state = FSCACHE_OBJECT_DEAD;
46256 spin_unlock(&object->lock);
46257- fscache_stat(&fscache_n_object_dead);
46258+ fscache_stat_unchecked(&fscache_n_object_dead);
46259 goto terminal_transit;
46260
46261 /* handle the parent cache of this object being withdrawn from
46262@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46263 spin_lock(&object->lock);
46264 object->state = FSCACHE_OBJECT_DEAD;
46265 spin_unlock(&object->lock);
46266- fscache_stat(&fscache_n_object_dead);
46267+ fscache_stat_unchecked(&fscache_n_object_dead);
46268 goto terminal_transit;
46269
46270 /* complain about the object being woken up once it is
46271@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46272 parent->cookie->def->name, cookie->def->name,
46273 object->cache->tag->name);
46274
46275- fscache_stat(&fscache_n_object_lookups);
46276+ fscache_stat_unchecked(&fscache_n_object_lookups);
46277 fscache_stat(&fscache_n_cop_lookup_object);
46278 ret = object->cache->ops->lookup_object(object);
46279 fscache_stat_d(&fscache_n_cop_lookup_object);
46280@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46281 if (ret == -ETIMEDOUT) {
46282 /* probably stuck behind another object, so move this one to
46283 * the back of the queue */
46284- fscache_stat(&fscache_n_object_lookups_timed_out);
46285+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
46286 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46287 }
46288
46289@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
46290
46291 spin_lock(&object->lock);
46292 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46293- fscache_stat(&fscache_n_object_lookups_negative);
46294+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
46295
46296 /* transit here to allow write requests to begin stacking up
46297 * and read requests to begin returning ENODATA */
46298@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
46299 * result, in which case there may be data available */
46300 spin_lock(&object->lock);
46301 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46302- fscache_stat(&fscache_n_object_lookups_positive);
46303+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
46304
46305 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
46306
46307@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
46308 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46309 } else {
46310 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
46311- fscache_stat(&fscache_n_object_created);
46312+ fscache_stat_unchecked(&fscache_n_object_created);
46313
46314 object->state = FSCACHE_OBJECT_AVAILABLE;
46315 spin_unlock(&object->lock);
46316@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
46317 fscache_enqueue_dependents(object);
46318
46319 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
46320- fscache_stat(&fscache_n_object_avail);
46321+ fscache_stat_unchecked(&fscache_n_object_avail);
46322
46323 _leave("");
46324 }
46325@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46326 enum fscache_checkaux result;
46327
46328 if (!object->cookie->def->check_aux) {
46329- fscache_stat(&fscache_n_checkaux_none);
46330+ fscache_stat_unchecked(&fscache_n_checkaux_none);
46331 return FSCACHE_CHECKAUX_OKAY;
46332 }
46333
46334@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46335 switch (result) {
46336 /* entry okay as is */
46337 case FSCACHE_CHECKAUX_OKAY:
46338- fscache_stat(&fscache_n_checkaux_okay);
46339+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
46340 break;
46341
46342 /* entry requires update */
46343 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
46344- fscache_stat(&fscache_n_checkaux_update);
46345+ fscache_stat_unchecked(&fscache_n_checkaux_update);
46346 break;
46347
46348 /* entry requires deletion */
46349 case FSCACHE_CHECKAUX_OBSOLETE:
46350- fscache_stat(&fscache_n_checkaux_obsolete);
46351+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
46352 break;
46353
46354 default:
46355diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
46356index 30afdfa..2256596 100644
46357--- a/fs/fscache/operation.c
46358+++ b/fs/fscache/operation.c
46359@@ -17,7 +17,7 @@
46360 #include <linux/slab.h>
46361 #include "internal.h"
46362
46363-atomic_t fscache_op_debug_id;
46364+atomic_unchecked_t fscache_op_debug_id;
46365 EXPORT_SYMBOL(fscache_op_debug_id);
46366
46367 /**
46368@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46369 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46370 ASSERTCMP(atomic_read(&op->usage), >, 0);
46371
46372- fscache_stat(&fscache_n_op_enqueue);
46373+ fscache_stat_unchecked(&fscache_n_op_enqueue);
46374 switch (op->flags & FSCACHE_OP_TYPE) {
46375 case FSCACHE_OP_ASYNC:
46376 _debug("queue async");
46377@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46378 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46379 if (op->processor)
46380 fscache_enqueue_operation(op);
46381- fscache_stat(&fscache_n_op_run);
46382+ fscache_stat_unchecked(&fscache_n_op_run);
46383 }
46384
46385 /*
46386@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46387 if (object->n_ops > 1) {
46388 atomic_inc(&op->usage);
46389 list_add_tail(&op->pend_link, &object->pending_ops);
46390- fscache_stat(&fscache_n_op_pend);
46391+ fscache_stat_unchecked(&fscache_n_op_pend);
46392 } else if (!list_empty(&object->pending_ops)) {
46393 atomic_inc(&op->usage);
46394 list_add_tail(&op->pend_link, &object->pending_ops);
46395- fscache_stat(&fscache_n_op_pend);
46396+ fscache_stat_unchecked(&fscache_n_op_pend);
46397 fscache_start_operations(object);
46398 } else {
46399 ASSERTCMP(object->n_in_progress, ==, 0);
46400@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46401 object->n_exclusive++; /* reads and writes must wait */
46402 atomic_inc(&op->usage);
46403 list_add_tail(&op->pend_link, &object->pending_ops);
46404- fscache_stat(&fscache_n_op_pend);
46405+ fscache_stat_unchecked(&fscache_n_op_pend);
46406 ret = 0;
46407 } else {
46408 /* not allowed to submit ops in any other state */
46409@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46410 if (object->n_exclusive > 0) {
46411 atomic_inc(&op->usage);
46412 list_add_tail(&op->pend_link, &object->pending_ops);
46413- fscache_stat(&fscache_n_op_pend);
46414+ fscache_stat_unchecked(&fscache_n_op_pend);
46415 } else if (!list_empty(&object->pending_ops)) {
46416 atomic_inc(&op->usage);
46417 list_add_tail(&op->pend_link, &object->pending_ops);
46418- fscache_stat(&fscache_n_op_pend);
46419+ fscache_stat_unchecked(&fscache_n_op_pend);
46420 fscache_start_operations(object);
46421 } else {
46422 ASSERTCMP(object->n_exclusive, ==, 0);
46423@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46424 object->n_ops++;
46425 atomic_inc(&op->usage);
46426 list_add_tail(&op->pend_link, &object->pending_ops);
46427- fscache_stat(&fscache_n_op_pend);
46428+ fscache_stat_unchecked(&fscache_n_op_pend);
46429 ret = 0;
46430 } else if (object->state == FSCACHE_OBJECT_DYING ||
46431 object->state == FSCACHE_OBJECT_LC_DYING ||
46432 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46433- fscache_stat(&fscache_n_op_rejected);
46434+ fscache_stat_unchecked(&fscache_n_op_rejected);
46435 ret = -ENOBUFS;
46436 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46437 fscache_report_unexpected_submission(object, op, ostate);
46438@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46439
46440 ret = -EBUSY;
46441 if (!list_empty(&op->pend_link)) {
46442- fscache_stat(&fscache_n_op_cancelled);
46443+ fscache_stat_unchecked(&fscache_n_op_cancelled);
46444 list_del_init(&op->pend_link);
46445 object->n_ops--;
46446 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46447@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46448 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46449 BUG();
46450
46451- fscache_stat(&fscache_n_op_release);
46452+ fscache_stat_unchecked(&fscache_n_op_release);
46453
46454 if (op->release) {
46455 op->release(op);
46456@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46457 * lock, and defer it otherwise */
46458 if (!spin_trylock(&object->lock)) {
46459 _debug("defer put");
46460- fscache_stat(&fscache_n_op_deferred_release);
46461+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
46462
46463 cache = object->cache;
46464 spin_lock(&cache->op_gc_list_lock);
46465@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46466
46467 _debug("GC DEFERRED REL OBJ%x OP%x",
46468 object->debug_id, op->debug_id);
46469- fscache_stat(&fscache_n_op_gc);
46470+ fscache_stat_unchecked(&fscache_n_op_gc);
46471
46472 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46473
46474diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46475index 3f7a59b..cf196cc 100644
46476--- a/fs/fscache/page.c
46477+++ b/fs/fscache/page.c
46478@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46479 val = radix_tree_lookup(&cookie->stores, page->index);
46480 if (!val) {
46481 rcu_read_unlock();
46482- fscache_stat(&fscache_n_store_vmscan_not_storing);
46483+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46484 __fscache_uncache_page(cookie, page);
46485 return true;
46486 }
46487@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46488 spin_unlock(&cookie->stores_lock);
46489
46490 if (xpage) {
46491- fscache_stat(&fscache_n_store_vmscan_cancelled);
46492- fscache_stat(&fscache_n_store_radix_deletes);
46493+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46494+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46495 ASSERTCMP(xpage, ==, page);
46496 } else {
46497- fscache_stat(&fscache_n_store_vmscan_gone);
46498+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46499 }
46500
46501 wake_up_bit(&cookie->flags, 0);
46502@@ -107,7 +107,7 @@ page_busy:
46503 /* we might want to wait here, but that could deadlock the allocator as
46504 * the work threads writing to the cache may all end up sleeping
46505 * on memory allocation */
46506- fscache_stat(&fscache_n_store_vmscan_busy);
46507+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46508 return false;
46509 }
46510 EXPORT_SYMBOL(__fscache_maybe_release_page);
46511@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46512 FSCACHE_COOKIE_STORING_TAG);
46513 if (!radix_tree_tag_get(&cookie->stores, page->index,
46514 FSCACHE_COOKIE_PENDING_TAG)) {
46515- fscache_stat(&fscache_n_store_radix_deletes);
46516+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46517 xpage = radix_tree_delete(&cookie->stores, page->index);
46518 }
46519 spin_unlock(&cookie->stores_lock);
46520@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46521
46522 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46523
46524- fscache_stat(&fscache_n_attr_changed_calls);
46525+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46526
46527 if (fscache_object_is_active(object)) {
46528 fscache_stat(&fscache_n_cop_attr_changed);
46529@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46530
46531 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46532
46533- fscache_stat(&fscache_n_attr_changed);
46534+ fscache_stat_unchecked(&fscache_n_attr_changed);
46535
46536 op = kzalloc(sizeof(*op), GFP_KERNEL);
46537 if (!op) {
46538- fscache_stat(&fscache_n_attr_changed_nomem);
46539+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46540 _leave(" = -ENOMEM");
46541 return -ENOMEM;
46542 }
46543@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46544 if (fscache_submit_exclusive_op(object, op) < 0)
46545 goto nobufs;
46546 spin_unlock(&cookie->lock);
46547- fscache_stat(&fscache_n_attr_changed_ok);
46548+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46549 fscache_put_operation(op);
46550 _leave(" = 0");
46551 return 0;
46552@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46553 nobufs:
46554 spin_unlock(&cookie->lock);
46555 kfree(op);
46556- fscache_stat(&fscache_n_attr_changed_nobufs);
46557+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46558 _leave(" = %d", -ENOBUFS);
46559 return -ENOBUFS;
46560 }
46561@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46562 /* allocate a retrieval operation and attempt to submit it */
46563 op = kzalloc(sizeof(*op), GFP_NOIO);
46564 if (!op) {
46565- fscache_stat(&fscache_n_retrievals_nomem);
46566+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46567 return NULL;
46568 }
46569
46570@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46571 return 0;
46572 }
46573
46574- fscache_stat(&fscache_n_retrievals_wait);
46575+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
46576
46577 jif = jiffies;
46578 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46579 fscache_wait_bit_interruptible,
46580 TASK_INTERRUPTIBLE) != 0) {
46581- fscache_stat(&fscache_n_retrievals_intr);
46582+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46583 _leave(" = -ERESTARTSYS");
46584 return -ERESTARTSYS;
46585 }
46586@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46587 */
46588 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46589 struct fscache_retrieval *op,
46590- atomic_t *stat_op_waits,
46591- atomic_t *stat_object_dead)
46592+ atomic_unchecked_t *stat_op_waits,
46593+ atomic_unchecked_t *stat_object_dead)
46594 {
46595 int ret;
46596
46597@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46598 goto check_if_dead;
46599
46600 _debug(">>> WT");
46601- fscache_stat(stat_op_waits);
46602+ fscache_stat_unchecked(stat_op_waits);
46603 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46604 fscache_wait_bit_interruptible,
46605 TASK_INTERRUPTIBLE) < 0) {
46606@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46607
46608 check_if_dead:
46609 if (unlikely(fscache_object_is_dead(object))) {
46610- fscache_stat(stat_object_dead);
46611+ fscache_stat_unchecked(stat_object_dead);
46612 return -ENOBUFS;
46613 }
46614 return 0;
46615@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46616
46617 _enter("%p,%p,,,", cookie, page);
46618
46619- fscache_stat(&fscache_n_retrievals);
46620+ fscache_stat_unchecked(&fscache_n_retrievals);
46621
46622 if (hlist_empty(&cookie->backing_objects))
46623 goto nobufs;
46624@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46625 goto nobufs_unlock;
46626 spin_unlock(&cookie->lock);
46627
46628- fscache_stat(&fscache_n_retrieval_ops);
46629+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46630
46631 /* pin the netfs read context in case we need to do the actual netfs
46632 * read because we've encountered a cache read failure */
46633@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46634
46635 error:
46636 if (ret == -ENOMEM)
46637- fscache_stat(&fscache_n_retrievals_nomem);
46638+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46639 else if (ret == -ERESTARTSYS)
46640- fscache_stat(&fscache_n_retrievals_intr);
46641+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46642 else if (ret == -ENODATA)
46643- fscache_stat(&fscache_n_retrievals_nodata);
46644+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46645 else if (ret < 0)
46646- fscache_stat(&fscache_n_retrievals_nobufs);
46647+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46648 else
46649- fscache_stat(&fscache_n_retrievals_ok);
46650+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46651
46652 fscache_put_retrieval(op);
46653 _leave(" = %d", ret);
46654@@ -429,7 +429,7 @@ nobufs_unlock:
46655 spin_unlock(&cookie->lock);
46656 kfree(op);
46657 nobufs:
46658- fscache_stat(&fscache_n_retrievals_nobufs);
46659+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46660 _leave(" = -ENOBUFS");
46661 return -ENOBUFS;
46662 }
46663@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46664
46665 _enter("%p,,%d,,,", cookie, *nr_pages);
46666
46667- fscache_stat(&fscache_n_retrievals);
46668+ fscache_stat_unchecked(&fscache_n_retrievals);
46669
46670 if (hlist_empty(&cookie->backing_objects))
46671 goto nobufs;
46672@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46673 goto nobufs_unlock;
46674 spin_unlock(&cookie->lock);
46675
46676- fscache_stat(&fscache_n_retrieval_ops);
46677+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46678
46679 /* pin the netfs read context in case we need to do the actual netfs
46680 * read because we've encountered a cache read failure */
46681@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46682
46683 error:
46684 if (ret == -ENOMEM)
46685- fscache_stat(&fscache_n_retrievals_nomem);
46686+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46687 else if (ret == -ERESTARTSYS)
46688- fscache_stat(&fscache_n_retrievals_intr);
46689+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46690 else if (ret == -ENODATA)
46691- fscache_stat(&fscache_n_retrievals_nodata);
46692+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46693 else if (ret < 0)
46694- fscache_stat(&fscache_n_retrievals_nobufs);
46695+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46696 else
46697- fscache_stat(&fscache_n_retrievals_ok);
46698+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46699
46700 fscache_put_retrieval(op);
46701 _leave(" = %d", ret);
46702@@ -545,7 +545,7 @@ nobufs_unlock:
46703 spin_unlock(&cookie->lock);
46704 kfree(op);
46705 nobufs:
46706- fscache_stat(&fscache_n_retrievals_nobufs);
46707+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46708 _leave(" = -ENOBUFS");
46709 return -ENOBUFS;
46710 }
46711@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46712
46713 _enter("%p,%p,,,", cookie, page);
46714
46715- fscache_stat(&fscache_n_allocs);
46716+ fscache_stat_unchecked(&fscache_n_allocs);
46717
46718 if (hlist_empty(&cookie->backing_objects))
46719 goto nobufs;
46720@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46721 goto nobufs_unlock;
46722 spin_unlock(&cookie->lock);
46723
46724- fscache_stat(&fscache_n_alloc_ops);
46725+ fscache_stat_unchecked(&fscache_n_alloc_ops);
46726
46727 ret = fscache_wait_for_retrieval_activation(
46728 object, op,
46729@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46730
46731 error:
46732 if (ret == -ERESTARTSYS)
46733- fscache_stat(&fscache_n_allocs_intr);
46734+ fscache_stat_unchecked(&fscache_n_allocs_intr);
46735 else if (ret < 0)
46736- fscache_stat(&fscache_n_allocs_nobufs);
46737+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46738 else
46739- fscache_stat(&fscache_n_allocs_ok);
46740+ fscache_stat_unchecked(&fscache_n_allocs_ok);
46741
46742 fscache_put_retrieval(op);
46743 _leave(" = %d", ret);
46744@@ -625,7 +625,7 @@ nobufs_unlock:
46745 spin_unlock(&cookie->lock);
46746 kfree(op);
46747 nobufs:
46748- fscache_stat(&fscache_n_allocs_nobufs);
46749+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46750 _leave(" = -ENOBUFS");
46751 return -ENOBUFS;
46752 }
46753@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46754
46755 spin_lock(&cookie->stores_lock);
46756
46757- fscache_stat(&fscache_n_store_calls);
46758+ fscache_stat_unchecked(&fscache_n_store_calls);
46759
46760 /* find a page to store */
46761 page = NULL;
46762@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46763 page = results[0];
46764 _debug("gang %d [%lx]", n, page->index);
46765 if (page->index > op->store_limit) {
46766- fscache_stat(&fscache_n_store_pages_over_limit);
46767+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46768 goto superseded;
46769 }
46770
46771@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46772 spin_unlock(&cookie->stores_lock);
46773 spin_unlock(&object->lock);
46774
46775- fscache_stat(&fscache_n_store_pages);
46776+ fscache_stat_unchecked(&fscache_n_store_pages);
46777 fscache_stat(&fscache_n_cop_write_page);
46778 ret = object->cache->ops->write_page(op, page);
46779 fscache_stat_d(&fscache_n_cop_write_page);
46780@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46781 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46782 ASSERT(PageFsCache(page));
46783
46784- fscache_stat(&fscache_n_stores);
46785+ fscache_stat_unchecked(&fscache_n_stores);
46786
46787 op = kzalloc(sizeof(*op), GFP_NOIO);
46788 if (!op)
46789@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46790 spin_unlock(&cookie->stores_lock);
46791 spin_unlock(&object->lock);
46792
46793- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46794+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46795 op->store_limit = object->store_limit;
46796
46797 if (fscache_submit_op(object, &op->op) < 0)
46798@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46799
46800 spin_unlock(&cookie->lock);
46801 radix_tree_preload_end();
46802- fscache_stat(&fscache_n_store_ops);
46803- fscache_stat(&fscache_n_stores_ok);
46804+ fscache_stat_unchecked(&fscache_n_store_ops);
46805+ fscache_stat_unchecked(&fscache_n_stores_ok);
46806
46807 /* the work queue now carries its own ref on the object */
46808 fscache_put_operation(&op->op);
46809@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46810 return 0;
46811
46812 already_queued:
46813- fscache_stat(&fscache_n_stores_again);
46814+ fscache_stat_unchecked(&fscache_n_stores_again);
46815 already_pending:
46816 spin_unlock(&cookie->stores_lock);
46817 spin_unlock(&object->lock);
46818 spin_unlock(&cookie->lock);
46819 radix_tree_preload_end();
46820 kfree(op);
46821- fscache_stat(&fscache_n_stores_ok);
46822+ fscache_stat_unchecked(&fscache_n_stores_ok);
46823 _leave(" = 0");
46824 return 0;
46825
46826@@ -851,14 +851,14 @@ nobufs:
46827 spin_unlock(&cookie->lock);
46828 radix_tree_preload_end();
46829 kfree(op);
46830- fscache_stat(&fscache_n_stores_nobufs);
46831+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
46832 _leave(" = -ENOBUFS");
46833 return -ENOBUFS;
46834
46835 nomem_free:
46836 kfree(op);
46837 nomem:
46838- fscache_stat(&fscache_n_stores_oom);
46839+ fscache_stat_unchecked(&fscache_n_stores_oom);
46840 _leave(" = -ENOMEM");
46841 return -ENOMEM;
46842 }
46843@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46844 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46845 ASSERTCMP(page, !=, NULL);
46846
46847- fscache_stat(&fscache_n_uncaches);
46848+ fscache_stat_unchecked(&fscache_n_uncaches);
46849
46850 /* cache withdrawal may beat us to it */
46851 if (!PageFsCache(page))
46852@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46853 unsigned long loop;
46854
46855 #ifdef CONFIG_FSCACHE_STATS
46856- atomic_add(pagevec->nr, &fscache_n_marks);
46857+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46858 #endif
46859
46860 for (loop = 0; loop < pagevec->nr; loop++) {
46861diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46862index 4765190..2a067f2 100644
46863--- a/fs/fscache/stats.c
46864+++ b/fs/fscache/stats.c
46865@@ -18,95 +18,95 @@
46866 /*
46867 * operation counters
46868 */
46869-atomic_t fscache_n_op_pend;
46870-atomic_t fscache_n_op_run;
46871-atomic_t fscache_n_op_enqueue;
46872-atomic_t fscache_n_op_requeue;
46873-atomic_t fscache_n_op_deferred_release;
46874-atomic_t fscache_n_op_release;
46875-atomic_t fscache_n_op_gc;
46876-atomic_t fscache_n_op_cancelled;
46877-atomic_t fscache_n_op_rejected;
46878+atomic_unchecked_t fscache_n_op_pend;
46879+atomic_unchecked_t fscache_n_op_run;
46880+atomic_unchecked_t fscache_n_op_enqueue;
46881+atomic_unchecked_t fscache_n_op_requeue;
46882+atomic_unchecked_t fscache_n_op_deferred_release;
46883+atomic_unchecked_t fscache_n_op_release;
46884+atomic_unchecked_t fscache_n_op_gc;
46885+atomic_unchecked_t fscache_n_op_cancelled;
46886+atomic_unchecked_t fscache_n_op_rejected;
46887
46888-atomic_t fscache_n_attr_changed;
46889-atomic_t fscache_n_attr_changed_ok;
46890-atomic_t fscache_n_attr_changed_nobufs;
46891-atomic_t fscache_n_attr_changed_nomem;
46892-atomic_t fscache_n_attr_changed_calls;
46893+atomic_unchecked_t fscache_n_attr_changed;
46894+atomic_unchecked_t fscache_n_attr_changed_ok;
46895+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46896+atomic_unchecked_t fscache_n_attr_changed_nomem;
46897+atomic_unchecked_t fscache_n_attr_changed_calls;
46898
46899-atomic_t fscache_n_allocs;
46900-atomic_t fscache_n_allocs_ok;
46901-atomic_t fscache_n_allocs_wait;
46902-atomic_t fscache_n_allocs_nobufs;
46903-atomic_t fscache_n_allocs_intr;
46904-atomic_t fscache_n_allocs_object_dead;
46905-atomic_t fscache_n_alloc_ops;
46906-atomic_t fscache_n_alloc_op_waits;
46907+atomic_unchecked_t fscache_n_allocs;
46908+atomic_unchecked_t fscache_n_allocs_ok;
46909+atomic_unchecked_t fscache_n_allocs_wait;
46910+atomic_unchecked_t fscache_n_allocs_nobufs;
46911+atomic_unchecked_t fscache_n_allocs_intr;
46912+atomic_unchecked_t fscache_n_allocs_object_dead;
46913+atomic_unchecked_t fscache_n_alloc_ops;
46914+atomic_unchecked_t fscache_n_alloc_op_waits;
46915
46916-atomic_t fscache_n_retrievals;
46917-atomic_t fscache_n_retrievals_ok;
46918-atomic_t fscache_n_retrievals_wait;
46919-atomic_t fscache_n_retrievals_nodata;
46920-atomic_t fscache_n_retrievals_nobufs;
46921-atomic_t fscache_n_retrievals_intr;
46922-atomic_t fscache_n_retrievals_nomem;
46923-atomic_t fscache_n_retrievals_object_dead;
46924-atomic_t fscache_n_retrieval_ops;
46925-atomic_t fscache_n_retrieval_op_waits;
46926+atomic_unchecked_t fscache_n_retrievals;
46927+atomic_unchecked_t fscache_n_retrievals_ok;
46928+atomic_unchecked_t fscache_n_retrievals_wait;
46929+atomic_unchecked_t fscache_n_retrievals_nodata;
46930+atomic_unchecked_t fscache_n_retrievals_nobufs;
46931+atomic_unchecked_t fscache_n_retrievals_intr;
46932+atomic_unchecked_t fscache_n_retrievals_nomem;
46933+atomic_unchecked_t fscache_n_retrievals_object_dead;
46934+atomic_unchecked_t fscache_n_retrieval_ops;
46935+atomic_unchecked_t fscache_n_retrieval_op_waits;
46936
46937-atomic_t fscache_n_stores;
46938-atomic_t fscache_n_stores_ok;
46939-atomic_t fscache_n_stores_again;
46940-atomic_t fscache_n_stores_nobufs;
46941-atomic_t fscache_n_stores_oom;
46942-atomic_t fscache_n_store_ops;
46943-atomic_t fscache_n_store_calls;
46944-atomic_t fscache_n_store_pages;
46945-atomic_t fscache_n_store_radix_deletes;
46946-atomic_t fscache_n_store_pages_over_limit;
46947+atomic_unchecked_t fscache_n_stores;
46948+atomic_unchecked_t fscache_n_stores_ok;
46949+atomic_unchecked_t fscache_n_stores_again;
46950+atomic_unchecked_t fscache_n_stores_nobufs;
46951+atomic_unchecked_t fscache_n_stores_oom;
46952+atomic_unchecked_t fscache_n_store_ops;
46953+atomic_unchecked_t fscache_n_store_calls;
46954+atomic_unchecked_t fscache_n_store_pages;
46955+atomic_unchecked_t fscache_n_store_radix_deletes;
46956+atomic_unchecked_t fscache_n_store_pages_over_limit;
46957
46958-atomic_t fscache_n_store_vmscan_not_storing;
46959-atomic_t fscache_n_store_vmscan_gone;
46960-atomic_t fscache_n_store_vmscan_busy;
46961-atomic_t fscache_n_store_vmscan_cancelled;
46962+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46963+atomic_unchecked_t fscache_n_store_vmscan_gone;
46964+atomic_unchecked_t fscache_n_store_vmscan_busy;
46965+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46966
46967-atomic_t fscache_n_marks;
46968-atomic_t fscache_n_uncaches;
46969+atomic_unchecked_t fscache_n_marks;
46970+atomic_unchecked_t fscache_n_uncaches;
46971
46972-atomic_t fscache_n_acquires;
46973-atomic_t fscache_n_acquires_null;
46974-atomic_t fscache_n_acquires_no_cache;
46975-atomic_t fscache_n_acquires_ok;
46976-atomic_t fscache_n_acquires_nobufs;
46977-atomic_t fscache_n_acquires_oom;
46978+atomic_unchecked_t fscache_n_acquires;
46979+atomic_unchecked_t fscache_n_acquires_null;
46980+atomic_unchecked_t fscache_n_acquires_no_cache;
46981+atomic_unchecked_t fscache_n_acquires_ok;
46982+atomic_unchecked_t fscache_n_acquires_nobufs;
46983+atomic_unchecked_t fscache_n_acquires_oom;
46984
46985-atomic_t fscache_n_updates;
46986-atomic_t fscache_n_updates_null;
46987-atomic_t fscache_n_updates_run;
46988+atomic_unchecked_t fscache_n_updates;
46989+atomic_unchecked_t fscache_n_updates_null;
46990+atomic_unchecked_t fscache_n_updates_run;
46991
46992-atomic_t fscache_n_relinquishes;
46993-atomic_t fscache_n_relinquishes_null;
46994-atomic_t fscache_n_relinquishes_waitcrt;
46995-atomic_t fscache_n_relinquishes_retire;
46996+atomic_unchecked_t fscache_n_relinquishes;
46997+atomic_unchecked_t fscache_n_relinquishes_null;
46998+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46999+atomic_unchecked_t fscache_n_relinquishes_retire;
47000
47001-atomic_t fscache_n_cookie_index;
47002-atomic_t fscache_n_cookie_data;
47003-atomic_t fscache_n_cookie_special;
47004+atomic_unchecked_t fscache_n_cookie_index;
47005+atomic_unchecked_t fscache_n_cookie_data;
47006+atomic_unchecked_t fscache_n_cookie_special;
47007
47008-atomic_t fscache_n_object_alloc;
47009-atomic_t fscache_n_object_no_alloc;
47010-atomic_t fscache_n_object_lookups;
47011-atomic_t fscache_n_object_lookups_negative;
47012-atomic_t fscache_n_object_lookups_positive;
47013-atomic_t fscache_n_object_lookups_timed_out;
47014-atomic_t fscache_n_object_created;
47015-atomic_t fscache_n_object_avail;
47016-atomic_t fscache_n_object_dead;
47017+atomic_unchecked_t fscache_n_object_alloc;
47018+atomic_unchecked_t fscache_n_object_no_alloc;
47019+atomic_unchecked_t fscache_n_object_lookups;
47020+atomic_unchecked_t fscache_n_object_lookups_negative;
47021+atomic_unchecked_t fscache_n_object_lookups_positive;
47022+atomic_unchecked_t fscache_n_object_lookups_timed_out;
47023+atomic_unchecked_t fscache_n_object_created;
47024+atomic_unchecked_t fscache_n_object_avail;
47025+atomic_unchecked_t fscache_n_object_dead;
47026
47027-atomic_t fscache_n_checkaux_none;
47028-atomic_t fscache_n_checkaux_okay;
47029-atomic_t fscache_n_checkaux_update;
47030-atomic_t fscache_n_checkaux_obsolete;
47031+atomic_unchecked_t fscache_n_checkaux_none;
47032+atomic_unchecked_t fscache_n_checkaux_okay;
47033+atomic_unchecked_t fscache_n_checkaux_update;
47034+atomic_unchecked_t fscache_n_checkaux_obsolete;
47035
47036 atomic_t fscache_n_cop_alloc_object;
47037 atomic_t fscache_n_cop_lookup_object;
47038@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
47039 seq_puts(m, "FS-Cache statistics\n");
47040
47041 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
47042- atomic_read(&fscache_n_cookie_index),
47043- atomic_read(&fscache_n_cookie_data),
47044- atomic_read(&fscache_n_cookie_special));
47045+ atomic_read_unchecked(&fscache_n_cookie_index),
47046+ atomic_read_unchecked(&fscache_n_cookie_data),
47047+ atomic_read_unchecked(&fscache_n_cookie_special));
47048
47049 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
47050- atomic_read(&fscache_n_object_alloc),
47051- atomic_read(&fscache_n_object_no_alloc),
47052- atomic_read(&fscache_n_object_avail),
47053- atomic_read(&fscache_n_object_dead));
47054+ atomic_read_unchecked(&fscache_n_object_alloc),
47055+ atomic_read_unchecked(&fscache_n_object_no_alloc),
47056+ atomic_read_unchecked(&fscache_n_object_avail),
47057+ atomic_read_unchecked(&fscache_n_object_dead));
47058 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
47059- atomic_read(&fscache_n_checkaux_none),
47060- atomic_read(&fscache_n_checkaux_okay),
47061- atomic_read(&fscache_n_checkaux_update),
47062- atomic_read(&fscache_n_checkaux_obsolete));
47063+ atomic_read_unchecked(&fscache_n_checkaux_none),
47064+ atomic_read_unchecked(&fscache_n_checkaux_okay),
47065+ atomic_read_unchecked(&fscache_n_checkaux_update),
47066+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
47067
47068 seq_printf(m, "Pages : mrk=%u unc=%u\n",
47069- atomic_read(&fscache_n_marks),
47070- atomic_read(&fscache_n_uncaches));
47071+ atomic_read_unchecked(&fscache_n_marks),
47072+ atomic_read_unchecked(&fscache_n_uncaches));
47073
47074 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
47075 " oom=%u\n",
47076- atomic_read(&fscache_n_acquires),
47077- atomic_read(&fscache_n_acquires_null),
47078- atomic_read(&fscache_n_acquires_no_cache),
47079- atomic_read(&fscache_n_acquires_ok),
47080- atomic_read(&fscache_n_acquires_nobufs),
47081- atomic_read(&fscache_n_acquires_oom));
47082+ atomic_read_unchecked(&fscache_n_acquires),
47083+ atomic_read_unchecked(&fscache_n_acquires_null),
47084+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
47085+ atomic_read_unchecked(&fscache_n_acquires_ok),
47086+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
47087+ atomic_read_unchecked(&fscache_n_acquires_oom));
47088
47089 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
47090- atomic_read(&fscache_n_object_lookups),
47091- atomic_read(&fscache_n_object_lookups_negative),
47092- atomic_read(&fscache_n_object_lookups_positive),
47093- atomic_read(&fscache_n_object_created),
47094- atomic_read(&fscache_n_object_lookups_timed_out));
47095+ atomic_read_unchecked(&fscache_n_object_lookups),
47096+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
47097+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
47098+ atomic_read_unchecked(&fscache_n_object_created),
47099+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
47100
47101 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
47102- atomic_read(&fscache_n_updates),
47103- atomic_read(&fscache_n_updates_null),
47104- atomic_read(&fscache_n_updates_run));
47105+ atomic_read_unchecked(&fscache_n_updates),
47106+ atomic_read_unchecked(&fscache_n_updates_null),
47107+ atomic_read_unchecked(&fscache_n_updates_run));
47108
47109 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
47110- atomic_read(&fscache_n_relinquishes),
47111- atomic_read(&fscache_n_relinquishes_null),
47112- atomic_read(&fscache_n_relinquishes_waitcrt),
47113- atomic_read(&fscache_n_relinquishes_retire));
47114+ atomic_read_unchecked(&fscache_n_relinquishes),
47115+ atomic_read_unchecked(&fscache_n_relinquishes_null),
47116+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
47117+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
47118
47119 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
47120- atomic_read(&fscache_n_attr_changed),
47121- atomic_read(&fscache_n_attr_changed_ok),
47122- atomic_read(&fscache_n_attr_changed_nobufs),
47123- atomic_read(&fscache_n_attr_changed_nomem),
47124- atomic_read(&fscache_n_attr_changed_calls));
47125+ atomic_read_unchecked(&fscache_n_attr_changed),
47126+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
47127+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
47128+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
47129+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
47130
47131 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
47132- atomic_read(&fscache_n_allocs),
47133- atomic_read(&fscache_n_allocs_ok),
47134- atomic_read(&fscache_n_allocs_wait),
47135- atomic_read(&fscache_n_allocs_nobufs),
47136- atomic_read(&fscache_n_allocs_intr));
47137+ atomic_read_unchecked(&fscache_n_allocs),
47138+ atomic_read_unchecked(&fscache_n_allocs_ok),
47139+ atomic_read_unchecked(&fscache_n_allocs_wait),
47140+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
47141+ atomic_read_unchecked(&fscache_n_allocs_intr));
47142 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
47143- atomic_read(&fscache_n_alloc_ops),
47144- atomic_read(&fscache_n_alloc_op_waits),
47145- atomic_read(&fscache_n_allocs_object_dead));
47146+ atomic_read_unchecked(&fscache_n_alloc_ops),
47147+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
47148+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
47149
47150 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
47151 " int=%u oom=%u\n",
47152- atomic_read(&fscache_n_retrievals),
47153- atomic_read(&fscache_n_retrievals_ok),
47154- atomic_read(&fscache_n_retrievals_wait),
47155- atomic_read(&fscache_n_retrievals_nodata),
47156- atomic_read(&fscache_n_retrievals_nobufs),
47157- atomic_read(&fscache_n_retrievals_intr),
47158- atomic_read(&fscache_n_retrievals_nomem));
47159+ atomic_read_unchecked(&fscache_n_retrievals),
47160+ atomic_read_unchecked(&fscache_n_retrievals_ok),
47161+ atomic_read_unchecked(&fscache_n_retrievals_wait),
47162+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
47163+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
47164+ atomic_read_unchecked(&fscache_n_retrievals_intr),
47165+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
47166 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
47167- atomic_read(&fscache_n_retrieval_ops),
47168- atomic_read(&fscache_n_retrieval_op_waits),
47169- atomic_read(&fscache_n_retrievals_object_dead));
47170+ atomic_read_unchecked(&fscache_n_retrieval_ops),
47171+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
47172+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
47173
47174 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
47175- atomic_read(&fscache_n_stores),
47176- atomic_read(&fscache_n_stores_ok),
47177- atomic_read(&fscache_n_stores_again),
47178- atomic_read(&fscache_n_stores_nobufs),
47179- atomic_read(&fscache_n_stores_oom));
47180+ atomic_read_unchecked(&fscache_n_stores),
47181+ atomic_read_unchecked(&fscache_n_stores_ok),
47182+ atomic_read_unchecked(&fscache_n_stores_again),
47183+ atomic_read_unchecked(&fscache_n_stores_nobufs),
47184+ atomic_read_unchecked(&fscache_n_stores_oom));
47185 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
47186- atomic_read(&fscache_n_store_ops),
47187- atomic_read(&fscache_n_store_calls),
47188- atomic_read(&fscache_n_store_pages),
47189- atomic_read(&fscache_n_store_radix_deletes),
47190- atomic_read(&fscache_n_store_pages_over_limit));
47191+ atomic_read_unchecked(&fscache_n_store_ops),
47192+ atomic_read_unchecked(&fscache_n_store_calls),
47193+ atomic_read_unchecked(&fscache_n_store_pages),
47194+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
47195+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
47196
47197 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
47198- atomic_read(&fscache_n_store_vmscan_not_storing),
47199- atomic_read(&fscache_n_store_vmscan_gone),
47200- atomic_read(&fscache_n_store_vmscan_busy),
47201- atomic_read(&fscache_n_store_vmscan_cancelled));
47202+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
47203+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
47204+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
47205+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
47206
47207 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
47208- atomic_read(&fscache_n_op_pend),
47209- atomic_read(&fscache_n_op_run),
47210- atomic_read(&fscache_n_op_enqueue),
47211- atomic_read(&fscache_n_op_cancelled),
47212- atomic_read(&fscache_n_op_rejected));
47213+ atomic_read_unchecked(&fscache_n_op_pend),
47214+ atomic_read_unchecked(&fscache_n_op_run),
47215+ atomic_read_unchecked(&fscache_n_op_enqueue),
47216+ atomic_read_unchecked(&fscache_n_op_cancelled),
47217+ atomic_read_unchecked(&fscache_n_op_rejected));
47218 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
47219- atomic_read(&fscache_n_op_deferred_release),
47220- atomic_read(&fscache_n_op_release),
47221- atomic_read(&fscache_n_op_gc));
47222+ atomic_read_unchecked(&fscache_n_op_deferred_release),
47223+ atomic_read_unchecked(&fscache_n_op_release),
47224+ atomic_read_unchecked(&fscache_n_op_gc));
47225
47226 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
47227 atomic_read(&fscache_n_cop_alloc_object),
47228diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
47229index 3426521..3b75162 100644
47230--- a/fs/fuse/cuse.c
47231+++ b/fs/fuse/cuse.c
47232@@ -587,10 +587,12 @@ static int __init cuse_init(void)
47233 INIT_LIST_HEAD(&cuse_conntbl[i]);
47234
47235 /* inherit and extend fuse_dev_operations */
47236- cuse_channel_fops = fuse_dev_operations;
47237- cuse_channel_fops.owner = THIS_MODULE;
47238- cuse_channel_fops.open = cuse_channel_open;
47239- cuse_channel_fops.release = cuse_channel_release;
47240+ pax_open_kernel();
47241+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
47242+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
47243+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
47244+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
47245+ pax_close_kernel();
47246
47247 cuse_class = class_create(THIS_MODULE, "cuse");
47248 if (IS_ERR(cuse_class))
47249diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
47250index 5f3368a..8306426 100644
47251--- a/fs/fuse/dev.c
47252+++ b/fs/fuse/dev.c
47253@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
47254 ret = 0;
47255 pipe_lock(pipe);
47256
47257- if (!pipe->readers) {
47258+ if (!atomic_read(&pipe->readers)) {
47259 send_sig(SIGPIPE, current, 0);
47260 if (!ret)
47261 ret = -EPIPE;
47262diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
47263index 2066328..f5add3b 100644
47264--- a/fs/fuse/dir.c
47265+++ b/fs/fuse/dir.c
47266@@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
47267 return link;
47268 }
47269
47270-static void free_link(char *link)
47271+static void free_link(const char *link)
47272 {
47273 if (!IS_ERR(link))
47274 free_page((unsigned long) link);
47275diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
47276index 5698746..6086012 100644
47277--- a/fs/gfs2/inode.c
47278+++ b/fs/gfs2/inode.c
47279@@ -1487,7 +1487,7 @@ out:
47280
47281 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47282 {
47283- char *s = nd_get_link(nd);
47284+ const char *s = nd_get_link(nd);
47285 if (!IS_ERR(s))
47286 kfree(s);
47287 }
47288diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
47289index c60267e..193d9e4 100644
47290--- a/fs/hugetlbfs/inode.c
47291+++ b/fs/hugetlbfs/inode.c
47292@@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = {
47293 .kill_sb = kill_litter_super,
47294 };
47295
47296-static struct vfsmount *hugetlbfs_vfsmount;
47297+struct vfsmount *hugetlbfs_vfsmount;
47298
47299 static int can_do_hugetlb_shm(void)
47300 {
47301diff --git a/fs/inode.c b/fs/inode.c
47302index 83ab215..8842101 100644
47303--- a/fs/inode.c
47304+++ b/fs/inode.c
47305@@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
47306
47307 #ifdef CONFIG_SMP
47308 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
47309- static atomic_t shared_last_ino;
47310- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
47311+ static atomic_unchecked_t shared_last_ino;
47312+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
47313
47314 res = next - LAST_INO_BATCH;
47315 }
47316diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
47317index eafb8d3..f423d37 100644
47318--- a/fs/jffs2/erase.c
47319+++ b/fs/jffs2/erase.c
47320@@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
47321 struct jffs2_unknown_node marker = {
47322 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47323 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47324- .totlen = cpu_to_je32(c->cleanmarker_size)
47325+ .totlen = cpu_to_je32(c->cleanmarker_size),
47326+ .hdr_crc = cpu_to_je32(0)
47327 };
47328
47329 jffs2_prealloc_raw_node_refs(c, jeb, 1);
47330diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
47331index 30e8f47..21f600c 100644
47332--- a/fs/jffs2/wbuf.c
47333+++ b/fs/jffs2/wbuf.c
47334@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
47335 {
47336 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47337 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47338- .totlen = constant_cpu_to_je32(8)
47339+ .totlen = constant_cpu_to_je32(8),
47340+ .hdr_crc = constant_cpu_to_je32(0)
47341 };
47342
47343 /*
47344diff --git a/fs/jfs/super.c b/fs/jfs/super.c
47345index 682bca6..86b8e6e 100644
47346--- a/fs/jfs/super.c
47347+++ b/fs/jfs/super.c
47348@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
47349
47350 jfs_inode_cachep =
47351 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47352- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47353+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47354 init_once);
47355 if (jfs_inode_cachep == NULL)
47356 return -ENOMEM;
47357diff --git a/fs/libfs.c b/fs/libfs.c
47358index 5b2dbb3..7442d54 100644
47359--- a/fs/libfs.c
47360+++ b/fs/libfs.c
47361@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47362
47363 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47364 struct dentry *next;
47365+ char d_name[sizeof(next->d_iname)];
47366+ const unsigned char *name;
47367+
47368 next = list_entry(p, struct dentry, d_u.d_child);
47369 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47370 if (!simple_positive(next)) {
47371@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47372
47373 spin_unlock(&next->d_lock);
47374 spin_unlock(&dentry->d_lock);
47375- if (filldir(dirent, next->d_name.name,
47376+ name = next->d_name.name;
47377+ if (name == next->d_iname) {
47378+ memcpy(d_name, name, next->d_name.len);
47379+ name = d_name;
47380+ }
47381+ if (filldir(dirent, name,
47382 next->d_name.len, filp->f_pos,
47383 next->d_inode->i_ino,
47384 dt_type(next->d_inode)) < 0)
47385diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
47386index 8392cb8..80d6193 100644
47387--- a/fs/lockd/clntproc.c
47388+++ b/fs/lockd/clntproc.c
47389@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
47390 /*
47391 * Cookie counter for NLM requests
47392 */
47393-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47394+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47395
47396 void nlmclnt_next_cookie(struct nlm_cookie *c)
47397 {
47398- u32 cookie = atomic_inc_return(&nlm_cookie);
47399+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47400
47401 memcpy(c->data, &cookie, 4);
47402 c->len=4;
47403diff --git a/fs/locks.c b/fs/locks.c
47404index 0d68f1f..f216b79 100644
47405--- a/fs/locks.c
47406+++ b/fs/locks.c
47407@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
47408 return;
47409
47410 if (filp->f_op && filp->f_op->flock) {
47411- struct file_lock fl = {
47412+ struct file_lock flock = {
47413 .fl_pid = current->tgid,
47414 .fl_file = filp,
47415 .fl_flags = FL_FLOCK,
47416 .fl_type = F_UNLCK,
47417 .fl_end = OFFSET_MAX,
47418 };
47419- filp->f_op->flock(filp, F_SETLKW, &fl);
47420- if (fl.fl_ops && fl.fl_ops->fl_release_private)
47421- fl.fl_ops->fl_release_private(&fl);
47422+ filp->f_op->flock(filp, F_SETLKW, &flock);
47423+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
47424+ flock.fl_ops->fl_release_private(&flock);
47425 }
47426
47427 lock_flocks();
47428diff --git a/fs/namei.c b/fs/namei.c
47429index 46ea9cc..c7cf3a3 100644
47430--- a/fs/namei.c
47431+++ b/fs/namei.c
47432@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
47433 if (ret != -EACCES)
47434 return ret;
47435
47436+#ifdef CONFIG_GRKERNSEC
47437+ /* we'll block if we have to log due to a denied capability use */
47438+ if (mask & MAY_NOT_BLOCK)
47439+ return -ECHILD;
47440+#endif
47441+
47442 if (S_ISDIR(inode->i_mode)) {
47443 /* DACs are overridable for directories */
47444- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47445- return 0;
47446 if (!(mask & MAY_WRITE))
47447- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47448+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47449+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47450 return 0;
47451+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47452+ return 0;
47453 return -EACCES;
47454 }
47455 /*
47456+ * Searching includes executable on directories, else just read.
47457+ */
47458+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47459+ if (mask == MAY_READ)
47460+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47461+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47462+ return 0;
47463+
47464+ /*
47465 * Read/write DACs are always overridable.
47466 * Executable DACs are overridable when there is
47467 * at least one exec bit set.
47468@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
47469 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47470 return 0;
47471
47472- /*
47473- * Searching includes executable on directories, else just read.
47474- */
47475- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47476- if (mask == MAY_READ)
47477- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47478- return 0;
47479-
47480 return -EACCES;
47481 }
47482
47483@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47484 return error;
47485 }
47486
47487+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
47488+ dentry->d_inode, dentry, nd->path.mnt)) {
47489+ error = -EACCES;
47490+ *p = ERR_PTR(error); /* no ->put_link(), please */
47491+ path_put(&nd->path);
47492+ return error;
47493+ }
47494+
47495 nd->last_type = LAST_BIND;
47496 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47497 error = PTR_ERR(*p);
47498 if (!IS_ERR(*p)) {
47499- char *s = nd_get_link(nd);
47500+ const char *s = nd_get_link(nd);
47501 error = 0;
47502 if (s)
47503 error = __vfs_follow_link(nd, s);
47504@@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
47505 if (!err)
47506 err = complete_walk(nd);
47507
47508+ if (!(nd->flags & LOOKUP_PARENT)) {
47509+#ifdef CONFIG_GRKERNSEC
47510+ if (flags & LOOKUP_RCU) {
47511+ if (!err)
47512+ path_put(&nd->path);
47513+ err = -ECHILD;
47514+ } else
47515+#endif
47516+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47517+ if (!err)
47518+ path_put(&nd->path);
47519+ err = -ENOENT;
47520+ }
47521+ }
47522+
47523 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47524 if (!nd->inode->i_op->lookup) {
47525 path_put(&nd->path);
47526@@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
47527 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47528
47529 if (likely(!retval)) {
47530+ if (*name != '/' && nd->path.dentry && nd->inode) {
47531+#ifdef CONFIG_GRKERNSEC
47532+ if (flags & LOOKUP_RCU)
47533+ return -ECHILD;
47534+#endif
47535+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47536+ return -ENOENT;
47537+ }
47538+
47539 if (unlikely(!audit_dummy_context())) {
47540 if (nd->path.dentry && nd->inode)
47541 audit_inode(name, nd->path.dentry);
47542@@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
47543 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47544 return -EPERM;
47545
47546+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47547+ return -EPERM;
47548+ if (gr_handle_rawio(inode))
47549+ return -EPERM;
47550+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47551+ return -EACCES;
47552+
47553 return 0;
47554 }
47555
47556@@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47557 error = complete_walk(nd);
47558 if (error)
47559 return ERR_PTR(error);
47560+#ifdef CONFIG_GRKERNSEC
47561+ if (nd->flags & LOOKUP_RCU) {
47562+ error = -ECHILD;
47563+ goto exit;
47564+ }
47565+#endif
47566+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47567+ error = -ENOENT;
47568+ goto exit;
47569+ }
47570 audit_inode(pathname, nd->path.dentry);
47571 if (open_flag & O_CREAT) {
47572 error = -EISDIR;
47573@@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47574 error = complete_walk(nd);
47575 if (error)
47576 return ERR_PTR(error);
47577+#ifdef CONFIG_GRKERNSEC
47578+ if (nd->flags & LOOKUP_RCU) {
47579+ error = -ECHILD;
47580+ goto exit;
47581+ }
47582+#endif
47583+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47584+ error = -ENOENT;
47585+ goto exit;
47586+ }
47587 audit_inode(pathname, dir);
47588 goto ok;
47589 }
47590@@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47591 error = complete_walk(nd);
47592 if (error)
47593 return ERR_PTR(error);
47594+#ifdef CONFIG_GRKERNSEC
47595+ if (nd->flags & LOOKUP_RCU) {
47596+ error = -ECHILD;
47597+ goto exit;
47598+ }
47599+#endif
47600+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47601+ error = -ENOENT;
47602+ goto exit;
47603+ }
47604
47605 error = -ENOTDIR;
47606 if (nd->flags & LOOKUP_DIRECTORY) {
47607@@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47608 /* Negative dentry, just create the file */
47609 if (!dentry->d_inode) {
47610 umode_t mode = op->mode;
47611+
47612+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47613+ error = -EACCES;
47614+ goto exit_mutex_unlock;
47615+ }
47616+
47617 if (!IS_POSIXACL(dir->d_inode))
47618 mode &= ~current_umask();
47619 /*
47620@@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47621 error = vfs_create(dir->d_inode, dentry, mode, nd);
47622 if (error)
47623 goto exit_mutex_unlock;
47624+ else
47625+ gr_handle_create(path->dentry, path->mnt);
47626 mutex_unlock(&dir->d_inode->i_mutex);
47627 dput(nd->path.dentry);
47628 nd->path.dentry = dentry;
47629@@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47630 /*
47631 * It already exists.
47632 */
47633+
47634+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47635+ error = -ENOENT;
47636+ goto exit_mutex_unlock;
47637+ }
47638+
47639+ /* only check if O_CREAT is specified, all other checks need to go
47640+ into may_open */
47641+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47642+ error = -EACCES;
47643+ goto exit_mutex_unlock;
47644+ }
47645+
47646 mutex_unlock(&dir->d_inode->i_mutex);
47647 audit_inode(pathname, path->dentry);
47648
47649@@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47650 *path = nd.path;
47651 return dentry;
47652 eexist:
47653+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47654+ dput(dentry);
47655+ dentry = ERR_PTR(-ENOENT);
47656+ goto fail;
47657+ }
47658 dput(dentry);
47659 dentry = ERR_PTR(-EEXIST);
47660 fail:
47661@@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47662 }
47663 EXPORT_SYMBOL(user_path_create);
47664
47665+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47666+{
47667+ char *tmp = getname(pathname);
47668+ struct dentry *res;
47669+ if (IS_ERR(tmp))
47670+ return ERR_CAST(tmp);
47671+ res = kern_path_create(dfd, tmp, path, is_dir);
47672+ if (IS_ERR(res))
47673+ putname(tmp);
47674+ else
47675+ *to = tmp;
47676+ return res;
47677+}
47678+
47679 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
47680 {
47681 int error = may_create(dir, dentry);
47682@@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47683 error = mnt_want_write(path.mnt);
47684 if (error)
47685 goto out_dput;
47686+
47687+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47688+ error = -EPERM;
47689+ goto out_drop_write;
47690+ }
47691+
47692+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47693+ error = -EACCES;
47694+ goto out_drop_write;
47695+ }
47696+
47697 error = security_path_mknod(&path, dentry, mode, dev);
47698 if (error)
47699 goto out_drop_write;
47700@@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47701 }
47702 out_drop_write:
47703 mnt_drop_write(path.mnt);
47704+
47705+ if (!error)
47706+ gr_handle_create(dentry, path.mnt);
47707 out_dput:
47708 dput(dentry);
47709 mutex_unlock(&path.dentry->d_inode->i_mutex);
47710@@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
47711 error = mnt_want_write(path.mnt);
47712 if (error)
47713 goto out_dput;
47714+
47715+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47716+ error = -EACCES;
47717+ goto out_drop_write;
47718+ }
47719+
47720 error = security_path_mkdir(&path, dentry, mode);
47721 if (error)
47722 goto out_drop_write;
47723 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47724 out_drop_write:
47725 mnt_drop_write(path.mnt);
47726+
47727+ if (!error)
47728+ gr_handle_create(dentry, path.mnt);
47729 out_dput:
47730 dput(dentry);
47731 mutex_unlock(&path.dentry->d_inode->i_mutex);
47732@@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47733 char * name;
47734 struct dentry *dentry;
47735 struct nameidata nd;
47736+ ino_t saved_ino = 0;
47737+ dev_t saved_dev = 0;
47738
47739 error = user_path_parent(dfd, pathname, &nd, &name);
47740 if (error)
47741@@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47742 error = -ENOENT;
47743 goto exit3;
47744 }
47745+
47746+ saved_ino = dentry->d_inode->i_ino;
47747+ saved_dev = gr_get_dev_from_dentry(dentry);
47748+
47749+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47750+ error = -EACCES;
47751+ goto exit3;
47752+ }
47753+
47754 error = mnt_want_write(nd.path.mnt);
47755 if (error)
47756 goto exit3;
47757@@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47758 if (error)
47759 goto exit4;
47760 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47761+ if (!error && (saved_dev || saved_ino))
47762+ gr_handle_delete(saved_ino, saved_dev);
47763 exit4:
47764 mnt_drop_write(nd.path.mnt);
47765 exit3:
47766@@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47767 struct dentry *dentry;
47768 struct nameidata nd;
47769 struct inode *inode = NULL;
47770+ ino_t saved_ino = 0;
47771+ dev_t saved_dev = 0;
47772
47773 error = user_path_parent(dfd, pathname, &nd, &name);
47774 if (error)
47775@@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47776 if (!inode)
47777 goto slashes;
47778 ihold(inode);
47779+
47780+ if (inode->i_nlink <= 1) {
47781+ saved_ino = inode->i_ino;
47782+ saved_dev = gr_get_dev_from_dentry(dentry);
47783+ }
47784+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47785+ error = -EACCES;
47786+ goto exit2;
47787+ }
47788+
47789 error = mnt_want_write(nd.path.mnt);
47790 if (error)
47791 goto exit2;
47792@@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47793 if (error)
47794 goto exit3;
47795 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47796+ if (!error && (saved_ino || saved_dev))
47797+ gr_handle_delete(saved_ino, saved_dev);
47798 exit3:
47799 mnt_drop_write(nd.path.mnt);
47800 exit2:
47801@@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47802 error = mnt_want_write(path.mnt);
47803 if (error)
47804 goto out_dput;
47805+
47806+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47807+ error = -EACCES;
47808+ goto out_drop_write;
47809+ }
47810+
47811 error = security_path_symlink(&path, dentry, from);
47812 if (error)
47813 goto out_drop_write;
47814 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47815+ if (!error)
47816+ gr_handle_create(dentry, path.mnt);
47817 out_drop_write:
47818 mnt_drop_write(path.mnt);
47819 out_dput:
47820@@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47821 {
47822 struct dentry *new_dentry;
47823 struct path old_path, new_path;
47824+ char *to = NULL;
47825 int how = 0;
47826 int error;
47827
47828@@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47829 if (error)
47830 return error;
47831
47832- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47833+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47834 error = PTR_ERR(new_dentry);
47835 if (IS_ERR(new_dentry))
47836 goto out;
47837@@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47838 error = mnt_want_write(new_path.mnt);
47839 if (error)
47840 goto out_dput;
47841+
47842+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47843+ old_path.dentry->d_inode,
47844+ old_path.dentry->d_inode->i_mode, to)) {
47845+ error = -EACCES;
47846+ goto out_drop_write;
47847+ }
47848+
47849+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47850+ old_path.dentry, old_path.mnt, to)) {
47851+ error = -EACCES;
47852+ goto out_drop_write;
47853+ }
47854+
47855 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47856 if (error)
47857 goto out_drop_write;
47858 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47859+ if (!error)
47860+ gr_handle_create(new_dentry, new_path.mnt);
47861 out_drop_write:
47862 mnt_drop_write(new_path.mnt);
47863 out_dput:
47864+ putname(to);
47865 dput(new_dentry);
47866 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47867 path_put(&new_path);
47868@@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47869 if (new_dentry == trap)
47870 goto exit5;
47871
47872+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47873+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47874+ to);
47875+ if (error)
47876+ goto exit5;
47877+
47878 error = mnt_want_write(oldnd.path.mnt);
47879 if (error)
47880 goto exit5;
47881@@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47882 goto exit6;
47883 error = vfs_rename(old_dir->d_inode, old_dentry,
47884 new_dir->d_inode, new_dentry);
47885+ if (!error)
47886+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47887+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47888 exit6:
47889 mnt_drop_write(oldnd.path.mnt);
47890 exit5:
47891@@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47892
47893 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47894 {
47895+ char tmpbuf[64];
47896+ const char *newlink;
47897 int len;
47898
47899 len = PTR_ERR(link);
47900@@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47901 len = strlen(link);
47902 if (len > (unsigned) buflen)
47903 len = buflen;
47904- if (copy_to_user(buffer, link, len))
47905+
47906+ if (len < sizeof(tmpbuf)) {
47907+ memcpy(tmpbuf, link, len);
47908+ newlink = tmpbuf;
47909+ } else
47910+ newlink = link;
47911+
47912+ if (copy_to_user(buffer, newlink, len))
47913 len = -EFAULT;
47914 out:
47915 return len;
47916diff --git a/fs/namespace.c b/fs/namespace.c
47917index e608199..9609cb9 100644
47918--- a/fs/namespace.c
47919+++ b/fs/namespace.c
47920@@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
47921 if (!(sb->s_flags & MS_RDONLY))
47922 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47923 up_write(&sb->s_umount);
47924+
47925+ gr_log_remount(mnt->mnt_devname, retval);
47926+
47927 return retval;
47928 }
47929
47930@@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
47931 br_write_unlock(vfsmount_lock);
47932 up_write(&namespace_sem);
47933 release_mounts(&umount_list);
47934+
47935+ gr_log_unmount(mnt->mnt_devname, retval);
47936+
47937 return retval;
47938 }
47939
47940@@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47941 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47942 MS_STRICTATIME);
47943
47944+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47945+ retval = -EPERM;
47946+ goto dput_out;
47947+ }
47948+
47949+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47950+ retval = -EPERM;
47951+ goto dput_out;
47952+ }
47953+
47954 if (flags & MS_REMOUNT)
47955 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47956 data_page);
47957@@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47958 dev_name, data_page);
47959 dput_out:
47960 path_put(&path);
47961+
47962+ gr_log_mount(dev_name, dir_name, retval);
47963+
47964 return retval;
47965 }
47966
47967@@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47968 if (error)
47969 goto out2;
47970
47971+ if (gr_handle_chroot_pivot()) {
47972+ error = -EPERM;
47973+ goto out2;
47974+ }
47975+
47976 get_fs_root(current->fs, &root);
47977 error = lock_mount(&old);
47978 if (error)
47979diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
47980index 32c0658..b1c2045e 100644
47981--- a/fs/ncpfs/ncplib_kernel.h
47982+++ b/fs/ncpfs/ncplib_kernel.h
47983@@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
47984 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
47985 const unsigned char *, unsigned int, int);
47986 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47987- const unsigned char *, unsigned int, int);
47988+ const unsigned char *, unsigned int, int) __size_overflow(5);
47989
47990 #define NCP_ESC ':'
47991 #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
47992@@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47993 int ncp__io2vol(unsigned char *, unsigned int *,
47994 const unsigned char *, unsigned int, int);
47995 int ncp__vol2io(unsigned char *, unsigned int *,
47996- const unsigned char *, unsigned int, int);
47997+ const unsigned char *, unsigned int, int) __size_overflow(5);
47998
47999 #define NCP_IO_TABLE(sb) NULL
48000 #define ncp_tolower(t, c) tolower(c)
48001diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
48002index f649fba..236bf92 100644
48003--- a/fs/nfs/inode.c
48004+++ b/fs/nfs/inode.c
48005@@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
48006 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
48007 nfsi->attrtimeo_timestamp = jiffies;
48008
48009- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
48010+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
48011 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
48012 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
48013 else
48014@@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
48015 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
48016 }
48017
48018-static atomic_long_t nfs_attr_generation_counter;
48019+static atomic_long_unchecked_t nfs_attr_generation_counter;
48020
48021 static unsigned long nfs_read_attr_generation_counter(void)
48022 {
48023- return atomic_long_read(&nfs_attr_generation_counter);
48024+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
48025 }
48026
48027 unsigned long nfs_inc_attr_generation_counter(void)
48028 {
48029- return atomic_long_inc_return(&nfs_attr_generation_counter);
48030+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
48031 }
48032
48033 void nfs_fattr_init(struct nfs_fattr *fattr)
48034diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
48035index b96fe94..a4dbece 100644
48036--- a/fs/nfsd/vfs.c
48037+++ b/fs/nfsd/vfs.c
48038@@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48039 } else {
48040 oldfs = get_fs();
48041 set_fs(KERNEL_DS);
48042- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
48043+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
48044 set_fs(oldfs);
48045 }
48046
48047@@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
48048
48049 /* Write the data. */
48050 oldfs = get_fs(); set_fs(KERNEL_DS);
48051- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
48052+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
48053 set_fs(oldfs);
48054 if (host_err < 0)
48055 goto out_nfserr;
48056@@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
48057 */
48058
48059 oldfs = get_fs(); set_fs(KERNEL_DS);
48060- host_err = inode->i_op->readlink(dentry, buf, *lenp);
48061+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
48062 set_fs(oldfs);
48063
48064 if (host_err < 0)
48065diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
48066index 3568c8a..e0240d8 100644
48067--- a/fs/notify/fanotify/fanotify_user.c
48068+++ b/fs/notify/fanotify/fanotify_user.c
48069@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
48070 goto out_close_fd;
48071
48072 ret = -EFAULT;
48073- if (copy_to_user(buf, &fanotify_event_metadata,
48074+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
48075+ copy_to_user(buf, &fanotify_event_metadata,
48076 fanotify_event_metadata.event_len))
48077 goto out_kill_access_response;
48078
48079diff --git a/fs/notify/notification.c b/fs/notify/notification.c
48080index ee18815..7aa5d01 100644
48081--- a/fs/notify/notification.c
48082+++ b/fs/notify/notification.c
48083@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
48084 * get set to 0 so it will never get 'freed'
48085 */
48086 static struct fsnotify_event *q_overflow_event;
48087-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48088+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48089
48090 /**
48091 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
48092@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
48093 */
48094 u32 fsnotify_get_cookie(void)
48095 {
48096- return atomic_inc_return(&fsnotify_sync_cookie);
48097+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
48098 }
48099 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
48100
48101diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
48102index 99e3610..02c1068 100644
48103--- a/fs/ntfs/dir.c
48104+++ b/fs/ntfs/dir.c
48105@@ -1329,7 +1329,7 @@ find_next_index_buffer:
48106 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
48107 ~(s64)(ndir->itype.index.block_size - 1)));
48108 /* Bounds checks. */
48109- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48110+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48111 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48112 "inode 0x%lx or driver bug.", vdir->i_ino);
48113 goto err_out;
48114diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
48115index c587e2d..3641eaa 100644
48116--- a/fs/ntfs/file.c
48117+++ b/fs/ntfs/file.c
48118@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
48119 #endif /* NTFS_RW */
48120 };
48121
48122-const struct file_operations ntfs_empty_file_ops = {};
48123+const struct file_operations ntfs_empty_file_ops __read_only;
48124
48125-const struct inode_operations ntfs_empty_inode_ops = {};
48126+const struct inode_operations ntfs_empty_inode_ops __read_only;
48127diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
48128index 210c352..a174f83 100644
48129--- a/fs/ocfs2/localalloc.c
48130+++ b/fs/ocfs2/localalloc.c
48131@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
48132 goto bail;
48133 }
48134
48135- atomic_inc(&osb->alloc_stats.moves);
48136+ atomic_inc_unchecked(&osb->alloc_stats.moves);
48137
48138 bail:
48139 if (handle)
48140diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
48141index d355e6e..578d905 100644
48142--- a/fs/ocfs2/ocfs2.h
48143+++ b/fs/ocfs2/ocfs2.h
48144@@ -235,11 +235,11 @@ enum ocfs2_vol_state
48145
48146 struct ocfs2_alloc_stats
48147 {
48148- atomic_t moves;
48149- atomic_t local_data;
48150- atomic_t bitmap_data;
48151- atomic_t bg_allocs;
48152- atomic_t bg_extends;
48153+ atomic_unchecked_t moves;
48154+ atomic_unchecked_t local_data;
48155+ atomic_unchecked_t bitmap_data;
48156+ atomic_unchecked_t bg_allocs;
48157+ atomic_unchecked_t bg_extends;
48158 };
48159
48160 enum ocfs2_local_alloc_state
48161diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
48162index f169da4..9112253 100644
48163--- a/fs/ocfs2/suballoc.c
48164+++ b/fs/ocfs2/suballoc.c
48165@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
48166 mlog_errno(status);
48167 goto bail;
48168 }
48169- atomic_inc(&osb->alloc_stats.bg_extends);
48170+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48171
48172 /* You should never ask for this much metadata */
48173 BUG_ON(bits_wanted >
48174@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
48175 mlog_errno(status);
48176 goto bail;
48177 }
48178- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48179+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48180
48181 *suballoc_loc = res.sr_bg_blkno;
48182 *suballoc_bit_start = res.sr_bit_offset;
48183@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
48184 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
48185 res->sr_bits);
48186
48187- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48188+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48189
48190 BUG_ON(res->sr_bits != 1);
48191
48192@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
48193 mlog_errno(status);
48194 goto bail;
48195 }
48196- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48197+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48198
48199 BUG_ON(res.sr_bits != 1);
48200
48201@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48202 cluster_start,
48203 num_clusters);
48204 if (!status)
48205- atomic_inc(&osb->alloc_stats.local_data);
48206+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
48207 } else {
48208 if (min_clusters > (osb->bitmap_cpg - 1)) {
48209 /* The only paths asking for contiguousness
48210@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48211 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
48212 res.sr_bg_blkno,
48213 res.sr_bit_offset);
48214- atomic_inc(&osb->alloc_stats.bitmap_data);
48215+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
48216 *num_clusters = res.sr_bits;
48217 }
48218 }
48219diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
48220index 604e12c..8426483 100644
48221--- a/fs/ocfs2/super.c
48222+++ b/fs/ocfs2/super.c
48223@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
48224 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48225 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48226 "Stats",
48227- atomic_read(&osb->alloc_stats.bitmap_data),
48228- atomic_read(&osb->alloc_stats.local_data),
48229- atomic_read(&osb->alloc_stats.bg_allocs),
48230- atomic_read(&osb->alloc_stats.moves),
48231- atomic_read(&osb->alloc_stats.bg_extends));
48232+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48233+ atomic_read_unchecked(&osb->alloc_stats.local_data),
48234+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48235+ atomic_read_unchecked(&osb->alloc_stats.moves),
48236+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48237
48238 out += snprintf(buf + out, len - out,
48239 "%10s => State: %u Descriptor: %llu Size: %u bits "
48240@@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
48241 spin_lock_init(&osb->osb_xattr_lock);
48242 ocfs2_init_steal_slots(osb);
48243
48244- atomic_set(&osb->alloc_stats.moves, 0);
48245- atomic_set(&osb->alloc_stats.local_data, 0);
48246- atomic_set(&osb->alloc_stats.bitmap_data, 0);
48247- atomic_set(&osb->alloc_stats.bg_allocs, 0);
48248- atomic_set(&osb->alloc_stats.bg_extends, 0);
48249+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48250+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48251+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48252+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48253+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48254
48255 /* Copy the blockcheck stats from the superblock probe */
48256 osb->osb_ecc_stats = *stats;
48257diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
48258index 5d22872..523db20 100644
48259--- a/fs/ocfs2/symlink.c
48260+++ b/fs/ocfs2/symlink.c
48261@@ -142,7 +142,7 @@ bail:
48262
48263 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48264 {
48265- char *link = nd_get_link(nd);
48266+ const char *link = nd_get_link(nd);
48267 if (!IS_ERR(link))
48268 kfree(link);
48269 }
48270diff --git a/fs/open.c b/fs/open.c
48271index 77becc0..aad7bd9 100644
48272--- a/fs/open.c
48273+++ b/fs/open.c
48274@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
48275 error = locks_verify_truncate(inode, NULL, length);
48276 if (!error)
48277 error = security_path_truncate(&path);
48278+
48279+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48280+ error = -EACCES;
48281+
48282 if (!error)
48283 error = do_truncate(path.dentry, length, 0, NULL);
48284
48285@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
48286 if (__mnt_is_readonly(path.mnt))
48287 res = -EROFS;
48288
48289+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48290+ res = -EACCES;
48291+
48292 out_path_release:
48293 path_put(&path);
48294 out:
48295@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
48296 if (error)
48297 goto dput_and_out;
48298
48299+ gr_log_chdir(path.dentry, path.mnt);
48300+
48301 set_fs_pwd(current->fs, &path);
48302
48303 dput_and_out:
48304@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
48305 goto out_putf;
48306
48307 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
48308+
48309+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48310+ error = -EPERM;
48311+
48312+ if (!error)
48313+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48314+
48315 if (!error)
48316 set_fs_pwd(current->fs, &file->f_path);
48317 out_putf:
48318@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
48319 if (error)
48320 goto dput_and_out;
48321
48322+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48323+ goto dput_and_out;
48324+
48325 set_fs_root(current->fs, &path);
48326+
48327+ gr_handle_chroot_chdir(&path);
48328+
48329 error = 0;
48330 dput_and_out:
48331 path_put(&path);
48332@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
48333 if (error)
48334 return error;
48335 mutex_lock(&inode->i_mutex);
48336+
48337+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
48338+ error = -EACCES;
48339+ goto out_unlock;
48340+ }
48341+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
48342+ error = -EACCES;
48343+ goto out_unlock;
48344+ }
48345+
48346 error = security_path_chmod(path, mode);
48347 if (error)
48348 goto out_unlock;
48349@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
48350 int error;
48351 struct iattr newattrs;
48352
48353+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
48354+ return -EACCES;
48355+
48356 newattrs.ia_valid = ATTR_CTIME;
48357 if (user != (uid_t) -1) {
48358 newattrs.ia_valid |= ATTR_UID;
48359diff --git a/fs/pipe.c b/fs/pipe.c
48360index 82e651b..8a68573 100644
48361--- a/fs/pipe.c
48362+++ b/fs/pipe.c
48363@@ -437,9 +437,9 @@ redo:
48364 }
48365 if (bufs) /* More to do? */
48366 continue;
48367- if (!pipe->writers)
48368+ if (!atomic_read(&pipe->writers))
48369 break;
48370- if (!pipe->waiting_writers) {
48371+ if (!atomic_read(&pipe->waiting_writers)) {
48372 /* syscall merging: Usually we must not sleep
48373 * if O_NONBLOCK is set, or if we got some data.
48374 * But if a writer sleeps in kernel space, then
48375@@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48376 mutex_lock(&inode->i_mutex);
48377 pipe = inode->i_pipe;
48378
48379- if (!pipe->readers) {
48380+ if (!atomic_read(&pipe->readers)) {
48381 send_sig(SIGPIPE, current, 0);
48382 ret = -EPIPE;
48383 goto out;
48384@@ -552,7 +552,7 @@ redo1:
48385 for (;;) {
48386 int bufs;
48387
48388- if (!pipe->readers) {
48389+ if (!atomic_read(&pipe->readers)) {
48390 send_sig(SIGPIPE, current, 0);
48391 if (!ret)
48392 ret = -EPIPE;
48393@@ -643,9 +643,9 @@ redo2:
48394 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48395 do_wakeup = 0;
48396 }
48397- pipe->waiting_writers++;
48398+ atomic_inc(&pipe->waiting_writers);
48399 pipe_wait(pipe);
48400- pipe->waiting_writers--;
48401+ atomic_dec(&pipe->waiting_writers);
48402 }
48403 out:
48404 mutex_unlock(&inode->i_mutex);
48405@@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48406 mask = 0;
48407 if (filp->f_mode & FMODE_READ) {
48408 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48409- if (!pipe->writers && filp->f_version != pipe->w_counter)
48410+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48411 mask |= POLLHUP;
48412 }
48413
48414@@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48415 * Most Unices do not set POLLERR for FIFOs but on Linux they
48416 * behave exactly like pipes for poll().
48417 */
48418- if (!pipe->readers)
48419+ if (!atomic_read(&pipe->readers))
48420 mask |= POLLERR;
48421 }
48422
48423@@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48424
48425 mutex_lock(&inode->i_mutex);
48426 pipe = inode->i_pipe;
48427- pipe->readers -= decr;
48428- pipe->writers -= decw;
48429+ atomic_sub(decr, &pipe->readers);
48430+ atomic_sub(decw, &pipe->writers);
48431
48432- if (!pipe->readers && !pipe->writers) {
48433+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48434 free_pipe_info(inode);
48435 } else {
48436 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48437@@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48438
48439 if (inode->i_pipe) {
48440 ret = 0;
48441- inode->i_pipe->readers++;
48442+ atomic_inc(&inode->i_pipe->readers);
48443 }
48444
48445 mutex_unlock(&inode->i_mutex);
48446@@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48447
48448 if (inode->i_pipe) {
48449 ret = 0;
48450- inode->i_pipe->writers++;
48451+ atomic_inc(&inode->i_pipe->writers);
48452 }
48453
48454 mutex_unlock(&inode->i_mutex);
48455@@ -864,9 +864,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48456 if (inode->i_pipe) {
48457 ret = 0;
48458 if (filp->f_mode & FMODE_READ)
48459- inode->i_pipe->readers++;
48460+ atomic_inc(&inode->i_pipe->readers);
48461 if (filp->f_mode & FMODE_WRITE)
48462- inode->i_pipe->writers++;
48463+ atomic_inc(&inode->i_pipe->writers);
48464 }
48465
48466 mutex_unlock(&inode->i_mutex);
48467@@ -958,7 +958,7 @@ void free_pipe_info(struct inode *inode)
48468 inode->i_pipe = NULL;
48469 }
48470
48471-static struct vfsmount *pipe_mnt __read_mostly;
48472+struct vfsmount *pipe_mnt __read_mostly;
48473
48474 /*
48475 * pipefs_dname() is called from d_path().
48476@@ -988,7 +988,8 @@ static struct inode * get_pipe_inode(void)
48477 goto fail_iput;
48478 inode->i_pipe = pipe;
48479
48480- pipe->readers = pipe->writers = 1;
48481+ atomic_set(&pipe->readers, 1);
48482+ atomic_set(&pipe->writers, 1);
48483 inode->i_fop = &rdwr_pipefifo_fops;
48484
48485 /*
48486diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48487index 15af622..0e9f4467 100644
48488--- a/fs/proc/Kconfig
48489+++ b/fs/proc/Kconfig
48490@@ -30,12 +30,12 @@ config PROC_FS
48491
48492 config PROC_KCORE
48493 bool "/proc/kcore support" if !ARM
48494- depends on PROC_FS && MMU
48495+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48496
48497 config PROC_VMCORE
48498 bool "/proc/vmcore support"
48499- depends on PROC_FS && CRASH_DUMP
48500- default y
48501+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48502+ default n
48503 help
48504 Exports the dump image of crashed kernel in ELF format.
48505
48506@@ -59,8 +59,8 @@ config PROC_SYSCTL
48507 limited in memory.
48508
48509 config PROC_PAGE_MONITOR
48510- default y
48511- depends on PROC_FS && MMU
48512+ default n
48513+ depends on PROC_FS && MMU && !GRKERNSEC
48514 bool "Enable /proc page monitoring" if EXPERT
48515 help
48516 Various /proc files exist to monitor process memory utilization:
48517diff --git a/fs/proc/array.c b/fs/proc/array.c
48518index c602b8d..a7de642 100644
48519--- a/fs/proc/array.c
48520+++ b/fs/proc/array.c
48521@@ -60,6 +60,7 @@
48522 #include <linux/tty.h>
48523 #include <linux/string.h>
48524 #include <linux/mman.h>
48525+#include <linux/grsecurity.h>
48526 #include <linux/proc_fs.h>
48527 #include <linux/ioport.h>
48528 #include <linux/uaccess.h>
48529@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48530 seq_putc(m, '\n');
48531 }
48532
48533+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48534+static inline void task_pax(struct seq_file *m, struct task_struct *p)
48535+{
48536+ if (p->mm)
48537+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48538+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48539+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48540+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48541+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48542+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48543+ else
48544+ seq_printf(m, "PaX:\t-----\n");
48545+}
48546+#endif
48547+
48548 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48549 struct pid *pid, struct task_struct *task)
48550 {
48551@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48552 task_cpus_allowed(m, task);
48553 cpuset_task_status_allowed(m, task);
48554 task_context_switch_counts(m, task);
48555+
48556+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48557+ task_pax(m, task);
48558+#endif
48559+
48560+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48561+ task_grsec_rbac(m, task);
48562+#endif
48563+
48564 return 0;
48565 }
48566
48567+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48568+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48569+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48570+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48571+#endif
48572+
48573 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48574 struct pid *pid, struct task_struct *task, int whole)
48575 {
48576@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48577 char tcomm[sizeof(task->comm)];
48578 unsigned long flags;
48579
48580+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48581+ if (current->exec_id != m->exec_id) {
48582+ gr_log_badprocpid("stat");
48583+ return 0;
48584+ }
48585+#endif
48586+
48587 state = *get_task_state(task);
48588 vsize = eip = esp = 0;
48589 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48590@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48591 gtime = task->gtime;
48592 }
48593
48594+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48595+ if (PAX_RAND_FLAGS(mm)) {
48596+ eip = 0;
48597+ esp = 0;
48598+ wchan = 0;
48599+ }
48600+#endif
48601+#ifdef CONFIG_GRKERNSEC_HIDESYM
48602+ wchan = 0;
48603+ eip =0;
48604+ esp =0;
48605+#endif
48606+
48607 /* scale priority and nice values from timeslices to -20..20 */
48608 /* to make it look like a "normal" Unix priority/nice value */
48609 priority = task_prio(task);
48610@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48611 vsize,
48612 mm ? get_mm_rss(mm) : 0,
48613 rsslim,
48614+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48615+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48616+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
48617+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48618+#else
48619 mm ? (permitted ? mm->start_code : 1) : 0,
48620 mm ? (permitted ? mm->end_code : 1) : 0,
48621 (permitted && mm) ? mm->start_stack : 0,
48622+#endif
48623 esp,
48624 eip,
48625 /* The signal information here is obsolete.
48626@@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48627 struct pid *pid, struct task_struct *task)
48628 {
48629 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48630- struct mm_struct *mm = get_task_mm(task);
48631+ struct mm_struct *mm;
48632
48633+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48634+ if (current->exec_id != m->exec_id) {
48635+ gr_log_badprocpid("statm");
48636+ return 0;
48637+ }
48638+#endif
48639+ mm = get_task_mm(task);
48640 if (mm) {
48641 size = task_statm(mm, &shared, &text, &data, &resident);
48642 mmput(mm);
48643@@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48644
48645 return 0;
48646 }
48647+
48648+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48649+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48650+{
48651+ u32 curr_ip = 0;
48652+ unsigned long flags;
48653+
48654+ if (lock_task_sighand(task, &flags)) {
48655+ curr_ip = task->signal->curr_ip;
48656+ unlock_task_sighand(task, &flags);
48657+ }
48658+
48659+ return sprintf(buffer, "%pI4\n", &curr_ip);
48660+}
48661+#endif
48662diff --git a/fs/proc/base.c b/fs/proc/base.c
48663index d4548dd..d101f84 100644
48664--- a/fs/proc/base.c
48665+++ b/fs/proc/base.c
48666@@ -109,6 +109,14 @@ struct pid_entry {
48667 union proc_op op;
48668 };
48669
48670+struct getdents_callback {
48671+ struct linux_dirent __user * current_dir;
48672+ struct linux_dirent __user * previous;
48673+ struct file * file;
48674+ int count;
48675+ int error;
48676+};
48677+
48678 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48679 .name = (NAME), \
48680 .len = sizeof(NAME) - 1, \
48681@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48682 if (!mm->arg_end)
48683 goto out_mm; /* Shh! No looking before we're done */
48684
48685+ if (gr_acl_handle_procpidmem(task))
48686+ goto out_mm;
48687+
48688 len = mm->arg_end - mm->arg_start;
48689
48690 if (len > PAGE_SIZE)
48691@@ -240,12 +251,28 @@ out:
48692 return res;
48693 }
48694
48695+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48696+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48697+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48698+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48699+#endif
48700+
48701 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48702 {
48703 struct mm_struct *mm = mm_for_maps(task);
48704 int res = PTR_ERR(mm);
48705 if (mm && !IS_ERR(mm)) {
48706 unsigned int nwords = 0;
48707+
48708+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48709+ /* allow if we're currently ptracing this task */
48710+ if (PAX_RAND_FLAGS(mm) &&
48711+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48712+ mmput(mm);
48713+ return 0;
48714+ }
48715+#endif
48716+
48717 do {
48718 nwords += 2;
48719 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48720@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48721 }
48722
48723
48724-#ifdef CONFIG_KALLSYMS
48725+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48726 /*
48727 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48728 * Returns the resolved symbol. If that fails, simply return the address.
48729@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
48730 mutex_unlock(&task->signal->cred_guard_mutex);
48731 }
48732
48733-#ifdef CONFIG_STACKTRACE
48734+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48735
48736 #define MAX_STACK_TRACE_DEPTH 64
48737
48738@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48739 return count;
48740 }
48741
48742-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48743+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48744 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48745 {
48746 long nr;
48747@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48748 /************************************************************************/
48749
48750 /* permission checks */
48751-static int proc_fd_access_allowed(struct inode *inode)
48752+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48753 {
48754 struct task_struct *task;
48755 int allowed = 0;
48756@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48757 */
48758 task = get_proc_task(inode);
48759 if (task) {
48760- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48761+ if (log)
48762+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48763+ else
48764+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48765 put_task_struct(task);
48766 }
48767 return allowed;
48768@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
48769 struct task_struct *task,
48770 int hide_pid_min)
48771 {
48772+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48773+ return false;
48774+
48775+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48776+ rcu_read_lock();
48777+ {
48778+ const struct cred *tmpcred = current_cred();
48779+ const struct cred *cred = __task_cred(task);
48780+
48781+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48782+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48783+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48784+#endif
48785+ ) {
48786+ rcu_read_unlock();
48787+ return true;
48788+ }
48789+ }
48790+ rcu_read_unlock();
48791+
48792+ if (!pid->hide_pid)
48793+ return false;
48794+#endif
48795+
48796 if (pid->hide_pid < hide_pid_min)
48797 return true;
48798 if (in_group_p(pid->pid_gid))
48799 return true;
48800+
48801 return ptrace_may_access(task, PTRACE_MODE_READ);
48802 }
48803
48804@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48805 put_task_struct(task);
48806
48807 if (!has_perms) {
48808+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48809+ {
48810+#else
48811 if (pid->hide_pid == 2) {
48812+#endif
48813 /*
48814 * Let's make getdents(), stat(), and open()
48815 * consistent with each other. If a process
48816@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
48817 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48818 file->private_data = mm;
48819
48820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48821+ file->f_version = current->exec_id;
48822+#endif
48823+
48824 return 0;
48825 }
48826
48827@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48828 ssize_t copied;
48829 char *page;
48830
48831+#ifdef CONFIG_GRKERNSEC
48832+ if (write)
48833+ return -EPERM;
48834+#endif
48835+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48836+ if (file->f_version != current->exec_id) {
48837+ gr_log_badprocpid("mem");
48838+ return 0;
48839+ }
48840+#endif
48841+
48842 if (!mm)
48843 return 0;
48844
48845@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48846 if (!task)
48847 goto out_no_task;
48848
48849+ if (gr_acl_handle_procpidmem(task))
48850+ goto out;
48851+
48852 ret = -ENOMEM;
48853 page = (char *)__get_free_page(GFP_TEMPORARY);
48854 if (!page)
48855@@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48856 path_put(&nd->path);
48857
48858 /* Are we allowed to snoop on the tasks file descriptors? */
48859- if (!proc_fd_access_allowed(inode))
48860+ if (!proc_fd_access_allowed(inode, 0))
48861 goto out;
48862
48863 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48864@@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48865 struct path path;
48866
48867 /* Are we allowed to snoop on the tasks file descriptors? */
48868- if (!proc_fd_access_allowed(inode))
48869- goto out;
48870+ /* logging this is needed for learning on chromium to work properly,
48871+ but we don't want to flood the logs from 'ps' which does a readlink
48872+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48873+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48874+ */
48875+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48876+ if (!proc_fd_access_allowed(inode,0))
48877+ goto out;
48878+ } else {
48879+ if (!proc_fd_access_allowed(inode,1))
48880+ goto out;
48881+ }
48882
48883 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48884 if (error)
48885@@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48886 rcu_read_lock();
48887 cred = __task_cred(task);
48888 inode->i_uid = cred->euid;
48889+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48890+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48891+#else
48892 inode->i_gid = cred->egid;
48893+#endif
48894 rcu_read_unlock();
48895 }
48896 security_task_to_inode(task, inode);
48897@@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48898 return -ENOENT;
48899 }
48900 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48901+#ifdef CONFIG_GRKERNSEC_PROC_USER
48902+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48903+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48904+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48905+#endif
48906 task_dumpable(task)) {
48907 cred = __task_cred(task);
48908 stat->uid = cred->euid;
48909+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48910+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48911+#else
48912 stat->gid = cred->egid;
48913+#endif
48914 }
48915 }
48916 rcu_read_unlock();
48917@@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48918
48919 if (task) {
48920 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48921+#ifdef CONFIG_GRKERNSEC_PROC_USER
48922+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48923+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48924+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48925+#endif
48926 task_dumpable(task)) {
48927 rcu_read_lock();
48928 cred = __task_cred(task);
48929 inode->i_uid = cred->euid;
48930+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48931+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48932+#else
48933 inode->i_gid = cred->egid;
48934+#endif
48935 rcu_read_unlock();
48936 } else {
48937 inode->i_uid = 0;
48938@@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48939 int fd = proc_fd(inode);
48940
48941 if (task) {
48942- files = get_files_struct(task);
48943+ if (!gr_acl_handle_procpidmem(task))
48944+ files = get_files_struct(task);
48945 put_task_struct(task);
48946 }
48947 if (files) {
48948@@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
48949 */
48950 static int proc_fd_permission(struct inode *inode, int mask)
48951 {
48952+ struct task_struct *task;
48953 int rv = generic_permission(inode, mask);
48954- if (rv == 0)
48955- return 0;
48956+
48957 if (task_pid(current) == proc_pid(inode))
48958 rv = 0;
48959+
48960+ task = get_proc_task(inode);
48961+ if (task == NULL)
48962+ return rv;
48963+
48964+ if (gr_acl_handle_procpidmem(task))
48965+ rv = -EACCES;
48966+
48967+ put_task_struct(task);
48968+
48969 return rv;
48970 }
48971
48972@@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48973 if (!task)
48974 goto out_no_task;
48975
48976+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48977+ goto out;
48978+
48979 /*
48980 * Yes, it does not scale. And it should not. Don't add
48981 * new entries into /proc/<tgid>/ without very good reasons.
48982@@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
48983 if (!task)
48984 goto out_no_task;
48985
48986+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48987+ goto out;
48988+
48989 ret = 0;
48990 i = filp->f_pos;
48991 switch (i) {
48992@@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48993 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48994 void *cookie)
48995 {
48996- char *s = nd_get_link(nd);
48997+ const char *s = nd_get_link(nd);
48998 if (!IS_ERR(s))
48999 __putname(s);
49000 }
49001@@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
49002 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
49003 #endif
49004 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49005-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49006+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49007 INF("syscall", S_IRUGO, proc_pid_syscall),
49008 #endif
49009 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49010@@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
49011 #ifdef CONFIG_SECURITY
49012 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49013 #endif
49014-#ifdef CONFIG_KALLSYMS
49015+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49016 INF("wchan", S_IRUGO, proc_pid_wchan),
49017 #endif
49018-#ifdef CONFIG_STACKTRACE
49019+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49020 ONE("stack", S_IRUGO, proc_pid_stack),
49021 #endif
49022 #ifdef CONFIG_SCHEDSTATS
49023@@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
49024 #ifdef CONFIG_HARDWALL
49025 INF("hardwall", S_IRUGO, proc_pid_hardwall),
49026 #endif
49027+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49028+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
49029+#endif
49030 };
49031
49032 static int proc_tgid_base_readdir(struct file * filp,
49033@@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
49034 if (!inode)
49035 goto out;
49036
49037+#ifdef CONFIG_GRKERNSEC_PROC_USER
49038+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
49039+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49040+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49041+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
49042+#else
49043 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
49044+#endif
49045 inode->i_op = &proc_tgid_base_inode_operations;
49046 inode->i_fop = &proc_tgid_base_operations;
49047 inode->i_flags|=S_IMMUTABLE;
49048@@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
49049 if (!task)
49050 goto out;
49051
49052+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49053+ goto out_put_task;
49054+
49055 result = proc_pid_instantiate(dir, dentry, task, NULL);
49056+out_put_task:
49057 put_task_struct(task);
49058 out:
49059 return result;
49060@@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
49061 static int fake_filldir(void *buf, const char *name, int namelen,
49062 loff_t offset, u64 ino, unsigned d_type)
49063 {
49064+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
49065+ __buf->error = -EINVAL;
49066 return 0;
49067 }
49068
49069@@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
49070 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49071 #endif
49072 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49073-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49074+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49075 INF("syscall", S_IRUGO, proc_pid_syscall),
49076 #endif
49077 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49078@@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
49079 #ifdef CONFIG_SECURITY
49080 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49081 #endif
49082-#ifdef CONFIG_KALLSYMS
49083+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49084 INF("wchan", S_IRUGO, proc_pid_wchan),
49085 #endif
49086-#ifdef CONFIG_STACKTRACE
49087+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49088 ONE("stack", S_IRUGO, proc_pid_stack),
49089 #endif
49090 #ifdef CONFIG_SCHEDSTATS
49091diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49092index 82676e3..5f8518a 100644
49093--- a/fs/proc/cmdline.c
49094+++ b/fs/proc/cmdline.c
49095@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
49096
49097 static int __init proc_cmdline_init(void)
49098 {
49099+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49100+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49101+#else
49102 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49103+#endif
49104 return 0;
49105 }
49106 module_init(proc_cmdline_init);
49107diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49108index b143471..bb105e5 100644
49109--- a/fs/proc/devices.c
49110+++ b/fs/proc/devices.c
49111@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
49112
49113 static int __init proc_devices_init(void)
49114 {
49115+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49116+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49117+#else
49118 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49119+#endif
49120 return 0;
49121 }
49122 module_init(proc_devices_init);
49123diff --git a/fs/proc/inode.c b/fs/proc/inode.c
49124index 84fd323..f698a32 100644
49125--- a/fs/proc/inode.c
49126+++ b/fs/proc/inode.c
49127@@ -21,12 +21,18 @@
49128 #include <linux/seq_file.h>
49129 #include <linux/slab.h>
49130 #include <linux/mount.h>
49131+#include <linux/grsecurity.h>
49132
49133 #include <asm/system.h>
49134 #include <asm/uaccess.h>
49135
49136 #include "internal.h"
49137
49138+#ifdef CONFIG_PROC_SYSCTL
49139+extern const struct inode_operations proc_sys_inode_operations;
49140+extern const struct inode_operations proc_sys_dir_operations;
49141+#endif
49142+
49143 static void proc_evict_inode(struct inode *inode)
49144 {
49145 struct proc_dir_entry *de;
49146@@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
49147 ns_ops = PROC_I(inode)->ns_ops;
49148 if (ns_ops && ns_ops->put)
49149 ns_ops->put(PROC_I(inode)->ns);
49150+
49151+#ifdef CONFIG_PROC_SYSCTL
49152+ if (inode->i_op == &proc_sys_inode_operations ||
49153+ inode->i_op == &proc_sys_dir_operations)
49154+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49155+#endif
49156+
49157 }
49158
49159 static struct kmem_cache * proc_inode_cachep;
49160@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
49161 if (de->mode) {
49162 inode->i_mode = de->mode;
49163 inode->i_uid = de->uid;
49164+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49165+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49166+#else
49167 inode->i_gid = de->gid;
49168+#endif
49169 }
49170 if (de->size)
49171 inode->i_size = de->size;
49172diff --git a/fs/proc/internal.h b/fs/proc/internal.h
49173index 2925775..4f08fae 100644
49174--- a/fs/proc/internal.h
49175+++ b/fs/proc/internal.h
49176@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
49177 struct pid *pid, struct task_struct *task);
49178 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49179 struct pid *pid, struct task_struct *task);
49180+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49181+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49182+#endif
49183 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49184
49185 extern const struct file_operations proc_maps_operations;
49186diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
49187index d245cb2..f4e8498 100644
49188--- a/fs/proc/kcore.c
49189+++ b/fs/proc/kcore.c
49190@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49191 * the addresses in the elf_phdr on our list.
49192 */
49193 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49194- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49195+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49196+ if (tsz > buflen)
49197 tsz = buflen;
49198-
49199+
49200 while (buflen) {
49201 struct kcore_list *m;
49202
49203@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49204 kfree(elf_buf);
49205 } else {
49206 if (kern_addr_valid(start)) {
49207- unsigned long n;
49208+ char *elf_buf;
49209+ mm_segment_t oldfs;
49210
49211- n = copy_to_user(buffer, (char *)start, tsz);
49212- /*
49213- * We cannot distingush between fault on source
49214- * and fault on destination. When this happens
49215- * we clear too and hope it will trigger the
49216- * EFAULT again.
49217- */
49218- if (n) {
49219- if (clear_user(buffer + tsz - n,
49220- n))
49221+ elf_buf = kmalloc(tsz, GFP_KERNEL);
49222+ if (!elf_buf)
49223+ return -ENOMEM;
49224+ oldfs = get_fs();
49225+ set_fs(KERNEL_DS);
49226+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49227+ set_fs(oldfs);
49228+ if (copy_to_user(buffer, elf_buf, tsz)) {
49229+ kfree(elf_buf);
49230 return -EFAULT;
49231+ }
49232 }
49233+ set_fs(oldfs);
49234+ kfree(elf_buf);
49235 } else {
49236 if (clear_user(buffer, tsz))
49237 return -EFAULT;
49238@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49239
49240 static int open_kcore(struct inode *inode, struct file *filp)
49241 {
49242+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49243+ return -EPERM;
49244+#endif
49245 if (!capable(CAP_SYS_RAWIO))
49246 return -EPERM;
49247 if (kcore_need_update)
49248diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
49249index 80e4645..53e5fcf 100644
49250--- a/fs/proc/meminfo.c
49251+++ b/fs/proc/meminfo.c
49252@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49253 vmi.used >> 10,
49254 vmi.largest_chunk >> 10
49255 #ifdef CONFIG_MEMORY_FAILURE
49256- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49257+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49258 #endif
49259 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49260 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
49261diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49262index b1822dd..df622cb 100644
49263--- a/fs/proc/nommu.c
49264+++ b/fs/proc/nommu.c
49265@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
49266 if (len < 1)
49267 len = 1;
49268 seq_printf(m, "%*c", len, ' ');
49269- seq_path(m, &file->f_path, "");
49270+ seq_path(m, &file->f_path, "\n\\");
49271 }
49272
49273 seq_putc(m, '\n');
49274diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
49275index 06e1cc1..177cd98 100644
49276--- a/fs/proc/proc_net.c
49277+++ b/fs/proc/proc_net.c
49278@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
49279 struct task_struct *task;
49280 struct nsproxy *ns;
49281 struct net *net = NULL;
49282+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49283+ const struct cred *cred = current_cred();
49284+#endif
49285+
49286+#ifdef CONFIG_GRKERNSEC_PROC_USER
49287+ if (cred->fsuid)
49288+ return net;
49289+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49290+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49291+ return net;
49292+#endif
49293
49294 rcu_read_lock();
49295 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49296diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
49297index 53c3bce..10ad159 100644
49298--- a/fs/proc/proc_sysctl.c
49299+++ b/fs/proc/proc_sysctl.c
49300@@ -9,11 +9,13 @@
49301 #include <linux/namei.h>
49302 #include "internal.h"
49303
49304+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
49305+
49306 static const struct dentry_operations proc_sys_dentry_operations;
49307 static const struct file_operations proc_sys_file_operations;
49308-static const struct inode_operations proc_sys_inode_operations;
49309+const struct inode_operations proc_sys_inode_operations;
49310 static const struct file_operations proc_sys_dir_file_operations;
49311-static const struct inode_operations proc_sys_dir_operations;
49312+const struct inode_operations proc_sys_dir_operations;
49313
49314 void proc_sys_poll_notify(struct ctl_table_poll *poll)
49315 {
49316@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
49317
49318 err = NULL;
49319 d_set_d_op(dentry, &proc_sys_dentry_operations);
49320+
49321+ gr_handle_proc_create(dentry, inode);
49322+
49323 d_add(dentry, inode);
49324
49325+ if (gr_handle_sysctl(p, MAY_EXEC))
49326+ err = ERR_PTR(-ENOENT);
49327+
49328 out:
49329 sysctl_head_finish(head);
49330 return err;
49331@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49332 if (!table->proc_handler)
49333 goto out;
49334
49335+#ifdef CONFIG_GRKERNSEC
49336+ error = -EPERM;
49337+ if (write && !capable(CAP_SYS_ADMIN))
49338+ goto out;
49339+#endif
49340+
49341 /* careful: calling conventions are nasty here */
49342 res = count;
49343 error = table->proc_handler(table, write, buf, &res, ppos);
49344@@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
49345 return -ENOMEM;
49346 } else {
49347 d_set_d_op(child, &proc_sys_dentry_operations);
49348+
49349+ gr_handle_proc_create(child, inode);
49350+
49351 d_add(child, inode);
49352 }
49353 } else {
49354@@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
49355 if (*pos < file->f_pos)
49356 continue;
49357
49358+ if (gr_handle_sysctl(table, 0))
49359+ continue;
49360+
49361 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
49362 if (res)
49363 return res;
49364@@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49365 if (IS_ERR(head))
49366 return PTR_ERR(head);
49367
49368+ if (table && gr_handle_sysctl(table, MAY_EXEC))
49369+ return -ENOENT;
49370+
49371 generic_fillattr(inode, stat);
49372 if (table)
49373 stat->mode = (stat->mode & S_IFMT) | table->mode;
49374@@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
49375 .llseek = generic_file_llseek,
49376 };
49377
49378-static const struct inode_operations proc_sys_inode_operations = {
49379+const struct inode_operations proc_sys_inode_operations = {
49380 .permission = proc_sys_permission,
49381 .setattr = proc_sys_setattr,
49382 .getattr = proc_sys_getattr,
49383 };
49384
49385-static const struct inode_operations proc_sys_dir_operations = {
49386+const struct inode_operations proc_sys_dir_operations = {
49387 .lookup = proc_sys_lookup,
49388 .permission = proc_sys_permission,
49389 .setattr = proc_sys_setattr,
49390diff --git a/fs/proc/root.c b/fs/proc/root.c
49391index 46a15d8..335631a 100644
49392--- a/fs/proc/root.c
49393+++ b/fs/proc/root.c
49394@@ -187,7 +187,15 @@ void __init proc_root_init(void)
49395 #ifdef CONFIG_PROC_DEVICETREE
49396 proc_device_tree_init();
49397 #endif
49398+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49399+#ifdef CONFIG_GRKERNSEC_PROC_USER
49400+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49401+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49402+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49403+#endif
49404+#else
49405 proc_mkdir("bus", NULL);
49406+#endif
49407 proc_sys_init();
49408 }
49409
49410diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49411index 3efa725..23c925b 100644
49412--- a/fs/proc/task_mmu.c
49413+++ b/fs/proc/task_mmu.c
49414@@ -11,6 +11,7 @@
49415 #include <linux/rmap.h>
49416 #include <linux/swap.h>
49417 #include <linux/swapops.h>
49418+#include <linux/grsecurity.h>
49419
49420 #include <asm/elf.h>
49421 #include <asm/uaccess.h>
49422@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49423 "VmExe:\t%8lu kB\n"
49424 "VmLib:\t%8lu kB\n"
49425 "VmPTE:\t%8lu kB\n"
49426- "VmSwap:\t%8lu kB\n",
49427- hiwater_vm << (PAGE_SHIFT-10),
49428+ "VmSwap:\t%8lu kB\n"
49429+
49430+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49431+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49432+#endif
49433+
49434+ ,hiwater_vm << (PAGE_SHIFT-10),
49435 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49436 mm->locked_vm << (PAGE_SHIFT-10),
49437 mm->pinned_vm << (PAGE_SHIFT-10),
49438@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49439 data << (PAGE_SHIFT-10),
49440 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49441 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49442- swap << (PAGE_SHIFT-10));
49443+ swap << (PAGE_SHIFT-10)
49444+
49445+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49446+ , mm->context.user_cs_base, mm->context.user_cs_limit
49447+#endif
49448+
49449+ );
49450 }
49451
49452 unsigned long task_vsize(struct mm_struct *mm)
49453@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
49454 return ret;
49455 }
49456
49457+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49458+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49459+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
49460+ _mm->pax_flags & MF_PAX_SEGMEXEC))
49461+#endif
49462+
49463 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49464 {
49465 struct mm_struct *mm = vma->vm_mm;
49466@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49467 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49468 }
49469
49470- /* We don't show the stack guard page in /proc/maps */
49471+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49472+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49473+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49474+#else
49475 start = vma->vm_start;
49476- if (stack_guard_page_start(vma, start))
49477- start += PAGE_SIZE;
49478 end = vma->vm_end;
49479- if (stack_guard_page_end(vma, end))
49480- end -= PAGE_SIZE;
49481+#endif
49482
49483 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49484 start,
49485@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49486 flags & VM_WRITE ? 'w' : '-',
49487 flags & VM_EXEC ? 'x' : '-',
49488 flags & VM_MAYSHARE ? 's' : 'p',
49489+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49490+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49491+#else
49492 pgoff,
49493+#endif
49494 MAJOR(dev), MINOR(dev), ino, &len);
49495
49496 /*
49497@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49498 */
49499 if (file) {
49500 pad_len_spaces(m, len);
49501- seq_path(m, &file->f_path, "\n");
49502+ seq_path(m, &file->f_path, "\n\\");
49503 } else {
49504 const char *name = arch_vma_name(vma);
49505 if (!name) {
49506@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49507 if (vma->vm_start <= mm->brk &&
49508 vma->vm_end >= mm->start_brk) {
49509 name = "[heap]";
49510- } else if (vma->vm_start <= mm->start_stack &&
49511- vma->vm_end >= mm->start_stack) {
49512+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49513+ (vma->vm_start <= mm->start_stack &&
49514+ vma->vm_end >= mm->start_stack)) {
49515 name = "[stack]";
49516 }
49517 } else {
49518@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
49519 struct proc_maps_private *priv = m->private;
49520 struct task_struct *task = priv->task;
49521
49522+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49523+ if (current->exec_id != m->exec_id) {
49524+ gr_log_badprocpid("maps");
49525+ return 0;
49526+ }
49527+#endif
49528+
49529 show_map_vma(m, vma);
49530
49531 if (m->count < m->size) /* vma is copied successfully */
49532@@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
49533 .private = &mss,
49534 };
49535
49536+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49537+ if (current->exec_id != m->exec_id) {
49538+ gr_log_badprocpid("smaps");
49539+ return 0;
49540+ }
49541+#endif
49542 memset(&mss, 0, sizeof mss);
49543- mss.vma = vma;
49544- /* mmap_sem is held in m_start */
49545- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49546- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49547-
49548+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49549+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49550+#endif
49551+ mss.vma = vma;
49552+ /* mmap_sem is held in m_start */
49553+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49554+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49555+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49556+ }
49557+#endif
49558 show_map_vma(m, vma);
49559
49560 seq_printf(m,
49561@@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
49562 "KernelPageSize: %8lu kB\n"
49563 "MMUPageSize: %8lu kB\n"
49564 "Locked: %8lu kB\n",
49565+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49566+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49567+#else
49568 (vma->vm_end - vma->vm_start) >> 10,
49569+#endif
49570 mss.resident >> 10,
49571 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49572 mss.shared_clean >> 10,
49573@@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
49574 int n;
49575 char buffer[50];
49576
49577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49578+ if (current->exec_id != m->exec_id) {
49579+ gr_log_badprocpid("numa_maps");
49580+ return 0;
49581+ }
49582+#endif
49583+
49584 if (!mm)
49585 return 0;
49586
49587@@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
49588 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49589 mpol_cond_put(pol);
49590
49591+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49592+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49593+#else
49594 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49595+#endif
49596
49597 if (file) {
49598 seq_printf(m, " file=");
49599- seq_path(m, &file->f_path, "\n\t= ");
49600+ seq_path(m, &file->f_path, "\n\t\\= ");
49601 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49602 seq_printf(m, " heap");
49603 } else if (vma->vm_start <= mm->start_stack &&
49604diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49605index 980de54..2a4db5f 100644
49606--- a/fs/proc/task_nommu.c
49607+++ b/fs/proc/task_nommu.c
49608@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49609 else
49610 bytes += kobjsize(mm);
49611
49612- if (current->fs && current->fs->users > 1)
49613+ if (current->fs && atomic_read(&current->fs->users) > 1)
49614 sbytes += kobjsize(current->fs);
49615 else
49616 bytes += kobjsize(current->fs);
49617@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
49618
49619 if (file) {
49620 pad_len_spaces(m, len);
49621- seq_path(m, &file->f_path, "");
49622+ seq_path(m, &file->f_path, "\n\\");
49623 } else if (mm) {
49624 if (vma->vm_start <= mm->start_stack &&
49625 vma->vm_end >= mm->start_stack) {
49626diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49627index d67908b..d13f6a6 100644
49628--- a/fs/quota/netlink.c
49629+++ b/fs/quota/netlink.c
49630@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49631 void quota_send_warning(short type, unsigned int id, dev_t dev,
49632 const char warntype)
49633 {
49634- static atomic_t seq;
49635+ static atomic_unchecked_t seq;
49636 struct sk_buff *skb;
49637 void *msg_head;
49638 int ret;
49639@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49640 "VFS: Not enough memory to send quota warning.\n");
49641 return;
49642 }
49643- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49644+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49645 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49646 if (!msg_head) {
49647 printk(KERN_ERR
49648diff --git a/fs/readdir.c b/fs/readdir.c
49649index 356f715..c918d38 100644
49650--- a/fs/readdir.c
49651+++ b/fs/readdir.c
49652@@ -17,6 +17,7 @@
49653 #include <linux/security.h>
49654 #include <linux/syscalls.h>
49655 #include <linux/unistd.h>
49656+#include <linux/namei.h>
49657
49658 #include <asm/uaccess.h>
49659
49660@@ -67,6 +68,7 @@ struct old_linux_dirent {
49661
49662 struct readdir_callback {
49663 struct old_linux_dirent __user * dirent;
49664+ struct file * file;
49665 int result;
49666 };
49667
49668@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49669 buf->result = -EOVERFLOW;
49670 return -EOVERFLOW;
49671 }
49672+
49673+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49674+ return 0;
49675+
49676 buf->result++;
49677 dirent = buf->dirent;
49678 if (!access_ok(VERIFY_WRITE, dirent,
49679@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49680
49681 buf.result = 0;
49682 buf.dirent = dirent;
49683+ buf.file = file;
49684
49685 error = vfs_readdir(file, fillonedir, &buf);
49686 if (buf.result)
49687@@ -142,6 +149,7 @@ struct linux_dirent {
49688 struct getdents_callback {
49689 struct linux_dirent __user * current_dir;
49690 struct linux_dirent __user * previous;
49691+ struct file * file;
49692 int count;
49693 int error;
49694 };
49695@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49696 buf->error = -EOVERFLOW;
49697 return -EOVERFLOW;
49698 }
49699+
49700+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49701+ return 0;
49702+
49703 dirent = buf->previous;
49704 if (dirent) {
49705 if (__put_user(offset, &dirent->d_off))
49706@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49707 buf.previous = NULL;
49708 buf.count = count;
49709 buf.error = 0;
49710+ buf.file = file;
49711
49712 error = vfs_readdir(file, filldir, &buf);
49713 if (error >= 0)
49714@@ -229,6 +242,7 @@ out:
49715 struct getdents_callback64 {
49716 struct linux_dirent64 __user * current_dir;
49717 struct linux_dirent64 __user * previous;
49718+ struct file *file;
49719 int count;
49720 int error;
49721 };
49722@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49723 buf->error = -EINVAL; /* only used if we fail.. */
49724 if (reclen > buf->count)
49725 return -EINVAL;
49726+
49727+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49728+ return 0;
49729+
49730 dirent = buf->previous;
49731 if (dirent) {
49732 if (__put_user(offset, &dirent->d_off))
49733@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49734
49735 buf.current_dir = dirent;
49736 buf.previous = NULL;
49737+ buf.file = file;
49738 buf.count = count;
49739 buf.error = 0;
49740
49741@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49742 error = buf.error;
49743 lastdirent = buf.previous;
49744 if (lastdirent) {
49745- typeof(lastdirent->d_off) d_off = file->f_pos;
49746+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49747 if (__put_user(d_off, &lastdirent->d_off))
49748 error = -EFAULT;
49749 else
49750diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49751index 60c0804..d814f98 100644
49752--- a/fs/reiserfs/do_balan.c
49753+++ b/fs/reiserfs/do_balan.c
49754@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49755 return;
49756 }
49757
49758- atomic_inc(&(fs_generation(tb->tb_sb)));
49759+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49760 do_balance_starts(tb);
49761
49762 /* balance leaf returns 0 except if combining L R and S into
49763diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49764index 7a99811..a7c96c4 100644
49765--- a/fs/reiserfs/procfs.c
49766+++ b/fs/reiserfs/procfs.c
49767@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49768 "SMALL_TAILS " : "NO_TAILS ",
49769 replay_only(sb) ? "REPLAY_ONLY " : "",
49770 convert_reiserfs(sb) ? "CONV " : "",
49771- atomic_read(&r->s_generation_counter),
49772+ atomic_read_unchecked(&r->s_generation_counter),
49773 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49774 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49775 SF(s_good_search_by_key_reada), SF(s_bmaps),
49776diff --git a/fs/select.c b/fs/select.c
49777index e782258..3b4b44c 100644
49778--- a/fs/select.c
49779+++ b/fs/select.c
49780@@ -20,6 +20,7 @@
49781 #include <linux/module.h>
49782 #include <linux/slab.h>
49783 #include <linux/poll.h>
49784+#include <linux/security.h>
49785 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49786 #include <linux/file.h>
49787 #include <linux/fdtable.h>
49788@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49789 struct poll_list *walk = head;
49790 unsigned long todo = nfds;
49791
49792+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49793 if (nfds > rlimit(RLIMIT_NOFILE))
49794 return -EINVAL;
49795
49796diff --git a/fs/seq_file.c b/fs/seq_file.c
49797index 4023d6b..ab46c6a 100644
49798--- a/fs/seq_file.c
49799+++ b/fs/seq_file.c
49800@@ -9,6 +9,7 @@
49801 #include <linux/module.h>
49802 #include <linux/seq_file.h>
49803 #include <linux/slab.h>
49804+#include <linux/sched.h>
49805
49806 #include <asm/uaccess.h>
49807 #include <asm/page.h>
49808@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49809 memset(p, 0, sizeof(*p));
49810 mutex_init(&p->lock);
49811 p->op = op;
49812+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49813+ p->exec_id = current->exec_id;
49814+#endif
49815
49816 /*
49817 * Wrappers around seq_open(e.g. swaps_open) need to be
49818@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49819 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49820 void *data)
49821 {
49822- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49823+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49824 int res = -ENOMEM;
49825
49826 if (op) {
49827diff --git a/fs/splice.c b/fs/splice.c
49828index 96d7b28..fd465ac 100644
49829--- a/fs/splice.c
49830+++ b/fs/splice.c
49831@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49832 pipe_lock(pipe);
49833
49834 for (;;) {
49835- if (!pipe->readers) {
49836+ if (!atomic_read(&pipe->readers)) {
49837 send_sig(SIGPIPE, current, 0);
49838 if (!ret)
49839 ret = -EPIPE;
49840@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49841 do_wakeup = 0;
49842 }
49843
49844- pipe->waiting_writers++;
49845+ atomic_inc(&pipe->waiting_writers);
49846 pipe_wait(pipe);
49847- pipe->waiting_writers--;
49848+ atomic_dec(&pipe->waiting_writers);
49849 }
49850
49851 pipe_unlock(pipe);
49852@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49853 old_fs = get_fs();
49854 set_fs(get_ds());
49855 /* The cast to a user pointer is valid due to the set_fs() */
49856- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49857+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49858 set_fs(old_fs);
49859
49860 return res;
49861@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49862 old_fs = get_fs();
49863 set_fs(get_ds());
49864 /* The cast to a user pointer is valid due to the set_fs() */
49865- res = vfs_write(file, (const char __user *)buf, count, &pos);
49866+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49867 set_fs(old_fs);
49868
49869 return res;
49870@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49871 goto err;
49872
49873 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49874- vec[i].iov_base = (void __user *) page_address(page);
49875+ vec[i].iov_base = (void __force_user *) page_address(page);
49876 vec[i].iov_len = this_len;
49877 spd.pages[i] = page;
49878 spd.nr_pages++;
49879@@ -848,10 +848,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49880 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49881 {
49882 while (!pipe->nrbufs) {
49883- if (!pipe->writers)
49884+ if (!atomic_read(&pipe->writers))
49885 return 0;
49886
49887- if (!pipe->waiting_writers && sd->num_spliced)
49888+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49889 return 0;
49890
49891 if (sd->flags & SPLICE_F_NONBLOCK)
49892@@ -1184,7 +1184,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49893 * out of the pipe right after the splice_to_pipe(). So set
49894 * PIPE_READERS appropriately.
49895 */
49896- pipe->readers = 1;
49897+ atomic_set(&pipe->readers, 1);
49898
49899 current->splice_pipe = pipe;
49900 }
49901@@ -1736,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49902 ret = -ERESTARTSYS;
49903 break;
49904 }
49905- if (!pipe->writers)
49906+ if (!atomic_read(&pipe->writers))
49907 break;
49908- if (!pipe->waiting_writers) {
49909+ if (!atomic_read(&pipe->waiting_writers)) {
49910 if (flags & SPLICE_F_NONBLOCK) {
49911 ret = -EAGAIN;
49912 break;
49913@@ -1770,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49914 pipe_lock(pipe);
49915
49916 while (pipe->nrbufs >= pipe->buffers) {
49917- if (!pipe->readers) {
49918+ if (!atomic_read(&pipe->readers)) {
49919 send_sig(SIGPIPE, current, 0);
49920 ret = -EPIPE;
49921 break;
49922@@ -1783,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49923 ret = -ERESTARTSYS;
49924 break;
49925 }
49926- pipe->waiting_writers++;
49927+ atomic_inc(&pipe->waiting_writers);
49928 pipe_wait(pipe);
49929- pipe->waiting_writers--;
49930+ atomic_dec(&pipe->waiting_writers);
49931 }
49932
49933 pipe_unlock(pipe);
49934@@ -1821,14 +1821,14 @@ retry:
49935 pipe_double_lock(ipipe, opipe);
49936
49937 do {
49938- if (!opipe->readers) {
49939+ if (!atomic_read(&opipe->readers)) {
49940 send_sig(SIGPIPE, current, 0);
49941 if (!ret)
49942 ret = -EPIPE;
49943 break;
49944 }
49945
49946- if (!ipipe->nrbufs && !ipipe->writers)
49947+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49948 break;
49949
49950 /*
49951@@ -1925,7 +1925,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49952 pipe_double_lock(ipipe, opipe);
49953
49954 do {
49955- if (!opipe->readers) {
49956+ if (!atomic_read(&opipe->readers)) {
49957 send_sig(SIGPIPE, current, 0);
49958 if (!ret)
49959 ret = -EPIPE;
49960@@ -1970,7 +1970,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49961 * return EAGAIN if we have the potential of some data in the
49962 * future, otherwise just return 0
49963 */
49964- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49965+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49966 ret = -EAGAIN;
49967
49968 pipe_unlock(ipipe);
49969diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
49970index a475983..9c6a1f0 100644
49971--- a/fs/sysfs/bin.c
49972+++ b/fs/sysfs/bin.c
49973@@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count)
49974 }
49975
49976 static ssize_t
49977+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
49978+static ssize_t
49979 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
49980 {
49981 struct bin_buffer *bb = file->private_data;
49982diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49983index 7fdf6a7..e6cd8ad 100644
49984--- a/fs/sysfs/dir.c
49985+++ b/fs/sysfs/dir.c
49986@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49987 struct sysfs_dirent *sd;
49988 int rc;
49989
49990+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49991+ const char *parent_name = parent_sd->s_name;
49992+
49993+ mode = S_IFDIR | S_IRWXU;
49994+
49995+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49996+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49997+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49998+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49999+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
50000+#endif
50001+
50002 /* allocate */
50003 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
50004 if (!sd)
50005diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
50006index 00012e3..8392349 100644
50007--- a/fs/sysfs/file.c
50008+++ b/fs/sysfs/file.c
50009@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
50010
50011 struct sysfs_open_dirent {
50012 atomic_t refcnt;
50013- atomic_t event;
50014+ atomic_unchecked_t event;
50015 wait_queue_head_t poll;
50016 struct list_head buffers; /* goes through sysfs_buffer.list */
50017 };
50018@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
50019 if (!sysfs_get_active(attr_sd))
50020 return -ENODEV;
50021
50022- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50023+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50024 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50025
50026 sysfs_put_active(attr_sd);
50027@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
50028 return -ENOMEM;
50029
50030 atomic_set(&new_od->refcnt, 0);
50031- atomic_set(&new_od->event, 1);
50032+ atomic_set_unchecked(&new_od->event, 1);
50033 init_waitqueue_head(&new_od->poll);
50034 INIT_LIST_HEAD(&new_od->buffers);
50035 goto retry;
50036@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
50037
50038 sysfs_put_active(attr_sd);
50039
50040- if (buffer->event != atomic_read(&od->event))
50041+ if (buffer->event != atomic_read_unchecked(&od->event))
50042 goto trigger;
50043
50044 return DEFAULT_POLLMASK;
50045@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
50046
50047 od = sd->s_attr.open;
50048 if (od) {
50049- atomic_inc(&od->event);
50050+ atomic_inc_unchecked(&od->event);
50051 wake_up_interruptible(&od->poll);
50052 }
50053
50054diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50055index a7ac78f..02158e1 100644
50056--- a/fs/sysfs/symlink.c
50057+++ b/fs/sysfs/symlink.c
50058@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50059
50060 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50061 {
50062- char *page = nd_get_link(nd);
50063+ const char *page = nd_get_link(nd);
50064 if (!IS_ERR(page))
50065 free_page((unsigned long)page);
50066 }
50067diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
50068index f922cba..062fb02 100644
50069--- a/fs/ubifs/debug.c
50070+++ b/fs/ubifs/debug.c
50071@@ -2819,6 +2819,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
50072 * debugfs file. Returns %0 or %1 in case of success and a negative error code
50073 * in case of failure.
50074 */
50075+static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2);
50076 static int interpret_user_input(const char __user *u, size_t count)
50077 {
50078 size_t buf_size;
50079@@ -2837,6 +2838,8 @@ static int interpret_user_input(const char __user *u, size_t count)
50080 }
50081
50082 static ssize_t dfs_file_write(struct file *file, const char __user *u,
50083+ size_t count, loff_t *ppos) __size_overflow(3);
50084+static ssize_t dfs_file_write(struct file *file, const char __user *u,
50085 size_t count, loff_t *ppos)
50086 {
50087 struct ubifs_info *c = file->private_data;
50088diff --git a/fs/udf/misc.c b/fs/udf/misc.c
50089index c175b4d..8f36a16 100644
50090--- a/fs/udf/misc.c
50091+++ b/fs/udf/misc.c
50092@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
50093
50094 u8 udf_tag_checksum(const struct tag *t)
50095 {
50096- u8 *data = (u8 *)t;
50097+ const u8 *data = (const u8 *)t;
50098 u8 checksum = 0;
50099 int i;
50100 for (i = 0; i < sizeof(struct tag); ++i)
50101diff --git a/fs/utimes.c b/fs/utimes.c
50102index ba653f3..06ea4b1 100644
50103--- a/fs/utimes.c
50104+++ b/fs/utimes.c
50105@@ -1,6 +1,7 @@
50106 #include <linux/compiler.h>
50107 #include <linux/file.h>
50108 #include <linux/fs.h>
50109+#include <linux/security.h>
50110 #include <linux/linkage.h>
50111 #include <linux/mount.h>
50112 #include <linux/namei.h>
50113@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
50114 goto mnt_drop_write_and_out;
50115 }
50116 }
50117+
50118+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50119+ error = -EACCES;
50120+ goto mnt_drop_write_and_out;
50121+ }
50122+
50123 mutex_lock(&inode->i_mutex);
50124 error = notify_change(path->dentry, &newattrs);
50125 mutex_unlock(&inode->i_mutex);
50126diff --git a/fs/xattr.c b/fs/xattr.c
50127index 82f4337..236473c 100644
50128--- a/fs/xattr.c
50129+++ b/fs/xattr.c
50130@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50131 * Extended attribute SET operations
50132 */
50133 static long
50134-setxattr(struct dentry *d, const char __user *name, const void __user *value,
50135+setxattr(struct path *path, const char __user *name, const void __user *value,
50136 size_t size, int flags)
50137 {
50138 int error;
50139@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
50140 return PTR_ERR(kvalue);
50141 }
50142
50143- error = vfs_setxattr(d, kname, kvalue, size, flags);
50144+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50145+ error = -EACCES;
50146+ goto out;
50147+ }
50148+
50149+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50150+out:
50151 kfree(kvalue);
50152 return error;
50153 }
50154@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
50155 return error;
50156 error = mnt_want_write(path.mnt);
50157 if (!error) {
50158- error = setxattr(path.dentry, name, value, size, flags);
50159+ error = setxattr(&path, name, value, size, flags);
50160 mnt_drop_write(path.mnt);
50161 }
50162 path_put(&path);
50163@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
50164 return error;
50165 error = mnt_want_write(path.mnt);
50166 if (!error) {
50167- error = setxattr(path.dentry, name, value, size, flags);
50168+ error = setxattr(&path, name, value, size, flags);
50169 mnt_drop_write(path.mnt);
50170 }
50171 path_put(&path);
50172@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
50173 const void __user *,value, size_t, size, int, flags)
50174 {
50175 struct file *f;
50176- struct dentry *dentry;
50177 int error = -EBADF;
50178
50179 f = fget(fd);
50180 if (!f)
50181 return error;
50182- dentry = f->f_path.dentry;
50183- audit_inode(NULL, dentry);
50184+ audit_inode(NULL, f->f_path.dentry);
50185 error = mnt_want_write_file(f);
50186 if (!error) {
50187- error = setxattr(dentry, name, value, size, flags);
50188+ error = setxattr(&f->f_path, name, value, size, flags);
50189 mnt_drop_write_file(f);
50190 }
50191 fput(f);
50192diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50193index 8d5a506..7f62712 100644
50194--- a/fs/xattr_acl.c
50195+++ b/fs/xattr_acl.c
50196@@ -17,8 +17,8 @@
50197 struct posix_acl *
50198 posix_acl_from_xattr(const void *value, size_t size)
50199 {
50200- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50201- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50202+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50203+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50204 int count;
50205 struct posix_acl *acl;
50206 struct posix_acl_entry *acl_e;
50207diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
50208index 188ef2f..adcf864 100644
50209--- a/fs/xfs/xfs_bmap.c
50210+++ b/fs/xfs/xfs_bmap.c
50211@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
50212 int nmap,
50213 int ret_nmap);
50214 #else
50215-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50216+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50217 #endif /* DEBUG */
50218
50219 STATIC int
50220diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50221index 79d05e8..e3e5861 100644
50222--- a/fs/xfs/xfs_dir2_sf.c
50223+++ b/fs/xfs/xfs_dir2_sf.c
50224@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
50225 }
50226
50227 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
50228- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50229+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50230+ char name[sfep->namelen];
50231+ memcpy(name, sfep->name, sfep->namelen);
50232+ if (filldir(dirent, name, sfep->namelen,
50233+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
50234+ *offset = off & 0x7fffffff;
50235+ return 0;
50236+ }
50237+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50238 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50239 *offset = off & 0x7fffffff;
50240 return 0;
50241diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
50242index 76f3ca5..f57f712 100644
50243--- a/fs/xfs/xfs_ioctl.c
50244+++ b/fs/xfs/xfs_ioctl.c
50245@@ -128,7 +128,7 @@ xfs_find_handle(
50246 }
50247
50248 error = -EFAULT;
50249- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50250+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50251 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50252 goto out_put;
50253
50254diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
50255index ab30253..4d86958 100644
50256--- a/fs/xfs/xfs_iops.c
50257+++ b/fs/xfs/xfs_iops.c
50258@@ -447,7 +447,7 @@ xfs_vn_put_link(
50259 struct nameidata *nd,
50260 void *p)
50261 {
50262- char *s = nd_get_link(nd);
50263+ const char *s = nd_get_link(nd);
50264
50265 if (!IS_ERR(s))
50266 kfree(s);
50267diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50268new file mode 100644
50269index 0000000..2645296
50270--- /dev/null
50271+++ b/grsecurity/Kconfig
50272@@ -0,0 +1,1079 @@
50273+#
 50274+# grsecurity configuration
50275+#
50276+
50277+menu "Grsecurity"
50278+
50279+config GRKERNSEC
50280+ bool "Grsecurity"
50281+ select CRYPTO
50282+ select CRYPTO_SHA256
50283+ help
50284+ If you say Y here, you will be able to configure many features
50285+ that will enhance the security of your system. It is highly
50286+ recommended that you say Y here and read through the help
50287+ for each option so that you fully understand the features and
50288+ can evaluate their usefulness for your machine.
50289+
50290+choice
50291+ prompt "Security Level"
50292+ depends on GRKERNSEC
50293+ default GRKERNSEC_CUSTOM
50294+
50295+config GRKERNSEC_LOW
50296+ bool "Low"
50297+ select GRKERNSEC_LINK
50298+ select GRKERNSEC_FIFO
50299+ select GRKERNSEC_RANDNET
50300+ select GRKERNSEC_DMESG
50301+ select GRKERNSEC_CHROOT
50302+ select GRKERNSEC_CHROOT_CHDIR
50303+
50304+ help
50305+ If you choose this option, several of the grsecurity options will
50306+ be enabled that will give you greater protection against a number
50307+ of attacks, while assuring that none of your software will have any
50308+ conflicts with the additional security measures. If you run a lot
50309+ of unusual software, or you are having problems with the higher
50310+ security levels, you should say Y here. With this option, the
50311+ following features are enabled:
50312+
50313+ - Linking restrictions
50314+ - FIFO restrictions
50315+ - Restricted dmesg
50316+ - Enforced chdir("/") on chroot
50317+ - Runtime module disabling
50318+
50319+config GRKERNSEC_MEDIUM
50320+ bool "Medium"
50321+ select PAX
50322+ select PAX_EI_PAX
50323+ select PAX_PT_PAX_FLAGS
50324+ select PAX_HAVE_ACL_FLAGS
50325+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50326+ select GRKERNSEC_CHROOT
50327+ select GRKERNSEC_CHROOT_SYSCTL
50328+ select GRKERNSEC_LINK
50329+ select GRKERNSEC_FIFO
50330+ select GRKERNSEC_DMESG
50331+ select GRKERNSEC_RANDNET
50332+ select GRKERNSEC_FORKFAIL
50333+ select GRKERNSEC_TIME
50334+ select GRKERNSEC_SIGNAL
50335+ select GRKERNSEC_CHROOT
50336+ select GRKERNSEC_CHROOT_UNIX
50337+ select GRKERNSEC_CHROOT_MOUNT
50338+ select GRKERNSEC_CHROOT_PIVOT
50339+ select GRKERNSEC_CHROOT_DOUBLE
50340+ select GRKERNSEC_CHROOT_CHDIR
50341+ select GRKERNSEC_CHROOT_MKNOD
50342+ select GRKERNSEC_PROC
50343+ select GRKERNSEC_PROC_USERGROUP
50344+ select PAX_RANDUSTACK
50345+ select PAX_ASLR
50346+ select PAX_RANDMMAP
50347+ select PAX_REFCOUNT if (X86 || SPARC64)
50348+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50349+
50350+ help
50351+ If you say Y here, several features in addition to those included
50352+ in the low additional security level will be enabled. These
50353+ features provide even more security to your system, though in rare
50354+ cases they may be incompatible with very old or poorly written
50355+ software. If you enable this option, make sure that your auth
50356+ service (identd) is running as gid 1001. With this option,
50357+ the following features (in addition to those provided in the
50358+ low additional security level) will be enabled:
50359+
50360+ - Failed fork logging
50361+ - Time change logging
50362+ - Signal logging
50363+ - Deny mounts in chroot
50364+ - Deny double chrooting
50365+ - Deny sysctl writes in chroot
50366+ - Deny mknod in chroot
50367+ - Deny access to abstract AF_UNIX sockets out of chroot
50368+ - Deny pivot_root in chroot
50369+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
50370+ - /proc restrictions with special GID set to 10 (usually wheel)
50371+ - Address Space Layout Randomization (ASLR)
50372+ - Prevent exploitation of most refcount overflows
50373+ - Bounds checking of copying between the kernel and userland
50374+
50375+config GRKERNSEC_HIGH
50376+ bool "High"
50377+ select GRKERNSEC_LINK
50378+ select GRKERNSEC_FIFO
50379+ select GRKERNSEC_DMESG
50380+ select GRKERNSEC_FORKFAIL
50381+ select GRKERNSEC_TIME
50382+ select GRKERNSEC_SIGNAL
50383+ select GRKERNSEC_CHROOT
50384+ select GRKERNSEC_CHROOT_SHMAT
50385+ select GRKERNSEC_CHROOT_UNIX
50386+ select GRKERNSEC_CHROOT_MOUNT
50387+ select GRKERNSEC_CHROOT_FCHDIR
50388+ select GRKERNSEC_CHROOT_PIVOT
50389+ select GRKERNSEC_CHROOT_DOUBLE
50390+ select GRKERNSEC_CHROOT_CHDIR
50391+ select GRKERNSEC_CHROOT_MKNOD
50392+ select GRKERNSEC_CHROOT_CAPS
50393+ select GRKERNSEC_CHROOT_SYSCTL
50394+ select GRKERNSEC_CHROOT_FINDTASK
50395+ select GRKERNSEC_SYSFS_RESTRICT
50396+ select GRKERNSEC_PROC
50397+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50398+ select GRKERNSEC_HIDESYM
50399+ select GRKERNSEC_BRUTE
50400+ select GRKERNSEC_PROC_USERGROUP
50401+ select GRKERNSEC_KMEM
50402+ select GRKERNSEC_RESLOG
50403+ select GRKERNSEC_RANDNET
50404+ select GRKERNSEC_PROC_ADD
50405+ select GRKERNSEC_CHROOT_CHMOD
50406+ select GRKERNSEC_CHROOT_NICE
50407+ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
50408+ select GRKERNSEC_AUDIT_MOUNT
50409+ select GRKERNSEC_MODHARDEN if (MODULES)
50410+ select GRKERNSEC_HARDEN_PTRACE
50411+ select GRKERNSEC_PTRACE_READEXEC
50412+ select GRKERNSEC_VM86 if (X86_32)
50413+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50414+ select PAX
50415+ select PAX_RANDUSTACK
50416+ select PAX_ASLR
50417+ select PAX_RANDMMAP
50418+ select PAX_NOEXEC
50419+ select PAX_MPROTECT
50420+ select PAX_EI_PAX
50421+ select PAX_PT_PAX_FLAGS
50422+ select PAX_HAVE_ACL_FLAGS
50423+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50424+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50425+ select PAX_RANDKSTACK if (X86_TSC && X86)
50426+ select PAX_SEGMEXEC if (X86_32)
50427+ select PAX_PAGEEXEC
50428+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50429+ select PAX_EMUTRAMP if (PARISC)
50430+ select PAX_EMUSIGRT if (PARISC)
50431+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50432+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50433+ select PAX_REFCOUNT if (X86 || SPARC64)
50434+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50435+ help
50436+ If you say Y here, many of the features of grsecurity will be
50437+ enabled, which will protect you against many kinds of attacks
50438+ against your system. The heightened security comes at a cost
50439+ of an increased chance of incompatibilities with rare software
50440+ on your machine. Since this security level enables PaX, you should
50441+ view <http://pax.grsecurity.net> and read about the PaX
50442+ project. While you are there, download chpax and run it on
50443+ binaries that cause problems with PaX. Also remember that
50444+ since the /proc restrictions are enabled, you must run your
50445+ identd as gid 1001. This security level enables the following
50446+ features in addition to those listed in the low and medium
50447+ security levels:
50448+
50449+ - Additional /proc restrictions
50450+ - Chmod restrictions in chroot
50451+ - No signals, ptrace, or viewing of processes outside of chroot
50452+ - Capability restrictions in chroot
50453+ - Deny fchdir out of chroot
50454+ - Priority restrictions in chroot
50455+ - Segmentation-based implementation of PaX
50456+ - Mprotect restrictions
50457+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50458+ - Kernel stack randomization
50459+ - Mount/unmount/remount logging
50460+ - Kernel symbol hiding
50461+ - Hardening of module auto-loading
50462+ - Ptrace restrictions
50463+ - Restricted vm86 mode
50464+ - Restricted sysfs/debugfs
50465+ - Active kernel exploit response
50466+
50467+config GRKERNSEC_CUSTOM
50468+ bool "Custom"
50469+ help
50470+ If you say Y here, you will be able to configure every grsecurity
50471+ option, which allows you to enable many more features that aren't
50472+ covered in the basic security levels. These additional features
50473+ include TPE, socket restrictions, and the sysctl system for
50474+ grsecurity. It is advised that you read through the help for
50475+ each option to determine its usefulness in your situation.
50476+
50477+endchoice
50478+
50479+menu "Memory Protections"
50480+depends on GRKERNSEC
50481+
50482+config GRKERNSEC_KMEM
50483+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50484+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50485+ help
50486+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50487+ be written to or read from to modify or leak the contents of the running
50488+ kernel. /dev/port will also not be allowed to be opened. If you have module
50489+ support disabled, enabling this will close up four ways that are
50490+ currently used to insert malicious code into the running kernel.
50491+ Even with all these features enabled, we still highly recommend that
50492+ you use the RBAC system, as it is still possible for an attacker to
50493+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50494+ If you are not using XFree86, you may be able to stop this additional
50495+ case by enabling the 'Disable privileged I/O' option. Though nothing
50496+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50497+ but only to video memory, which is the only writing we allow in this
50498+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50499+ not be allowed to mprotect it with PROT_WRITE later.
50500+ It is highly recommended that you say Y here if you meet all the
50501+ conditions above.
50502+
50503+config GRKERNSEC_VM86
50504+ bool "Restrict VM86 mode"
50505+ depends on X86_32
50506+
50507+ help
50508+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50509+ make use of a special execution mode on 32bit x86 processors called
50510+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50511+ video cards and will still work with this option enabled. The purpose
50512+ of the option is to prevent exploitation of emulation errors in
50513+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50514+ Nearly all users should be able to enable this option.
50515+
50516+config GRKERNSEC_IO
50517+ bool "Disable privileged I/O"
50518+ depends on X86
50519+ select RTC_CLASS
50520+ select RTC_INTF_DEV
50521+ select RTC_DRV_CMOS
50522+
50523+ help
50524+ If you say Y here, all ioperm and iopl calls will return an error.
50525+ Ioperm and iopl can be used to modify the running kernel.
50526+ Unfortunately, some programs need this access to operate properly,
50527+ the most notable of which are XFree86 and hwclock. hwclock can be
50528+ remedied by having RTC support in the kernel, so real-time
50529+ clock support is enabled if this option is enabled, to ensure
50530+ that hwclock operates correctly. XFree86 still will not
50531+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50532+ IF YOU USE XFree86. If you use XFree86 and you still want to
50533+ protect your kernel against modification, use the RBAC system.
50534+
50535+config GRKERNSEC_PROC_MEMMAP
50536+ bool "Harden ASLR against information leaks and entropy reduction"
50537+ default y if (PAX_NOEXEC || PAX_ASLR)
50538+ depends on PAX_NOEXEC || PAX_ASLR
50539+ help
50540+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50541+ give no information about the addresses of its mappings if
50542+ PaX features that rely on random addresses are enabled on the task.
50543+ In addition to sanitizing this information and disabling other
50544+ dangerous sources of information, this option causes reads of sensitive
50545+ /proc/<pid> entries where the file descriptor was opened in a different
50546+ task than the one performing the read. Such attempts are logged.
50547+ This option also limits argv/env strings for suid/sgid binaries
50548+ to 512KB to prevent a complete exhaustion of the stack entropy provided
50549+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50550+ binaries to prevent alternative mmap layouts from being abused.
50551+
50552+ If you use PaX it is essential that you say Y here as it closes up
50553+ several holes that make full ASLR useless locally.
50554+
50555+config GRKERNSEC_BRUTE
50556+ bool "Deter exploit bruteforcing"
50557+ help
50558+ If you say Y here, attempts to bruteforce exploits against forking
50559+ daemons such as apache or sshd, as well as against suid/sgid binaries
50560+ will be deterred. When a child of a forking daemon is killed by PaX
50561+ or crashes due to an illegal instruction or other suspicious signal,
50562+ the parent process will be delayed 30 seconds upon every subsequent
50563+ fork until the administrator is able to assess the situation and
50564+ restart the daemon.
50565+ In the suid/sgid case, the attempt is logged, the user has all their
50566+ processes terminated, and they are prevented from executing any further
50567+ processes for 15 minutes.
50568+ It is recommended that you also enable signal logging in the auditing
50569+ section so that logs are generated when a process triggers a suspicious
50570+ signal.
50571+ If the sysctl option is enabled, a sysctl option with name
50572+ "deter_bruteforce" is created.
50573+
50574+
50575+config GRKERNSEC_MODHARDEN
50576+ bool "Harden module auto-loading"
50577+ depends on MODULES
50578+ help
50579+ If you say Y here, module auto-loading in response to use of some
50580+ feature implemented by an unloaded module will be restricted to
50581+ root users. Enabling this option helps defend against attacks
50582+ by unprivileged users who abuse the auto-loading behavior to
50583+ cause a vulnerable module to load that is then exploited.
50584+
50585+ If this option prevents a legitimate use of auto-loading for a
50586+ non-root user, the administrator can execute modprobe manually
50587+ with the exact name of the module mentioned in the alert log.
50588+ Alternatively, the administrator can add the module to the list
50589+ of modules loaded at boot by modifying init scripts.
50590+
50591+ Modification of init scripts will most likely be needed on
50592+ Ubuntu servers with encrypted home directory support enabled,
50593+ as the first non-root user logging in will cause the ecb(aes),
50594+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50595+
50596+config GRKERNSEC_HIDESYM
50597+ bool "Hide kernel symbols"
50598+ help
50599+ If you say Y here, getting information on loaded modules, and
50600+ displaying all kernel symbols through a syscall will be restricted
50601+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50602+ /proc/kallsyms will be restricted to the root user. The RBAC
50603+ system can hide that entry even from root.
50604+
50605+ This option also prevents leaking of kernel addresses through
50606+ several /proc entries.
50607+
50608+ Note that this option is only effective provided the following
50609+ conditions are met:
50610+ 1) The kernel using grsecurity is not precompiled by some distribution
50611+ 2) You have also enabled GRKERNSEC_DMESG
50612+ 3) You are using the RBAC system and hiding other files such as your
50613+ kernel image and System.map. Alternatively, enabling this option
50614+ causes the permissions on /boot, /lib/modules, and the kernel
50615+ source directory to change at compile time to prevent
50616+ reading by non-root users.
50617+ If the above conditions are met, this option will aid in providing a
50618+ useful protection against local kernel exploitation of overflows
50619+ and arbitrary read/write vulnerabilities.
50620+
50621+config GRKERNSEC_KERN_LOCKOUT
50622+ bool "Active kernel exploit response"
50623+ depends on X86 || ARM || PPC || SPARC
50624+ help
50625+ If you say Y here, when a PaX alert is triggered due to suspicious
50626+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50627+ or an OOPs occurs due to bad memory accesses, instead of just
50628+ terminating the offending process (and potentially allowing
50629+ a subsequent exploit from the same user), we will take one of two
50630+ actions:
50631+ If the user was root, we will panic the system
50632+ If the user was non-root, we will log the attempt, terminate
50633+ all processes owned by the user, then prevent them from creating
50634+ any new processes until the system is restarted
50635+ This deters repeated kernel exploitation/bruteforcing attempts
50636+ and is useful for later forensics.
50637+
50638+endmenu
50639+menu "Role Based Access Control Options"
50640+depends on GRKERNSEC
50641+
50642+config GRKERNSEC_RBAC_DEBUG
50643+ bool
50644+
50645+config GRKERNSEC_NO_RBAC
50646+ bool "Disable RBAC system"
50647+ help
50648+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50649+ preventing the RBAC system from being enabled. You should only say Y
50650+ here if you have no intention of using the RBAC system, so as to prevent
50651+ an attacker with root access from misusing the RBAC system to hide files
50652+ and processes when loadable module support and /dev/[k]mem have been
50653+ locked down.
50654+
50655+config GRKERNSEC_ACL_HIDEKERN
50656+ bool "Hide kernel processes"
50657+ help
50658+ If you say Y here, all kernel threads will be hidden to all
50659+ processes but those whose subject has the "view hidden processes"
50660+ flag.
50661+
50662+config GRKERNSEC_ACL_MAXTRIES
50663+ int "Maximum tries before password lockout"
50664+ default 3
50665+ help
50666+ This option enforces the maximum number of times a user can attempt
50667+ to authorize themselves with the grsecurity RBAC system before being
50668+ denied the ability to attempt authorization again for a specified time.
50669+ The lower the number, the harder it will be to brute-force a password.
50670+
50671+config GRKERNSEC_ACL_TIMEOUT
50672+ int "Time to wait after max password tries, in seconds"
50673+ default 30
50674+ help
50675+ This option specifies the time the user must wait after attempting to
50676+ authorize to the RBAC system with the maximum number of invalid
50677+ passwords. The higher the number, the harder it will be to brute-force
50678+ a password.
50679+
50680+endmenu
50681+menu "Filesystem Protections"
50682+depends on GRKERNSEC
50683+
50684+config GRKERNSEC_PROC
50685+ bool "Proc restrictions"
50686+ help
50687+ If you say Y here, the permissions of the /proc filesystem
50688+ will be altered to enhance system security and privacy. You MUST
50689+ choose either a user only restriction or a user and group restriction.
50690+ Depending upon the option you choose, you can either restrict users to
50691+ see only the processes they themselves run, or choose a group that can
50692+ view all processes and files normally restricted to root if you choose
50693+ the "restrict to user only" option. NOTE: If you're running identd or
50694+ ntpd as a non-root user, you will have to run it as the group you
50695+ specify here.
50696+
50697+config GRKERNSEC_PROC_USER
50698+ bool "Restrict /proc to user only"
50699+ depends on GRKERNSEC_PROC
50700+ help
50701+ If you say Y here, non-root users will only be able to view their own
50702+ processes, and will be restricted from viewing network-related
50703+ information and kernel symbol and module information.
50704+
50705+config GRKERNSEC_PROC_USERGROUP
50706+ bool "Allow special group"
50707+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50708+ help
50709+ If you say Y here, you will be able to select a group that will be
50710+ able to view all processes and network-related information. If you've
50711+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50712+ remain hidden. This option is useful if you want to run identd as
50713+ a non-root user.
50714+
50715+config GRKERNSEC_PROC_GID
50716+ int "GID for special group"
50717+ depends on GRKERNSEC_PROC_USERGROUP
50718+ default 1001
50719+
50720+config GRKERNSEC_PROC_ADD
50721+ bool "Additional restrictions"
50722+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50723+ help
50724+ If you say Y here, additional restrictions will be placed on
50725+ /proc that keep normal users from viewing device information and
50726+ slabinfo information that could be useful for exploits.
50727+
50728+config GRKERNSEC_LINK
50729+ bool "Linking restrictions"
50730+ help
50731+ If you say Y here, /tmp race exploits will be prevented, since users
50732+ will no longer be able to follow symlinks owned by other users in
50733+ world-writable +t directories (e.g. /tmp), unless the owner of the
50734+ symlink is the owner of the directory. Users will also not be
50735+ able to hardlink to files they do not own. If the sysctl option is
50736+ enabled, a sysctl option with name "linking_restrictions" is created.
50737+
50738+config GRKERNSEC_FIFO
50739+ bool "FIFO restrictions"
50740+ help
50741+ If you say Y here, users will not be able to write to FIFOs they don't
50742+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50743+ the FIFO is the same owner of the directory it's held in. If the sysctl
50744+ option is enabled, a sysctl option with name "fifo_restrictions" is
50745+ created.
50746+
50747+config GRKERNSEC_SYSFS_RESTRICT
50748+ bool "Sysfs/debugfs restriction"
50749+ depends on SYSFS
50750+ help
50751+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50752+ any filesystem normally mounted under it (e.g. debugfs) will be
50753+ mostly accessible only by root. These filesystems generally provide access
50754+ to hardware and debug information that isn't appropriate for unprivileged
50755+ users of the system. Sysfs and debugfs have also become a large source
50756+ of new vulnerabilities, ranging from infoleaks to local compromise.
50757+ There has been very little oversight with an eye toward security involved
50758+ in adding new exporters of information to these filesystems, so their
50759+ use is discouraged.
50760+ For reasons of compatibility, a few directories have been whitelisted
50761+ for access by non-root users:
50762+ /sys/fs/selinux
50763+ /sys/fs/fuse
50764+ /sys/devices/system/cpu
50765+
50766+config GRKERNSEC_ROFS
50767+ bool "Runtime read-only mount protection"
50768+ help
50769+ If you say Y here, a sysctl option with name "romount_protect" will
50770+ be created. By setting this option to 1 at runtime, filesystems
50771+ will be protected in the following ways:
50772+ * No new writable mounts will be allowed
50773+ * Existing read-only mounts won't be able to be remounted read/write
50774+ * Write operations will be denied on all block devices
50775+ This option acts independently of grsec_lock: once it is set to 1,
50776+ it cannot be turned off. Therefore, please be mindful of the resulting
50777+ behavior if this option is enabled in an init script on a read-only
50778+ filesystem. This feature is mainly intended for secure embedded systems.
50779+
50780+config GRKERNSEC_CHROOT
50781+ bool "Chroot jail restrictions"
50782+ help
50783+ If you say Y here, you will be able to choose several options that will
50784+ make breaking out of a chrooted jail much more difficult. If you
50785+ encounter no software incompatibilities with the following options, it
50786+ is recommended that you enable each one.
50787+
50788+config GRKERNSEC_CHROOT_MOUNT
50789+ bool "Deny mounts"
50790+ depends on GRKERNSEC_CHROOT
50791+ help
50792+ If you say Y here, processes inside a chroot will not be able to
50793+ mount or remount filesystems. If the sysctl option is enabled, a
50794+ sysctl option with name "chroot_deny_mount" is created.
50795+
50796+config GRKERNSEC_CHROOT_DOUBLE
50797+ bool "Deny double-chroots"
50798+ depends on GRKERNSEC_CHROOT
50799+ help
50800+ If you say Y here, processes inside a chroot will not be able to chroot
50801+ again outside the chroot. This is a widely used method of breaking
50802+ out of a chroot jail and should not be allowed. If the sysctl
50803+ option is enabled, a sysctl option with name
50804+ "chroot_deny_chroot" is created.
50805+
50806+config GRKERNSEC_CHROOT_PIVOT
50807+ bool "Deny pivot_root in chroot"
50808+ depends on GRKERNSEC_CHROOT
50809+ help
50810+ If you say Y here, processes inside a chroot will not be able to use
50811+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50812+ works similarly to chroot in that it changes the root filesystem. This
50813+ function could be misused in a chrooted process to attempt to break out
50814+ of the chroot, and therefore should not be allowed. If the sysctl
50815+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50816+ created.
50817+
50818+config GRKERNSEC_CHROOT_CHDIR
50819+ bool "Enforce chdir(\"/\") on all chroots"
50820+ depends on GRKERNSEC_CHROOT
50821+ help
50822+ If you say Y here, the current working directory of all newly-chrooted
50823+ applications will be set to the root directory of the chroot.
50824+ The man page on chroot(2) states:
50825+ Note that this call does not change the current working
50826+ directory, so that `.' can be outside the tree rooted at
50827+ `/'. In particular, the super-user can escape from a
50828+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50829+
50830+ It is recommended that you say Y here, since it's not known to break
50831+ any software. If the sysctl option is enabled, a sysctl option with
50832+ name "chroot_enforce_chdir" is created.
50833+
50834+config GRKERNSEC_CHROOT_CHMOD
50835+ bool "Deny (f)chmod +s"
50836+ depends on GRKERNSEC_CHROOT
50837+ help
50838+ If you say Y here, processes inside a chroot will not be able to chmod
50839+ or fchmod files to make them have suid or sgid bits. This protects
50840+ against another published method of breaking a chroot. If the sysctl
50841+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50842+ created.
50843+
50844+config GRKERNSEC_CHROOT_FCHDIR
50845+ bool "Deny fchdir out of chroot"
50846+ depends on GRKERNSEC_CHROOT
50847+ help
50848+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50849+ to a file descriptor of the chrooting process that points to a directory
50850+ outside the filesystem will be stopped. If the sysctl option
50851+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50852+
50853+config GRKERNSEC_CHROOT_MKNOD
50854+ bool "Deny mknod"
50855+ depends on GRKERNSEC_CHROOT
50856+ help
50857+ If you say Y here, processes inside a chroot will not be allowed to
50858+ mknod. The problem with using mknod inside a chroot is that it
50859+ would allow an attacker to create a device entry that is the same
50860+ as one on the physical root of your system, which could be
50861+ anything from the console device to a device for your harddrive (which
50862+ they could then use to wipe the drive or steal data). It is recommended
50863+ that you say Y here, unless you run into software incompatibilities.
50864+ If the sysctl option is enabled, a sysctl option with name
50865+ "chroot_deny_mknod" is created.
50866+
50867+config GRKERNSEC_CHROOT_SHMAT
50868+ bool "Deny shmat() out of chroot"
50869+ depends on GRKERNSEC_CHROOT
50870+ help
50871+ If you say Y here, processes inside a chroot will not be able to attach
50872+ to shared memory segments that were created outside of the chroot jail.
50873+ It is recommended that you say Y here. If the sysctl option is enabled,
50874+ a sysctl option with name "chroot_deny_shmat" is created.
50875+
50876+config GRKERNSEC_CHROOT_UNIX
50877+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50878+ depends on GRKERNSEC_CHROOT
50879+ help
50880+ If you say Y here, processes inside a chroot will not be able to
50881+ connect to abstract (meaning not belonging to a filesystem) Unix
50882+ domain sockets that were bound outside of a chroot. It is recommended
50883+ that you say Y here. If the sysctl option is enabled, a sysctl option
50884+ with name "chroot_deny_unix" is created.
50885+
50886+config GRKERNSEC_CHROOT_FINDTASK
50887+ bool "Protect outside processes"
50888+ depends on GRKERNSEC_CHROOT
50889+ help
50890+ If you say Y here, processes inside a chroot will not be able to
50891+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50892+ getsid, or view any process outside of the chroot. If the sysctl
50893+ option is enabled, a sysctl option with name "chroot_findtask" is
50894+ created.
50895+
50896+config GRKERNSEC_CHROOT_NICE
50897+ bool "Restrict priority changes"
50898+ depends on GRKERNSEC_CHROOT
50899+ help
50900+ If you say Y here, processes inside a chroot will not be able to raise
50901+ the priority of processes in the chroot, or alter the priority of
50902+ processes outside the chroot. This provides more security than simply
50903+ removing CAP_SYS_NICE from the process' capability set. If the
50904+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50905+ is created.
50906+
50907+config GRKERNSEC_CHROOT_SYSCTL
50908+ bool "Deny sysctl writes"
50909+ depends on GRKERNSEC_CHROOT
50910+ help
50911+ If you say Y here, an attacker in a chroot will not be able to
50912+ write to sysctl entries, either by sysctl(2) or through a /proc
50913+ interface. It is strongly recommended that you say Y here. If the
50914+ sysctl option is enabled, a sysctl option with name
50915+ "chroot_deny_sysctl" is created.
50916+
50917+config GRKERNSEC_CHROOT_CAPS
50918+ bool "Capability restrictions"
50919+ depends on GRKERNSEC_CHROOT
50920+ help
50921+ If you say Y here, the capabilities on all processes within a
50922+ chroot jail will be lowered to stop module insertion, raw i/o,
50923+ system and net admin tasks, rebooting the system, modifying immutable
50924+ files, modifying IPC owned by another, and changing the system time.
50925+ This is left an option because it can break some apps. Disable this
50926+ if your chrooted apps are having problems performing those kinds of
50927+ tasks. If the sysctl option is enabled, a sysctl option with
50928+ name "chroot_caps" is created.
50929+
50930+endmenu
50931+menu "Kernel Auditing"
50932+depends on GRKERNSEC
50933+
50934+config GRKERNSEC_AUDIT_GROUP
50935+ bool "Single group for auditing"
50936+ help
50937+ If you say Y here, the exec, chdir, and (un)mount logging features
50938+ will only operate on a group you specify. This option is recommended
50939+ if you only want to watch certain users instead of having a large
50940+ amount of logs from the entire system. If the sysctl option is enabled,
50941+ a sysctl option with name "audit_group" is created.
50942+
50943+config GRKERNSEC_AUDIT_GID
50944+ int "GID for auditing"
50945+ depends on GRKERNSEC_AUDIT_GROUP
50946+ default 1007
50947+
50948+config GRKERNSEC_EXECLOG
50949+ bool "Exec logging"
50950+ help
50951+ If you say Y here, all execve() calls will be logged (since the
50952+ other exec*() calls are frontends to execve(), all execution
50953+ will be logged). Useful for shell-servers that like to keep track
50954+ of their users. If the sysctl option is enabled, a sysctl option with
50955+ name "exec_logging" is created.
50956+ WARNING: This option when enabled will produce a LOT of logs, especially
50957+ on an active system.
50958+
50959+config GRKERNSEC_RESLOG
50960+ bool "Resource logging"
50961+ help
50962+ If you say Y here, all attempts to overstep resource limits will
50963+ be logged with the resource name, the requested size, and the current
50964+ limit. It is highly recommended that you say Y here. If the sysctl
50965+ option is enabled, a sysctl option with name "resource_logging" is
50966+ created. If the RBAC system is enabled, the sysctl value is ignored.
50967+
50968+config GRKERNSEC_CHROOT_EXECLOG
50969+ bool "Log execs within chroot"
50970+ help
50971+ If you say Y here, all executions inside a chroot jail will be logged
50972+ to syslog. This can cause a large amount of logs if certain
50973+ applications (eg. djb's daemontools) are installed on the system, and
50974+ is therefore left as an option. If the sysctl option is enabled, a
50975+ sysctl option with name "chroot_execlog" is created.
50976+
50977+config GRKERNSEC_AUDIT_PTRACE
50978+ bool "Ptrace logging"
50979+ help
50980+ If you say Y here, all attempts to attach to a process via ptrace
50981+ will be logged. If the sysctl option is enabled, a sysctl option
50982+ with name "audit_ptrace" is created.
50983+
50984+config GRKERNSEC_AUDIT_CHDIR
50985+ bool "Chdir logging"
50986+ help
50987+ If you say Y here, all chdir() calls will be logged. If the sysctl
50988+ option is enabled, a sysctl option with name "audit_chdir" is created.
50989+
50990+config GRKERNSEC_AUDIT_MOUNT
50991+ bool "(Un)Mount logging"
50992+ help
50993+ If you say Y here, all mounts and unmounts will be logged. If the
50994+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50995+ created.
50996+
50997+config GRKERNSEC_SIGNAL
50998+ bool "Signal logging"
50999+ help
51000+ If you say Y here, certain important signals will be logged, such as
51001+ SIGSEGV, which will as a result inform you when an error in a program
51002+ occurred, which in some cases could mean a possible exploit attempt.
51003+ If the sysctl option is enabled, a sysctl option with name
51004+ "signal_logging" is created.
51005+
51006+config GRKERNSEC_FORKFAIL
51007+ bool "Fork failure logging"
51008+ help
51009+ If you say Y here, all failed fork() attempts will be logged.
51010+ This could suggest a fork bomb, or someone attempting to overstep
51011+ their process limit. If the sysctl option is enabled, a sysctl option
51012+ with name "forkfail_logging" is created.
51013+
51014+config GRKERNSEC_TIME
51015+ bool "Time change logging"
51016+ help
51017+ If you say Y here, any changes of the system clock will be logged.
51018+ If the sysctl option is enabled, a sysctl option with name
51019+ "timechange_logging" is created.
51020+
51021+config GRKERNSEC_PROC_IPADDR
51022+ bool "/proc/<pid>/ipaddr support"
51023+ help
51024+ If you say Y here, a new entry will be added to each /proc/<pid>
51025+ directory that contains the IP address of the person using the task.
51026+ The IP is carried across local TCP and AF_UNIX stream sockets.
51027+ This information can be useful for IDS/IPSes to perform remote response
51028+ to a local attack. The entry is readable by only the owner of the
51029+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
51030+ the RBAC system), and thus does not create privacy concerns.
51031+
51032+config GRKERNSEC_RWXMAP_LOG
51033+ bool 'Denied RWX mmap/mprotect logging'
51034+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
51035+ help
51036+ If you say Y here, calls to mmap() and mprotect() with explicit
51037+ usage of PROT_WRITE and PROT_EXEC together will be logged when
51038+ denied by the PAX_MPROTECT feature. If the sysctl option is
51039+ enabled, a sysctl option with name "rwxmap_logging" is created.
51040+
51041+config GRKERNSEC_AUDIT_TEXTREL
51042+ bool 'ELF text relocations logging (READ HELP)'
51043+ depends on PAX_MPROTECT
51044+ help
51045+ If you say Y here, text relocations will be logged with the filename
51046+ of the offending library or binary. The purpose of the feature is
51047+ to help Linux distribution developers get rid of libraries and
51048+ binaries that need text relocations which hinder the future progress
51049+ of PaX. Only Linux distribution developers should say Y here, and
51050+ never on a production machine, as this option creates an information
51051+ leak that could aid an attacker in defeating the randomization of
51052+ a single memory region. If the sysctl option is enabled, a sysctl
51053+ option with name "audit_textrel" is created.
51054+
51055+endmenu
51056+
51057+menu "Executable Protections"
51058+depends on GRKERNSEC
51059+
51060+config GRKERNSEC_DMESG
51061+ bool "Dmesg(8) restriction"
51062+ help
51063+ If you say Y here, non-root users will not be able to use dmesg(8)
51064+ to view up to the last 4kb of messages in the kernel's log buffer.
51065+ The kernel's log buffer often contains kernel addresses and other
51066+ identifying information useful to an attacker in fingerprinting a
51067+ system for a targeted exploit.
51068+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
51069+ created.
51070+
51071+config GRKERNSEC_HARDEN_PTRACE
51072+ bool "Deter ptrace-based process snooping"
51073+ help
51074+ If you say Y here, TTY sniffers and other malicious monitoring
51075+ programs implemented through ptrace will be defeated. If you
51076+ have been using the RBAC system, this option has already been
51077+ enabled for several years for all users, with the ability to make
51078+ fine-grained exceptions.
51079+
51080+ This option only affects the ability of non-root users to ptrace
51081+ processes that are not a descendent of the ptracing process.
51082+ This means that strace ./binary and gdb ./binary will still work,
51083+ but attaching to arbitrary processes will not. If the sysctl
51084+ option is enabled, a sysctl option with name "harden_ptrace" is
51085+ created.
51086+
51087+config GRKERNSEC_PTRACE_READEXEC
51088+ bool "Require read access to ptrace sensitive binaries"
51089+ help
51090+ If you say Y here, unprivileged users will not be able to ptrace unreadable
51091+ binaries. This option is useful in environments that
51092+ remove the read bits (e.g. file mode 4711) from suid binaries to
51093+ prevent infoleaking of their contents. This option adds
51094+ consistency to the use of that file mode, as the binary could normally
51095+ be read out when run without privileges while ptracing.
51096+
51097+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
51098+ is created.
51099+
51100+config GRKERNSEC_SETXID
51101+ bool "Enforce consistent multithreaded privileges"
51102+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
51103+ help
51104+ If you say Y here, a change from a root uid to a non-root uid
51105+ in a multithreaded application will cause the resulting uids,
51106+ gids, supplementary groups, and capabilities in that thread
51107+ to be propagated to the other threads of the process. In most
51108+ cases this is unnecessary, as glibc will emulate this behavior
51109+ on behalf of the application. Other libcs do not act in the
51110+ same way, allowing the other threads of the process to continue
51111+ running with root privileges. If the sysctl option is enabled,
51112+ a sysctl option with name "consistent_setxid" is created.
51113+
51114+config GRKERNSEC_TPE
51115+ bool "Trusted Path Execution (TPE)"
51116+ help
51117+ If you say Y here, you will be able to choose a gid to add to the
51118+ supplementary groups of users you want to mark as "untrusted."
51119+ These users will not be able to execute any files that are not in
51120+ root-owned directories writable only by root. If the sysctl option
51121+ is enabled, a sysctl option with name "tpe" is created.
51122+
51123+config GRKERNSEC_TPE_ALL
51124+ bool "Partially restrict all non-root users"
51125+ depends on GRKERNSEC_TPE
51126+ help
51127+ If you say Y here, all non-root users will be covered under
51128+ a weaker TPE restriction. This is separate from, and in addition to,
51129+ the main TPE options that you have selected elsewhere. Thus, if a
51130+ "trusted" GID is chosen, this restriction applies to even that GID.
51131+ Under this restriction, all non-root users will only be allowed to
51132+ execute files in directories they own that are not group or
51133+ world-writable, or in directories owned by root and writable only by
51134+ root. If the sysctl option is enabled, a sysctl option with name
51135+ "tpe_restrict_all" is created.
51136+
51137+config GRKERNSEC_TPE_INVERT
51138+ bool "Invert GID option"
51139+ depends on GRKERNSEC_TPE
51140+ help
51141+ If you say Y here, the group you specify in the TPE configuration will
51142+ decide what group TPE restrictions will be *disabled* for. This
51143+ option is useful if you want TPE restrictions to be applied to most
51144+ users on the system. If the sysctl option is enabled, a sysctl option
51145+ with name "tpe_invert" is created. Unlike other sysctl options, this
51146+ entry will default to on for backward-compatibility.
51147+
51148+config GRKERNSEC_TPE_GID
51149+ int "GID for untrusted users"
51150+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51151+ default 1005
51152+ help
51153+ Setting this GID determines what group TPE restrictions will be
51154+ *enabled* for. If the sysctl option is enabled, a sysctl option
51155+ with name "tpe_gid" is created.
51156+
51157+config GRKERNSEC_TPE_GID
51158+ int "GID for trusted users"
51159+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51160+ default 1005
51161+ help
51162+ Setting this GID determines what group TPE restrictions will be
51163+ *disabled* for. If the sysctl option is enabled, a sysctl option
51164+ with name "tpe_gid" is created.
51165+
51166+endmenu
51167+menu "Network Protections"
51168+depends on GRKERNSEC
51169+
51170+config GRKERNSEC_RANDNET
51171+ bool "Larger entropy pools"
51172+ help
51173+ If you say Y here, the entropy pools used for many features of Linux
51174+ and grsecurity will be doubled in size. Since several grsecurity
51175+ features use additional randomness, it is recommended that you say Y
51176+ here. Saying Y here has a similar effect as modifying
51177+ /proc/sys/kernel/random/poolsize.
51178+
51179+config GRKERNSEC_BLACKHOLE
51180+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51181+ depends on NET
51182+ help
51183+ If you say Y here, neither TCP resets nor ICMP
51184+ destination-unreachable packets will be sent in response to packets
51185+ sent to ports for which no associated listening process exists.
51186+ This feature supports both IPV4 and IPV6 and exempts the
51187+ loopback interface from blackholing. Enabling this feature
51188+ makes a host more resilient to DoS attacks and reduces network
51189+ visibility against scanners.
51190+
51191+ The blackhole feature as-implemented is equivalent to the FreeBSD
51192+ blackhole feature, as it prevents RST responses to all packets, not
51193+ just SYNs. Under most application behavior this causes no
51194+ problems, but applications (like haproxy) may not close certain
51195+ connections in a way that cleanly terminates them on the remote
51196+ end, leaving the remote host in LAST_ACK state. Because of this
51197+ side-effect and to prevent intentional LAST_ACK DoSes, this
51198+ feature also adds automatic mitigation against such attacks.
51199+ The mitigation drastically reduces the amount of time a socket
51200+ can spend in LAST_ACK state. If you're using haproxy and not
51201+ all servers it connects to have this option enabled, consider
51202+ disabling this feature on the haproxy host.
51203+
51204+ If the sysctl option is enabled, two sysctl options with names
51205+ "ip_blackhole" and "lastack_retries" will be created.
51206+ While "ip_blackhole" takes the standard zero/non-zero on/off
51207+ toggle, "lastack_retries" uses the same kinds of values as
51208+ "tcp_retries1" and "tcp_retries2". The default value of 4
51209+ prevents a socket from lasting more than 45 seconds in LAST_ACK
51210+ state.
51211+
51212+config GRKERNSEC_SOCKET
51213+ bool "Socket restrictions"
51214+ depends on NET
51215+ help
51216+ If you say Y here, you will be able to choose from several options.
51217+ If you assign a GID on your system and add it to the supplementary
51218+ groups of users you want to restrict socket access to, this patch
51219+ will perform up to three things, based on the option(s) you choose.
51220+
51221+config GRKERNSEC_SOCKET_ALL
51222+ bool "Deny any sockets to group"
51223+ depends on GRKERNSEC_SOCKET
51224+ help
51225+ If you say Y here, you will be able to choose a GID of whose users will
51226+ be unable to connect to other hosts from your machine or run server
51227+ applications from your machine. If the sysctl option is enabled, a
51228+ sysctl option with name "socket_all" is created.
51229+
51230+config GRKERNSEC_SOCKET_ALL_GID
51231+ int "GID to deny all sockets for"
51232+ depends on GRKERNSEC_SOCKET_ALL
51233+ default 1004
51234+ help
51235+ Here you can choose the GID to disable socket access for. Remember to
51236+ add the users you want socket access disabled for to the GID
51237+ specified here. If the sysctl option is enabled, a sysctl option
51238+ with name "socket_all_gid" is created.
51239+
51240+config GRKERNSEC_SOCKET_CLIENT
51241+ bool "Deny client sockets to group"
51242+ depends on GRKERNSEC_SOCKET
51243+ help
51244+ If you say Y here, you will be able to choose a GID of whose users will
51245+ be unable to connect to other hosts from your machine, but will be
51246+ able to run servers. If this option is enabled, all users in the group
51247+ you specify will have to use passive mode when initiating ftp transfers
51248+ from the shell on your machine. If the sysctl option is enabled, a
51249+ sysctl option with name "socket_client" is created.
51250+
51251+config GRKERNSEC_SOCKET_CLIENT_GID
51252+ int "GID to deny client sockets for"
51253+ depends on GRKERNSEC_SOCKET_CLIENT
51254+ default 1003
51255+ help
51256+ Here you can choose the GID to disable client socket access for.
51257+ Remember to add the users you want client socket access disabled for to
51258+ the GID specified here. If the sysctl option is enabled, a sysctl
51259+ option with name "socket_client_gid" is created.
51260+
51261+config GRKERNSEC_SOCKET_SERVER
51262+ bool "Deny server sockets to group"
51263+ depends on GRKERNSEC_SOCKET
51264+ help
51265+ If you say Y here, you will be able to choose a GID of whose users will
51266+ be unable to run server applications from your machine. If the sysctl
51267+ option is enabled, a sysctl option with name "socket_server" is created.
51268+
51269+config GRKERNSEC_SOCKET_SERVER_GID
51270+ int "GID to deny server sockets for"
51271+ depends on GRKERNSEC_SOCKET_SERVER
51272+ default 1002
51273+ help
51274+ Here you can choose the GID to disable server socket access for.
51275+ Remember to add the users you want server socket access disabled for to
51276+ the GID specified here. If the sysctl option is enabled, a sysctl
51277+ option with name "socket_server_gid" is created.
51278+
51279+endmenu
51280+menu "Sysctl support"
51281+depends on GRKERNSEC && SYSCTL
51282+
51283+config GRKERNSEC_SYSCTL
51284+ bool "Sysctl support"
51285+ help
51286+ If you say Y here, you will be able to change the options that
51287+ grsecurity runs with at bootup, without having to recompile your
51288+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51289+ to enable (1) or disable (0) various features. All the sysctl entries
51290+ are mutable until the "grsec_lock" entry is set to a non-zero value.
51291+ All features enabled in the kernel configuration are disabled at boot
51292+ if you do not say Y to the "Turn on features by default" option.
51293+ All options should be set at startup, and the grsec_lock entry should
51294+ be set to a non-zero value after all the options are set.
51295+ *THIS IS EXTREMELY IMPORTANT*
51296+
51297+config GRKERNSEC_SYSCTL_DISTRO
51298+ bool "Extra sysctl support for distro makers (READ HELP)"
51299+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51300+ help
51301+ If you say Y here, additional sysctl options will be created
51302+ for features that affect processes running as root. Therefore,
51303+ it is critical when using this option that the grsec_lock entry be
51304+ enabled after boot. Only distros with prebuilt kernel packages
51305+ with this option enabled that can ensure grsec_lock is enabled
51306+ after boot should use this option.
51307+ *Failure to set grsec_lock after boot makes all grsec features
51308+ this option covers useless*
51309+
51310+ Currently this option creates the following sysctl entries:
51311+ "Disable Privileged I/O": "disable_priv_io"
51312+
51313+config GRKERNSEC_SYSCTL_ON
51314+ bool "Turn on features by default"
51315+ depends on GRKERNSEC_SYSCTL
51316+ help
51317+ If you say Y here, instead of having all features enabled in the
51318+ kernel configuration disabled at boot time, the features will be
51319+ enabled at boot time. It is recommended you say Y here unless
51320+ there is some reason you would want all sysctl-tunable features to
51321+ be disabled by default. As mentioned elsewhere, it is important
51322+ to enable the grsec_lock entry once you have finished modifying
51323+ the sysctl entries.
51324+
51325+endmenu
51326+menu "Logging Options"
51327+depends on GRKERNSEC
51328+
51329+config GRKERNSEC_FLOODTIME
51330+ int "Seconds in between log messages (minimum)"
51331+ default 10
51332+ help
51333+ This option allows you to enforce the number of seconds between
51334+ grsecurity log messages. The default should be suitable for most
51335+ people, however, if you choose to change it, choose a value small enough
51336+ to allow informative logs to be produced, but large enough to
51337+ prevent flooding.
51338+
51339+config GRKERNSEC_FLOODBURST
51340+ int "Number of messages in a burst (maximum)"
51341+ default 6
51342+ help
51343+ This option allows you to choose the maximum number of messages allowed
51344+ within the flood time interval you chose in a separate option. The
51345+ default should be suitable for most people, however if you find that
51346+ many of your logs are being interpreted as flooding, you may want to
51347+ raise this value.
51348+
51349+endmenu
51350+
51351+endmenu
51352diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51353new file mode 100644
51354index 0000000..1b9afa9
51355--- /dev/null
51356+++ b/grsecurity/Makefile
51357@@ -0,0 +1,38 @@
51358+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51359+# during 2001-2009 it has been completely redesigned by Brad Spengler
51360+# into an RBAC system
51361+#
51362+# All code in this directory and various hooks inserted throughout the kernel
51363+# are copyright Brad Spengler - Open Source Security, Inc., and released
51364+# under the GPL v2 or higher
51365+
51366+KBUILD_CFLAGS += -Werror
51367+
51368+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51369+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
51370+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51371+
51372+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51373+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51374+ gracl_learn.o grsec_log.o
51375+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51376+
51377+ifdef CONFIG_NET
51378+obj-y += grsec_sock.o
51379+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51380+endif
51381+
51382+ifndef CONFIG_GRKERNSEC
51383+obj-y += grsec_disabled.o
51384+endif
51385+
51386+ifdef CONFIG_GRKERNSEC_HIDESYM
51387+extra-y := grsec_hidesym.o
51388+$(obj)/grsec_hidesym.o:
51389+ @-chmod -f 500 /boot
51390+ @-chmod -f 500 /lib/modules
51391+ @-chmod -f 500 /lib64/modules
51392+ @-chmod -f 500 /lib32/modules
51393+ @-chmod -f 700 .
51394+ @echo ' grsec: protected kernel image paths'
51395+endif
51396diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51397new file mode 100644
51398index 0000000..a6d83f0
51399--- /dev/null
51400+++ b/grsecurity/gracl.c
51401@@ -0,0 +1,4193 @@
51402+#include <linux/kernel.h>
51403+#include <linux/module.h>
51404+#include <linux/sched.h>
51405+#include <linux/mm.h>
51406+#include <linux/file.h>
51407+#include <linux/fs.h>
51408+#include <linux/namei.h>
51409+#include <linux/mount.h>
51410+#include <linux/tty.h>
51411+#include <linux/proc_fs.h>
51412+#include <linux/lglock.h>
51413+#include <linux/slab.h>
51414+#include <linux/vmalloc.h>
51415+#include <linux/types.h>
51416+#include <linux/sysctl.h>
51417+#include <linux/netdevice.h>
51418+#include <linux/ptrace.h>
51419+#include <linux/gracl.h>
51420+#include <linux/gralloc.h>
51421+#include <linux/security.h>
51422+#include <linux/grinternal.h>
51423+#include <linux/pid_namespace.h>
51424+#include <linux/fdtable.h>
51425+#include <linux/percpu.h>
51426+#include "../fs/mount.h"
51427+
51428+#include <asm/uaccess.h>
51429+#include <asm/errno.h>
51430+#include <asm/mman.h>
51431+
51432+static struct acl_role_db acl_role_set;
51433+static struct name_db name_set;
51434+static struct inodev_db inodev_set;
51435+
51436+/* for keeping track of userspace pointers used for subjects, so we
51437+ can share references in the kernel as well
51438+*/
51439+
51440+static struct path real_root;
51441+
51442+static struct acl_subj_map_db subj_map_set;
51443+
51444+static struct acl_role_label *default_role;
51445+
51446+static struct acl_role_label *role_list;
51447+
51448+static u16 acl_sp_role_value;
51449+
51450+extern char *gr_shared_page[4];
51451+static DEFINE_MUTEX(gr_dev_mutex);
51452+DEFINE_RWLOCK(gr_inode_lock);
51453+
51454+struct gr_arg *gr_usermode;
51455+
51456+static unsigned int gr_status __read_only = GR_STATUS_INIT;
51457+
51458+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51459+extern void gr_clear_learn_entries(void);
51460+
51461+#ifdef CONFIG_GRKERNSEC_RESLOG
51462+extern void gr_log_resource(const struct task_struct *task,
51463+ const int res, const unsigned long wanted, const int gt);
51464+#endif
51465+
51466+unsigned char *gr_system_salt;
51467+unsigned char *gr_system_sum;
51468+
51469+static struct sprole_pw **acl_special_roles = NULL;
51470+static __u16 num_sprole_pws = 0;
51471+
51472+static struct acl_role_label *kernel_role = NULL;
51473+
51474+static unsigned int gr_auth_attempts = 0;
51475+static unsigned long gr_auth_expires = 0UL;
51476+
51477+#ifdef CONFIG_NET
51478+extern struct vfsmount *sock_mnt;
51479+#endif
51480+
51481+extern struct vfsmount *pipe_mnt;
51482+extern struct vfsmount *shm_mnt;
51483+#ifdef CONFIG_HUGETLBFS
51484+extern struct vfsmount *hugetlbfs_vfsmount;
51485+#endif
51486+
51487+static struct acl_object_label *fakefs_obj_rw;
51488+static struct acl_object_label *fakefs_obj_rwx;
51489+
51490+extern int gr_init_uidset(void);
51491+extern void gr_free_uidset(void);
51492+extern void gr_remove_uid(uid_t uid);
51493+extern int gr_find_uid(uid_t uid);
51494+
51495+DECLARE_BRLOCK(vfsmount_lock);
51496+
51497+__inline__ int
51498+gr_acl_is_enabled(void)
51499+{
51500+ return (gr_status & GR_READY);
51501+}
51502+
51503+#ifdef CONFIG_BTRFS_FS
51504+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51505+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51506+#endif
51507+
51508+static inline dev_t __get_dev(const struct dentry *dentry)
51509+{
51510+#ifdef CONFIG_BTRFS_FS
51511+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51512+ return get_btrfs_dev_from_inode(dentry->d_inode);
51513+ else
51514+#endif
51515+ return dentry->d_inode->i_sb->s_dev;
51516+}
51517+
51518+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51519+{
51520+ return __get_dev(dentry);
51521+}
51522+
51523+static char gr_task_roletype_to_char(struct task_struct *task)
51524+{
51525+ switch (task->role->roletype &
51526+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51527+ GR_ROLE_SPECIAL)) {
51528+ case GR_ROLE_DEFAULT:
51529+ return 'D';
51530+ case GR_ROLE_USER:
51531+ return 'U';
51532+ case GR_ROLE_GROUP:
51533+ return 'G';
51534+ case GR_ROLE_SPECIAL:
51535+ return 'S';
51536+ }
51537+
51538+ return 'X';
51539+}
51540+
51541+char gr_roletype_to_char(void)
51542+{
51543+ return gr_task_roletype_to_char(current);
51544+}
51545+
51546+__inline__ int
51547+gr_acl_tpe_check(void)
51548+{
51549+ if (unlikely(!(gr_status & GR_READY)))
51550+ return 0;
51551+ if (current->role->roletype & GR_ROLE_TPE)
51552+ return 1;
51553+ else
51554+ return 0;
51555+}
51556+
51557+int
51558+gr_handle_rawio(const struct inode *inode)
51559+{
51560+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51561+ if (inode && S_ISBLK(inode->i_mode) &&
51562+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51563+ !capable(CAP_SYS_RAWIO))
51564+ return 1;
51565+#endif
51566+ return 0;
51567+}
51568+
51569+static int
51570+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51571+{
51572+ if (likely(lena != lenb))
51573+ return 0;
51574+
51575+ return !memcmp(a, b, lena);
51576+}
51577+
51578+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51579+{
51580+ *buflen -= namelen;
51581+ if (*buflen < 0)
51582+ return -ENAMETOOLONG;
51583+ *buffer -= namelen;
51584+ memcpy(*buffer, str, namelen);
51585+ return 0;
51586+}
51587+
51588+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51589+{
51590+ return prepend(buffer, buflen, name->name, name->len);
51591+}
51592+
51593+static int prepend_path(const struct path *path, struct path *root,
51594+ char **buffer, int *buflen)
51595+{
51596+ struct dentry *dentry = path->dentry;
51597+ struct vfsmount *vfsmnt = path->mnt;
51598+ struct mount *mnt = real_mount(vfsmnt);
51599+ bool slash = false;
51600+ int error = 0;
51601+
51602+ while (dentry != root->dentry || vfsmnt != root->mnt) {
51603+ struct dentry * parent;
51604+
51605+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51606+ /* Global root? */
51607+ if (!mnt_has_parent(mnt)) {
51608+ goto out;
51609+ }
51610+ dentry = mnt->mnt_mountpoint;
51611+ mnt = mnt->mnt_parent;
51612+ vfsmnt = &mnt->mnt;
51613+ continue;
51614+ }
51615+ parent = dentry->d_parent;
51616+ prefetch(parent);
51617+ spin_lock(&dentry->d_lock);
51618+ error = prepend_name(buffer, buflen, &dentry->d_name);
51619+ spin_unlock(&dentry->d_lock);
51620+ if (!error)
51621+ error = prepend(buffer, buflen, "/", 1);
51622+ if (error)
51623+ break;
51624+
51625+ slash = true;
51626+ dentry = parent;
51627+ }
51628+
51629+out:
51630+ if (!error && !slash)
51631+ error = prepend(buffer, buflen, "/", 1);
51632+
51633+ return error;
51634+}
51635+
51636+/* this must be called with vfsmount_lock and rename_lock held */
51637+
51638+static char *__our_d_path(const struct path *path, struct path *root,
51639+ char *buf, int buflen)
51640+{
51641+ char *res = buf + buflen;
51642+ int error;
51643+
51644+ prepend(&res, &buflen, "\0", 1);
51645+ error = prepend_path(path, root, &res, &buflen);
51646+ if (error)
51647+ return ERR_PTR(error);
51648+
51649+ return res;
51650+}
51651+
51652+static char *
51653+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51654+{
51655+ char *retval;
51656+
51657+ retval = __our_d_path(path, root, buf, buflen);
51658+ if (unlikely(IS_ERR(retval)))
51659+ retval = strcpy(buf, "<path too long>");
51660+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51661+ retval[1] = '\0';
51662+
51663+ return retval;
51664+}
51665+
51666+static char *
51667+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51668+ char *buf, int buflen)
51669+{
51670+ struct path path;
51671+ char *res;
51672+
51673+ path.dentry = (struct dentry *)dentry;
51674+ path.mnt = (struct vfsmount *)vfsmnt;
51675+
51676+ /* we can use real_root.dentry, real_root.mnt, because this is only called
51677+ by the RBAC system */
51678+ res = gen_full_path(&path, &real_root, buf, buflen);
51679+
51680+ return res;
51681+}
51682+
51683+static char *
51684+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51685+ char *buf, int buflen)
51686+{
51687+ char *res;
51688+ struct path path;
51689+ struct path root;
51690+ struct task_struct *reaper = &init_task;
51691+
51692+ path.dentry = (struct dentry *)dentry;
51693+ path.mnt = (struct vfsmount *)vfsmnt;
51694+
51695+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51696+ get_fs_root(reaper->fs, &root);
51697+
51698+ write_seqlock(&rename_lock);
51699+ br_read_lock(vfsmount_lock);
51700+ res = gen_full_path(&path, &root, buf, buflen);
51701+ br_read_unlock(vfsmount_lock);
51702+ write_sequnlock(&rename_lock);
51703+
51704+ path_put(&root);
51705+ return res;
51706+}
51707+
51708+static char *
51709+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51710+{
51711+ char *ret;
51712+ write_seqlock(&rename_lock);
51713+ br_read_lock(vfsmount_lock);
51714+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51715+ PAGE_SIZE);
51716+ br_read_unlock(vfsmount_lock);
51717+ write_sequnlock(&rename_lock);
51718+ return ret;
51719+}
51720+
51721+static char *
51722+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51723+{
51724+ char *ret;
51725+ char *buf;
51726+ int buflen;
51727+
51728+ write_seqlock(&rename_lock);
51729+ br_read_lock(vfsmount_lock);
51730+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51731+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51732+ buflen = (int)(ret - buf);
51733+ if (buflen >= 5)
51734+ prepend(&ret, &buflen, "/proc", 5);
51735+ else
51736+ ret = strcpy(buf, "<path too long>");
51737+ br_read_unlock(vfsmount_lock);
51738+ write_sequnlock(&rename_lock);
51739+ return ret;
51740+}
51741+
51742+char *
51743+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51744+{
51745+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51746+ PAGE_SIZE);
51747+}
51748+
51749+char *
51750+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51751+{
51752+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51753+ PAGE_SIZE);
51754+}
51755+
51756+char *
51757+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51758+{
51759+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51760+ PAGE_SIZE);
51761+}
51762+
51763+char *
51764+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51765+{
51766+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51767+ PAGE_SIZE);
51768+}
51769+
51770+char *
51771+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51772+{
51773+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51774+ PAGE_SIZE);
51775+}
51776+
51777+__inline__ __u32
51778+to_gr_audit(const __u32 reqmode)
51779+{
51780+ /* masks off auditable permission flags, then shifts them to create
51781+ auditing flags, and adds the special case of append auditing if
51782+ we're requesting write */
51783+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51784+}
51785+
51786+struct acl_subject_label *
51787+lookup_subject_map(const struct acl_subject_label *userp)
51788+{
51789+ unsigned int index = shash(userp, subj_map_set.s_size);
51790+ struct subject_map *match;
51791+
51792+ match = subj_map_set.s_hash[index];
51793+
51794+ while (match && match->user != userp)
51795+ match = match->next;
51796+
51797+ if (match != NULL)
51798+ return match->kernel;
51799+ else
51800+ return NULL;
51801+}
51802+
51803+static void
51804+insert_subj_map_entry(struct subject_map *subjmap)
51805+{
51806+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51807+ struct subject_map **curr;
51808+
51809+ subjmap->prev = NULL;
51810+
51811+ curr = &subj_map_set.s_hash[index];
51812+ if (*curr != NULL)
51813+ (*curr)->prev = subjmap;
51814+
51815+ subjmap->next = *curr;
51816+ *curr = subjmap;
51817+
51818+ return;
51819+}
51820+
51821+static struct acl_role_label *
51822+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51823+ const gid_t gid)
51824+{
51825+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51826+ struct acl_role_label *match;
51827+ struct role_allowed_ip *ipp;
51828+ unsigned int x;
51829+ u32 curr_ip = task->signal->curr_ip;
51830+
51831+ task->signal->saved_ip = curr_ip;
51832+
51833+ match = acl_role_set.r_hash[index];
51834+
51835+ while (match) {
51836+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51837+ for (x = 0; x < match->domain_child_num; x++) {
51838+ if (match->domain_children[x] == uid)
51839+ goto found;
51840+ }
51841+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51842+ break;
51843+ match = match->next;
51844+ }
51845+found:
51846+ if (match == NULL) {
51847+ try_group:
51848+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51849+ match = acl_role_set.r_hash[index];
51850+
51851+ while (match) {
51852+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51853+ for (x = 0; x < match->domain_child_num; x++) {
51854+ if (match->domain_children[x] == gid)
51855+ goto found2;
51856+ }
51857+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51858+ break;
51859+ match = match->next;
51860+ }
51861+found2:
51862+ if (match == NULL)
51863+ match = default_role;
51864+ if (match->allowed_ips == NULL)
51865+ return match;
51866+ else {
51867+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51868+ if (likely
51869+ ((ntohl(curr_ip) & ipp->netmask) ==
51870+ (ntohl(ipp->addr) & ipp->netmask)))
51871+ return match;
51872+ }
51873+ match = default_role;
51874+ }
51875+ } else if (match->allowed_ips == NULL) {
51876+ return match;
51877+ } else {
51878+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51879+ if (likely
51880+ ((ntohl(curr_ip) & ipp->netmask) ==
51881+ (ntohl(ipp->addr) & ipp->netmask)))
51882+ return match;
51883+ }
51884+ goto try_group;
51885+ }
51886+
51887+ return match;
51888+}
51889+
51890+struct acl_subject_label *
51891+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51892+ const struct acl_role_label *role)
51893+{
51894+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51895+ struct acl_subject_label *match;
51896+
51897+ match = role->subj_hash[index];
51898+
51899+ while (match && (match->inode != ino || match->device != dev ||
51900+ (match->mode & GR_DELETED))) {
51901+ match = match->next;
51902+ }
51903+
51904+ if (match && !(match->mode & GR_DELETED))
51905+ return match;
51906+ else
51907+ return NULL;
51908+}
51909+
51910+struct acl_subject_label *
51911+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51912+ const struct acl_role_label *role)
51913+{
51914+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51915+ struct acl_subject_label *match;
51916+
51917+ match = role->subj_hash[index];
51918+
51919+ while (match && (match->inode != ino || match->device != dev ||
51920+ !(match->mode & GR_DELETED))) {
51921+ match = match->next;
51922+ }
51923+
51924+ if (match && (match->mode & GR_DELETED))
51925+ return match;
51926+ else
51927+ return NULL;
51928+}
51929+
51930+static struct acl_object_label *
51931+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51932+ const struct acl_subject_label *subj)
51933+{
51934+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51935+ struct acl_object_label *match;
51936+
51937+ match = subj->obj_hash[index];
51938+
51939+ while (match && (match->inode != ino || match->device != dev ||
51940+ (match->mode & GR_DELETED))) {
51941+ match = match->next;
51942+ }
51943+
51944+ if (match && !(match->mode & GR_DELETED))
51945+ return match;
51946+ else
51947+ return NULL;
51948+}
51949+
51950+static struct acl_object_label *
51951+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51952+ const struct acl_subject_label *subj)
51953+{
51954+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51955+ struct acl_object_label *match;
51956+
51957+ match = subj->obj_hash[index];
51958+
51959+ while (match && (match->inode != ino || match->device != dev ||
51960+ !(match->mode & GR_DELETED))) {
51961+ match = match->next;
51962+ }
51963+
51964+ if (match && (match->mode & GR_DELETED))
51965+ return match;
51966+
51967+ match = subj->obj_hash[index];
51968+
51969+ while (match && (match->inode != ino || match->device != dev ||
51970+ (match->mode & GR_DELETED))) {
51971+ match = match->next;
51972+ }
51973+
51974+ if (match && !(match->mode & GR_DELETED))
51975+ return match;
51976+ else
51977+ return NULL;
51978+}
51979+
51980+static struct name_entry *
51981+lookup_name_entry(const char *name)
51982+{
51983+ unsigned int len = strlen(name);
51984+ unsigned int key = full_name_hash(name, len);
51985+ unsigned int index = key % name_set.n_size;
51986+ struct name_entry *match;
51987+
51988+ match = name_set.n_hash[index];
51989+
51990+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51991+ match = match->next;
51992+
51993+ return match;
51994+}
51995+
51996+static struct name_entry *
51997+lookup_name_entry_create(const char *name)
51998+{
51999+ unsigned int len = strlen(name);
52000+ unsigned int key = full_name_hash(name, len);
52001+ unsigned int index = key % name_set.n_size;
52002+ struct name_entry *match;
52003+
52004+ match = name_set.n_hash[index];
52005+
52006+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52007+ !match->deleted))
52008+ match = match->next;
52009+
52010+ if (match && match->deleted)
52011+ return match;
52012+
52013+ match = name_set.n_hash[index];
52014+
52015+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52016+ match->deleted))
52017+ match = match->next;
52018+
52019+ if (match && !match->deleted)
52020+ return match;
52021+ else
52022+ return NULL;
52023+}
52024+
52025+static struct inodev_entry *
52026+lookup_inodev_entry(const ino_t ino, const dev_t dev)
52027+{
52028+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
52029+ struct inodev_entry *match;
52030+
52031+ match = inodev_set.i_hash[index];
52032+
52033+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
52034+ match = match->next;
52035+
52036+ return match;
52037+}
52038+
52039+static void
52040+insert_inodev_entry(struct inodev_entry *entry)
52041+{
52042+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
52043+ inodev_set.i_size);
52044+ struct inodev_entry **curr;
52045+
52046+ entry->prev = NULL;
52047+
52048+ curr = &inodev_set.i_hash[index];
52049+ if (*curr != NULL)
52050+ (*curr)->prev = entry;
52051+
52052+ entry->next = *curr;
52053+ *curr = entry;
52054+
52055+ return;
52056+}
52057+
52058+static void
52059+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52060+{
52061+ unsigned int index =
52062+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52063+ struct acl_role_label **curr;
52064+ struct acl_role_label *tmp, *tmp2;
52065+
52066+ curr = &acl_role_set.r_hash[index];
52067+
52068+ /* simple case, slot is empty, just set it to our role */
52069+ if (*curr == NULL) {
52070+ *curr = role;
52071+ } else {
52072+ /* example:
52073+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
52074+ 2 -> 3
52075+ */
52076+ /* first check to see if we can already be reached via this slot */
52077+ tmp = *curr;
52078+ while (tmp && tmp != role)
52079+ tmp = tmp->next;
52080+ if (tmp == role) {
52081+ /* we don't need to add ourselves to this slot's chain */
52082+ return;
52083+ }
52084+ /* we need to add ourselves to this chain, two cases */
52085+ if (role->next == NULL) {
52086+ /* simple case, append the current chain to our role */
52087+ role->next = *curr;
52088+ *curr = role;
52089+ } else {
52090+ /* 1 -> 2 -> 3 -> 4
52091+ 2 -> 3 -> 4
52092+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
52093+ */
52094+ /* trickier case: walk our role's chain until we find
52095+ the role for the start of the current slot's chain */
52096+ tmp = role;
52097+ tmp2 = *curr;
52098+ while (tmp->next && tmp->next != tmp2)
52099+ tmp = tmp->next;
52100+ if (tmp->next == tmp2) {
52101+ /* from example above, we found 3, so just
52102+ replace this slot's chain with ours */
52103+ *curr = role;
52104+ } else {
52105+ /* we didn't find a subset of our role's chain
52106+ in the current slot's chain, so append their
52107+ chain to ours, and set us as the first role in
52108+ the slot's chain
52109+
52110+ we could fold this case with the case above,
52111+ but making it explicit for clarity
52112+ */
52113+ tmp->next = tmp2;
52114+ *curr = role;
52115+ }
52116+ }
52117+ }
52118+
52119+ return;
52120+}
52121+
52122+static void
52123+insert_acl_role_label(struct acl_role_label *role)
52124+{
52125+ int i;
52126+
52127+ if (role_list == NULL) {
52128+ role_list = role;
52129+ role->prev = NULL;
52130+ } else {
52131+ role->prev = role_list;
52132+ role_list = role;
52133+ }
52134+
52135+ /* used for hash chains */
52136+ role->next = NULL;
52137+
52138+ if (role->roletype & GR_ROLE_DOMAIN) {
52139+ for (i = 0; i < role->domain_child_num; i++)
52140+ __insert_acl_role_label(role, role->domain_children[i]);
52141+ } else
52142+ __insert_acl_role_label(role, role->uidgid);
52143+}
52144+
52145+static int
52146+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52147+{
52148+ struct name_entry **curr, *nentry;
52149+ struct inodev_entry *ientry;
52150+ unsigned int len = strlen(name);
52151+ unsigned int key = full_name_hash(name, len);
52152+ unsigned int index = key % name_set.n_size;
52153+
52154+ curr = &name_set.n_hash[index];
52155+
52156+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52157+ curr = &((*curr)->next);
52158+
52159+ if (*curr != NULL)
52160+ return 1;
52161+
52162+ nentry = acl_alloc(sizeof (struct name_entry));
52163+ if (nentry == NULL)
52164+ return 0;
52165+ ientry = acl_alloc(sizeof (struct inodev_entry));
52166+ if (ientry == NULL)
52167+ return 0;
52168+ ientry->nentry = nentry;
52169+
52170+ nentry->key = key;
52171+ nentry->name = name;
52172+ nentry->inode = inode;
52173+ nentry->device = device;
52174+ nentry->len = len;
52175+ nentry->deleted = deleted;
52176+
52177+ nentry->prev = NULL;
52178+ curr = &name_set.n_hash[index];
52179+ if (*curr != NULL)
52180+ (*curr)->prev = nentry;
52181+ nentry->next = *curr;
52182+ *curr = nentry;
52183+
52184+ /* insert us into the table searchable by inode/dev */
52185+ insert_inodev_entry(ientry);
52186+
52187+ return 1;
52188+}
52189+
52190+static void
52191+insert_acl_obj_label(struct acl_object_label *obj,
52192+ struct acl_subject_label *subj)
52193+{
52194+ unsigned int index =
52195+ fhash(obj->inode, obj->device, subj->obj_hash_size);
52196+ struct acl_object_label **curr;
52197+
52198+
52199+ obj->prev = NULL;
52200+
52201+ curr = &subj->obj_hash[index];
52202+ if (*curr != NULL)
52203+ (*curr)->prev = obj;
52204+
52205+ obj->next = *curr;
52206+ *curr = obj;
52207+
52208+ return;
52209+}
52210+
52211+static void
52212+insert_acl_subj_label(struct acl_subject_label *obj,
52213+ struct acl_role_label *role)
52214+{
52215+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52216+ struct acl_subject_label **curr;
52217+
52218+ obj->prev = NULL;
52219+
52220+ curr = &role->subj_hash[index];
52221+ if (*curr != NULL)
52222+ (*curr)->prev = obj;
52223+
52224+ obj->next = *curr;
52225+ *curr = obj;
52226+
52227+ return;
52228+}
52229+
52230+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52231+
52232+static void *
52233+create_table(__u32 * len, int elementsize)
52234+{
52235+ unsigned int table_sizes[] = {
52236+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52237+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52238+ 4194301, 8388593, 16777213, 33554393, 67108859
52239+ };
52240+ void *newtable = NULL;
52241+ unsigned int pwr = 0;
52242+
52243+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52244+ table_sizes[pwr] <= *len)
52245+ pwr++;
52246+
52247+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52248+ return newtable;
52249+
52250+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52251+ newtable =
52252+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52253+ else
52254+ newtable = vmalloc(table_sizes[pwr] * elementsize);
52255+
52256+ *len = table_sizes[pwr];
52257+
52258+ return newtable;
52259+}
52260+
52261+static int
52262+init_variables(const struct gr_arg *arg)
52263+{
52264+ struct task_struct *reaper = &init_task;
52265+ unsigned int stacksize;
52266+
52267+ subj_map_set.s_size = arg->role_db.num_subjects;
52268+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52269+ name_set.n_size = arg->role_db.num_objects;
52270+ inodev_set.i_size = arg->role_db.num_objects;
52271+
52272+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
52273+ !name_set.n_size || !inodev_set.i_size)
52274+ return 1;
52275+
52276+ if (!gr_init_uidset())
52277+ return 1;
52278+
52279+ /* set up the stack that holds allocation info */
52280+
52281+ stacksize = arg->role_db.num_pointers + 5;
52282+
52283+ if (!acl_alloc_stack_init(stacksize))
52284+ return 1;
52285+
52286+ /* grab reference for the real root dentry and vfsmount */
52287+ get_fs_root(reaper->fs, &real_root);
52288+
52289+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52290+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52291+#endif
52292+
52293+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52294+ if (fakefs_obj_rw == NULL)
52295+ return 1;
52296+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52297+
52298+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52299+ if (fakefs_obj_rwx == NULL)
52300+ return 1;
52301+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
52302+
52303+ subj_map_set.s_hash =
52304+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52305+ acl_role_set.r_hash =
52306+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52307+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52308+ inodev_set.i_hash =
52309+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52310+
52311+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52312+ !name_set.n_hash || !inodev_set.i_hash)
52313+ return 1;
52314+
52315+ memset(subj_map_set.s_hash, 0,
52316+ sizeof(struct subject_map *) * subj_map_set.s_size);
52317+ memset(acl_role_set.r_hash, 0,
52318+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
52319+ memset(name_set.n_hash, 0,
52320+ sizeof (struct name_entry *) * name_set.n_size);
52321+ memset(inodev_set.i_hash, 0,
52322+ sizeof (struct inodev_entry *) * inodev_set.i_size);
52323+
52324+ return 0;
52325+}
52326+
52327+/* free information not needed after startup
52328+ currently contains user->kernel pointer mappings for subjects
52329+*/
52330+
52331+static void
52332+free_init_variables(void)
52333+{
52334+ __u32 i;
52335+
52336+ if (subj_map_set.s_hash) {
52337+ for (i = 0; i < subj_map_set.s_size; i++) {
52338+ if (subj_map_set.s_hash[i]) {
52339+ kfree(subj_map_set.s_hash[i]);
52340+ subj_map_set.s_hash[i] = NULL;
52341+ }
52342+ }
52343+
52344+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52345+ PAGE_SIZE)
52346+ kfree(subj_map_set.s_hash);
52347+ else
52348+ vfree(subj_map_set.s_hash);
52349+ }
52350+
52351+ return;
52352+}
52353+
52354+static void
52355+free_variables(void)
52356+{
52357+ struct acl_subject_label *s;
52358+ struct acl_role_label *r;
52359+ struct task_struct *task, *task2;
52360+ unsigned int x;
52361+
52362+ gr_clear_learn_entries();
52363+
52364+ read_lock(&tasklist_lock);
52365+ do_each_thread(task2, task) {
52366+ task->acl_sp_role = 0;
52367+ task->acl_role_id = 0;
52368+ task->acl = NULL;
52369+ task->role = NULL;
52370+ } while_each_thread(task2, task);
52371+ read_unlock(&tasklist_lock);
52372+
52373+ /* release the reference to the real root dentry and vfsmount */
52374+ path_put(&real_root);
52375+ memset(&real_root, 0, sizeof(real_root));
52376+
52377+ /* free all object hash tables */
52378+
52379+ FOR_EACH_ROLE_START(r)
52380+ if (r->subj_hash == NULL)
52381+ goto next_role;
52382+ FOR_EACH_SUBJECT_START(r, s, x)
52383+ if (s->obj_hash == NULL)
52384+ break;
52385+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52386+ kfree(s->obj_hash);
52387+ else
52388+ vfree(s->obj_hash);
52389+ FOR_EACH_SUBJECT_END(s, x)
52390+ FOR_EACH_NESTED_SUBJECT_START(r, s)
52391+ if (s->obj_hash == NULL)
52392+ break;
52393+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52394+ kfree(s->obj_hash);
52395+ else
52396+ vfree(s->obj_hash);
52397+ FOR_EACH_NESTED_SUBJECT_END(s)
52398+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52399+ kfree(r->subj_hash);
52400+ else
52401+ vfree(r->subj_hash);
52402+ r->subj_hash = NULL;
52403+next_role:
52404+ FOR_EACH_ROLE_END(r)
52405+
52406+ acl_free_all();
52407+
52408+ if (acl_role_set.r_hash) {
52409+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52410+ PAGE_SIZE)
52411+ kfree(acl_role_set.r_hash);
52412+ else
52413+ vfree(acl_role_set.r_hash);
52414+ }
52415+ if (name_set.n_hash) {
52416+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
52417+ PAGE_SIZE)
52418+ kfree(name_set.n_hash);
52419+ else
52420+ vfree(name_set.n_hash);
52421+ }
52422+
52423+ if (inodev_set.i_hash) {
52424+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52425+ PAGE_SIZE)
52426+ kfree(inodev_set.i_hash);
52427+ else
52428+ vfree(inodev_set.i_hash);
52429+ }
52430+
52431+ gr_free_uidset();
52432+
52433+ memset(&name_set, 0, sizeof (struct name_db));
52434+ memset(&inodev_set, 0, sizeof (struct inodev_db));
52435+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52436+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52437+
52438+ default_role = NULL;
52439+ kernel_role = NULL;
52440+ role_list = NULL;
52441+
52442+ return;
52443+}
52444+
52445+static __u32
52446+count_user_objs(struct acl_object_label *userp)
52447+{
52448+ struct acl_object_label o_tmp;
52449+ __u32 num = 0;
52450+
52451+ while (userp) {
52452+ if (copy_from_user(&o_tmp, userp,
52453+ sizeof (struct acl_object_label)))
52454+ break;
52455+
52456+ userp = o_tmp.prev;
52457+ num++;
52458+ }
52459+
52460+ return num;
52461+}
52462+
52463+static struct acl_subject_label *
52464+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52465+
52466+static int
52467+copy_user_glob(struct acl_object_label *obj)
52468+{
52469+ struct acl_object_label *g_tmp, **guser;
52470+ unsigned int len;
52471+ char *tmp;
52472+
52473+ if (obj->globbed == NULL)
52474+ return 0;
52475+
52476+ guser = &obj->globbed;
52477+ while (*guser) {
52478+ g_tmp = (struct acl_object_label *)
52479+ acl_alloc(sizeof (struct acl_object_label));
52480+ if (g_tmp == NULL)
52481+ return -ENOMEM;
52482+
52483+ if (copy_from_user(g_tmp, *guser,
52484+ sizeof (struct acl_object_label)))
52485+ return -EFAULT;
52486+
52487+ len = strnlen_user(g_tmp->filename, PATH_MAX);
52488+
52489+ if (!len || len >= PATH_MAX)
52490+ return -EINVAL;
52491+
52492+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52493+ return -ENOMEM;
52494+
52495+ if (copy_from_user(tmp, g_tmp->filename, len))
52496+ return -EFAULT;
52497+ tmp[len-1] = '\0';
52498+ g_tmp->filename = tmp;
52499+
52500+ *guser = g_tmp;
52501+ guser = &(g_tmp->next);
52502+ }
52503+
52504+ return 0;
52505+}
52506+
52507+static int
52508+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52509+ struct acl_role_label *role)
52510+{
52511+ struct acl_object_label *o_tmp;
52512+ unsigned int len;
52513+ int ret;
52514+ char *tmp;
52515+
52516+ while (userp) {
52517+ if ((o_tmp = (struct acl_object_label *)
52518+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
52519+ return -ENOMEM;
52520+
52521+ if (copy_from_user(o_tmp, userp,
52522+ sizeof (struct acl_object_label)))
52523+ return -EFAULT;
52524+
52525+ userp = o_tmp->prev;
52526+
52527+ len = strnlen_user(o_tmp->filename, PATH_MAX);
52528+
52529+ if (!len || len >= PATH_MAX)
52530+ return -EINVAL;
52531+
52532+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52533+ return -ENOMEM;
52534+
52535+ if (copy_from_user(tmp, o_tmp->filename, len))
52536+ return -EFAULT;
52537+ tmp[len-1] = '\0';
52538+ o_tmp->filename = tmp;
52539+
52540+ insert_acl_obj_label(o_tmp, subj);
52541+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52542+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52543+ return -ENOMEM;
52544+
52545+ ret = copy_user_glob(o_tmp);
52546+ if (ret)
52547+ return ret;
52548+
52549+ if (o_tmp->nested) {
52550+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52551+ if (IS_ERR(o_tmp->nested))
52552+ return PTR_ERR(o_tmp->nested);
52553+
52554+ /* insert into nested subject list */
52555+ o_tmp->nested->next = role->hash->first;
52556+ role->hash->first = o_tmp->nested;
52557+ }
52558+ }
52559+
52560+ return 0;
52561+}
52562+
52563+static __u32
52564+count_user_subjs(struct acl_subject_label *userp)
52565+{
52566+ struct acl_subject_label s_tmp;
52567+ __u32 num = 0;
52568+
52569+ while (userp) {
52570+ if (copy_from_user(&s_tmp, userp,
52571+ sizeof (struct acl_subject_label)))
52572+ break;
52573+
52574+ userp = s_tmp.prev;
52575+ /* do not count nested subjects against this count, since
52576+ they are not included in the hash table, but are
52577+ attached to objects. We have already counted
52578+ the subjects in userspace for the allocation
52579+ stack
52580+ */
52581+ if (!(s_tmp.mode & GR_NESTED))
52582+ num++;
52583+ }
52584+
52585+ return num;
52586+}
52587+
52588+static int
52589+copy_user_allowedips(struct acl_role_label *rolep)
52590+{
52591+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52592+
52593+ ruserip = rolep->allowed_ips;
52594+
52595+ while (ruserip) {
52596+ rlast = rtmp;
52597+
52598+ if ((rtmp = (struct role_allowed_ip *)
52599+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52600+ return -ENOMEM;
52601+
52602+ if (copy_from_user(rtmp, ruserip,
52603+ sizeof (struct role_allowed_ip)))
52604+ return -EFAULT;
52605+
52606+ ruserip = rtmp->prev;
52607+
52608+ if (!rlast) {
52609+ rtmp->prev = NULL;
52610+ rolep->allowed_ips = rtmp;
52611+ } else {
52612+ rlast->next = rtmp;
52613+ rtmp->prev = rlast;
52614+ }
52615+
52616+ if (!ruserip)
52617+ rtmp->next = NULL;
52618+ }
52619+
52620+ return 0;
52621+}
52622+
52623+static int
52624+copy_user_transitions(struct acl_role_label *rolep)
52625+{
52626+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
52627+
52628+ unsigned int len;
52629+ char *tmp;
52630+
52631+ rusertp = rolep->transitions;
52632+
52633+ while (rusertp) {
52634+ rlast = rtmp;
52635+
52636+ if ((rtmp = (struct role_transition *)
52637+ acl_alloc(sizeof (struct role_transition))) == NULL)
52638+ return -ENOMEM;
52639+
52640+ if (copy_from_user(rtmp, rusertp,
52641+ sizeof (struct role_transition)))
52642+ return -EFAULT;
52643+
52644+ rusertp = rtmp->prev;
52645+
52646+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52647+
52648+ if (!len || len >= GR_SPROLE_LEN)
52649+ return -EINVAL;
52650+
52651+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52652+ return -ENOMEM;
52653+
52654+ if (copy_from_user(tmp, rtmp->rolename, len))
52655+ return -EFAULT;
52656+ tmp[len-1] = '\0';
52657+ rtmp->rolename = tmp;
52658+
52659+ if (!rlast) {
52660+ rtmp->prev = NULL;
52661+ rolep->transitions = rtmp;
52662+ } else {
52663+ rlast->next = rtmp;
52664+ rtmp->prev = rlast;
52665+ }
52666+
52667+ if (!rusertp)
52668+ rtmp->next = NULL;
52669+ }
52670+
52671+ return 0;
52672+}
52673+
52674+static struct acl_subject_label *
52675+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52676+{
52677+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52678+ unsigned int len;
52679+ char *tmp;
52680+ __u32 num_objs;
52681+ struct acl_ip_label **i_tmp, *i_utmp2;
52682+ struct gr_hash_struct ghash;
52683+ struct subject_map *subjmap;
52684+ unsigned int i_num;
52685+ int err;
52686+
52687+ s_tmp = lookup_subject_map(userp);
52688+
52689+ /* we've already copied this subject into the kernel, just return
52690+ the reference to it, and don't copy it over again
52691+ */
52692+ if (s_tmp)
52693+ return(s_tmp);
52694+
52695+ if ((s_tmp = (struct acl_subject_label *)
52696+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52697+ return ERR_PTR(-ENOMEM);
52698+
52699+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52700+ if (subjmap == NULL)
52701+ return ERR_PTR(-ENOMEM);
52702+
52703+ subjmap->user = userp;
52704+ subjmap->kernel = s_tmp;
52705+ insert_subj_map_entry(subjmap);
52706+
52707+ if (copy_from_user(s_tmp, userp,
52708+ sizeof (struct acl_subject_label)))
52709+ return ERR_PTR(-EFAULT);
52710+
52711+ len = strnlen_user(s_tmp->filename, PATH_MAX);
52712+
52713+ if (!len || len >= PATH_MAX)
52714+ return ERR_PTR(-EINVAL);
52715+
52716+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52717+ return ERR_PTR(-ENOMEM);
52718+
52719+ if (copy_from_user(tmp, s_tmp->filename, len))
52720+ return ERR_PTR(-EFAULT);
52721+ tmp[len-1] = '\0';
52722+ s_tmp->filename = tmp;
52723+
52724+ if (!strcmp(s_tmp->filename, "/"))
52725+ role->root_label = s_tmp;
52726+
52727+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52728+ return ERR_PTR(-EFAULT);
52729+
52730+ /* copy user and group transition tables */
52731+
52732+ if (s_tmp->user_trans_num) {
52733+ uid_t *uidlist;
52734+
52735+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52736+ if (uidlist == NULL)
52737+ return ERR_PTR(-ENOMEM);
52738+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52739+ return ERR_PTR(-EFAULT);
52740+
52741+ s_tmp->user_transitions = uidlist;
52742+ }
52743+
52744+ if (s_tmp->group_trans_num) {
52745+ gid_t *gidlist;
52746+
52747+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52748+ if (gidlist == NULL)
52749+ return ERR_PTR(-ENOMEM);
52750+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52751+ return ERR_PTR(-EFAULT);
52752+
52753+ s_tmp->group_transitions = gidlist;
52754+ }
52755+
52756+ /* set up object hash table */
52757+ num_objs = count_user_objs(ghash.first);
52758+
52759+ s_tmp->obj_hash_size = num_objs;
52760+ s_tmp->obj_hash =
52761+ (struct acl_object_label **)
52762+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52763+
52764+ if (!s_tmp->obj_hash)
52765+ return ERR_PTR(-ENOMEM);
52766+
52767+ memset(s_tmp->obj_hash, 0,
52768+ s_tmp->obj_hash_size *
52769+ sizeof (struct acl_object_label *));
52770+
52771+ /* add in objects */
52772+ err = copy_user_objs(ghash.first, s_tmp, role);
52773+
52774+ if (err)
52775+ return ERR_PTR(err);
52776+
52777+ /* set pointer for parent subject */
52778+ if (s_tmp->parent_subject) {
52779+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52780+
52781+ if (IS_ERR(s_tmp2))
52782+ return s_tmp2;
52783+
52784+ s_tmp->parent_subject = s_tmp2;
52785+ }
52786+
52787+ /* add in ip acls */
52788+
52789+ if (!s_tmp->ip_num) {
52790+ s_tmp->ips = NULL;
52791+ goto insert;
52792+ }
52793+
52794+ i_tmp =
52795+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52796+ sizeof (struct acl_ip_label *));
52797+
52798+ if (!i_tmp)
52799+ return ERR_PTR(-ENOMEM);
52800+
52801+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52802+ *(i_tmp + i_num) =
52803+ (struct acl_ip_label *)
52804+ acl_alloc(sizeof (struct acl_ip_label));
52805+ if (!*(i_tmp + i_num))
52806+ return ERR_PTR(-ENOMEM);
52807+
52808+ if (copy_from_user
52809+ (&i_utmp2, s_tmp->ips + i_num,
52810+ sizeof (struct acl_ip_label *)))
52811+ return ERR_PTR(-EFAULT);
52812+
52813+ if (copy_from_user
52814+ (*(i_tmp + i_num), i_utmp2,
52815+ sizeof (struct acl_ip_label)))
52816+ return ERR_PTR(-EFAULT);
52817+
52818+ if ((*(i_tmp + i_num))->iface == NULL)
52819+ continue;
52820+
52821+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52822+ if (!len || len >= IFNAMSIZ)
52823+ return ERR_PTR(-EINVAL);
52824+ tmp = acl_alloc(len);
52825+ if (tmp == NULL)
52826+ return ERR_PTR(-ENOMEM);
52827+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52828+ return ERR_PTR(-EFAULT);
52829+ (*(i_tmp + i_num))->iface = tmp;
52830+ }
52831+
52832+ s_tmp->ips = i_tmp;
52833+
52834+insert:
52835+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52836+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52837+ return ERR_PTR(-ENOMEM);
52838+
52839+ return s_tmp;
52840+}
52841+
52842+static int
52843+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52844+{
52845+ struct acl_subject_label s_pre;
52846+ struct acl_subject_label * ret;
52847+ int err;
52848+
52849+ while (userp) {
52850+ if (copy_from_user(&s_pre, userp,
52851+ sizeof (struct acl_subject_label)))
52852+ return -EFAULT;
52853+
52854+ /* do not add nested subjects here, add
52855+ while parsing objects
52856+ */
52857+
52858+ if (s_pre.mode & GR_NESTED) {
52859+ userp = s_pre.prev;
52860+ continue;
52861+ }
52862+
52863+ ret = do_copy_user_subj(userp, role);
52864+
52865+ err = PTR_ERR(ret);
52866+ if (IS_ERR(ret))
52867+ return err;
52868+
52869+ insert_acl_subj_label(ret, role);
52870+
52871+ userp = s_pre.prev;
52872+ }
52873+
52874+ return 0;
52875+}
52876+
52877+static int
52878+copy_user_acl(struct gr_arg *arg)
52879+{
52880+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52881+ struct sprole_pw *sptmp;
52882+ struct gr_hash_struct *ghash;
52883+ uid_t *domainlist;
52884+ unsigned int r_num;
52885+ unsigned int len;
52886+ char *tmp;
52887+ int err = 0;
52888+ __u16 i;
52889+ __u32 num_subjs;
52890+
52891+ /* we need a default and kernel role */
52892+ if (arg->role_db.num_roles < 2)
52893+ return -EINVAL;
52894+
52895+ /* copy special role authentication info from userspace */
52896+
52897+ num_sprole_pws = arg->num_sprole_pws;
52898+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52899+
52900+ if (!acl_special_roles && num_sprole_pws)
52901+ return -ENOMEM;
52902+
52903+ for (i = 0; i < num_sprole_pws; i++) {
52904+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52905+ if (!sptmp)
52906+ return -ENOMEM;
52907+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52908+ sizeof (struct sprole_pw)))
52909+ return -EFAULT;
52910+
52911+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52912+
52913+ if (!len || len >= GR_SPROLE_LEN)
52914+ return -EINVAL;
52915+
52916+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52917+ return -ENOMEM;
52918+
52919+ if (copy_from_user(tmp, sptmp->rolename, len))
52920+ return -EFAULT;
52921+
52922+ tmp[len-1] = '\0';
52923+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52924+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52925+#endif
52926+ sptmp->rolename = tmp;
52927+ acl_special_roles[i] = sptmp;
52928+ }
52929+
52930+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52931+
52932+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52933+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52934+
52935+ if (!r_tmp)
52936+ return -ENOMEM;
52937+
52938+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52939+ sizeof (struct acl_role_label *)))
52940+ return -EFAULT;
52941+
52942+ if (copy_from_user(r_tmp, r_utmp2,
52943+ sizeof (struct acl_role_label)))
52944+ return -EFAULT;
52945+
52946+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52947+
52948+ if (!len || len >= PATH_MAX)
52949+ return -EINVAL;
52950+
52951+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52952+ return -ENOMEM;
52953+
52954+ if (copy_from_user(tmp, r_tmp->rolename, len))
52955+ return -EFAULT;
52956+
52957+ tmp[len-1] = '\0';
52958+ r_tmp->rolename = tmp;
52959+
52960+ if (!strcmp(r_tmp->rolename, "default")
52961+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52962+ default_role = r_tmp;
52963+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52964+ kernel_role = r_tmp;
52965+ }
52966+
52967+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52968+ return -ENOMEM;
52969+
52970+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52971+ return -EFAULT;
52972+
52973+ r_tmp->hash = ghash;
52974+
52975+ num_subjs = count_user_subjs(r_tmp->hash->first);
52976+
52977+ r_tmp->subj_hash_size = num_subjs;
52978+ r_tmp->subj_hash =
52979+ (struct acl_subject_label **)
52980+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52981+
52982+ if (!r_tmp->subj_hash)
52983+ return -ENOMEM;
52984+
52985+ err = copy_user_allowedips(r_tmp);
52986+ if (err)
52987+ return err;
52988+
52989+ /* copy domain info */
52990+ if (r_tmp->domain_children != NULL) {
52991+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52992+ if (domainlist == NULL)
52993+ return -ENOMEM;
52994+
52995+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52996+ return -EFAULT;
52997+
52998+ r_tmp->domain_children = domainlist;
52999+ }
53000+
53001+ err = copy_user_transitions(r_tmp);
53002+ if (err)
53003+ return err;
53004+
53005+ memset(r_tmp->subj_hash, 0,
53006+ r_tmp->subj_hash_size *
53007+ sizeof (struct acl_subject_label *));
53008+
53009+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
53010+
53011+ if (err)
53012+ return err;
53013+
53014+ /* set nested subject list to null */
53015+ r_tmp->hash->first = NULL;
53016+
53017+ insert_acl_role_label(r_tmp);
53018+ }
53019+
53020+ if (default_role == NULL || kernel_role == NULL)
53021+ return -EINVAL;
53022+
53023+ return err;
53024+}
53025+
53026+static int
53027+gracl_init(struct gr_arg *args)
53028+{
53029+ int error = 0;
53030+
53031+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
53032+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
53033+
53034+ if (init_variables(args)) {
53035+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
53036+ error = -ENOMEM;
53037+ free_variables();
53038+ goto out;
53039+ }
53040+
53041+ error = copy_user_acl(args);
53042+ free_init_variables();
53043+ if (error) {
53044+ free_variables();
53045+ goto out;
53046+ }
53047+
53048+ if ((error = gr_set_acls(0))) {
53049+ free_variables();
53050+ goto out;
53051+ }
53052+
53053+ pax_open_kernel();
53054+ gr_status |= GR_READY;
53055+ pax_close_kernel();
53056+
53057+ out:
53058+ return error;
53059+}
53060+
53061+/* derived from glibc fnmatch() 0: match, 1: no match*/
53062+
53063+static int
53064+glob_match(const char *p, const char *n)
53065+{
53066+ char c;
53067+
53068+ while ((c = *p++) != '\0') {
53069+ switch (c) {
53070+ case '?':
53071+ if (*n == '\0')
53072+ return 1;
53073+ else if (*n == '/')
53074+ return 1;
53075+ break;
53076+ case '\\':
53077+ if (*n != c)
53078+ return 1;
53079+ break;
53080+ case '*':
53081+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
53082+ if (*n == '/')
53083+ return 1;
53084+ else if (c == '?') {
53085+ if (*n == '\0')
53086+ return 1;
53087+ else
53088+ ++n;
53089+ }
53090+ }
53091+ if (c == '\0') {
53092+ return 0;
53093+ } else {
53094+ const char *endp;
53095+
53096+ if ((endp = strchr(n, '/')) == NULL)
53097+ endp = n + strlen(n);
53098+
53099+ if (c == '[') {
53100+ for (--p; n < endp; ++n)
53101+ if (!glob_match(p, n))
53102+ return 0;
53103+ } else if (c == '/') {
53104+ while (*n != '\0' && *n != '/')
53105+ ++n;
53106+ if (*n == '/' && !glob_match(p, n + 1))
53107+ return 0;
53108+ } else {
53109+ for (--p; n < endp; ++n)
53110+ if (*n == c && !glob_match(p, n))
53111+ return 0;
53112+ }
53113+
53114+ return 1;
53115+ }
53116+ case '[':
53117+ {
53118+ int not;
53119+ char cold;
53120+
53121+ if (*n == '\0' || *n == '/')
53122+ return 1;
53123+
53124+ not = (*p == '!' || *p == '^');
53125+ if (not)
53126+ ++p;
53127+
53128+ c = *p++;
53129+ for (;;) {
53130+ unsigned char fn = (unsigned char)*n;
53131+
53132+ if (c == '\0')
53133+ return 1;
53134+ else {
53135+ if (c == fn)
53136+ goto matched;
53137+ cold = c;
53138+ c = *p++;
53139+
53140+ if (c == '-' && *p != ']') {
53141+ unsigned char cend = *p++;
53142+
53143+ if (cend == '\0')
53144+ return 1;
53145+
53146+ if (cold <= fn && fn <= cend)
53147+ goto matched;
53148+
53149+ c = *p++;
53150+ }
53151+ }
53152+
53153+ if (c == ']')
53154+ break;
53155+ }
53156+ if (!not)
53157+ return 1;
53158+ break;
53159+ matched:
53160+ while (c != ']') {
53161+ if (c == '\0')
53162+ return 1;
53163+
53164+ c = *p++;
53165+ }
53166+ if (not)
53167+ return 1;
53168+ }
53169+ break;
53170+ default:
53171+ if (c != *n)
53172+ return 1;
53173+ }
53174+
53175+ ++n;
53176+ }
53177+
53178+ if (*n == '\0')
53179+ return 0;
53180+
53181+ if (*n == '/')
53182+ return 0;
53183+
53184+ return 1;
53185+}
53186+
53187+static struct acl_object_label *
53188+chk_glob_label(struct acl_object_label *globbed,
53189+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
53190+{
53191+ struct acl_object_label *tmp;
53192+
53193+ if (*path == NULL)
53194+ *path = gr_to_filename_nolock(dentry, mnt);
53195+
53196+ tmp = globbed;
53197+
53198+ while (tmp) {
53199+ if (!glob_match(tmp->filename, *path))
53200+ return tmp;
53201+ tmp = tmp->next;
53202+ }
53203+
53204+ return NULL;
53205+}
53206+
53207+static struct acl_object_label *
53208+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53209+ const ino_t curr_ino, const dev_t curr_dev,
53210+ const struct acl_subject_label *subj, char **path, const int checkglob)
53211+{
53212+ struct acl_subject_label *tmpsubj;
53213+ struct acl_object_label *retval;
53214+ struct acl_object_label *retval2;
53215+
53216+ tmpsubj = (struct acl_subject_label *) subj;
53217+ read_lock(&gr_inode_lock);
53218+ do {
53219+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53220+ if (retval) {
53221+ if (checkglob && retval->globbed) {
53222+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
53223+ if (retval2)
53224+ retval = retval2;
53225+ }
53226+ break;
53227+ }
53228+ } while ((tmpsubj = tmpsubj->parent_subject));
53229+ read_unlock(&gr_inode_lock);
53230+
53231+ return retval;
53232+}
53233+
53234+static __inline__ struct acl_object_label *
53235+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53236+ struct dentry *curr_dentry,
53237+ const struct acl_subject_label *subj, char **path, const int checkglob)
53238+{
53239+ int newglob = checkglob;
53240+ ino_t inode;
53241+ dev_t device;
53242+
53243+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
53244+ as we don't want a / * rule to match instead of the / object
53245+ don't do this for create lookups that call this function though, since they're looking up
53246+ on the parent and thus need globbing checks on all paths
53247+ */
53248+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53249+ newglob = GR_NO_GLOB;
53250+
53251+ spin_lock(&curr_dentry->d_lock);
53252+ inode = curr_dentry->d_inode->i_ino;
53253+ device = __get_dev(curr_dentry);
53254+ spin_unlock(&curr_dentry->d_lock);
53255+
53256+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
53257+}
53258+
53259+static struct acl_object_label *
53260+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53261+ const struct acl_subject_label *subj, char *path, const int checkglob)
53262+{
53263+ struct dentry *dentry = (struct dentry *) l_dentry;
53264+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53265+ struct mount *real_mnt = real_mount(mnt);
53266+ struct acl_object_label *retval;
53267+ struct dentry *parent;
53268+
53269+ write_seqlock(&rename_lock);
53270+ br_read_lock(vfsmount_lock);
53271+
53272+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53273+#ifdef CONFIG_NET
53274+ mnt == sock_mnt ||
53275+#endif
53276+#ifdef CONFIG_HUGETLBFS
53277+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
53278+#endif
53279+ /* ignore Eric Biederman */
53280+ IS_PRIVATE(l_dentry->d_inode))) {
53281+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
53282+ goto out;
53283+ }
53284+
53285+ for (;;) {
53286+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53287+ break;
53288+
53289+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53290+ if (!mnt_has_parent(real_mnt))
53291+ break;
53292+
53293+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53294+ if (retval != NULL)
53295+ goto out;
53296+
53297+ dentry = real_mnt->mnt_mountpoint;
53298+ real_mnt = real_mnt->mnt_parent;
53299+ mnt = &real_mnt->mnt;
53300+ continue;
53301+ }
53302+
53303+ parent = dentry->d_parent;
53304+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53305+ if (retval != NULL)
53306+ goto out;
53307+
53308+ dentry = parent;
53309+ }
53310+
53311+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53312+
53313+ /* real_root is pinned so we don't have to hold a reference */
53314+ if (retval == NULL)
53315+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
53316+out:
53317+ br_read_unlock(vfsmount_lock);
53318+ write_sequnlock(&rename_lock);
53319+
53320+ BUG_ON(retval == NULL);
53321+
53322+ return retval;
53323+}
53324+
53325+static __inline__ struct acl_object_label *
53326+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53327+ const struct acl_subject_label *subj)
53328+{
53329+ char *path = NULL;
53330+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
53331+}
53332+
53333+static __inline__ struct acl_object_label *
53334+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53335+ const struct acl_subject_label *subj)
53336+{
53337+ char *path = NULL;
53338+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
53339+}
53340+
53341+static __inline__ struct acl_object_label *
53342+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53343+ const struct acl_subject_label *subj, char *path)
53344+{
53345+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
53346+}
53347+
53348+static struct acl_subject_label *
53349+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53350+ const struct acl_role_label *role)
53351+{
53352+ struct dentry *dentry = (struct dentry *) l_dentry;
53353+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53354+ struct mount *real_mnt = real_mount(mnt);
53355+ struct acl_subject_label *retval;
53356+ struct dentry *parent;
53357+
53358+ write_seqlock(&rename_lock);
53359+ br_read_lock(vfsmount_lock);
53360+
53361+ for (;;) {
53362+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53363+ break;
53364+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53365+ if (!mnt_has_parent(real_mnt))
53366+ break;
53367+
53368+ spin_lock(&dentry->d_lock);
53369+ read_lock(&gr_inode_lock);
53370+ retval =
53371+ lookup_acl_subj_label(dentry->d_inode->i_ino,
53372+ __get_dev(dentry), role);
53373+ read_unlock(&gr_inode_lock);
53374+ spin_unlock(&dentry->d_lock);
53375+ if (retval != NULL)
53376+ goto out;
53377+
53378+ dentry = real_mnt->mnt_mountpoint;
53379+ real_mnt = real_mnt->mnt_parent;
53380+ mnt = &real_mnt->mnt;
53381+ continue;
53382+ }
53383+
53384+ spin_lock(&dentry->d_lock);
53385+ read_lock(&gr_inode_lock);
53386+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53387+ __get_dev(dentry), role);
53388+ read_unlock(&gr_inode_lock);
53389+ parent = dentry->d_parent;
53390+ spin_unlock(&dentry->d_lock);
53391+
53392+ if (retval != NULL)
53393+ goto out;
53394+
53395+ dentry = parent;
53396+ }
53397+
53398+ spin_lock(&dentry->d_lock);
53399+ read_lock(&gr_inode_lock);
53400+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53401+ __get_dev(dentry), role);
53402+ read_unlock(&gr_inode_lock);
53403+ spin_unlock(&dentry->d_lock);
53404+
53405+ if (unlikely(retval == NULL)) {
53406+ /* real_root is pinned, we don't need to hold a reference */
53407+ read_lock(&gr_inode_lock);
53408+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53409+ __get_dev(real_root.dentry), role);
53410+ read_unlock(&gr_inode_lock);
53411+ }
53412+out:
53413+ br_read_unlock(vfsmount_lock);
53414+ write_sequnlock(&rename_lock);
53415+
53416+ BUG_ON(retval == NULL);
53417+
53418+ return retval;
53419+}
53420+
53421+static void
53422+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53423+{
53424+ struct task_struct *task = current;
53425+ const struct cred *cred = current_cred();
53426+
53427+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53428+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53429+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53430+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53431+
53432+ return;
53433+}
53434+
53435+static void
53436+gr_log_learn_sysctl(const char *path, const __u32 mode)
53437+{
53438+ struct task_struct *task = current;
53439+ const struct cred *cred = current_cred();
53440+
53441+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53442+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53443+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53444+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
53445+
53446+ return;
53447+}
53448+
53449+static void
53450+gr_log_learn_id_change(const char type, const unsigned int real,
53451+ const unsigned int effective, const unsigned int fs)
53452+{
53453+ struct task_struct *task = current;
53454+ const struct cred *cred = current_cred();
53455+
53456+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53457+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53458+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53459+ type, real, effective, fs, &task->signal->saved_ip);
53460+
53461+ return;
53462+}
53463+
53464+__u32
53465+gr_search_file(const struct dentry * dentry, const __u32 mode,
53466+ const struct vfsmount * mnt)
53467+{
53468+ __u32 retval = mode;
53469+ struct acl_subject_label *curracl;
53470+ struct acl_object_label *currobj;
53471+
53472+ if (unlikely(!(gr_status & GR_READY)))
53473+ return (mode & ~GR_AUDITS);
53474+
53475+ curracl = current->acl;
53476+
53477+ currobj = chk_obj_label(dentry, mnt, curracl);
53478+ retval = currobj->mode & mode;
53479+
53480+ /* if we're opening a specified transfer file for writing
53481+ (e.g. /dev/initctl), then transfer our role to init
53482+ */
53483+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53484+ current->role->roletype & GR_ROLE_PERSIST)) {
53485+ struct task_struct *task = init_pid_ns.child_reaper;
53486+
53487+ if (task->role != current->role) {
53488+ task->acl_sp_role = 0;
53489+ task->acl_role_id = current->acl_role_id;
53490+ task->role = current->role;
53491+ rcu_read_lock();
53492+ read_lock(&grsec_exec_file_lock);
53493+ gr_apply_subject_to_task(task);
53494+ read_unlock(&grsec_exec_file_lock);
53495+ rcu_read_unlock();
53496+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53497+ }
53498+ }
53499+
53500+ if (unlikely
53501+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53502+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53503+ __u32 new_mode = mode;
53504+
53505+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53506+
53507+ retval = new_mode;
53508+
53509+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53510+ new_mode |= GR_INHERIT;
53511+
53512+ if (!(mode & GR_NOLEARN))
53513+ gr_log_learn(dentry, mnt, new_mode);
53514+ }
53515+
53516+ return retval;
53517+}
53518+
53519+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53520+ const struct dentry *parent,
53521+ const struct vfsmount *mnt)
53522+{
53523+ struct name_entry *match;
53524+ struct acl_object_label *matchpo;
53525+ struct acl_subject_label *curracl;
53526+ char *path;
53527+
53528+ if (unlikely(!(gr_status & GR_READY)))
53529+ return NULL;
53530+
53531+ preempt_disable();
53532+ path = gr_to_filename_rbac(new_dentry, mnt);
53533+ match = lookup_name_entry_create(path);
53534+
53535+ curracl = current->acl;
53536+
53537+ if (match) {
53538+ read_lock(&gr_inode_lock);
53539+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53540+ read_unlock(&gr_inode_lock);
53541+
53542+ if (matchpo) {
53543+ preempt_enable();
53544+ return matchpo;
53545+ }
53546+ }
53547+
53548+ // lookup parent
53549+
53550+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53551+
53552+ preempt_enable();
53553+ return matchpo;
53554+}
53555+
53556+__u32
53557+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53558+ const struct vfsmount * mnt, const __u32 mode)
53559+{
53560+ struct acl_object_label *matchpo;
53561+ __u32 retval;
53562+
53563+ if (unlikely(!(gr_status & GR_READY)))
53564+ return (mode & ~GR_AUDITS);
53565+
53566+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
53567+
53568+ retval = matchpo->mode & mode;
53569+
53570+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53571+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53572+ __u32 new_mode = mode;
53573+
53574+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53575+
53576+ gr_log_learn(new_dentry, mnt, new_mode);
53577+ return new_mode;
53578+ }
53579+
53580+ return retval;
53581+}
53582+
53583+__u32
53584+gr_check_link(const struct dentry * new_dentry,
53585+ const struct dentry * parent_dentry,
53586+ const struct vfsmount * parent_mnt,
53587+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53588+{
53589+ struct acl_object_label *obj;
53590+ __u32 oldmode, newmode;
53591+ __u32 needmode;
53592+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53593+ GR_DELETE | GR_INHERIT;
53594+
53595+ if (unlikely(!(gr_status & GR_READY)))
53596+ return (GR_CREATE | GR_LINK);
53597+
53598+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53599+ oldmode = obj->mode;
53600+
53601+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53602+ newmode = obj->mode;
53603+
53604+ needmode = newmode & checkmodes;
53605+
53606+ // old name for hardlink must have at least the permissions of the new name
53607+ if ((oldmode & needmode) != needmode)
53608+ goto bad;
53609+
53610+ // if old name had restrictions/auditing, make sure the new name does as well
53611+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53612+
53613+ // don't allow hardlinking of suid/sgid files without permission
53614+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53615+ needmode |= GR_SETID;
53616+
53617+ if ((newmode & needmode) != needmode)
53618+ goto bad;
53619+
53620+ // enforce minimum permissions
53621+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53622+ return newmode;
53623+bad:
53624+ needmode = oldmode;
53625+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53626+ needmode |= GR_SETID;
53627+
53628+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53629+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53630+ return (GR_CREATE | GR_LINK);
53631+ } else if (newmode & GR_SUPPRESS)
53632+ return GR_SUPPRESS;
53633+ else
53634+ return 0;
53635+}
53636+
53637+int
53638+gr_check_hidden_task(const struct task_struct *task)
53639+{
53640+ if (unlikely(!(gr_status & GR_READY)))
53641+ return 0;
53642+
53643+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53644+ return 1;
53645+
53646+ return 0;
53647+}
53648+
53649+int
53650+gr_check_protected_task(const struct task_struct *task)
53651+{
53652+ if (unlikely(!(gr_status & GR_READY) || !task))
53653+ return 0;
53654+
53655+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53656+ task->acl != current->acl)
53657+ return 1;
53658+
53659+ return 0;
53660+}
53661+
53662+int
53663+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53664+{
53665+ struct task_struct *p;
53666+ int ret = 0;
53667+
53668+ if (unlikely(!(gr_status & GR_READY) || !pid))
53669+ return ret;
53670+
53671+ read_lock(&tasklist_lock);
53672+ do_each_pid_task(pid, type, p) {
53673+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53674+ p->acl != current->acl) {
53675+ ret = 1;
53676+ goto out;
53677+ }
53678+ } while_each_pid_task(pid, type, p);
53679+out:
53680+ read_unlock(&tasklist_lock);
53681+
53682+ return ret;
53683+}
53684+
53685+void
53686+gr_copy_label(struct task_struct *tsk)
53687+{
53688+ /* plain copying of fields is already done by dup_task_struct */
53689+ tsk->signal->used_accept = 0;
53690+ tsk->acl_sp_role = 0;
53691+ //tsk->acl_role_id = current->acl_role_id;
53692+ //tsk->acl = current->acl;
53693+ //tsk->role = current->role;
53694+ tsk->signal->curr_ip = current->signal->curr_ip;
53695+ tsk->signal->saved_ip = current->signal->saved_ip;
53696+ if (current->exec_file)
53697+ get_file(current->exec_file);
53698+ //tsk->exec_file = current->exec_file;
53699+ //tsk->is_writable = current->is_writable;
53700+ if (unlikely(current->signal->used_accept)) {
53701+ current->signal->curr_ip = 0;
53702+ current->signal->saved_ip = 0;
53703+ }
53704+
53705+ return;
53706+}
53707+
53708+static void
53709+gr_set_proc_res(struct task_struct *task)
53710+{
53711+ struct acl_subject_label *proc;
53712+ unsigned short i;
53713+
53714+ proc = task->acl;
53715+
53716+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53717+ return;
53718+
53719+ for (i = 0; i < RLIM_NLIMITS; i++) {
53720+ if (!(proc->resmask & (1 << i)))
53721+ continue;
53722+
53723+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53724+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53725+ }
53726+
53727+ return;
53728+}
53729+
53730+extern int __gr_process_user_ban(struct user_struct *user);
53731+
53732+int
53733+gr_check_user_change(int real, int effective, int fs)
53734+{
53735+ unsigned int i;
53736+ __u16 num;
53737+ uid_t *uidlist;
53738+ int curuid;
53739+ int realok = 0;
53740+ int effectiveok = 0;
53741+ int fsok = 0;
53742+
53743+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53744+ struct user_struct *user;
53745+
53746+ if (real == -1)
53747+ goto skipit;
53748+
53749+ user = find_user(real);
53750+ if (user == NULL)
53751+ goto skipit;
53752+
53753+ if (__gr_process_user_ban(user)) {
53754+ /* for find_user */
53755+ free_uid(user);
53756+ return 1;
53757+ }
53758+
53759+ /* for find_user */
53760+ free_uid(user);
53761+
53762+skipit:
53763+#endif
53764+
53765+ if (unlikely(!(gr_status & GR_READY)))
53766+ return 0;
53767+
53768+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53769+ gr_log_learn_id_change('u', real, effective, fs);
53770+
53771+ num = current->acl->user_trans_num;
53772+ uidlist = current->acl->user_transitions;
53773+
53774+ if (uidlist == NULL)
53775+ return 0;
53776+
53777+ if (real == -1)
53778+ realok = 1;
53779+ if (effective == -1)
53780+ effectiveok = 1;
53781+ if (fs == -1)
53782+ fsok = 1;
53783+
53784+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53785+ for (i = 0; i < num; i++) {
53786+ curuid = (int)uidlist[i];
53787+ if (real == curuid)
53788+ realok = 1;
53789+ if (effective == curuid)
53790+ effectiveok = 1;
53791+ if (fs == curuid)
53792+ fsok = 1;
53793+ }
53794+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53795+ for (i = 0; i < num; i++) {
53796+ curuid = (int)uidlist[i];
53797+ if (real == curuid)
53798+ break;
53799+ if (effective == curuid)
53800+ break;
53801+ if (fs == curuid)
53802+ break;
53803+ }
53804+ /* not in deny list */
53805+ if (i == num) {
53806+ realok = 1;
53807+ effectiveok = 1;
53808+ fsok = 1;
53809+ }
53810+ }
53811+
53812+ if (realok && effectiveok && fsok)
53813+ return 0;
53814+ else {
53815+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53816+ return 1;
53817+ }
53818+}
53819+
53820+int
53821+gr_check_group_change(int real, int effective, int fs)
53822+{
53823+ unsigned int i;
53824+ __u16 num;
53825+ gid_t *gidlist;
53826+ int curgid;
53827+ int realok = 0;
53828+ int effectiveok = 0;
53829+ int fsok = 0;
53830+
53831+ if (unlikely(!(gr_status & GR_READY)))
53832+ return 0;
53833+
53834+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53835+ gr_log_learn_id_change('g', real, effective, fs);
53836+
53837+ num = current->acl->group_trans_num;
53838+ gidlist = current->acl->group_transitions;
53839+
53840+ if (gidlist == NULL)
53841+ return 0;
53842+
53843+ if (real == -1)
53844+ realok = 1;
53845+ if (effective == -1)
53846+ effectiveok = 1;
53847+ if (fs == -1)
53848+ fsok = 1;
53849+
53850+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53851+ for (i = 0; i < num; i++) {
53852+ curgid = (int)gidlist[i];
53853+ if (real == curgid)
53854+ realok = 1;
53855+ if (effective == curgid)
53856+ effectiveok = 1;
53857+ if (fs == curgid)
53858+ fsok = 1;
53859+ }
53860+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53861+ for (i = 0; i < num; i++) {
53862+ curgid = (int)gidlist[i];
53863+ if (real == curgid)
53864+ break;
53865+ if (effective == curgid)
53866+ break;
53867+ if (fs == curgid)
53868+ break;
53869+ }
53870+ /* not in deny list */
53871+ if (i == num) {
53872+ realok = 1;
53873+ effectiveok = 1;
53874+ fsok = 1;
53875+ }
53876+ }
53877+
53878+ if (realok && effectiveok && fsok)
53879+ return 0;
53880+ else {
53881+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53882+ return 1;
53883+ }
53884+}
53885+
53886+extern int gr_acl_is_capable(const int cap);
53887+
53888+void
53889+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53890+{
53891+ struct acl_role_label *role = task->role;
53892+ struct acl_subject_label *subj = NULL;
53893+ struct acl_object_label *obj;
53894+ struct file *filp;
53895+
53896+ if (unlikely(!(gr_status & GR_READY)))
53897+ return;
53898+
53899+ filp = task->exec_file;
53900+
53901+ /* kernel process, we'll give them the kernel role */
53902+ if (unlikely(!filp)) {
53903+ task->role = kernel_role;
53904+ task->acl = kernel_role->root_label;
53905+ return;
53906+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53907+ role = lookup_acl_role_label(task, uid, gid);
53908+
53909+ /* don't change the role if we're not a privileged process */
53910+ if (role && task->role != role &&
53911+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53912+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53913+ return;
53914+
53915+ /* perform subject lookup in possibly new role
53916+ we can use this result below in the case where role == task->role
53917+ */
53918+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53919+
53920+ /* if we changed uid/gid, but result in the same role
53921+ and are using inheritance, don't lose the inherited subject
53922+ if current subject is other than what normal lookup
53923+ would result in, we arrived via inheritance, don't
53924+ lose subject
53925+ */
53926+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53927+ (subj == task->acl)))
53928+ task->acl = subj;
53929+
53930+ task->role = role;
53931+
53932+ task->is_writable = 0;
53933+
53934+ /* ignore additional mmap checks for processes that are writable
53935+ by the default ACL */
53936+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53937+ if (unlikely(obj->mode & GR_WRITE))
53938+ task->is_writable = 1;
53939+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53940+ if (unlikely(obj->mode & GR_WRITE))
53941+ task->is_writable = 1;
53942+
53943+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53944+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53945+#endif
53946+
53947+ gr_set_proc_res(task);
53948+
53949+ return;
53950+}
53951+
53952+int
53953+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53954+ const int unsafe_flags)
53955+{
53956+ struct task_struct *task = current;
53957+ struct acl_subject_label *newacl;
53958+ struct acl_object_label *obj;
53959+ __u32 retmode;
53960+
53961+ if (unlikely(!(gr_status & GR_READY)))
53962+ return 0;
53963+
53964+ newacl = chk_subj_label(dentry, mnt, task->role);
53965+
53966+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53967+ did an exec
53968+ */
53969+ rcu_read_lock();
53970+ read_lock(&tasklist_lock);
53971+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53972+ (task->parent->acl->mode & GR_POVERRIDE))) {
53973+ read_unlock(&tasklist_lock);
53974+ rcu_read_unlock();
53975+ goto skip_check;
53976+ }
53977+ read_unlock(&tasklist_lock);
53978+ rcu_read_unlock();
53979+
53980+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53981+ !(task->role->roletype & GR_ROLE_GOD) &&
53982+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53983+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53984+ if (unsafe_flags & LSM_UNSAFE_SHARE)
53985+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53986+ else
53987+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53988+ return -EACCES;
53989+ }
53990+
53991+skip_check:
53992+
53993+ obj = chk_obj_label(dentry, mnt, task->acl);
53994+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53995+
53996+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53997+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53998+ if (obj->nested)
53999+ task->acl = obj->nested;
54000+ else
54001+ task->acl = newacl;
54002+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
54003+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
54004+
54005+ task->is_writable = 0;
54006+
54007+ /* ignore additional mmap checks for processes that are writable
54008+ by the default ACL */
54009+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
54010+ if (unlikely(obj->mode & GR_WRITE))
54011+ task->is_writable = 1;
54012+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
54013+ if (unlikely(obj->mode & GR_WRITE))
54014+ task->is_writable = 1;
54015+
54016+ gr_set_proc_res(task);
54017+
54018+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54019+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54020+#endif
54021+ return 0;
54022+}
54023+
54024+/* always called with valid inodev ptr */
54025+static void
54026+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
54027+{
54028+ struct acl_object_label *matchpo;
54029+ struct acl_subject_label *matchps;
54030+ struct acl_subject_label *subj;
54031+ struct acl_role_label *role;
54032+ unsigned int x;
54033+
54034+ FOR_EACH_ROLE_START(role)
54035+ FOR_EACH_SUBJECT_START(role, subj, x)
54036+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
54037+ matchpo->mode |= GR_DELETED;
54038+ FOR_EACH_SUBJECT_END(subj,x)
54039+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54040+ if (subj->inode == ino && subj->device == dev)
54041+ subj->mode |= GR_DELETED;
54042+ FOR_EACH_NESTED_SUBJECT_END(subj)
54043+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
54044+ matchps->mode |= GR_DELETED;
54045+ FOR_EACH_ROLE_END(role)
54046+
54047+ inodev->nentry->deleted = 1;
54048+
54049+ return;
54050+}
54051+
54052+void
54053+gr_handle_delete(const ino_t ino, const dev_t dev)
54054+{
54055+ struct inodev_entry *inodev;
54056+
54057+ if (unlikely(!(gr_status & GR_READY)))
54058+ return;
54059+
54060+ write_lock(&gr_inode_lock);
54061+ inodev = lookup_inodev_entry(ino, dev);
54062+ if (inodev != NULL)
54063+ do_handle_delete(inodev, ino, dev);
54064+ write_unlock(&gr_inode_lock);
54065+
54066+ return;
54067+}
54068+
54069+static void
54070+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54071+ const ino_t newinode, const dev_t newdevice,
54072+ struct acl_subject_label *subj)
54073+{
54074+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54075+ struct acl_object_label *match;
54076+
54077+ match = subj->obj_hash[index];
54078+
54079+ while (match && (match->inode != oldinode ||
54080+ match->device != olddevice ||
54081+ !(match->mode & GR_DELETED)))
54082+ match = match->next;
54083+
54084+ if (match && (match->inode == oldinode)
54085+ && (match->device == olddevice)
54086+ && (match->mode & GR_DELETED)) {
54087+ if (match->prev == NULL) {
54088+ subj->obj_hash[index] = match->next;
54089+ if (match->next != NULL)
54090+ match->next->prev = NULL;
54091+ } else {
54092+ match->prev->next = match->next;
54093+ if (match->next != NULL)
54094+ match->next->prev = match->prev;
54095+ }
54096+ match->prev = NULL;
54097+ match->next = NULL;
54098+ match->inode = newinode;
54099+ match->device = newdevice;
54100+ match->mode &= ~GR_DELETED;
54101+
54102+ insert_acl_obj_label(match, subj);
54103+ }
54104+
54105+ return;
54106+}
54107+
54108+static void
54109+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54110+ const ino_t newinode, const dev_t newdevice,
54111+ struct acl_role_label *role)
54112+{
54113+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54114+ struct acl_subject_label *match;
54115+
54116+ match = role->subj_hash[index];
54117+
54118+ while (match && (match->inode != oldinode ||
54119+ match->device != olddevice ||
54120+ !(match->mode & GR_DELETED)))
54121+ match = match->next;
54122+
54123+ if (match && (match->inode == oldinode)
54124+ && (match->device == olddevice)
54125+ && (match->mode & GR_DELETED)) {
54126+ if (match->prev == NULL) {
54127+ role->subj_hash[index] = match->next;
54128+ if (match->next != NULL)
54129+ match->next->prev = NULL;
54130+ } else {
54131+ match->prev->next = match->next;
54132+ if (match->next != NULL)
54133+ match->next->prev = match->prev;
54134+ }
54135+ match->prev = NULL;
54136+ match->next = NULL;
54137+ match->inode = newinode;
54138+ match->device = newdevice;
54139+ match->mode &= ~GR_DELETED;
54140+
54141+ insert_acl_subj_label(match, role);
54142+ }
54143+
54144+ return;
54145+}
54146+
54147+static void
54148+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54149+ const ino_t newinode, const dev_t newdevice)
54150+{
54151+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54152+ struct inodev_entry *match;
54153+
54154+ match = inodev_set.i_hash[index];
54155+
54156+ while (match && (match->nentry->inode != oldinode ||
54157+ match->nentry->device != olddevice || !match->nentry->deleted))
54158+ match = match->next;
54159+
54160+ if (match && (match->nentry->inode == oldinode)
54161+ && (match->nentry->device == olddevice) &&
54162+ match->nentry->deleted) {
54163+ if (match->prev == NULL) {
54164+ inodev_set.i_hash[index] = match->next;
54165+ if (match->next != NULL)
54166+ match->next->prev = NULL;
54167+ } else {
54168+ match->prev->next = match->next;
54169+ if (match->next != NULL)
54170+ match->next->prev = match->prev;
54171+ }
54172+ match->prev = NULL;
54173+ match->next = NULL;
54174+ match->nentry->inode = newinode;
54175+ match->nentry->device = newdevice;
54176+ match->nentry->deleted = 0;
54177+
54178+ insert_inodev_entry(match);
54179+ }
54180+
54181+ return;
54182+}
54183+
54184+static void
54185+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
54186+{
54187+ struct acl_subject_label *subj;
54188+ struct acl_role_label *role;
54189+ unsigned int x;
54190+
54191+ FOR_EACH_ROLE_START(role)
54192+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
54193+
54194+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54195+ if ((subj->inode == ino) && (subj->device == dev)) {
54196+ subj->inode = ino;
54197+ subj->device = dev;
54198+ }
54199+ FOR_EACH_NESTED_SUBJECT_END(subj)
54200+ FOR_EACH_SUBJECT_START(role, subj, x)
54201+ update_acl_obj_label(matchn->inode, matchn->device,
54202+ ino, dev, subj);
54203+ FOR_EACH_SUBJECT_END(subj,x)
54204+ FOR_EACH_ROLE_END(role)
54205+
54206+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
54207+
54208+ return;
54209+}
54210+
54211+static void
54212+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54213+ const struct vfsmount *mnt)
54214+{
54215+ ino_t ino = dentry->d_inode->i_ino;
54216+ dev_t dev = __get_dev(dentry);
54217+
54218+ __do_handle_create(matchn, ino, dev);
54219+
54220+ return;
54221+}
54222+
54223+void
54224+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54225+{
54226+ struct name_entry *matchn;
54227+
54228+ if (unlikely(!(gr_status & GR_READY)))
54229+ return;
54230+
54231+ preempt_disable();
54232+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54233+
54234+ if (unlikely((unsigned long)matchn)) {
54235+ write_lock(&gr_inode_lock);
54236+ do_handle_create(matchn, dentry, mnt);
54237+ write_unlock(&gr_inode_lock);
54238+ }
54239+ preempt_enable();
54240+
54241+ return;
54242+}
54243+
54244+void
54245+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54246+{
54247+ struct name_entry *matchn;
54248+
54249+ if (unlikely(!(gr_status & GR_READY)))
54250+ return;
54251+
54252+ preempt_disable();
54253+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54254+
54255+ if (unlikely((unsigned long)matchn)) {
54256+ write_lock(&gr_inode_lock);
54257+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54258+ write_unlock(&gr_inode_lock);
54259+ }
54260+ preempt_enable();
54261+
54262+ return;
54263+}
54264+
54265+void
54266+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54267+ struct dentry *old_dentry,
54268+ struct dentry *new_dentry,
54269+ struct vfsmount *mnt, const __u8 replace)
54270+{
54271+ struct name_entry *matchn;
54272+ struct inodev_entry *inodev;
54273+ struct inode *inode = new_dentry->d_inode;
54274+ ino_t old_ino = old_dentry->d_inode->i_ino;
54275+ dev_t old_dev = __get_dev(old_dentry);
54276+
54277+ /* vfs_rename swaps the name and parent link for old_dentry and
54278+ new_dentry
54279+ at this point, old_dentry has the new name, parent link, and inode
54280+ for the renamed file
54281+ if a file is being replaced by a rename, new_dentry has the inode
54282+ and name for the replaced file
54283+ */
54284+
54285+ if (unlikely(!(gr_status & GR_READY)))
54286+ return;
54287+
54288+ preempt_disable();
54289+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54290+
54291+ /* we wouldn't have to check d_inode if it weren't for
54292+ NFS silly-renaming
54293+ */
54294+
54295+ write_lock(&gr_inode_lock);
54296+ if (unlikely(replace && inode)) {
54297+ ino_t new_ino = inode->i_ino;
54298+ dev_t new_dev = __get_dev(new_dentry);
54299+
54300+ inodev = lookup_inodev_entry(new_ino, new_dev);
54301+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
54302+ do_handle_delete(inodev, new_ino, new_dev);
54303+ }
54304+
54305+ inodev = lookup_inodev_entry(old_ino, old_dev);
54306+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
54307+ do_handle_delete(inodev, old_ino, old_dev);
54308+
54309+ if (unlikely((unsigned long)matchn))
54310+ do_handle_create(matchn, old_dentry, mnt);
54311+
54312+ write_unlock(&gr_inode_lock);
54313+ preempt_enable();
54314+
54315+ return;
54316+}
54317+
54318+static int
54319+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54320+ unsigned char **sum)
54321+{
54322+ struct acl_role_label *r;
54323+ struct role_allowed_ip *ipp;
54324+ struct role_transition *trans;
54325+ unsigned int i;
54326+ int found = 0;
54327+ u32 curr_ip = current->signal->curr_ip;
54328+
54329+ current->signal->saved_ip = curr_ip;
54330+
54331+ /* check transition table */
54332+
54333+ for (trans = current->role->transitions; trans; trans = trans->next) {
54334+ if (!strcmp(rolename, trans->rolename)) {
54335+ found = 1;
54336+ break;
54337+ }
54338+ }
54339+
54340+ if (!found)
54341+ return 0;
54342+
54343+ /* handle special roles that do not require authentication
54344+ and check ip */
54345+
54346+ FOR_EACH_ROLE_START(r)
54347+ if (!strcmp(rolename, r->rolename) &&
54348+ (r->roletype & GR_ROLE_SPECIAL)) {
54349+ found = 0;
54350+ if (r->allowed_ips != NULL) {
54351+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
54352+ if ((ntohl(curr_ip) & ipp->netmask) ==
54353+ (ntohl(ipp->addr) & ipp->netmask))
54354+ found = 1;
54355+ }
54356+ } else
54357+ found = 2;
54358+ if (!found)
54359+ return 0;
54360+
54361+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54362+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54363+ *salt = NULL;
54364+ *sum = NULL;
54365+ return 1;
54366+ }
54367+ }
54368+ FOR_EACH_ROLE_END(r)
54369+
54370+ for (i = 0; i < num_sprole_pws; i++) {
54371+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54372+ *salt = acl_special_roles[i]->salt;
54373+ *sum = acl_special_roles[i]->sum;
54374+ return 1;
54375+ }
54376+ }
54377+
54378+ return 0;
54379+}
54380+
54381+static void
54382+assign_special_role(char *rolename)
54383+{
54384+ struct acl_object_label *obj;
54385+ struct acl_role_label *r;
54386+ struct acl_role_label *assigned = NULL;
54387+ struct task_struct *tsk;
54388+ struct file *filp;
54389+
54390+ FOR_EACH_ROLE_START(r)
54391+ if (!strcmp(rolename, r->rolename) &&
54392+ (r->roletype & GR_ROLE_SPECIAL)) {
54393+ assigned = r;
54394+ break;
54395+ }
54396+ FOR_EACH_ROLE_END(r)
54397+
54398+ if (!assigned)
54399+ return;
54400+
54401+ read_lock(&tasklist_lock);
54402+ read_lock(&grsec_exec_file_lock);
54403+
54404+ tsk = current->real_parent;
54405+ if (tsk == NULL)
54406+ goto out_unlock;
54407+
54408+ filp = tsk->exec_file;
54409+ if (filp == NULL)
54410+ goto out_unlock;
54411+
54412+ tsk->is_writable = 0;
54413+
54414+ tsk->acl_sp_role = 1;
54415+ tsk->acl_role_id = ++acl_sp_role_value;
54416+ tsk->role = assigned;
54417+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54418+
54419+ /* ignore additional mmap checks for processes that are writable
54420+ by the default ACL */
54421+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54422+ if (unlikely(obj->mode & GR_WRITE))
54423+ tsk->is_writable = 1;
54424+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54425+ if (unlikely(obj->mode & GR_WRITE))
54426+ tsk->is_writable = 1;
54427+
54428+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54429+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54430+#endif
54431+
54432+out_unlock:
54433+ read_unlock(&grsec_exec_file_lock);
54434+ read_unlock(&tasklist_lock);
54435+ return;
54436+}
54437+
54438+int gr_check_secure_terminal(struct task_struct *task)
54439+{
54440+ struct task_struct *p, *p2, *p3;
54441+ struct files_struct *files;
54442+ struct fdtable *fdt;
54443+ struct file *our_file = NULL, *file;
54444+ int i;
54445+
54446+ if (task->signal->tty == NULL)
54447+ return 1;
54448+
54449+ files = get_files_struct(task);
54450+ if (files != NULL) {
54451+ rcu_read_lock();
54452+ fdt = files_fdtable(files);
54453+ for (i=0; i < fdt->max_fds; i++) {
54454+ file = fcheck_files(files, i);
54455+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54456+ get_file(file);
54457+ our_file = file;
54458+ }
54459+ }
54460+ rcu_read_unlock();
54461+ put_files_struct(files);
54462+ }
54463+
54464+ if (our_file == NULL)
54465+ return 1;
54466+
54467+ read_lock(&tasklist_lock);
54468+ do_each_thread(p2, p) {
54469+ files = get_files_struct(p);
54470+ if (files == NULL ||
54471+ (p->signal && p->signal->tty == task->signal->tty)) {
54472+ if (files != NULL)
54473+ put_files_struct(files);
54474+ continue;
54475+ }
54476+ rcu_read_lock();
54477+ fdt = files_fdtable(files);
54478+ for (i=0; i < fdt->max_fds; i++) {
54479+ file = fcheck_files(files, i);
54480+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54481+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54482+ p3 = task;
54483+ while (p3->pid > 0) {
54484+ if (p3 == p)
54485+ break;
54486+ p3 = p3->real_parent;
54487+ }
54488+ if (p3 == p)
54489+ break;
54490+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54491+ gr_handle_alertkill(p);
54492+ rcu_read_unlock();
54493+ put_files_struct(files);
54494+ read_unlock(&tasklist_lock);
54495+ fput(our_file);
54496+ return 0;
54497+ }
54498+ }
54499+ rcu_read_unlock();
54500+ put_files_struct(files);
54501+ } while_each_thread(p2, p);
54502+ read_unlock(&tasklist_lock);
54503+
54504+ fput(our_file);
54505+ return 1;
54506+}
54507+
54508+ssize_t
54509+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54510+{
54511+ struct gr_arg_wrapper uwrap;
54512+ unsigned char *sprole_salt = NULL;
54513+ unsigned char *sprole_sum = NULL;
54514+ int error = sizeof (struct gr_arg_wrapper);
54515+ int error2 = 0;
54516+
54517+ mutex_lock(&gr_dev_mutex);
54518+
54519+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54520+ error = -EPERM;
54521+ goto out;
54522+ }
54523+
54524+ if (count != sizeof (struct gr_arg_wrapper)) {
54525+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54526+ error = -EINVAL;
54527+ goto out;
54528+ }
54529+
54530+
54531+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54532+ gr_auth_expires = 0;
54533+ gr_auth_attempts = 0;
54534+ }
54535+
54536+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54537+ error = -EFAULT;
54538+ goto out;
54539+ }
54540+
54541+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54542+ error = -EINVAL;
54543+ goto out;
54544+ }
54545+
54546+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54547+ error = -EFAULT;
54548+ goto out;
54549+ }
54550+
54551+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54552+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54553+ time_after(gr_auth_expires, get_seconds())) {
54554+ error = -EBUSY;
54555+ goto out;
54556+ }
54557+
54558+ /* if non-root trying to do anything other than use a special role,
54559+ do not attempt authentication, do not count towards authentication
54560+ locking
54561+ */
54562+
54563+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54564+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54565+ current_uid()) {
54566+ error = -EPERM;
54567+ goto out;
54568+ }
54569+
54570+ /* ensure pw and special role name are null terminated */
54571+
54572+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54573+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54574+
54575+ /* Okay.
54576+ * We have our enough of the argument structure..(we have yet
54577+ * to copy_from_user the tables themselves) . Copy the tables
54578+ * only if we need them, i.e. for loading operations. */
54579+
54580+ switch (gr_usermode->mode) {
54581+ case GR_STATUS:
54582+ if (gr_status & GR_READY) {
54583+ error = 1;
54584+ if (!gr_check_secure_terminal(current))
54585+ error = 3;
54586+ } else
54587+ error = 2;
54588+ goto out;
54589+ case GR_SHUTDOWN:
54590+ if ((gr_status & GR_READY)
54591+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54592+ pax_open_kernel();
54593+ gr_status &= ~GR_READY;
54594+ pax_close_kernel();
54595+
54596+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54597+ free_variables();
54598+ memset(gr_usermode, 0, sizeof (struct gr_arg));
54599+ memset(gr_system_salt, 0, GR_SALT_LEN);
54600+ memset(gr_system_sum, 0, GR_SHA_LEN);
54601+ } else if (gr_status & GR_READY) {
54602+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54603+ error = -EPERM;
54604+ } else {
54605+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54606+ error = -EAGAIN;
54607+ }
54608+ break;
54609+ case GR_ENABLE:
54610+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54611+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54612+ else {
54613+ if (gr_status & GR_READY)
54614+ error = -EAGAIN;
54615+ else
54616+ error = error2;
54617+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54618+ }
54619+ break;
54620+ case GR_RELOAD:
54621+ if (!(gr_status & GR_READY)) {
54622+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54623+ error = -EAGAIN;
54624+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54625+ preempt_disable();
54626+
54627+ pax_open_kernel();
54628+ gr_status &= ~GR_READY;
54629+ pax_close_kernel();
54630+
54631+ free_variables();
54632+ if (!(error2 = gracl_init(gr_usermode))) {
54633+ preempt_enable();
54634+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54635+ } else {
54636+ preempt_enable();
54637+ error = error2;
54638+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54639+ }
54640+ } else {
54641+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54642+ error = -EPERM;
54643+ }
54644+ break;
54645+ case GR_SEGVMOD:
54646+ if (unlikely(!(gr_status & GR_READY))) {
54647+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54648+ error = -EAGAIN;
54649+ break;
54650+ }
54651+
54652+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54653+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54654+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54655+ struct acl_subject_label *segvacl;
54656+ segvacl =
54657+ lookup_acl_subj_label(gr_usermode->segv_inode,
54658+ gr_usermode->segv_device,
54659+ current->role);
54660+ if (segvacl) {
54661+ segvacl->crashes = 0;
54662+ segvacl->expires = 0;
54663+ }
54664+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54665+ gr_remove_uid(gr_usermode->segv_uid);
54666+ }
54667+ } else {
54668+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54669+ error = -EPERM;
54670+ }
54671+ break;
54672+ case GR_SPROLE:
54673+ case GR_SPROLEPAM:
54674+ if (unlikely(!(gr_status & GR_READY))) {
54675+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54676+ error = -EAGAIN;
54677+ break;
54678+ }
54679+
54680+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54681+ current->role->expires = 0;
54682+ current->role->auth_attempts = 0;
54683+ }
54684+
54685+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54686+ time_after(current->role->expires, get_seconds())) {
54687+ error = -EBUSY;
54688+ goto out;
54689+ }
54690+
54691+ if (lookup_special_role_auth
54692+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54693+ && ((!sprole_salt && !sprole_sum)
54694+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54695+ char *p = "";
54696+ assign_special_role(gr_usermode->sp_role);
54697+ read_lock(&tasklist_lock);
54698+ if (current->real_parent)
54699+ p = current->real_parent->role->rolename;
54700+ read_unlock(&tasklist_lock);
54701+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54702+ p, acl_sp_role_value);
54703+ } else {
54704+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54705+ error = -EPERM;
54706+ if(!(current->role->auth_attempts++))
54707+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54708+
54709+ goto out;
54710+ }
54711+ break;
54712+ case GR_UNSPROLE:
54713+ if (unlikely(!(gr_status & GR_READY))) {
54714+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54715+ error = -EAGAIN;
54716+ break;
54717+ }
54718+
54719+ if (current->role->roletype & GR_ROLE_SPECIAL) {
54720+ char *p = "";
54721+ int i = 0;
54722+
54723+ read_lock(&tasklist_lock);
54724+ if (current->real_parent) {
54725+ p = current->real_parent->role->rolename;
54726+ i = current->real_parent->acl_role_id;
54727+ }
54728+ read_unlock(&tasklist_lock);
54729+
54730+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54731+ gr_set_acls(1);
54732+ } else {
54733+ error = -EPERM;
54734+ goto out;
54735+ }
54736+ break;
54737+ default:
54738+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54739+ error = -EINVAL;
54740+ break;
54741+ }
54742+
54743+ if (error != -EPERM)
54744+ goto out;
54745+
54746+ if(!(gr_auth_attempts++))
54747+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54748+
54749+ out:
54750+ mutex_unlock(&gr_dev_mutex);
54751+ return error;
54752+}
54753+
54754+/* must be called with
54755+ rcu_read_lock();
54756+ read_lock(&tasklist_lock);
54757+ read_lock(&grsec_exec_file_lock);
54758+*/
54759+int gr_apply_subject_to_task(struct task_struct *task)
54760+{
54761+ struct acl_object_label *obj;
54762+ char *tmpname;
54763+ struct acl_subject_label *tmpsubj;
54764+ struct file *filp;
54765+ struct name_entry *nmatch;
54766+
54767+ filp = task->exec_file;
54768+ if (filp == NULL)
54769+ return 0;
54770+
54771+ /* the following is to apply the correct subject
54772+ on binaries running when the RBAC system
54773+ is enabled, when the binaries have been
54774+ replaced or deleted since their execution
54775+ -----
54776+ when the RBAC system starts, the inode/dev
54777+ from exec_file will be one the RBAC system
54778+ is unaware of. It only knows the inode/dev
54779+ of the present file on disk, or the absence
54780+ of it.
54781+ */
54782+ preempt_disable();
54783+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54784+
54785+ nmatch = lookup_name_entry(tmpname);
54786+ preempt_enable();
54787+ tmpsubj = NULL;
54788+ if (nmatch) {
54789+ if (nmatch->deleted)
54790+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54791+ else
54792+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54793+ if (tmpsubj != NULL)
54794+ task->acl = tmpsubj;
54795+ }
54796+ if (tmpsubj == NULL)
54797+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54798+ task->role);
54799+ if (task->acl) {
54800+ task->is_writable = 0;
54801+ /* ignore additional mmap checks for processes that are writable
54802+ by the default ACL */
54803+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54804+ if (unlikely(obj->mode & GR_WRITE))
54805+ task->is_writable = 1;
54806+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54807+ if (unlikely(obj->mode & GR_WRITE))
54808+ task->is_writable = 1;
54809+
54810+ gr_set_proc_res(task);
54811+
54812+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54813+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54814+#endif
54815+ } else {
54816+ return 1;
54817+ }
54818+
54819+ return 0;
54820+}
54821+
54822+int
54823+gr_set_acls(const int type)
54824+{
54825+ struct task_struct *task, *task2;
54826+ struct acl_role_label *role = current->role;
54827+ __u16 acl_role_id = current->acl_role_id;
54828+ const struct cred *cred;
54829+ int ret;
54830+
54831+ rcu_read_lock();
54832+ read_lock(&tasklist_lock);
54833+ read_lock(&grsec_exec_file_lock);
54834+ do_each_thread(task2, task) {
54835+ /* check to see if we're called from the exit handler,
54836+ if so, only replace ACLs that have inherited the admin
54837+ ACL */
54838+
54839+ if (type && (task->role != role ||
54840+ task->acl_role_id != acl_role_id))
54841+ continue;
54842+
54843+ task->acl_role_id = 0;
54844+ task->acl_sp_role = 0;
54845+
54846+ if (task->exec_file) {
54847+ cred = __task_cred(task);
54848+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54849+ ret = gr_apply_subject_to_task(task);
54850+ if (ret) {
54851+ read_unlock(&grsec_exec_file_lock);
54852+ read_unlock(&tasklist_lock);
54853+ rcu_read_unlock();
54854+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54855+ return ret;
54856+ }
54857+ } else {
54858+ // it's a kernel process
54859+ task->role = kernel_role;
54860+ task->acl = kernel_role->root_label;
54861+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54862+ task->acl->mode &= ~GR_PROCFIND;
54863+#endif
54864+ }
54865+ } while_each_thread(task2, task);
54866+ read_unlock(&grsec_exec_file_lock);
54867+ read_unlock(&tasklist_lock);
54868+ rcu_read_unlock();
54869+
54870+ return 0;
54871+}
54872+
54873+void
54874+gr_learn_resource(const struct task_struct *task,
54875+ const int res, const unsigned long wanted, const int gt)
54876+{
54877+ struct acl_subject_label *acl;
54878+ const struct cred *cred;
54879+
54880+ if (unlikely((gr_status & GR_READY) &&
54881+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54882+ goto skip_reslog;
54883+
54884+#ifdef CONFIG_GRKERNSEC_RESLOG
54885+ gr_log_resource(task, res, wanted, gt);
54886+#endif
54887+ skip_reslog:
54888+
54889+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54890+ return;
54891+
54892+ acl = task->acl;
54893+
54894+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54895+ !(acl->resmask & (1 << (unsigned short) res))))
54896+ return;
54897+
54898+ if (wanted >= acl->res[res].rlim_cur) {
54899+ unsigned long res_add;
54900+
54901+ res_add = wanted;
54902+ switch (res) {
54903+ case RLIMIT_CPU:
54904+ res_add += GR_RLIM_CPU_BUMP;
54905+ break;
54906+ case RLIMIT_FSIZE:
54907+ res_add += GR_RLIM_FSIZE_BUMP;
54908+ break;
54909+ case RLIMIT_DATA:
54910+ res_add += GR_RLIM_DATA_BUMP;
54911+ break;
54912+ case RLIMIT_STACK:
54913+ res_add += GR_RLIM_STACK_BUMP;
54914+ break;
54915+ case RLIMIT_CORE:
54916+ res_add += GR_RLIM_CORE_BUMP;
54917+ break;
54918+ case RLIMIT_RSS:
54919+ res_add += GR_RLIM_RSS_BUMP;
54920+ break;
54921+ case RLIMIT_NPROC:
54922+ res_add += GR_RLIM_NPROC_BUMP;
54923+ break;
54924+ case RLIMIT_NOFILE:
54925+ res_add += GR_RLIM_NOFILE_BUMP;
54926+ break;
54927+ case RLIMIT_MEMLOCK:
54928+ res_add += GR_RLIM_MEMLOCK_BUMP;
54929+ break;
54930+ case RLIMIT_AS:
54931+ res_add += GR_RLIM_AS_BUMP;
54932+ break;
54933+ case RLIMIT_LOCKS:
54934+ res_add += GR_RLIM_LOCKS_BUMP;
54935+ break;
54936+ case RLIMIT_SIGPENDING:
54937+ res_add += GR_RLIM_SIGPENDING_BUMP;
54938+ break;
54939+ case RLIMIT_MSGQUEUE:
54940+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54941+ break;
54942+ case RLIMIT_NICE:
54943+ res_add += GR_RLIM_NICE_BUMP;
54944+ break;
54945+ case RLIMIT_RTPRIO:
54946+ res_add += GR_RLIM_RTPRIO_BUMP;
54947+ break;
54948+ case RLIMIT_RTTIME:
54949+ res_add += GR_RLIM_RTTIME_BUMP;
54950+ break;
54951+ }
54952+
54953+ acl->res[res].rlim_cur = res_add;
54954+
54955+ if (wanted > acl->res[res].rlim_max)
54956+ acl->res[res].rlim_max = res_add;
54957+
54958+ /* only log the subject filename, since resource logging is supported for
54959+ single-subject learning only */
54960+ rcu_read_lock();
54961+ cred = __task_cred(task);
54962+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54963+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54964+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54965+ "", (unsigned long) res, &task->signal->saved_ip);
54966+ rcu_read_unlock();
54967+ }
54968+
54969+ return;
54970+}
54971+
54972+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54973+void
54974+pax_set_initial_flags(struct linux_binprm *bprm)
54975+{
54976+ struct task_struct *task = current;
54977+ struct acl_subject_label *proc;
54978+ unsigned long flags;
54979+
54980+ if (unlikely(!(gr_status & GR_READY)))
54981+ return;
54982+
54983+ flags = pax_get_flags(task);
54984+
54985+ proc = task->acl;
54986+
54987+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54988+ flags &= ~MF_PAX_PAGEEXEC;
54989+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54990+ flags &= ~MF_PAX_SEGMEXEC;
54991+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54992+ flags &= ~MF_PAX_RANDMMAP;
54993+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54994+ flags &= ~MF_PAX_EMUTRAMP;
54995+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54996+ flags &= ~MF_PAX_MPROTECT;
54997+
54998+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54999+ flags |= MF_PAX_PAGEEXEC;
55000+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
55001+ flags |= MF_PAX_SEGMEXEC;
55002+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
55003+ flags |= MF_PAX_RANDMMAP;
55004+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
55005+ flags |= MF_PAX_EMUTRAMP;
55006+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
55007+ flags |= MF_PAX_MPROTECT;
55008+
55009+ pax_set_flags(task, flags);
55010+
55011+ return;
55012+}
55013+#endif
55014+
55015+#ifdef CONFIG_SYSCTL
55016+/* Eric Biederman likes breaking userland ABI and every inode-based security
55017+ system to save 35kb of memory */
55018+
55019+/* we modify the passed in filename, but adjust it back before returning */
55020+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
55021+{
55022+ struct name_entry *nmatch;
55023+ char *p, *lastp = NULL;
55024+ struct acl_object_label *obj = NULL, *tmp;
55025+ struct acl_subject_label *tmpsubj;
55026+ char c = '\0';
55027+
55028+ read_lock(&gr_inode_lock);
55029+
55030+ p = name + len - 1;
55031+ do {
55032+ nmatch = lookup_name_entry(name);
55033+ if (lastp != NULL)
55034+ *lastp = c;
55035+
55036+ if (nmatch == NULL)
55037+ goto next_component;
55038+ tmpsubj = current->acl;
55039+ do {
55040+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
55041+ if (obj != NULL) {
55042+ tmp = obj->globbed;
55043+ while (tmp) {
55044+ if (!glob_match(tmp->filename, name)) {
55045+ obj = tmp;
55046+ goto found_obj;
55047+ }
55048+ tmp = tmp->next;
55049+ }
55050+ goto found_obj;
55051+ }
55052+ } while ((tmpsubj = tmpsubj->parent_subject));
55053+next_component:
55054+ /* end case */
55055+ if (p == name)
55056+ break;
55057+
55058+ while (*p != '/')
55059+ p--;
55060+ if (p == name)
55061+ lastp = p + 1;
55062+ else {
55063+ lastp = p;
55064+ p--;
55065+ }
55066+ c = *lastp;
55067+ *lastp = '\0';
55068+ } while (1);
55069+found_obj:
55070+ read_unlock(&gr_inode_lock);
55071+ /* obj returned will always be non-null */
55072+ return obj;
55073+}
55074+
55075+/* returns 0 when allowing, non-zero on error
55076+ op of 0 is used for readdir, so we don't log the names of hidden files
55077+*/
55078+__u32
55079+gr_handle_sysctl(const struct ctl_table *table, const int op)
55080+{
55081+ struct ctl_table *tmp;
55082+ const char *proc_sys = "/proc/sys";
55083+ char *path;
55084+ struct acl_object_label *obj;
55085+ unsigned short len = 0, pos = 0, depth = 0, i;
55086+ __u32 err = 0;
55087+ __u32 mode = 0;
55088+
55089+ if (unlikely(!(gr_status & GR_READY)))
55090+ return 0;
55091+
55092+ /* for now, ignore operations on non-sysctl entries if it's not a
55093+ readdir*/
55094+ if (table->child != NULL && op != 0)
55095+ return 0;
55096+
55097+ mode |= GR_FIND;
55098+ /* it's only a read if it's an entry, read on dirs is for readdir */
55099+ if (op & MAY_READ)
55100+ mode |= GR_READ;
55101+ if (op & MAY_WRITE)
55102+ mode |= GR_WRITE;
55103+
55104+ preempt_disable();
55105+
55106+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55107+
55108+ /* it's only a read/write if it's an actual entry, not a dir
55109+ (which are opened for readdir)
55110+ */
55111+
55112+ /* convert the requested sysctl entry into a pathname */
55113+
55114+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
55115+ len += strlen(tmp->procname);
55116+ len++;
55117+ depth++;
55118+ }
55119+
55120+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
55121+ /* deny */
55122+ goto out;
55123+ }
55124+
55125+ memset(path, 0, PAGE_SIZE);
55126+
55127+ memcpy(path, proc_sys, strlen(proc_sys));
55128+
55129+ pos += strlen(proc_sys);
55130+
55131+ for (; depth > 0; depth--) {
55132+ path[pos] = '/';
55133+ pos++;
55134+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
55135+ if (depth == i) {
55136+ memcpy(path + pos, tmp->procname,
55137+ strlen(tmp->procname));
55138+ pos += strlen(tmp->procname);
55139+ }
55140+ i++;
55141+ }
55142+ }
55143+
55144+ obj = gr_lookup_by_name(path, pos);
55145+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
55146+
55147+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
55148+ ((err & mode) != mode))) {
55149+ __u32 new_mode = mode;
55150+
55151+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55152+
55153+ err = 0;
55154+ gr_log_learn_sysctl(path, new_mode);
55155+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
55156+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
55157+ err = -ENOENT;
55158+ } else if (!(err & GR_FIND)) {
55159+ err = -ENOENT;
55160+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
55161+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
55162+ path, (mode & GR_READ) ? " reading" : "",
55163+ (mode & GR_WRITE) ? " writing" : "");
55164+ err = -EACCES;
55165+ } else if ((err & mode) != mode) {
55166+ err = -EACCES;
55167+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
55168+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
55169+ path, (mode & GR_READ) ? " reading" : "",
55170+ (mode & GR_WRITE) ? " writing" : "");
55171+ err = 0;
55172+ } else
55173+ err = 0;
55174+
55175+ out:
55176+ preempt_enable();
55177+
55178+ return err;
55179+}
55180+#endif
55181+
55182+int
55183+gr_handle_proc_ptrace(struct task_struct *task)
55184+{
55185+ struct file *filp;
55186+ struct task_struct *tmp = task;
55187+ struct task_struct *curtemp = current;
55188+ __u32 retmode;
55189+
55190+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55191+ if (unlikely(!(gr_status & GR_READY)))
55192+ return 0;
55193+#endif
55194+
55195+ read_lock(&tasklist_lock);
55196+ read_lock(&grsec_exec_file_lock);
55197+ filp = task->exec_file;
55198+
55199+ while (tmp->pid > 0) {
55200+ if (tmp == curtemp)
55201+ break;
55202+ tmp = tmp->real_parent;
55203+ }
55204+
55205+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55206+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
55207+ read_unlock(&grsec_exec_file_lock);
55208+ read_unlock(&tasklist_lock);
55209+ return 1;
55210+ }
55211+
55212+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55213+ if (!(gr_status & GR_READY)) {
55214+ read_unlock(&grsec_exec_file_lock);
55215+ read_unlock(&tasklist_lock);
55216+ return 0;
55217+ }
55218+#endif
55219+
55220+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
55221+ read_unlock(&grsec_exec_file_lock);
55222+ read_unlock(&tasklist_lock);
55223+
55224+ if (retmode & GR_NOPTRACE)
55225+ return 1;
55226+
55227+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
55228+ && (current->acl != task->acl || (current->acl != current->role->root_label
55229+ && current->pid != task->pid)))
55230+ return 1;
55231+
55232+ return 0;
55233+}
55234+
55235+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
55236+{
55237+ if (unlikely(!(gr_status & GR_READY)))
55238+ return;
55239+
55240+ if (!(current->role->roletype & GR_ROLE_GOD))
55241+ return;
55242+
55243+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55244+ p->role->rolename, gr_task_roletype_to_char(p),
55245+ p->acl->filename);
55246+}
55247+
55248+int
55249+gr_handle_ptrace(struct task_struct *task, const long request)
55250+{
55251+ struct task_struct *tmp = task;
55252+ struct task_struct *curtemp = current;
55253+ __u32 retmode;
55254+
55255+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55256+ if (unlikely(!(gr_status & GR_READY)))
55257+ return 0;
55258+#endif
55259+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
55260+ read_lock(&tasklist_lock);
55261+ while (tmp->pid > 0) {
55262+ if (tmp == curtemp)
55263+ break;
55264+ tmp = tmp->real_parent;
55265+ }
55266+
55267+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55268+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55269+ read_unlock(&tasklist_lock);
55270+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55271+ return 1;
55272+ }
55273+ read_unlock(&tasklist_lock);
55274+ }
55275+
55276+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55277+ if (!(gr_status & GR_READY))
55278+ return 0;
55279+#endif
55280+
55281+ read_lock(&grsec_exec_file_lock);
55282+ if (unlikely(!task->exec_file)) {
55283+ read_unlock(&grsec_exec_file_lock);
55284+ return 0;
55285+ }
55286+
55287+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55288+ read_unlock(&grsec_exec_file_lock);
55289+
55290+ if (retmode & GR_NOPTRACE) {
55291+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55292+ return 1;
55293+ }
55294+
55295+ if (retmode & GR_PTRACERD) {
55296+ switch (request) {
55297+ case PTRACE_SEIZE:
55298+ case PTRACE_POKETEXT:
55299+ case PTRACE_POKEDATA:
55300+ case PTRACE_POKEUSR:
55301+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55302+ case PTRACE_SETREGS:
55303+ case PTRACE_SETFPREGS:
55304+#endif
55305+#ifdef CONFIG_X86
55306+ case PTRACE_SETFPXREGS:
55307+#endif
55308+#ifdef CONFIG_ALTIVEC
55309+ case PTRACE_SETVRREGS:
55310+#endif
55311+ return 1;
55312+ default:
55313+ return 0;
55314+ }
55315+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
55316+ !(current->role->roletype & GR_ROLE_GOD) &&
55317+ (current->acl != task->acl)) {
55318+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55319+ return 1;
55320+ }
55321+
55322+ return 0;
55323+}
55324+
55325+static int is_writable_mmap(const struct file *filp)
55326+{
55327+ struct task_struct *task = current;
55328+ struct acl_object_label *obj, *obj2;
55329+
55330+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
55331+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
55332+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55333+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55334+ task->role->root_label);
55335+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55336+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55337+ return 1;
55338+ }
55339+ }
55340+ return 0;
55341+}
55342+
55343+int
55344+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55345+{
55346+ __u32 mode;
55347+
55348+ if (unlikely(!file || !(prot & PROT_EXEC)))
55349+ return 1;
55350+
55351+ if (is_writable_mmap(file))
55352+ return 0;
55353+
55354+ mode =
55355+ gr_search_file(file->f_path.dentry,
55356+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55357+ file->f_path.mnt);
55358+
55359+ if (!gr_tpe_allow(file))
55360+ return 0;
55361+
55362+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55363+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55364+ return 0;
55365+ } else if (unlikely(!(mode & GR_EXEC))) {
55366+ return 0;
55367+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55368+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55369+ return 1;
55370+ }
55371+
55372+ return 1;
55373+}
55374+
55375+int
55376+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55377+{
55378+ __u32 mode;
55379+
55380+ if (unlikely(!file || !(prot & PROT_EXEC)))
55381+ return 1;
55382+
55383+ if (is_writable_mmap(file))
55384+ return 0;
55385+
55386+ mode =
55387+ gr_search_file(file->f_path.dentry,
55388+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55389+ file->f_path.mnt);
55390+
55391+ if (!gr_tpe_allow(file))
55392+ return 0;
55393+
55394+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55395+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55396+ return 0;
55397+ } else if (unlikely(!(mode & GR_EXEC))) {
55398+ return 0;
55399+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55400+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55401+ return 1;
55402+ }
55403+
55404+ return 1;
55405+}
55406+
55407+void
55408+gr_acl_handle_psacct(struct task_struct *task, const long code)
55409+{
55410+ unsigned long runtime;
55411+ unsigned long cputime;
55412+ unsigned int wday, cday;
55413+ __u8 whr, chr;
55414+ __u8 wmin, cmin;
55415+ __u8 wsec, csec;
55416+ struct timespec timeval;
55417+
55418+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55419+ !(task->acl->mode & GR_PROCACCT)))
55420+ return;
55421+
55422+ do_posix_clock_monotonic_gettime(&timeval);
55423+ runtime = timeval.tv_sec - task->start_time.tv_sec;
55424+ wday = runtime / (3600 * 24);
55425+ runtime -= wday * (3600 * 24);
55426+ whr = runtime / 3600;
55427+ runtime -= whr * 3600;
55428+ wmin = runtime / 60;
55429+ runtime -= wmin * 60;
55430+ wsec = runtime;
55431+
55432+ cputime = (task->utime + task->stime) / HZ;
55433+ cday = cputime / (3600 * 24);
55434+ cputime -= cday * (3600 * 24);
55435+ chr = cputime / 3600;
55436+ cputime -= chr * 3600;
55437+ cmin = cputime / 60;
55438+ cputime -= cmin * 60;
55439+ csec = cputime;
55440+
55441+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55442+
55443+ return;
55444+}
55445+
55446+void gr_set_kernel_label(struct task_struct *task)
55447+{
55448+ if (gr_status & GR_READY) {
55449+ task->role = kernel_role;
55450+ task->acl = kernel_role->root_label;
55451+ }
55452+ return;
55453+}
55454+
55455+#ifdef CONFIG_TASKSTATS
55456+int gr_is_taskstats_denied(int pid)
55457+{
55458+ struct task_struct *task;
55459+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55460+ const struct cred *cred;
55461+#endif
55462+ int ret = 0;
55463+
55464+ /* restrict taskstats viewing to un-chrooted root users
55465+ who have the 'view' subject flag if the RBAC system is enabled
55466+ */
55467+
55468+ rcu_read_lock();
55469+ read_lock(&tasklist_lock);
55470+ task = find_task_by_vpid(pid);
55471+ if (task) {
55472+#ifdef CONFIG_GRKERNSEC_CHROOT
55473+ if (proc_is_chrooted(task))
55474+ ret = -EACCES;
55475+#endif
55476+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55477+ cred = __task_cred(task);
55478+#ifdef CONFIG_GRKERNSEC_PROC_USER
55479+ if (cred->uid != 0)
55480+ ret = -EACCES;
55481+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55482+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55483+ ret = -EACCES;
55484+#endif
55485+#endif
55486+ if (gr_status & GR_READY) {
55487+ if (!(task->acl->mode & GR_VIEW))
55488+ ret = -EACCES;
55489+ }
55490+ } else
55491+ ret = -ENOENT;
55492+
55493+ read_unlock(&tasklist_lock);
55494+ rcu_read_unlock();
55495+
55496+ return ret;
55497+}
55498+#endif
55499+
55500+/* AUXV entries are filled via a descendant of search_binary_handler
55501+ after we've already applied the subject for the target
55502+*/
55503+int gr_acl_enable_at_secure(void)
55504+{
55505+ if (unlikely(!(gr_status & GR_READY)))
55506+ return 0;
55507+
55508+ if (current->acl->mode & GR_ATSECURE)
55509+ return 1;
55510+
55511+ return 0;
55512+}
55513+
55514+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55515+{
55516+ struct task_struct *task = current;
55517+ struct dentry *dentry = file->f_path.dentry;
55518+ struct vfsmount *mnt = file->f_path.mnt;
55519+ struct acl_object_label *obj, *tmp;
55520+ struct acl_subject_label *subj;
55521+ unsigned int bufsize;
55522+ int is_not_root;
55523+ char *path;
55524+ dev_t dev = __get_dev(dentry);
55525+
55526+ if (unlikely(!(gr_status & GR_READY)))
55527+ return 1;
55528+
55529+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55530+ return 1;
55531+
55532+ /* ignore Eric Biederman */
55533+ if (IS_PRIVATE(dentry->d_inode))
55534+ return 1;
55535+
55536+ subj = task->acl;
55537+ do {
55538+ obj = lookup_acl_obj_label(ino, dev, subj);
55539+ if (obj != NULL)
55540+ return (obj->mode & GR_FIND) ? 1 : 0;
55541+ } while ((subj = subj->parent_subject));
55542+
55543+ /* this is purely an optimization since we're looking for an object
55544+ for the directory we're doing a readdir on
55545+ if it's possible for any globbed object to match the entry we're
55546+ filling into the directory, then the object we find here will be
55547+ an anchor point with attached globbed objects
55548+ */
55549+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55550+ if (obj->globbed == NULL)
55551+ return (obj->mode & GR_FIND) ? 1 : 0;
55552+
55553+ is_not_root = ((obj->filename[0] == '/') &&
55554+ (obj->filename[1] == '\0')) ? 0 : 1;
55555+ bufsize = PAGE_SIZE - namelen - is_not_root;
55556+
55557+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
55558+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55559+ return 1;
55560+
55561+ preempt_disable();
55562+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55563+ bufsize);
55564+
55565+ bufsize = strlen(path);
55566+
55567+ /* if base is "/", don't append an additional slash */
55568+ if (is_not_root)
55569+ *(path + bufsize) = '/';
55570+ memcpy(path + bufsize + is_not_root, name, namelen);
55571+ *(path + bufsize + namelen + is_not_root) = '\0';
55572+
55573+ tmp = obj->globbed;
55574+ while (tmp) {
55575+ if (!glob_match(tmp->filename, path)) {
55576+ preempt_enable();
55577+ return (tmp->mode & GR_FIND) ? 1 : 0;
55578+ }
55579+ tmp = tmp->next;
55580+ }
55581+ preempt_enable();
55582+ return (obj->mode & GR_FIND) ? 1 : 0;
55583+}
55584+
55585+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55586+EXPORT_SYMBOL(gr_acl_is_enabled);
55587+#endif
55588+EXPORT_SYMBOL(gr_learn_resource);
55589+EXPORT_SYMBOL(gr_set_kernel_label);
55590+#ifdef CONFIG_SECURITY
55591+EXPORT_SYMBOL(gr_check_user_change);
55592+EXPORT_SYMBOL(gr_check_group_change);
55593+#endif
55594+
55595diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55596new file mode 100644
55597index 0000000..34fefda
55598--- /dev/null
55599+++ b/grsecurity/gracl_alloc.c
55600@@ -0,0 +1,105 @@
55601+#include <linux/kernel.h>
55602+#include <linux/mm.h>
55603+#include <linux/slab.h>
55604+#include <linux/vmalloc.h>
55605+#include <linux/gracl.h>
55606+#include <linux/grsecurity.h>
55607+
55608+static unsigned long alloc_stack_next = 1;
55609+static unsigned long alloc_stack_size = 1;
55610+static void **alloc_stack;
55611+
55612+static __inline__ int
55613+alloc_pop(void)
55614+{
55615+ if (alloc_stack_next == 1)
55616+ return 0;
55617+
55618+ kfree(alloc_stack[alloc_stack_next - 2]);
55619+
55620+ alloc_stack_next--;
55621+
55622+ return 1;
55623+}
55624+
55625+static __inline__ int
55626+alloc_push(void *buf)
55627+{
55628+ if (alloc_stack_next >= alloc_stack_size)
55629+ return 1;
55630+
55631+ alloc_stack[alloc_stack_next - 1] = buf;
55632+
55633+ alloc_stack_next++;
55634+
55635+ return 0;
55636+}
55637+
55638+void *
55639+acl_alloc(unsigned long len)
55640+{
55641+ void *ret = NULL;
55642+
55643+ if (!len || len > PAGE_SIZE)
55644+ goto out;
55645+
55646+ ret = kmalloc(len, GFP_KERNEL);
55647+
55648+ if (ret) {
55649+ if (alloc_push(ret)) {
55650+ kfree(ret);
55651+ ret = NULL;
55652+ }
55653+ }
55654+
55655+out:
55656+ return ret;
55657+}
55658+
55659+void *
55660+acl_alloc_num(unsigned long num, unsigned long len)
55661+{
55662+ if (!len || (num > (PAGE_SIZE / len)))
55663+ return NULL;
55664+
55665+ return acl_alloc(num * len);
55666+}
55667+
55668+void
55669+acl_free_all(void)
55670+{
55671+ if (gr_acl_is_enabled() || !alloc_stack)
55672+ return;
55673+
55674+ while (alloc_pop()) ;
55675+
55676+ if (alloc_stack) {
55677+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55678+ kfree(alloc_stack);
55679+ else
55680+ vfree(alloc_stack);
55681+ }
55682+
55683+ alloc_stack = NULL;
55684+ alloc_stack_size = 1;
55685+ alloc_stack_next = 1;
55686+
55687+ return;
55688+}
55689+
55690+int
55691+acl_alloc_stack_init(unsigned long size)
55692+{
55693+ if ((size * sizeof (void *)) <= PAGE_SIZE)
55694+ alloc_stack =
55695+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55696+ else
55697+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
55698+
55699+ alloc_stack_size = size;
55700+
55701+ if (!alloc_stack)
55702+ return 0;
55703+ else
55704+ return 1;
55705+}
55706diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55707new file mode 100644
55708index 0000000..6d21049
55709--- /dev/null
55710+++ b/grsecurity/gracl_cap.c
55711@@ -0,0 +1,110 @@
55712+#include <linux/kernel.h>
55713+#include <linux/module.h>
55714+#include <linux/sched.h>
55715+#include <linux/gracl.h>
55716+#include <linux/grsecurity.h>
55717+#include <linux/grinternal.h>
55718+
55719+extern const char *captab_log[];
55720+extern int captab_log_entries;
55721+
55722+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55723+{
55724+ struct acl_subject_label *curracl;
55725+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55726+ kernel_cap_t cap_audit = __cap_empty_set;
55727+
55728+ if (!gr_acl_is_enabled())
55729+ return 1;
55730+
55731+ curracl = task->acl;
55732+
55733+ cap_drop = curracl->cap_lower;
55734+ cap_mask = curracl->cap_mask;
55735+ cap_audit = curracl->cap_invert_audit;
55736+
55737+ while ((curracl = curracl->parent_subject)) {
55738+ /* if the cap isn't specified in the current computed mask but is specified in the
55739+ current level subject, and is lowered in the current level subject, then add
55740+ it to the set of dropped capabilities
55741+ otherwise, add the current level subject's mask to the current computed mask
55742+ */
55743+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55744+ cap_raise(cap_mask, cap);
55745+ if (cap_raised(curracl->cap_lower, cap))
55746+ cap_raise(cap_drop, cap);
55747+ if (cap_raised(curracl->cap_invert_audit, cap))
55748+ cap_raise(cap_audit, cap);
55749+ }
55750+ }
55751+
55752+ if (!cap_raised(cap_drop, cap)) {
55753+ if (cap_raised(cap_audit, cap))
55754+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55755+ return 1;
55756+ }
55757+
55758+ curracl = task->acl;
55759+
55760+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55761+ && cap_raised(cred->cap_effective, cap)) {
55762+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55763+ task->role->roletype, cred->uid,
55764+ cred->gid, task->exec_file ?
55765+ gr_to_filename(task->exec_file->f_path.dentry,
55766+ task->exec_file->f_path.mnt) : curracl->filename,
55767+ curracl->filename, 0UL,
55768+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55769+ return 1;
55770+ }
55771+
55772+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55773+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55774+
55775+ return 0;
55776+}
55777+
55778+int
55779+gr_acl_is_capable(const int cap)
55780+{
55781+ return gr_task_acl_is_capable(current, current_cred(), cap);
55782+}
55783+
55784+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55785+{
55786+ struct acl_subject_label *curracl;
55787+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55788+
55789+ if (!gr_acl_is_enabled())
55790+ return 1;
55791+
55792+ curracl = task->acl;
55793+
55794+ cap_drop = curracl->cap_lower;
55795+ cap_mask = curracl->cap_mask;
55796+
55797+ while ((curracl = curracl->parent_subject)) {
55798+ /* if the cap isn't specified in the current computed mask but is specified in the
55799+ current level subject, and is lowered in the current level subject, then add
55800+ it to the set of dropped capabilities
55801+ otherwise, add the current level subject's mask to the current computed mask
55802+ */
55803+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55804+ cap_raise(cap_mask, cap);
55805+ if (cap_raised(curracl->cap_lower, cap))
55806+ cap_raise(cap_drop, cap);
55807+ }
55808+ }
55809+
55810+ if (!cap_raised(cap_drop, cap))
55811+ return 1;
55812+
55813+ return 0;
55814+}
55815+
55816+int
55817+gr_acl_is_capable_nolog(const int cap)
55818+{
55819+ return gr_task_acl_is_capable_nolog(current, cap);
55820+}
55821+
55822diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55823new file mode 100644
55824index 0000000..88d0e87
55825--- /dev/null
55826+++ b/grsecurity/gracl_fs.c
55827@@ -0,0 +1,435 @@
55828+#include <linux/kernel.h>
55829+#include <linux/sched.h>
55830+#include <linux/types.h>
55831+#include <linux/fs.h>
55832+#include <linux/file.h>
55833+#include <linux/stat.h>
55834+#include <linux/grsecurity.h>
55835+#include <linux/grinternal.h>
55836+#include <linux/gracl.h>
55837+
55838+umode_t
55839+gr_acl_umask(void)
55840+{
55841+ if (unlikely(!gr_acl_is_enabled()))
55842+ return 0;
55843+
55844+ return current->role->umask;
55845+}
55846+
55847+__u32
55848+gr_acl_handle_hidden_file(const struct dentry * dentry,
55849+ const struct vfsmount * mnt)
55850+{
55851+ __u32 mode;
55852+
55853+ if (unlikely(!dentry->d_inode))
55854+ return GR_FIND;
55855+
55856+ mode =
55857+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55858+
55859+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55860+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55861+ return mode;
55862+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55863+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55864+ return 0;
55865+ } else if (unlikely(!(mode & GR_FIND)))
55866+ return 0;
55867+
55868+ return GR_FIND;
55869+}
55870+
55871+__u32
55872+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55873+ int acc_mode)
55874+{
55875+ __u32 reqmode = GR_FIND;
55876+ __u32 mode;
55877+
55878+ if (unlikely(!dentry->d_inode))
55879+ return reqmode;
55880+
55881+ if (acc_mode & MAY_APPEND)
55882+ reqmode |= GR_APPEND;
55883+ else if (acc_mode & MAY_WRITE)
55884+ reqmode |= GR_WRITE;
55885+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55886+ reqmode |= GR_READ;
55887+
55888+ mode =
55889+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55890+ mnt);
55891+
55892+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55893+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55894+ reqmode & GR_READ ? " reading" : "",
55895+ reqmode & GR_WRITE ? " writing" : reqmode &
55896+ GR_APPEND ? " appending" : "");
55897+ return reqmode;
55898+ } else
55899+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55900+ {
55901+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55902+ reqmode & GR_READ ? " reading" : "",
55903+ reqmode & GR_WRITE ? " writing" : reqmode &
55904+ GR_APPEND ? " appending" : "");
55905+ return 0;
55906+ } else if (unlikely((mode & reqmode) != reqmode))
55907+ return 0;
55908+
55909+ return reqmode;
55910+}
55911+
55912+__u32
55913+gr_acl_handle_creat(const struct dentry * dentry,
55914+ const struct dentry * p_dentry,
55915+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55916+ const int imode)
55917+{
55918+ __u32 reqmode = GR_WRITE | GR_CREATE;
55919+ __u32 mode;
55920+
55921+ if (acc_mode & MAY_APPEND)
55922+ reqmode |= GR_APPEND;
55923+ // if a directory was required or the directory already exists, then
55924+ // don't count this open as a read
55925+ if ((acc_mode & MAY_READ) &&
55926+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55927+ reqmode |= GR_READ;
55928+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55929+ reqmode |= GR_SETID;
55930+
55931+ mode =
55932+ gr_check_create(dentry, p_dentry, p_mnt,
55933+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55934+
55935+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55936+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55937+ reqmode & GR_READ ? " reading" : "",
55938+ reqmode & GR_WRITE ? " writing" : reqmode &
55939+ GR_APPEND ? " appending" : "");
55940+ return reqmode;
55941+ } else
55942+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55943+ {
55944+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55945+ reqmode & GR_READ ? " reading" : "",
55946+ reqmode & GR_WRITE ? " writing" : reqmode &
55947+ GR_APPEND ? " appending" : "");
55948+ return 0;
55949+ } else if (unlikely((mode & reqmode) != reqmode))
55950+ return 0;
55951+
55952+ return reqmode;
55953+}
55954+
55955+__u32
55956+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55957+ const int fmode)
55958+{
55959+ __u32 mode, reqmode = GR_FIND;
55960+
55961+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55962+ reqmode |= GR_EXEC;
55963+ if (fmode & S_IWOTH)
55964+ reqmode |= GR_WRITE;
55965+ if (fmode & S_IROTH)
55966+ reqmode |= GR_READ;
55967+
55968+ mode =
55969+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55970+ mnt);
55971+
55972+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55973+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55974+ reqmode & GR_READ ? " reading" : "",
55975+ reqmode & GR_WRITE ? " writing" : "",
55976+ reqmode & GR_EXEC ? " executing" : "");
55977+ return reqmode;
55978+ } else
55979+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55980+ {
55981+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55982+ reqmode & GR_READ ? " reading" : "",
55983+ reqmode & GR_WRITE ? " writing" : "",
55984+ reqmode & GR_EXEC ? " executing" : "");
55985+ return 0;
55986+ } else if (unlikely((mode & reqmode) != reqmode))
55987+ return 0;
55988+
55989+ return reqmode;
55990+}
55991+
55992+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55993+{
55994+ __u32 mode;
55995+
55996+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55997+
55998+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55999+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
56000+ return mode;
56001+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56002+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
56003+ return 0;
56004+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
56005+ return 0;
56006+
56007+ return (reqmode);
56008+}
56009+
56010+__u32
56011+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56012+{
56013+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
56014+}
56015+
56016+__u32
56017+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
56018+{
56019+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
56020+}
56021+
56022+__u32
56023+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
56024+{
56025+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
56026+}
56027+
56028+__u32
56029+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
56030+{
56031+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
56032+}
56033+
56034+__u32
56035+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
56036+ umode_t *modeptr)
56037+{
56038+ umode_t mode;
56039+
56040+ *modeptr &= ~gr_acl_umask();
56041+ mode = *modeptr;
56042+
56043+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
56044+ return 1;
56045+
56046+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
56047+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56048+ GR_CHMOD_ACL_MSG);
56049+ } else {
56050+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
56051+ }
56052+}
56053+
56054+__u32
56055+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
56056+{
56057+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
56058+}
56059+
56060+__u32
56061+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
56062+{
56063+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
56064+}
56065+
56066+__u32
56067+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
56068+{
56069+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
56070+}
56071+
56072+__u32
56073+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
56074+{
56075+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
56076+ GR_UNIXCONNECT_ACL_MSG);
56077+}
56078+
56079+/* hardlinks require at minimum create and link permission,
56080+ any additional privilege required is based on the
56081+ privilege of the file being linked to
56082+*/
56083+__u32
56084+gr_acl_handle_link(const struct dentry * new_dentry,
56085+ const struct dentry * parent_dentry,
56086+ const struct vfsmount * parent_mnt,
56087+ const struct dentry * old_dentry,
56088+ const struct vfsmount * old_mnt, const char *to)
56089+{
56090+ __u32 mode;
56091+ __u32 needmode = GR_CREATE | GR_LINK;
56092+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
56093+
56094+ mode =
56095+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
56096+ old_mnt);
56097+
56098+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
56099+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56100+ return mode;
56101+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56102+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56103+ return 0;
56104+ } else if (unlikely((mode & needmode) != needmode))
56105+ return 0;
56106+
56107+ return 1;
56108+}
56109+
56110+__u32
56111+gr_acl_handle_symlink(const struct dentry * new_dentry,
56112+ const struct dentry * parent_dentry,
56113+ const struct vfsmount * parent_mnt, const char *from)
56114+{
56115+ __u32 needmode = GR_WRITE | GR_CREATE;
56116+ __u32 mode;
56117+
56118+ mode =
56119+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
56120+ GR_CREATE | GR_AUDIT_CREATE |
56121+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
56122+
56123+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
56124+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56125+ return mode;
56126+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56127+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56128+ return 0;
56129+ } else if (unlikely((mode & needmode) != needmode))
56130+ return 0;
56131+
56132+ return (GR_WRITE | GR_CREATE);
56133+}
56134+
56135+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
56136+{
56137+ __u32 mode;
56138+
56139+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56140+
56141+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56142+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
56143+ return mode;
56144+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56145+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
56146+ return 0;
56147+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
56148+ return 0;
56149+
56150+ return (reqmode);
56151+}
56152+
56153+__u32
56154+gr_acl_handle_mknod(const struct dentry * new_dentry,
56155+ const struct dentry * parent_dentry,
56156+ const struct vfsmount * parent_mnt,
56157+ const int mode)
56158+{
56159+ __u32 reqmode = GR_WRITE | GR_CREATE;
56160+ if (unlikely(mode & (S_ISUID | S_ISGID)))
56161+ reqmode |= GR_SETID;
56162+
56163+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56164+ reqmode, GR_MKNOD_ACL_MSG);
56165+}
56166+
56167+__u32
56168+gr_acl_handle_mkdir(const struct dentry *new_dentry,
56169+ const struct dentry *parent_dentry,
56170+ const struct vfsmount *parent_mnt)
56171+{
56172+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56173+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
56174+}
56175+
56176+#define RENAME_CHECK_SUCCESS(old, new) \
56177+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
56178+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
56179+
56180+int
56181+gr_acl_handle_rename(struct dentry *new_dentry,
56182+ struct dentry *parent_dentry,
56183+ const struct vfsmount *parent_mnt,
56184+ struct dentry *old_dentry,
56185+ struct inode *old_parent_inode,
56186+ struct vfsmount *old_mnt, const char *newname)
56187+{
56188+ __u32 comp1, comp2;
56189+ int error = 0;
56190+
56191+ if (unlikely(!gr_acl_is_enabled()))
56192+ return 0;
56193+
56194+ if (!new_dentry->d_inode) {
56195+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
56196+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
56197+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
56198+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
56199+ GR_DELETE | GR_AUDIT_DELETE |
56200+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56201+ GR_SUPPRESS, old_mnt);
56202+ } else {
56203+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
56204+ GR_CREATE | GR_DELETE |
56205+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
56206+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56207+ GR_SUPPRESS, parent_mnt);
56208+ comp2 =
56209+ gr_search_file(old_dentry,
56210+ GR_READ | GR_WRITE | GR_AUDIT_READ |
56211+ GR_DELETE | GR_AUDIT_DELETE |
56212+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
56213+ }
56214+
56215+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
56216+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
56217+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56218+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
56219+ && !(comp2 & GR_SUPPRESS)) {
56220+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56221+ error = -EACCES;
56222+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
56223+ error = -EACCES;
56224+
56225+ return error;
56226+}
56227+
56228+void
56229+gr_acl_handle_exit(void)
56230+{
56231+ u16 id;
56232+ char *rolename;
56233+ struct file *exec_file;
56234+
56235+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56236+ !(current->role->roletype & GR_ROLE_PERSIST))) {
56237+ id = current->acl_role_id;
56238+ rolename = current->role->rolename;
56239+ gr_set_acls(1);
56240+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56241+ }
56242+
56243+ write_lock(&grsec_exec_file_lock);
56244+ exec_file = current->exec_file;
56245+ current->exec_file = NULL;
56246+ write_unlock(&grsec_exec_file_lock);
56247+
56248+ if (exec_file)
56249+ fput(exec_file);
56250+}
56251+
56252+int
56253+gr_acl_handle_procpidmem(const struct task_struct *task)
56254+{
56255+ if (unlikely(!gr_acl_is_enabled()))
56256+ return 0;
56257+
56258+ if (task != current && task->acl->mode & GR_PROTPROCFD)
56259+ return -EACCES;
56260+
56261+ return 0;
56262+}
56263diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56264new file mode 100644
56265index 0000000..58800a7
56266--- /dev/null
56267+++ b/grsecurity/gracl_ip.c
56268@@ -0,0 +1,384 @@
56269+#include <linux/kernel.h>
56270+#include <asm/uaccess.h>
56271+#include <asm/errno.h>
56272+#include <net/sock.h>
56273+#include <linux/file.h>
56274+#include <linux/fs.h>
56275+#include <linux/net.h>
56276+#include <linux/in.h>
56277+#include <linux/skbuff.h>
56278+#include <linux/ip.h>
56279+#include <linux/udp.h>
56280+#include <linux/types.h>
56281+#include <linux/sched.h>
56282+#include <linux/netdevice.h>
56283+#include <linux/inetdevice.h>
56284+#include <linux/gracl.h>
56285+#include <linux/grsecurity.h>
56286+#include <linux/grinternal.h>
56287+
56288+#define GR_BIND 0x01
56289+#define GR_CONNECT 0x02
56290+#define GR_INVERT 0x04
56291+#define GR_BINDOVERRIDE 0x08
56292+#define GR_CONNECTOVERRIDE 0x10
56293+#define GR_SOCK_FAMILY 0x20
56294+
56295+static const char * gr_protocols[IPPROTO_MAX] = {
56296+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56297+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56298+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56299+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56300+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56301+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56302+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56303+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56304+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56305+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56306+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56307+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56308+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56309+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56310+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56311+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56312+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56313+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56314+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56315+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56316+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56317+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56318+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56319+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56320+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56321+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56322+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56323+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56324+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56325+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56326+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56327+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56328+ };
56329+
56330+static const char * gr_socktypes[SOCK_MAX] = {
56331+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56332+ "unknown:7", "unknown:8", "unknown:9", "packet"
56333+ };
56334+
56335+static const char * gr_sockfamilies[AF_MAX+1] = {
56336+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56337+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
56338+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56339+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
56340+ };
56341+
56342+const char *
56343+gr_proto_to_name(unsigned char proto)
56344+{
56345+ return gr_protocols[proto];
56346+}
56347+
56348+const char *
56349+gr_socktype_to_name(unsigned char type)
56350+{
56351+ return gr_socktypes[type];
56352+}
56353+
56354+const char *
56355+gr_sockfamily_to_name(unsigned char family)
56356+{
56357+ return gr_sockfamilies[family];
56358+}
56359+
56360+int
56361+gr_search_socket(const int domain, const int type, const int protocol)
56362+{
56363+ struct acl_subject_label *curr;
56364+ const struct cred *cred = current_cred();
56365+
56366+ if (unlikely(!gr_acl_is_enabled()))
56367+ goto exit;
56368+
56369+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
56370+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56371+ goto exit; // let the kernel handle it
56372+
56373+ curr = current->acl;
56374+
56375+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56376+ /* the family is allowed, if this is PF_INET allow it only if
56377+ the extra sock type/protocol checks pass */
56378+ if (domain == PF_INET)
56379+ goto inet_check;
56380+ goto exit;
56381+ } else {
56382+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56383+ __u32 fakeip = 0;
56384+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56385+ current->role->roletype, cred->uid,
56386+ cred->gid, current->exec_file ?
56387+ gr_to_filename(current->exec_file->f_path.dentry,
56388+ current->exec_file->f_path.mnt) :
56389+ curr->filename, curr->filename,
56390+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56391+ &current->signal->saved_ip);
56392+ goto exit;
56393+ }
56394+ goto exit_fail;
56395+ }
56396+
56397+inet_check:
56398+ /* the rest of this checking is for IPv4 only */
56399+ if (!curr->ips)
56400+ goto exit;
56401+
56402+ if ((curr->ip_type & (1 << type)) &&
56403+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56404+ goto exit;
56405+
56406+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56407+ /* we don't place acls on raw sockets , and sometimes
56408+ dgram/ip sockets are opened for ioctl and not
56409+ bind/connect, so we'll fake a bind learn log */
56410+ if (type == SOCK_RAW || type == SOCK_PACKET) {
56411+ __u32 fakeip = 0;
56412+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56413+ current->role->roletype, cred->uid,
56414+ cred->gid, current->exec_file ?
56415+ gr_to_filename(current->exec_file->f_path.dentry,
56416+ current->exec_file->f_path.mnt) :
56417+ curr->filename, curr->filename,
56418+ &fakeip, 0, type,
56419+ protocol, GR_CONNECT, &current->signal->saved_ip);
56420+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56421+ __u32 fakeip = 0;
56422+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56423+ current->role->roletype, cred->uid,
56424+ cred->gid, current->exec_file ?
56425+ gr_to_filename(current->exec_file->f_path.dentry,
56426+ current->exec_file->f_path.mnt) :
56427+ curr->filename, curr->filename,
56428+ &fakeip, 0, type,
56429+ protocol, GR_BIND, &current->signal->saved_ip);
56430+ }
56431+ /* we'll log when they use connect or bind */
56432+ goto exit;
56433+ }
56434+
56435+exit_fail:
56436+ if (domain == PF_INET)
56437+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56438+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
56439+ else
56440+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56441+ gr_socktype_to_name(type), protocol);
56442+
56443+ return 0;
56444+exit:
56445+ return 1;
56446+}
56447+
56448+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56449+{
56450+ if ((ip->mode & mode) &&
56451+ (ip_port >= ip->low) &&
56452+ (ip_port <= ip->high) &&
56453+ ((ntohl(ip_addr) & our_netmask) ==
56454+ (ntohl(our_addr) & our_netmask))
56455+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56456+ && (ip->type & (1 << type))) {
56457+ if (ip->mode & GR_INVERT)
56458+ return 2; // specifically denied
56459+ else
56460+ return 1; // allowed
56461+ }
56462+
56463+ return 0; // not specifically allowed, may continue parsing
56464+}
56465+
56466+static int
56467+gr_search_connectbind(const int full_mode, struct sock *sk,
56468+ struct sockaddr_in *addr, const int type)
56469+{
56470+ char iface[IFNAMSIZ] = {0};
56471+ struct acl_subject_label *curr;
56472+ struct acl_ip_label *ip;
56473+ struct inet_sock *isk;
56474+ struct net_device *dev;
56475+ struct in_device *idev;
56476+ unsigned long i;
56477+ int ret;
56478+ int mode = full_mode & (GR_BIND | GR_CONNECT);
56479+ __u32 ip_addr = 0;
56480+ __u32 our_addr;
56481+ __u32 our_netmask;
56482+ char *p;
56483+ __u16 ip_port = 0;
56484+ const struct cred *cred = current_cred();
56485+
56486+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56487+ return 0;
56488+
56489+ curr = current->acl;
56490+ isk = inet_sk(sk);
56491+
56492+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56493+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56494+ addr->sin_addr.s_addr = curr->inaddr_any_override;
56495+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56496+ struct sockaddr_in saddr;
56497+ int err;
56498+
56499+ saddr.sin_family = AF_INET;
56500+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
56501+ saddr.sin_port = isk->inet_sport;
56502+
56503+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56504+ if (err)
56505+ return err;
56506+
56507+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56508+ if (err)
56509+ return err;
56510+ }
56511+
56512+ if (!curr->ips)
56513+ return 0;
56514+
56515+ ip_addr = addr->sin_addr.s_addr;
56516+ ip_port = ntohs(addr->sin_port);
56517+
56518+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56519+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56520+ current->role->roletype, cred->uid,
56521+ cred->gid, current->exec_file ?
56522+ gr_to_filename(current->exec_file->f_path.dentry,
56523+ current->exec_file->f_path.mnt) :
56524+ curr->filename, curr->filename,
56525+ &ip_addr, ip_port, type,
56526+ sk->sk_protocol, mode, &current->signal->saved_ip);
56527+ return 0;
56528+ }
56529+
56530+ for (i = 0; i < curr->ip_num; i++) {
56531+ ip = *(curr->ips + i);
56532+ if (ip->iface != NULL) {
56533+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
56534+ p = strchr(iface, ':');
56535+ if (p != NULL)
56536+ *p = '\0';
56537+ dev = dev_get_by_name(sock_net(sk), iface);
56538+ if (dev == NULL)
56539+ continue;
56540+ idev = in_dev_get(dev);
56541+ if (idev == NULL) {
56542+ dev_put(dev);
56543+ continue;
56544+ }
56545+ rcu_read_lock();
56546+ for_ifa(idev) {
56547+ if (!strcmp(ip->iface, ifa->ifa_label)) {
56548+ our_addr = ifa->ifa_address;
56549+ our_netmask = 0xffffffff;
56550+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56551+ if (ret == 1) {
56552+ rcu_read_unlock();
56553+ in_dev_put(idev);
56554+ dev_put(dev);
56555+ return 0;
56556+ } else if (ret == 2) {
56557+ rcu_read_unlock();
56558+ in_dev_put(idev);
56559+ dev_put(dev);
56560+ goto denied;
56561+ }
56562+ }
56563+ } endfor_ifa(idev);
56564+ rcu_read_unlock();
56565+ in_dev_put(idev);
56566+ dev_put(dev);
56567+ } else {
56568+ our_addr = ip->addr;
56569+ our_netmask = ip->netmask;
56570+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56571+ if (ret == 1)
56572+ return 0;
56573+ else if (ret == 2)
56574+ goto denied;
56575+ }
56576+ }
56577+
56578+denied:
56579+ if (mode == GR_BIND)
56580+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56581+ else if (mode == GR_CONNECT)
56582+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56583+
56584+ return -EACCES;
56585+}
56586+
56587+int
56588+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56589+{
56590+ /* always allow disconnection of dgram sockets with connect */
56591+ if (addr->sin_family == AF_UNSPEC)
56592+ return 0;
56593+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56594+}
56595+
56596+int
56597+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56598+{
56599+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56600+}
56601+
56602+int gr_search_listen(struct socket *sock)
56603+{
56604+ struct sock *sk = sock->sk;
56605+ struct sockaddr_in addr;
56606+
56607+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56608+ addr.sin_port = inet_sk(sk)->inet_sport;
56609+
56610+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56611+}
56612+
56613+int gr_search_accept(struct socket *sock)
56614+{
56615+ struct sock *sk = sock->sk;
56616+ struct sockaddr_in addr;
56617+
56618+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56619+ addr.sin_port = inet_sk(sk)->inet_sport;
56620+
56621+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56622+}
56623+
56624+int
56625+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56626+{
56627+ if (addr)
56628+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56629+ else {
56630+ struct sockaddr_in sin;
56631+ const struct inet_sock *inet = inet_sk(sk);
56632+
56633+ sin.sin_addr.s_addr = inet->inet_daddr;
56634+ sin.sin_port = inet->inet_dport;
56635+
56636+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56637+ }
56638+}
56639+
56640+int
56641+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56642+{
56643+ struct sockaddr_in sin;
56644+
56645+ if (unlikely(skb->len < sizeof (struct udphdr)))
56646+ return 0; // skip this packet
56647+
56648+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56649+ sin.sin_port = udp_hdr(skb)->source;
56650+
56651+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56652+}
56653diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56654new file mode 100644
56655index 0000000..25f54ef
56656--- /dev/null
56657+++ b/grsecurity/gracl_learn.c
56658@@ -0,0 +1,207 @@
56659+#include <linux/kernel.h>
56660+#include <linux/mm.h>
56661+#include <linux/sched.h>
56662+#include <linux/poll.h>
56663+#include <linux/string.h>
56664+#include <linux/file.h>
56665+#include <linux/types.h>
56666+#include <linux/vmalloc.h>
56667+#include <linux/grinternal.h>
56668+
56669+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56670+ size_t count, loff_t *ppos);
56671+extern int gr_acl_is_enabled(void);
56672+
56673+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56674+static int gr_learn_attached;
56675+
56676+/* use a 512k buffer */
56677+#define LEARN_BUFFER_SIZE (512 * 1024)
56678+
56679+static DEFINE_SPINLOCK(gr_learn_lock);
56680+static DEFINE_MUTEX(gr_learn_user_mutex);
56681+
56682+/* we need to maintain two buffers, so that the kernel context of grlearn
56683+ uses a semaphore around the userspace copying, and the other kernel contexts
56684+ use a spinlock when copying into the buffer, since they cannot sleep
56685+*/
56686+static char *learn_buffer;
56687+static char *learn_buffer_user;
56688+static int learn_buffer_len;
56689+static int learn_buffer_user_len;
56690+
56691+static ssize_t
56692+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56693+{
56694+ DECLARE_WAITQUEUE(wait, current);
56695+ ssize_t retval = 0;
56696+
56697+ add_wait_queue(&learn_wait, &wait);
56698+ set_current_state(TASK_INTERRUPTIBLE);
56699+ do {
56700+ mutex_lock(&gr_learn_user_mutex);
56701+ spin_lock(&gr_learn_lock);
56702+ if (learn_buffer_len)
56703+ break;
56704+ spin_unlock(&gr_learn_lock);
56705+ mutex_unlock(&gr_learn_user_mutex);
56706+ if (file->f_flags & O_NONBLOCK) {
56707+ retval = -EAGAIN;
56708+ goto out;
56709+ }
56710+ if (signal_pending(current)) {
56711+ retval = -ERESTARTSYS;
56712+ goto out;
56713+ }
56714+
56715+ schedule();
56716+ } while (1);
56717+
56718+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56719+ learn_buffer_user_len = learn_buffer_len;
56720+ retval = learn_buffer_len;
56721+ learn_buffer_len = 0;
56722+
56723+ spin_unlock(&gr_learn_lock);
56724+
56725+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56726+ retval = -EFAULT;
56727+
56728+ mutex_unlock(&gr_learn_user_mutex);
56729+out:
56730+ set_current_state(TASK_RUNNING);
56731+ remove_wait_queue(&learn_wait, &wait);
56732+ return retval;
56733+}
56734+
56735+static unsigned int
56736+poll_learn(struct file * file, poll_table * wait)
56737+{
56738+ poll_wait(file, &learn_wait, wait);
56739+
56740+ if (learn_buffer_len)
56741+ return (POLLIN | POLLRDNORM);
56742+
56743+ return 0;
56744+}
56745+
56746+void
56747+gr_clear_learn_entries(void)
56748+{
56749+ char *tmp;
56750+
56751+ mutex_lock(&gr_learn_user_mutex);
56752+ spin_lock(&gr_learn_lock);
56753+ tmp = learn_buffer;
56754+ learn_buffer = NULL;
56755+ spin_unlock(&gr_learn_lock);
56756+ if (tmp)
56757+ vfree(tmp);
56758+ if (learn_buffer_user != NULL) {
56759+ vfree(learn_buffer_user);
56760+ learn_buffer_user = NULL;
56761+ }
56762+ learn_buffer_len = 0;
56763+ mutex_unlock(&gr_learn_user_mutex);
56764+
56765+ return;
56766+}
56767+
56768+void
56769+gr_add_learn_entry(const char *fmt, ...)
56770+{
56771+ va_list args;
56772+ unsigned int len;
56773+
56774+ if (!gr_learn_attached)
56775+ return;
56776+
56777+ spin_lock(&gr_learn_lock);
56778+
56779+ /* leave a gap at the end so we know when it's "full" but don't have to
56780+ compute the exact length of the string we're trying to append
56781+ */
56782+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56783+ spin_unlock(&gr_learn_lock);
56784+ wake_up_interruptible(&learn_wait);
56785+ return;
56786+ }
56787+ if (learn_buffer == NULL) {
56788+ spin_unlock(&gr_learn_lock);
56789+ return;
56790+ }
56791+
56792+ va_start(args, fmt);
56793+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56794+ va_end(args);
56795+
56796+ learn_buffer_len += len + 1;
56797+
56798+ spin_unlock(&gr_learn_lock);
56799+ wake_up_interruptible(&learn_wait);
56800+
56801+ return;
56802+}
56803+
56804+static int
56805+open_learn(struct inode *inode, struct file *file)
56806+{
56807+ if (file->f_mode & FMODE_READ && gr_learn_attached)
56808+ return -EBUSY;
56809+ if (file->f_mode & FMODE_READ) {
56810+ int retval = 0;
56811+ mutex_lock(&gr_learn_user_mutex);
56812+ if (learn_buffer == NULL)
56813+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56814+ if (learn_buffer_user == NULL)
56815+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56816+ if (learn_buffer == NULL) {
56817+ retval = -ENOMEM;
56818+ goto out_error;
56819+ }
56820+ if (learn_buffer_user == NULL) {
56821+ retval = -ENOMEM;
56822+ goto out_error;
56823+ }
56824+ learn_buffer_len = 0;
56825+ learn_buffer_user_len = 0;
56826+ gr_learn_attached = 1;
56827+out_error:
56828+ mutex_unlock(&gr_learn_user_mutex);
56829+ return retval;
56830+ }
56831+ return 0;
56832+}
56833+
56834+static int
56835+close_learn(struct inode *inode, struct file *file)
56836+{
56837+ if (file->f_mode & FMODE_READ) {
56838+ char *tmp = NULL;
56839+ mutex_lock(&gr_learn_user_mutex);
56840+ spin_lock(&gr_learn_lock);
56841+ tmp = learn_buffer;
56842+ learn_buffer = NULL;
56843+ spin_unlock(&gr_learn_lock);
56844+ if (tmp)
56845+ vfree(tmp);
56846+ if (learn_buffer_user != NULL) {
56847+ vfree(learn_buffer_user);
56848+ learn_buffer_user = NULL;
56849+ }
56850+ learn_buffer_len = 0;
56851+ learn_buffer_user_len = 0;
56852+ gr_learn_attached = 0;
56853+ mutex_unlock(&gr_learn_user_mutex);
56854+ }
56855+
56856+ return 0;
56857+}
56858+
56859+const struct file_operations grsec_fops = {
56860+ .read = read_learn,
56861+ .write = write_grsec_handler,
56862+ .open = open_learn,
56863+ .release = close_learn,
56864+ .poll = poll_learn,
56865+};
56866diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56867new file mode 100644
56868index 0000000..39645c9
56869--- /dev/null
56870+++ b/grsecurity/gracl_res.c
56871@@ -0,0 +1,68 @@
56872+#include <linux/kernel.h>
56873+#include <linux/sched.h>
56874+#include <linux/gracl.h>
56875+#include <linux/grinternal.h>
56876+
56877+static const char *restab_log[] = {
56878+ [RLIMIT_CPU] = "RLIMIT_CPU",
56879+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56880+ [RLIMIT_DATA] = "RLIMIT_DATA",
56881+ [RLIMIT_STACK] = "RLIMIT_STACK",
56882+ [RLIMIT_CORE] = "RLIMIT_CORE",
56883+ [RLIMIT_RSS] = "RLIMIT_RSS",
56884+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56885+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56886+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56887+ [RLIMIT_AS] = "RLIMIT_AS",
56888+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56889+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56890+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56891+ [RLIMIT_NICE] = "RLIMIT_NICE",
56892+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56893+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56894+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56895+};
56896+
56897+void
56898+gr_log_resource(const struct task_struct *task,
56899+ const int res, const unsigned long wanted, const int gt)
56900+{
56901+ const struct cred *cred;
56902+ unsigned long rlim;
56903+
56904+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56905+ return;
56906+
56907+ // not yet supported resource
56908+ if (unlikely(!restab_log[res]))
56909+ return;
56910+
56911+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56912+ rlim = task_rlimit_max(task, res);
56913+ else
56914+ rlim = task_rlimit(task, res);
56915+
56916+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56917+ return;
56918+
56919+ rcu_read_lock();
56920+ cred = __task_cred(task);
56921+
56922+ if (res == RLIMIT_NPROC &&
56923+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56924+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56925+ goto out_rcu_unlock;
56926+ else if (res == RLIMIT_MEMLOCK &&
56927+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56928+ goto out_rcu_unlock;
56929+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56930+ goto out_rcu_unlock;
56931+ rcu_read_unlock();
56932+
56933+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56934+
56935+ return;
56936+out_rcu_unlock:
56937+ rcu_read_unlock();
56938+ return;
56939+}
56940diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56941new file mode 100644
56942index 0000000..5556be3
56943--- /dev/null
56944+++ b/grsecurity/gracl_segv.c
56945@@ -0,0 +1,299 @@
56946+#include <linux/kernel.h>
56947+#include <linux/mm.h>
56948+#include <asm/uaccess.h>
56949+#include <asm/errno.h>
56950+#include <asm/mman.h>
56951+#include <net/sock.h>
56952+#include <linux/file.h>
56953+#include <linux/fs.h>
56954+#include <linux/net.h>
56955+#include <linux/in.h>
56956+#include <linux/slab.h>
56957+#include <linux/types.h>
56958+#include <linux/sched.h>
56959+#include <linux/timer.h>
56960+#include <linux/gracl.h>
56961+#include <linux/grsecurity.h>
56962+#include <linux/grinternal.h>
56963+
56964+static struct crash_uid *uid_set;
56965+static unsigned short uid_used;
56966+static DEFINE_SPINLOCK(gr_uid_lock);
56967+extern rwlock_t gr_inode_lock;
56968+extern struct acl_subject_label *
56969+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56970+ struct acl_role_label *role);
56971+
56972+#ifdef CONFIG_BTRFS_FS
56973+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56974+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56975+#endif
56976+
56977+static inline dev_t __get_dev(const struct dentry *dentry)
56978+{
56979+#ifdef CONFIG_BTRFS_FS
56980+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56981+ return get_btrfs_dev_from_inode(dentry->d_inode);
56982+ else
56983+#endif
56984+ return dentry->d_inode->i_sb->s_dev;
56985+}
56986+
56987+int
56988+gr_init_uidset(void)
56989+{
56990+ uid_set =
56991+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56992+ uid_used = 0;
56993+
56994+ return uid_set ? 1 : 0;
56995+}
56996+
56997+void
56998+gr_free_uidset(void)
56999+{
57000+ if (uid_set)
57001+ kfree(uid_set);
57002+
57003+ return;
57004+}
57005+
57006+int
57007+gr_find_uid(const uid_t uid)
57008+{
57009+ struct crash_uid *tmp = uid_set;
57010+ uid_t buid;
57011+ int low = 0, high = uid_used - 1, mid;
57012+
57013+ while (high >= low) {
57014+ mid = (low + high) >> 1;
57015+ buid = tmp[mid].uid;
57016+ if (buid == uid)
57017+ return mid;
57018+ if (buid > uid)
57019+ high = mid - 1;
57020+ if (buid < uid)
57021+ low = mid + 1;
57022+ }
57023+
57024+ return -1;
57025+}
57026+
57027+static __inline__ void
57028+gr_insertsort(void)
57029+{
57030+ unsigned short i, j;
57031+ struct crash_uid index;
57032+
57033+ for (i = 1; i < uid_used; i++) {
57034+ index = uid_set[i];
57035+ j = i;
57036+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
57037+ uid_set[j] = uid_set[j - 1];
57038+ j--;
57039+ }
57040+ uid_set[j] = index;
57041+ }
57042+
57043+ return;
57044+}
57045+
57046+static __inline__ void
57047+gr_insert_uid(const uid_t uid, const unsigned long expires)
57048+{
57049+ int loc;
57050+
57051+ if (uid_used == GR_UIDTABLE_MAX)
57052+ return;
57053+
57054+ loc = gr_find_uid(uid);
57055+
57056+ if (loc >= 0) {
57057+ uid_set[loc].expires = expires;
57058+ return;
57059+ }
57060+
57061+ uid_set[uid_used].uid = uid;
57062+ uid_set[uid_used].expires = expires;
57063+ uid_used++;
57064+
57065+ gr_insertsort();
57066+
57067+ return;
57068+}
57069+
57070+void
57071+gr_remove_uid(const unsigned short loc)
57072+{
57073+ unsigned short i;
57074+
57075+ for (i = loc + 1; i < uid_used; i++)
57076+ uid_set[i - 1] = uid_set[i];
57077+
57078+ uid_used--;
57079+
57080+ return;
57081+}
57082+
57083+int
57084+gr_check_crash_uid(const uid_t uid)
57085+{
57086+ int loc;
57087+ int ret = 0;
57088+
57089+ if (unlikely(!gr_acl_is_enabled()))
57090+ return 0;
57091+
57092+ spin_lock(&gr_uid_lock);
57093+ loc = gr_find_uid(uid);
57094+
57095+ if (loc < 0)
57096+ goto out_unlock;
57097+
57098+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
57099+ gr_remove_uid(loc);
57100+ else
57101+ ret = 1;
57102+
57103+out_unlock:
57104+ spin_unlock(&gr_uid_lock);
57105+ return ret;
57106+}
57107+
57108+static __inline__ int
57109+proc_is_setxid(const struct cred *cred)
57110+{
57111+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
57112+ cred->uid != cred->fsuid)
57113+ return 1;
57114+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
57115+ cred->gid != cred->fsgid)
57116+ return 1;
57117+
57118+ return 0;
57119+}
57120+
57121+extern int gr_fake_force_sig(int sig, struct task_struct *t);
57122+
57123+void
57124+gr_handle_crash(struct task_struct *task, const int sig)
57125+{
57126+ struct acl_subject_label *curr;
57127+ struct task_struct *tsk, *tsk2;
57128+ const struct cred *cred;
57129+ const struct cred *cred2;
57130+
57131+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
57132+ return;
57133+
57134+ if (unlikely(!gr_acl_is_enabled()))
57135+ return;
57136+
57137+ curr = task->acl;
57138+
57139+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
57140+ return;
57141+
57142+ if (time_before_eq(curr->expires, get_seconds())) {
57143+ curr->expires = 0;
57144+ curr->crashes = 0;
57145+ }
57146+
57147+ curr->crashes++;
57148+
57149+ if (!curr->expires)
57150+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
57151+
57152+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57153+ time_after(curr->expires, get_seconds())) {
57154+ rcu_read_lock();
57155+ cred = __task_cred(task);
57156+ if (cred->uid && proc_is_setxid(cred)) {
57157+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57158+ spin_lock(&gr_uid_lock);
57159+ gr_insert_uid(cred->uid, curr->expires);
57160+ spin_unlock(&gr_uid_lock);
57161+ curr->expires = 0;
57162+ curr->crashes = 0;
57163+ read_lock(&tasklist_lock);
57164+ do_each_thread(tsk2, tsk) {
57165+ cred2 = __task_cred(tsk);
57166+ if (tsk != task && cred2->uid == cred->uid)
57167+ gr_fake_force_sig(SIGKILL, tsk);
57168+ } while_each_thread(tsk2, tsk);
57169+ read_unlock(&tasklist_lock);
57170+ } else {
57171+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57172+ read_lock(&tasklist_lock);
57173+ read_lock(&grsec_exec_file_lock);
57174+ do_each_thread(tsk2, tsk) {
57175+ if (likely(tsk != task)) {
57176+ // if this thread has the same subject as the one that triggered
57177+ // RES_CRASH and it's the same binary, kill it
57178+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
57179+ gr_fake_force_sig(SIGKILL, tsk);
57180+ }
57181+ } while_each_thread(tsk2, tsk);
57182+ read_unlock(&grsec_exec_file_lock);
57183+ read_unlock(&tasklist_lock);
57184+ }
57185+ rcu_read_unlock();
57186+ }
57187+
57188+ return;
57189+}
57190+
57191+int
57192+gr_check_crash_exec(const struct file *filp)
57193+{
57194+ struct acl_subject_label *curr;
57195+
57196+ if (unlikely(!gr_acl_is_enabled()))
57197+ return 0;
57198+
57199+ read_lock(&gr_inode_lock);
57200+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
57201+ __get_dev(filp->f_path.dentry),
57202+ current->role);
57203+ read_unlock(&gr_inode_lock);
57204+
57205+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
57206+ (!curr->crashes && !curr->expires))
57207+ return 0;
57208+
57209+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57210+ time_after(curr->expires, get_seconds()))
57211+ return 1;
57212+ else if (time_before_eq(curr->expires, get_seconds())) {
57213+ curr->crashes = 0;
57214+ curr->expires = 0;
57215+ }
57216+
57217+ return 0;
57218+}
57219+
57220+void
57221+gr_handle_alertkill(struct task_struct *task)
57222+{
57223+ struct acl_subject_label *curracl;
57224+ __u32 curr_ip;
57225+ struct task_struct *p, *p2;
57226+
57227+ if (unlikely(!gr_acl_is_enabled()))
57228+ return;
57229+
57230+ curracl = task->acl;
57231+ curr_ip = task->signal->curr_ip;
57232+
57233+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57234+ read_lock(&tasklist_lock);
57235+ do_each_thread(p2, p) {
57236+ if (p->signal->curr_ip == curr_ip)
57237+ gr_fake_force_sig(SIGKILL, p);
57238+ } while_each_thread(p2, p);
57239+ read_unlock(&tasklist_lock);
57240+ } else if (curracl->mode & GR_KILLPROC)
57241+ gr_fake_force_sig(SIGKILL, task);
57242+
57243+ return;
57244+}
57245diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57246new file mode 100644
57247index 0000000..9d83a69
57248--- /dev/null
57249+++ b/grsecurity/gracl_shm.c
57250@@ -0,0 +1,40 @@
57251+#include <linux/kernel.h>
57252+#include <linux/mm.h>
57253+#include <linux/sched.h>
57254+#include <linux/file.h>
57255+#include <linux/ipc.h>
57256+#include <linux/gracl.h>
57257+#include <linux/grsecurity.h>
57258+#include <linux/grinternal.h>
57259+
57260+int
57261+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57262+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57263+{
57264+ struct task_struct *task;
57265+
57266+ if (!gr_acl_is_enabled())
57267+ return 1;
57268+
57269+ rcu_read_lock();
57270+ read_lock(&tasklist_lock);
57271+
57272+ task = find_task_by_vpid(shm_cprid);
57273+
57274+ if (unlikely(!task))
57275+ task = find_task_by_vpid(shm_lapid);
57276+
57277+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57278+ (task->pid == shm_lapid)) &&
57279+ (task->acl->mode & GR_PROTSHM) &&
57280+ (task->acl != current->acl))) {
57281+ read_unlock(&tasklist_lock);
57282+ rcu_read_unlock();
57283+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57284+ return 0;
57285+ }
57286+ read_unlock(&tasklist_lock);
57287+ rcu_read_unlock();
57288+
57289+ return 1;
57290+}
57291diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57292new file mode 100644
57293index 0000000..bc0be01
57294--- /dev/null
57295+++ b/grsecurity/grsec_chdir.c
57296@@ -0,0 +1,19 @@
57297+#include <linux/kernel.h>
57298+#include <linux/sched.h>
57299+#include <linux/fs.h>
57300+#include <linux/file.h>
57301+#include <linux/grsecurity.h>
57302+#include <linux/grinternal.h>
57303+
57304+void
57305+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57306+{
57307+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57308+ if ((grsec_enable_chdir && grsec_enable_group &&
57309+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57310+ !grsec_enable_group)) {
57311+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57312+ }
57313+#endif
57314+ return;
57315+}
57316diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57317new file mode 100644
57318index 0000000..9807ee2
57319--- /dev/null
57320+++ b/grsecurity/grsec_chroot.c
57321@@ -0,0 +1,368 @@
57322+#include <linux/kernel.h>
57323+#include <linux/module.h>
57324+#include <linux/sched.h>
57325+#include <linux/file.h>
57326+#include <linux/fs.h>
57327+#include <linux/mount.h>
57328+#include <linux/types.h>
57329+#include "../fs/mount.h"
57330+#include <linux/grsecurity.h>
57331+#include <linux/grinternal.h>
57332+
57333+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57334+{
57335+#ifdef CONFIG_GRKERNSEC
57336+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
57337+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
57338+ task->gr_is_chrooted = 1;
57339+ else
57340+ task->gr_is_chrooted = 0;
57341+
57342+ task->gr_chroot_dentry = path->dentry;
57343+#endif
57344+ return;
57345+}
57346+
57347+void gr_clear_chroot_entries(struct task_struct *task)
57348+{
57349+#ifdef CONFIG_GRKERNSEC
57350+ task->gr_is_chrooted = 0;
57351+ task->gr_chroot_dentry = NULL;
57352+#endif
57353+ return;
57354+}
57355+
57356+int
57357+gr_handle_chroot_unix(const pid_t pid)
57358+{
57359+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57360+ struct task_struct *p;
57361+
57362+ if (unlikely(!grsec_enable_chroot_unix))
57363+ return 1;
57364+
57365+ if (likely(!proc_is_chrooted(current)))
57366+ return 1;
57367+
57368+ rcu_read_lock();
57369+ read_lock(&tasklist_lock);
57370+ p = find_task_by_vpid_unrestricted(pid);
57371+ if (unlikely(p && !have_same_root(current, p))) {
57372+ read_unlock(&tasklist_lock);
57373+ rcu_read_unlock();
57374+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57375+ return 0;
57376+ }
57377+ read_unlock(&tasklist_lock);
57378+ rcu_read_unlock();
57379+#endif
57380+ return 1;
57381+}
57382+
57383+int
57384+gr_handle_chroot_nice(void)
57385+{
57386+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57387+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57388+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57389+ return -EPERM;
57390+ }
57391+#endif
57392+ return 0;
57393+}
57394+
57395+int
57396+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57397+{
57398+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57399+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57400+ && proc_is_chrooted(current)) {
57401+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57402+ return -EACCES;
57403+ }
57404+#endif
57405+ return 0;
57406+}
57407+
57408+int
57409+gr_handle_chroot_rawio(const struct inode *inode)
57410+{
57411+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57412+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57413+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57414+ return 1;
57415+#endif
57416+ return 0;
57417+}
57418+
57419+int
57420+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57421+{
57422+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57423+ struct task_struct *p;
57424+ int ret = 0;
57425+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57426+ return ret;
57427+
57428+ read_lock(&tasklist_lock);
57429+ do_each_pid_task(pid, type, p) {
57430+ if (!have_same_root(current, p)) {
57431+ ret = 1;
57432+ goto out;
57433+ }
57434+ } while_each_pid_task(pid, type, p);
57435+out:
57436+ read_unlock(&tasklist_lock);
57437+ return ret;
57438+#endif
57439+ return 0;
57440+}
57441+
57442+int
57443+gr_pid_is_chrooted(struct task_struct *p)
57444+{
57445+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57446+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57447+ return 0;
57448+
57449+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57450+ !have_same_root(current, p)) {
57451+ return 1;
57452+ }
57453+#endif
57454+ return 0;
57455+}
57456+
57457+EXPORT_SYMBOL(gr_pid_is_chrooted);
57458+
57459+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57460+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57461+{
57462+ struct path path, currentroot;
57463+ int ret = 0;
57464+
57465+ path.dentry = (struct dentry *)u_dentry;
57466+ path.mnt = (struct vfsmount *)u_mnt;
57467+ get_fs_root(current->fs, &currentroot);
57468+ if (path_is_under(&path, &currentroot))
57469+ ret = 1;
57470+ path_put(&currentroot);
57471+
57472+ return ret;
57473+}
57474+#endif
57475+
57476+int
57477+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57478+{
57479+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57480+ if (!grsec_enable_chroot_fchdir)
57481+ return 1;
57482+
57483+ if (!proc_is_chrooted(current))
57484+ return 1;
57485+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57486+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57487+ return 0;
57488+ }
57489+#endif
57490+ return 1;
57491+}
57492+
57493+int
57494+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57495+ const time_t shm_createtime)
57496+{
57497+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57498+ struct task_struct *p;
57499+ time_t starttime;
57500+
57501+ if (unlikely(!grsec_enable_chroot_shmat))
57502+ return 1;
57503+
57504+ if (likely(!proc_is_chrooted(current)))
57505+ return 1;
57506+
57507+ rcu_read_lock();
57508+ read_lock(&tasklist_lock);
57509+
57510+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57511+ starttime = p->start_time.tv_sec;
57512+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57513+ if (have_same_root(current, p)) {
57514+ goto allow;
57515+ } else {
57516+ read_unlock(&tasklist_lock);
57517+ rcu_read_unlock();
57518+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57519+ return 0;
57520+ }
57521+ }
57522+ /* creator exited, pid reuse, fall through to next check */
57523+ }
57524+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57525+ if (unlikely(!have_same_root(current, p))) {
57526+ read_unlock(&tasklist_lock);
57527+ rcu_read_unlock();
57528+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57529+ return 0;
57530+ }
57531+ }
57532+
57533+allow:
57534+ read_unlock(&tasklist_lock);
57535+ rcu_read_unlock();
57536+#endif
57537+ return 1;
57538+}
57539+
57540+void
57541+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57542+{
57543+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57544+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57545+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57546+#endif
57547+ return;
57548+}
57549+
57550+int
57551+gr_handle_chroot_mknod(const struct dentry *dentry,
57552+ const struct vfsmount *mnt, const int mode)
57553+{
57554+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57555+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57556+ proc_is_chrooted(current)) {
57557+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57558+ return -EPERM;
57559+ }
57560+#endif
57561+ return 0;
57562+}
57563+
57564+int
57565+gr_handle_chroot_mount(const struct dentry *dentry,
57566+ const struct vfsmount *mnt, const char *dev_name)
57567+{
57568+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57569+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57570+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57571+ return -EPERM;
57572+ }
57573+#endif
57574+ return 0;
57575+}
57576+
57577+int
57578+gr_handle_chroot_pivot(void)
57579+{
57580+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57581+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57582+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57583+ return -EPERM;
57584+ }
57585+#endif
57586+ return 0;
57587+}
57588+
57589+int
57590+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57591+{
57592+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57593+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57594+ !gr_is_outside_chroot(dentry, mnt)) {
57595+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57596+ return -EPERM;
57597+ }
57598+#endif
57599+ return 0;
57600+}
57601+
57602+extern const char *captab_log[];
57603+extern int captab_log_entries;
57604+
57605+int
57606+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57607+{
57608+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57609+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57610+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57611+ if (cap_raised(chroot_caps, cap)) {
57612+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57613+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57614+ }
57615+ return 0;
57616+ }
57617+ }
57618+#endif
57619+ return 1;
57620+}
57621+
57622+int
57623+gr_chroot_is_capable(const int cap)
57624+{
57625+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57626+ return gr_task_chroot_is_capable(current, current_cred(), cap);
57627+#endif
57628+ return 1;
57629+}
57630+
57631+int
57632+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57633+{
57634+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57635+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57636+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57637+ if (cap_raised(chroot_caps, cap)) {
57638+ return 0;
57639+ }
57640+ }
57641+#endif
57642+ return 1;
57643+}
57644+
57645+int
57646+gr_chroot_is_capable_nolog(const int cap)
57647+{
57648+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57649+ return gr_task_chroot_is_capable_nolog(current, cap);
57650+#endif
57651+ return 1;
57652+}
57653+
57654+int
57655+gr_handle_chroot_sysctl(const int op)
57656+{
57657+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57658+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57659+ proc_is_chrooted(current))
57660+ return -EACCES;
57661+#endif
57662+ return 0;
57663+}
57664+
57665+void
57666+gr_handle_chroot_chdir(struct path *path)
57667+{
57668+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57669+ if (grsec_enable_chroot_chdir)
57670+ set_fs_pwd(current->fs, path);
57671+#endif
57672+ return;
57673+}
57674+
57675+int
57676+gr_handle_chroot_chmod(const struct dentry *dentry,
57677+ const struct vfsmount *mnt, const int mode)
57678+{
57679+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57680+ /* allow chmod +s on directories, but not files */
57681+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57682+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57683+ proc_is_chrooted(current)) {
57684+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57685+ return -EPERM;
57686+ }
57687+#endif
57688+ return 0;
57689+}
57690diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57691new file mode 100644
57692index 0000000..213ad8b
57693--- /dev/null
57694+++ b/grsecurity/grsec_disabled.c
57695@@ -0,0 +1,437 @@
57696+#include <linux/kernel.h>
57697+#include <linux/module.h>
57698+#include <linux/sched.h>
57699+#include <linux/file.h>
57700+#include <linux/fs.h>
57701+#include <linux/kdev_t.h>
57702+#include <linux/net.h>
57703+#include <linux/in.h>
57704+#include <linux/ip.h>
57705+#include <linux/skbuff.h>
57706+#include <linux/sysctl.h>
57707+
57708+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57709+void
57710+pax_set_initial_flags(struct linux_binprm *bprm)
57711+{
57712+ return;
57713+}
57714+#endif
57715+
57716+#ifdef CONFIG_SYSCTL
57717+__u32
57718+gr_handle_sysctl(const struct ctl_table * table, const int op)
57719+{
57720+ return 0;
57721+}
57722+#endif
57723+
57724+#ifdef CONFIG_TASKSTATS
57725+int gr_is_taskstats_denied(int pid)
57726+{
57727+ return 0;
57728+}
57729+#endif
57730+
57731+int
57732+gr_acl_is_enabled(void)
57733+{
57734+ return 0;
57735+}
57736+
57737+void
57738+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57739+{
57740+ return;
57741+}
57742+
57743+int
57744+gr_handle_rawio(const struct inode *inode)
57745+{
57746+ return 0;
57747+}
57748+
57749+void
57750+gr_acl_handle_psacct(struct task_struct *task, const long code)
57751+{
57752+ return;
57753+}
57754+
57755+int
57756+gr_handle_ptrace(struct task_struct *task, const long request)
57757+{
57758+ return 0;
57759+}
57760+
57761+int
57762+gr_handle_proc_ptrace(struct task_struct *task)
57763+{
57764+ return 0;
57765+}
57766+
57767+void
57768+gr_learn_resource(const struct task_struct *task,
57769+ const int res, const unsigned long wanted, const int gt)
57770+{
57771+ return;
57772+}
57773+
57774+int
57775+gr_set_acls(const int type)
57776+{
57777+ return 0;
57778+}
57779+
57780+int
57781+gr_check_hidden_task(const struct task_struct *tsk)
57782+{
57783+ return 0;
57784+}
57785+
57786+int
57787+gr_check_protected_task(const struct task_struct *task)
57788+{
57789+ return 0;
57790+}
57791+
57792+int
57793+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57794+{
57795+ return 0;
57796+}
57797+
57798+void
57799+gr_copy_label(struct task_struct *tsk)
57800+{
57801+ return;
57802+}
57803+
57804+void
57805+gr_set_pax_flags(struct task_struct *task)
57806+{
57807+ return;
57808+}
57809+
57810+int
57811+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57812+ const int unsafe_share)
57813+{
57814+ return 0;
57815+}
57816+
57817+void
57818+gr_handle_delete(const ino_t ino, const dev_t dev)
57819+{
57820+ return;
57821+}
57822+
57823+void
57824+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57825+{
57826+ return;
57827+}
57828+
57829+void
57830+gr_handle_crash(struct task_struct *task, const int sig)
57831+{
57832+ return;
57833+}
57834+
57835+int
57836+gr_check_crash_exec(const struct file *filp)
57837+{
57838+ return 0;
57839+}
57840+
57841+int
57842+gr_check_crash_uid(const uid_t uid)
57843+{
57844+ return 0;
57845+}
57846+
57847+void
57848+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57849+ struct dentry *old_dentry,
57850+ struct dentry *new_dentry,
57851+ struct vfsmount *mnt, const __u8 replace)
57852+{
57853+ return;
57854+}
57855+
57856+int
57857+gr_search_socket(const int family, const int type, const int protocol)
57858+{
57859+ return 1;
57860+}
57861+
57862+int
57863+gr_search_connectbind(const int mode, const struct socket *sock,
57864+ const struct sockaddr_in *addr)
57865+{
57866+ return 0;
57867+}
57868+
57869+void
57870+gr_handle_alertkill(struct task_struct *task)
57871+{
57872+ return;
57873+}
57874+
57875+__u32
57876+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57877+{
57878+ return 1;
57879+}
57880+
57881+__u32
57882+gr_acl_handle_hidden_file(const struct dentry * dentry,
57883+ const struct vfsmount * mnt)
57884+{
57885+ return 1;
57886+}
57887+
57888+__u32
57889+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57890+ int acc_mode)
57891+{
57892+ return 1;
57893+}
57894+
57895+__u32
57896+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57897+{
57898+ return 1;
57899+}
57900+
57901+__u32
57902+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57903+{
57904+ return 1;
57905+}
57906+
57907+int
57908+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57909+ unsigned int *vm_flags)
57910+{
57911+ return 1;
57912+}
57913+
57914+__u32
57915+gr_acl_handle_truncate(const struct dentry * dentry,
57916+ const struct vfsmount * mnt)
57917+{
57918+ return 1;
57919+}
57920+
57921+__u32
57922+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57923+{
57924+ return 1;
57925+}
57926+
57927+__u32
57928+gr_acl_handle_access(const struct dentry * dentry,
57929+ const struct vfsmount * mnt, const int fmode)
57930+{
57931+ return 1;
57932+}
57933+
57934+__u32
57935+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57936+ umode_t *mode)
57937+{
57938+ return 1;
57939+}
57940+
57941+__u32
57942+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57943+{
57944+ return 1;
57945+}
57946+
57947+__u32
57948+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57949+{
57950+ return 1;
57951+}
57952+
57953+void
57954+grsecurity_init(void)
57955+{
57956+ return;
57957+}
57958+
57959+umode_t gr_acl_umask(void)
57960+{
57961+ return 0;
57962+}
57963+
57964+__u32
57965+gr_acl_handle_mknod(const struct dentry * new_dentry,
57966+ const struct dentry * parent_dentry,
57967+ const struct vfsmount * parent_mnt,
57968+ const int mode)
57969+{
57970+ return 1;
57971+}
57972+
57973+__u32
57974+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57975+ const struct dentry * parent_dentry,
57976+ const struct vfsmount * parent_mnt)
57977+{
57978+ return 1;
57979+}
57980+
57981+__u32
57982+gr_acl_handle_symlink(const struct dentry * new_dentry,
57983+ const struct dentry * parent_dentry,
57984+ const struct vfsmount * parent_mnt, const char *from)
57985+{
57986+ return 1;
57987+}
57988+
57989+__u32
57990+gr_acl_handle_link(const struct dentry * new_dentry,
57991+ const struct dentry * parent_dentry,
57992+ const struct vfsmount * parent_mnt,
57993+ const struct dentry * old_dentry,
57994+ const struct vfsmount * old_mnt, const char *to)
57995+{
57996+ return 1;
57997+}
57998+
57999+int
58000+gr_acl_handle_rename(const struct dentry *new_dentry,
58001+ const struct dentry *parent_dentry,
58002+ const struct vfsmount *parent_mnt,
58003+ const struct dentry *old_dentry,
58004+ const struct inode *old_parent_inode,
58005+ const struct vfsmount *old_mnt, const char *newname)
58006+{
58007+ return 0;
58008+}
58009+
58010+int
58011+gr_acl_handle_filldir(const struct file *file, const char *name,
58012+ const int namelen, const ino_t ino)
58013+{
58014+ return 1;
58015+}
58016+
58017+int
58018+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58019+ const time_t shm_createtime, const uid_t cuid, const int shmid)
58020+{
58021+ return 1;
58022+}
58023+
58024+int
58025+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
58026+{
58027+ return 0;
58028+}
58029+
58030+int
58031+gr_search_accept(const struct socket *sock)
58032+{
58033+ return 0;
58034+}
58035+
58036+int
58037+gr_search_listen(const struct socket *sock)
58038+{
58039+ return 0;
58040+}
58041+
58042+int
58043+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
58044+{
58045+ return 0;
58046+}
58047+
58048+__u32
58049+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
58050+{
58051+ return 1;
58052+}
58053+
58054+__u32
58055+gr_acl_handle_creat(const struct dentry * dentry,
58056+ const struct dentry * p_dentry,
58057+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58058+ const int imode)
58059+{
58060+ return 1;
58061+}
58062+
58063+void
58064+gr_acl_handle_exit(void)
58065+{
58066+ return;
58067+}
58068+
58069+int
58070+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58071+{
58072+ return 1;
58073+}
58074+
58075+void
58076+gr_set_role_label(const uid_t uid, const gid_t gid)
58077+{
58078+ return;
58079+}
58080+
58081+int
58082+gr_acl_handle_procpidmem(const struct task_struct *task)
58083+{
58084+ return 0;
58085+}
58086+
58087+int
58088+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
58089+{
58090+ return 0;
58091+}
58092+
58093+int
58094+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
58095+{
58096+ return 0;
58097+}
58098+
58099+void
58100+gr_set_kernel_label(struct task_struct *task)
58101+{
58102+ return;
58103+}
58104+
58105+int
58106+gr_check_user_change(int real, int effective, int fs)
58107+{
58108+ return 0;
58109+}
58110+
58111+int
58112+gr_check_group_change(int real, int effective, int fs)
58113+{
58114+ return 0;
58115+}
58116+
58117+int gr_acl_enable_at_secure(void)
58118+{
58119+ return 0;
58120+}
58121+
58122+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58123+{
58124+ return dentry->d_inode->i_sb->s_dev;
58125+}
58126+
58127+EXPORT_SYMBOL(gr_learn_resource);
58128+EXPORT_SYMBOL(gr_set_kernel_label);
58129+#ifdef CONFIG_SECURITY
58130+EXPORT_SYMBOL(gr_check_user_change);
58131+EXPORT_SYMBOL(gr_check_group_change);
58132+#endif
58133diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
58134new file mode 100644
58135index 0000000..abfa971
58136--- /dev/null
58137+++ b/grsecurity/grsec_exec.c
58138@@ -0,0 +1,174 @@
58139+#include <linux/kernel.h>
58140+#include <linux/sched.h>
58141+#include <linux/file.h>
58142+#include <linux/binfmts.h>
58143+#include <linux/fs.h>
58144+#include <linux/types.h>
58145+#include <linux/grdefs.h>
58146+#include <linux/grsecurity.h>
58147+#include <linux/grinternal.h>
58148+#include <linux/capability.h>
58149+#include <linux/module.h>
58150+
58151+#include <asm/uaccess.h>
58152+
58153+#ifdef CONFIG_GRKERNSEC_EXECLOG
58154+static char gr_exec_arg_buf[132];
58155+static DEFINE_MUTEX(gr_exec_arg_mutex);
58156+#endif
58157+
58158+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58159+
58160+void
58161+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58162+{
58163+#ifdef CONFIG_GRKERNSEC_EXECLOG
58164+ char *grarg = gr_exec_arg_buf;
58165+ unsigned int i, x, execlen = 0;
58166+ char c;
58167+
58168+ if (!((grsec_enable_execlog && grsec_enable_group &&
58169+ in_group_p(grsec_audit_gid))
58170+ || (grsec_enable_execlog && !grsec_enable_group)))
58171+ return;
58172+
58173+ mutex_lock(&gr_exec_arg_mutex);
58174+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
58175+
58176+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
58177+ const char __user *p;
58178+ unsigned int len;
58179+
58180+ p = get_user_arg_ptr(argv, i);
58181+ if (IS_ERR(p))
58182+ goto log;
58183+
58184+ len = strnlen_user(p, 128 - execlen);
58185+ if (len > 128 - execlen)
58186+ len = 128 - execlen;
58187+ else if (len > 0)
58188+ len--;
58189+ if (copy_from_user(grarg + execlen, p, len))
58190+ goto log;
58191+
58192+ /* rewrite unprintable characters */
58193+ for (x = 0; x < len; x++) {
58194+ c = *(grarg + execlen + x);
58195+ if (c < 32 || c > 126)
58196+ *(grarg + execlen + x) = ' ';
58197+ }
58198+
58199+ execlen += len;
58200+ *(grarg + execlen) = ' ';
58201+ *(grarg + execlen + 1) = '\0';
58202+ execlen++;
58203+ }
58204+
58205+ log:
58206+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
58207+ bprm->file->f_path.mnt, grarg);
58208+ mutex_unlock(&gr_exec_arg_mutex);
58209+#endif
58210+ return;
58211+}
58212+
58213+#ifdef CONFIG_GRKERNSEC
58214+extern int gr_acl_is_capable(const int cap);
58215+extern int gr_acl_is_capable_nolog(const int cap);
58216+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58217+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
58218+extern int gr_chroot_is_capable(const int cap);
58219+extern int gr_chroot_is_capable_nolog(const int cap);
58220+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
58221+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
58222+#endif
58223+
58224+const char *captab_log[] = {
58225+ "CAP_CHOWN",
58226+ "CAP_DAC_OVERRIDE",
58227+ "CAP_DAC_READ_SEARCH",
58228+ "CAP_FOWNER",
58229+ "CAP_FSETID",
58230+ "CAP_KILL",
58231+ "CAP_SETGID",
58232+ "CAP_SETUID",
58233+ "CAP_SETPCAP",
58234+ "CAP_LINUX_IMMUTABLE",
58235+ "CAP_NET_BIND_SERVICE",
58236+ "CAP_NET_BROADCAST",
58237+ "CAP_NET_ADMIN",
58238+ "CAP_NET_RAW",
58239+ "CAP_IPC_LOCK",
58240+ "CAP_IPC_OWNER",
58241+ "CAP_SYS_MODULE",
58242+ "CAP_SYS_RAWIO",
58243+ "CAP_SYS_CHROOT",
58244+ "CAP_SYS_PTRACE",
58245+ "CAP_SYS_PACCT",
58246+ "CAP_SYS_ADMIN",
58247+ "CAP_SYS_BOOT",
58248+ "CAP_SYS_NICE",
58249+ "CAP_SYS_RESOURCE",
58250+ "CAP_SYS_TIME",
58251+ "CAP_SYS_TTY_CONFIG",
58252+ "CAP_MKNOD",
58253+ "CAP_LEASE",
58254+ "CAP_AUDIT_WRITE",
58255+ "CAP_AUDIT_CONTROL",
58256+ "CAP_SETFCAP",
58257+ "CAP_MAC_OVERRIDE",
58258+ "CAP_MAC_ADMIN",
58259+ "CAP_SYSLOG",
58260+ "CAP_WAKE_ALARM"
58261+};
58262+
58263+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
58264+
58265+int gr_is_capable(const int cap)
58266+{
58267+#ifdef CONFIG_GRKERNSEC
58268+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58269+ return 1;
58270+ return 0;
58271+#else
58272+ return 1;
58273+#endif
58274+}
58275+
58276+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58277+{
58278+#ifdef CONFIG_GRKERNSEC
58279+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
58280+ return 1;
58281+ return 0;
58282+#else
58283+ return 1;
58284+#endif
58285+}
58286+
58287+int gr_is_capable_nolog(const int cap)
58288+{
58289+#ifdef CONFIG_GRKERNSEC
58290+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58291+ return 1;
58292+ return 0;
58293+#else
58294+ return 1;
58295+#endif
58296+}
58297+
58298+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
58299+{
58300+#ifdef CONFIG_GRKERNSEC
58301+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
58302+ return 1;
58303+ return 0;
58304+#else
58305+ return 1;
58306+#endif
58307+}
58308+
58309+EXPORT_SYMBOL(gr_is_capable);
58310+EXPORT_SYMBOL(gr_is_capable_nolog);
58311+EXPORT_SYMBOL(gr_task_is_capable);
58312+EXPORT_SYMBOL(gr_task_is_capable_nolog);
58313diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58314new file mode 100644
58315index 0000000..d3ee748
58316--- /dev/null
58317+++ b/grsecurity/grsec_fifo.c
58318@@ -0,0 +1,24 @@
58319+#include <linux/kernel.h>
58320+#include <linux/sched.h>
58321+#include <linux/fs.h>
58322+#include <linux/file.h>
58323+#include <linux/grinternal.h>
58324+
58325+int
58326+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58327+ const struct dentry *dir, const int flag, const int acc_mode)
58328+{
58329+#ifdef CONFIG_GRKERNSEC_FIFO
58330+ const struct cred *cred = current_cred();
58331+
58332+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58333+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58334+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58335+ (cred->fsuid != dentry->d_inode->i_uid)) {
58336+ if (!inode_permission(dentry->d_inode, acc_mode))
58337+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58338+ return -EACCES;
58339+ }
58340+#endif
58341+ return 0;
58342+}
58343diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58344new file mode 100644
58345index 0000000..8ca18bf
58346--- /dev/null
58347+++ b/grsecurity/grsec_fork.c
58348@@ -0,0 +1,23 @@
58349+#include <linux/kernel.h>
58350+#include <linux/sched.h>
58351+#include <linux/grsecurity.h>
58352+#include <linux/grinternal.h>
58353+#include <linux/errno.h>
58354+
58355+void
58356+gr_log_forkfail(const int retval)
58357+{
58358+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58359+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58360+ switch (retval) {
58361+ case -EAGAIN:
58362+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58363+ break;
58364+ case -ENOMEM:
58365+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58366+ break;
58367+ }
58368+ }
58369+#endif
58370+ return;
58371+}
58372diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58373new file mode 100644
58374index 0000000..01ddde4
58375--- /dev/null
58376+++ b/grsecurity/grsec_init.c
58377@@ -0,0 +1,277 @@
58378+#include <linux/kernel.h>
58379+#include <linux/sched.h>
58380+#include <linux/mm.h>
58381+#include <linux/gracl.h>
58382+#include <linux/slab.h>
58383+#include <linux/vmalloc.h>
58384+#include <linux/percpu.h>
58385+#include <linux/module.h>
58386+
58387+int grsec_enable_ptrace_readexec;
58388+int grsec_enable_setxid;
58389+int grsec_enable_brute;
58390+int grsec_enable_link;
58391+int grsec_enable_dmesg;
58392+int grsec_enable_harden_ptrace;
58393+int grsec_enable_fifo;
58394+int grsec_enable_execlog;
58395+int grsec_enable_signal;
58396+int grsec_enable_forkfail;
58397+int grsec_enable_audit_ptrace;
58398+int grsec_enable_time;
58399+int grsec_enable_audit_textrel;
58400+int grsec_enable_group;
58401+int grsec_audit_gid;
58402+int grsec_enable_chdir;
58403+int grsec_enable_mount;
58404+int grsec_enable_rofs;
58405+int grsec_enable_chroot_findtask;
58406+int grsec_enable_chroot_mount;
58407+int grsec_enable_chroot_shmat;
58408+int grsec_enable_chroot_fchdir;
58409+int grsec_enable_chroot_double;
58410+int grsec_enable_chroot_pivot;
58411+int grsec_enable_chroot_chdir;
58412+int grsec_enable_chroot_chmod;
58413+int grsec_enable_chroot_mknod;
58414+int grsec_enable_chroot_nice;
58415+int grsec_enable_chroot_execlog;
58416+int grsec_enable_chroot_caps;
58417+int grsec_enable_chroot_sysctl;
58418+int grsec_enable_chroot_unix;
58419+int grsec_enable_tpe;
58420+int grsec_tpe_gid;
58421+int grsec_enable_blackhole;
58422+#ifdef CONFIG_IPV6_MODULE
58423+EXPORT_SYMBOL(grsec_enable_blackhole);
58424+#endif
58425+int grsec_lastack_retries;
58426+int grsec_enable_tpe_all;
58427+int grsec_enable_tpe_invert;
58428+int grsec_enable_socket_all;
58429+int grsec_socket_all_gid;
58430+int grsec_enable_socket_client;
58431+int grsec_socket_client_gid;
58432+int grsec_enable_socket_server;
58433+int grsec_socket_server_gid;
58434+int grsec_resource_logging;
58435+int grsec_disable_privio;
58436+int grsec_enable_log_rwxmaps;
58437+int grsec_lock;
58438+
58439+DEFINE_SPINLOCK(grsec_alert_lock);
58440+unsigned long grsec_alert_wtime = 0;
58441+unsigned long grsec_alert_fyet = 0;
58442+
58443+DEFINE_SPINLOCK(grsec_audit_lock);
58444+
58445+DEFINE_RWLOCK(grsec_exec_file_lock);
58446+
58447+char *gr_shared_page[4];
58448+
58449+char *gr_alert_log_fmt;
58450+char *gr_audit_log_fmt;
58451+char *gr_alert_log_buf;
58452+char *gr_audit_log_buf;
58453+
58454+extern struct gr_arg *gr_usermode;
58455+extern unsigned char *gr_system_salt;
58456+extern unsigned char *gr_system_sum;
58457+
58458+void __init
58459+grsecurity_init(void)
58460+{
58461+ int j;
58462+ /* create the per-cpu shared pages */
58463+
58464+#ifdef CONFIG_X86
58465+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58466+#endif
58467+
58468+ for (j = 0; j < 4; j++) {
58469+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58470+ if (gr_shared_page[j] == NULL) {
58471+ panic("Unable to allocate grsecurity shared page");
58472+ return;
58473+ }
58474+ }
58475+
58476+ /* allocate log buffers */
58477+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58478+ if (!gr_alert_log_fmt) {
58479+ panic("Unable to allocate grsecurity alert log format buffer");
58480+ return;
58481+ }
58482+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58483+ if (!gr_audit_log_fmt) {
58484+ panic("Unable to allocate grsecurity audit log format buffer");
58485+ return;
58486+ }
58487+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58488+ if (!gr_alert_log_buf) {
58489+ panic("Unable to allocate grsecurity alert log buffer");
58490+ return;
58491+ }
58492+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58493+ if (!gr_audit_log_buf) {
58494+ panic("Unable to allocate grsecurity audit log buffer");
58495+ return;
58496+ }
58497+
58498+ /* allocate memory for authentication structure */
58499+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58500+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58501+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58502+
58503+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58504+ panic("Unable to allocate grsecurity authentication structure");
58505+ return;
58506+ }
58507+
58508+
58509+#ifdef CONFIG_GRKERNSEC_IO
58510+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58511+ grsec_disable_privio = 1;
58512+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58513+ grsec_disable_privio = 1;
58514+#else
58515+ grsec_disable_privio = 0;
58516+#endif
58517+#endif
58518+
58519+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58520+ /* for backward compatibility, tpe_invert always defaults to on if
58521+ enabled in the kernel
58522+ */
58523+ grsec_enable_tpe_invert = 1;
58524+#endif
58525+
58526+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58527+#ifndef CONFIG_GRKERNSEC_SYSCTL
58528+ grsec_lock = 1;
58529+#endif
58530+
58531+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58532+ grsec_enable_audit_textrel = 1;
58533+#endif
58534+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58535+ grsec_enable_log_rwxmaps = 1;
58536+#endif
58537+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58538+ grsec_enable_group = 1;
58539+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58540+#endif
58541+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58542+ grsec_enable_ptrace_readexec = 1;
58543+#endif
58544+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58545+ grsec_enable_chdir = 1;
58546+#endif
58547+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58548+ grsec_enable_harden_ptrace = 1;
58549+#endif
58550+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58551+ grsec_enable_mount = 1;
58552+#endif
58553+#ifdef CONFIG_GRKERNSEC_LINK
58554+ grsec_enable_link = 1;
58555+#endif
58556+#ifdef CONFIG_GRKERNSEC_BRUTE
58557+ grsec_enable_brute = 1;
58558+#endif
58559+#ifdef CONFIG_GRKERNSEC_DMESG
58560+ grsec_enable_dmesg = 1;
58561+#endif
58562+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58563+ grsec_enable_blackhole = 1;
58564+ grsec_lastack_retries = 4;
58565+#endif
58566+#ifdef CONFIG_GRKERNSEC_FIFO
58567+ grsec_enable_fifo = 1;
58568+#endif
58569+#ifdef CONFIG_GRKERNSEC_EXECLOG
58570+ grsec_enable_execlog = 1;
58571+#endif
58572+#ifdef CONFIG_GRKERNSEC_SETXID
58573+ grsec_enable_setxid = 1;
58574+#endif
58575+#ifdef CONFIG_GRKERNSEC_SIGNAL
58576+ grsec_enable_signal = 1;
58577+#endif
58578+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58579+ grsec_enable_forkfail = 1;
58580+#endif
58581+#ifdef CONFIG_GRKERNSEC_TIME
58582+ grsec_enable_time = 1;
58583+#endif
58584+#ifdef CONFIG_GRKERNSEC_RESLOG
58585+ grsec_resource_logging = 1;
58586+#endif
58587+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58588+ grsec_enable_chroot_findtask = 1;
58589+#endif
58590+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58591+ grsec_enable_chroot_unix = 1;
58592+#endif
58593+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58594+ grsec_enable_chroot_mount = 1;
58595+#endif
58596+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58597+ grsec_enable_chroot_fchdir = 1;
58598+#endif
58599+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58600+ grsec_enable_chroot_shmat = 1;
58601+#endif
58602+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58603+ grsec_enable_audit_ptrace = 1;
58604+#endif
58605+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58606+ grsec_enable_chroot_double = 1;
58607+#endif
58608+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58609+ grsec_enable_chroot_pivot = 1;
58610+#endif
58611+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58612+ grsec_enable_chroot_chdir = 1;
58613+#endif
58614+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58615+ grsec_enable_chroot_chmod = 1;
58616+#endif
58617+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58618+ grsec_enable_chroot_mknod = 1;
58619+#endif
58620+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58621+ grsec_enable_chroot_nice = 1;
58622+#endif
58623+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58624+ grsec_enable_chroot_execlog = 1;
58625+#endif
58626+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58627+ grsec_enable_chroot_caps = 1;
58628+#endif
58629+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58630+ grsec_enable_chroot_sysctl = 1;
58631+#endif
58632+#ifdef CONFIG_GRKERNSEC_TPE
58633+ grsec_enable_tpe = 1;
58634+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58635+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58636+ grsec_enable_tpe_all = 1;
58637+#endif
58638+#endif
58639+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58640+ grsec_enable_socket_all = 1;
58641+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58642+#endif
58643+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58644+ grsec_enable_socket_client = 1;
58645+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58646+#endif
58647+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58648+ grsec_enable_socket_server = 1;
58649+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58650+#endif
58651+#endif
58652+
58653+ return;
58654+}
58655diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58656new file mode 100644
58657index 0000000..3efe141
58658--- /dev/null
58659+++ b/grsecurity/grsec_link.c
58660@@ -0,0 +1,43 @@
58661+#include <linux/kernel.h>
58662+#include <linux/sched.h>
58663+#include <linux/fs.h>
58664+#include <linux/file.h>
58665+#include <linux/grinternal.h>
58666+
58667+int
58668+gr_handle_follow_link(const struct inode *parent,
58669+ const struct inode *inode,
58670+ const struct dentry *dentry, const struct vfsmount *mnt)
58671+{
58672+#ifdef CONFIG_GRKERNSEC_LINK
58673+ const struct cred *cred = current_cred();
58674+
58675+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58676+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
58677+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
58678+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58679+ return -EACCES;
58680+ }
58681+#endif
58682+ return 0;
58683+}
58684+
58685+int
58686+gr_handle_hardlink(const struct dentry *dentry,
58687+ const struct vfsmount *mnt,
58688+ struct inode *inode, const int mode, const char *to)
58689+{
58690+#ifdef CONFIG_GRKERNSEC_LINK
58691+ const struct cred *cred = current_cred();
58692+
58693+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
58694+ (!S_ISREG(mode) || (mode & S_ISUID) ||
58695+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
58696+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58697+ !capable(CAP_FOWNER) && cred->uid) {
58698+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58699+ return -EPERM;
58700+ }
58701+#endif
58702+ return 0;
58703+}
58704diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58705new file mode 100644
58706index 0000000..a45d2e9
58707--- /dev/null
58708+++ b/grsecurity/grsec_log.c
58709@@ -0,0 +1,322 @@
58710+#include <linux/kernel.h>
58711+#include <linux/sched.h>
58712+#include <linux/file.h>
58713+#include <linux/tty.h>
58714+#include <linux/fs.h>
58715+#include <linux/grinternal.h>
58716+
58717+#ifdef CONFIG_TREE_PREEMPT_RCU
58718+#define DISABLE_PREEMPT() preempt_disable()
58719+#define ENABLE_PREEMPT() preempt_enable()
58720+#else
58721+#define DISABLE_PREEMPT()
58722+#define ENABLE_PREEMPT()
58723+#endif
58724+
58725+#define BEGIN_LOCKS(x) \
58726+ DISABLE_PREEMPT(); \
58727+ rcu_read_lock(); \
58728+ read_lock(&tasklist_lock); \
58729+ read_lock(&grsec_exec_file_lock); \
58730+ if (x != GR_DO_AUDIT) \
58731+ spin_lock(&grsec_alert_lock); \
58732+ else \
58733+ spin_lock(&grsec_audit_lock)
58734+
58735+#define END_LOCKS(x) \
58736+ if (x != GR_DO_AUDIT) \
58737+ spin_unlock(&grsec_alert_lock); \
58738+ else \
58739+ spin_unlock(&grsec_audit_lock); \
58740+ read_unlock(&grsec_exec_file_lock); \
58741+ read_unlock(&tasklist_lock); \
58742+ rcu_read_unlock(); \
58743+ ENABLE_PREEMPT(); \
58744+ if (x == GR_DONT_AUDIT) \
58745+ gr_handle_alertkill(current)
58746+
58747+enum {
58748+ FLOODING,
58749+ NO_FLOODING
58750+};
58751+
58752+extern char *gr_alert_log_fmt;
58753+extern char *gr_audit_log_fmt;
58754+extern char *gr_alert_log_buf;
58755+extern char *gr_audit_log_buf;
58756+
58757+static int gr_log_start(int audit)
58758+{
58759+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58760+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58761+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58762+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58763+ unsigned long curr_secs = get_seconds();
58764+
58765+ if (audit == GR_DO_AUDIT)
58766+ goto set_fmt;
58767+
58768+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58769+ grsec_alert_wtime = curr_secs;
58770+ grsec_alert_fyet = 0;
58771+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58772+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58773+ grsec_alert_fyet++;
58774+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58775+ grsec_alert_wtime = curr_secs;
58776+ grsec_alert_fyet++;
58777+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58778+ return FLOODING;
58779+ }
58780+ else return FLOODING;
58781+
58782+set_fmt:
58783+#endif
58784+ memset(buf, 0, PAGE_SIZE);
58785+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
58786+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58787+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58788+ } else if (current->signal->curr_ip) {
58789+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58790+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58791+ } else if (gr_acl_is_enabled()) {
58792+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58793+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58794+ } else {
58795+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
58796+ strcpy(buf, fmt);
58797+ }
58798+
58799+ return NO_FLOODING;
58800+}
58801+
58802+static void gr_log_middle(int audit, const char *msg, va_list ap)
58803+ __attribute__ ((format (printf, 2, 0)));
58804+
58805+static void gr_log_middle(int audit, const char *msg, va_list ap)
58806+{
58807+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58808+ unsigned int len = strlen(buf);
58809+
58810+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58811+
58812+ return;
58813+}
58814+
58815+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58816+ __attribute__ ((format (printf, 2, 3)));
58817+
58818+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58819+{
58820+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58821+ unsigned int len = strlen(buf);
58822+ va_list ap;
58823+
58824+ va_start(ap, msg);
58825+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58826+ va_end(ap);
58827+
58828+ return;
58829+}
58830+
58831+static void gr_log_end(int audit, int append_default)
58832+{
58833+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58834+
58835+ if (append_default) {
58836+ unsigned int len = strlen(buf);
58837+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58838+ }
58839+
58840+ printk("%s\n", buf);
58841+
58842+ return;
58843+}
58844+
58845+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58846+{
58847+ int logtype;
58848+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58849+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58850+ void *voidptr = NULL;
58851+ int num1 = 0, num2 = 0;
58852+ unsigned long ulong1 = 0, ulong2 = 0;
58853+ struct dentry *dentry = NULL;
58854+ struct vfsmount *mnt = NULL;
58855+ struct file *file = NULL;
58856+ struct task_struct *task = NULL;
58857+ const struct cred *cred, *pcred;
58858+ va_list ap;
58859+
58860+ BEGIN_LOCKS(audit);
58861+ logtype = gr_log_start(audit);
58862+ if (logtype == FLOODING) {
58863+ END_LOCKS(audit);
58864+ return;
58865+ }
58866+ va_start(ap, argtypes);
58867+ switch (argtypes) {
58868+ case GR_TTYSNIFF:
58869+ task = va_arg(ap, struct task_struct *);
58870+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58871+ break;
58872+ case GR_SYSCTL_HIDDEN:
58873+ str1 = va_arg(ap, char *);
58874+ gr_log_middle_varargs(audit, msg, result, str1);
58875+ break;
58876+ case GR_RBAC:
58877+ dentry = va_arg(ap, struct dentry *);
58878+ mnt = va_arg(ap, struct vfsmount *);
58879+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58880+ break;
58881+ case GR_RBAC_STR:
58882+ dentry = va_arg(ap, struct dentry *);
58883+ mnt = va_arg(ap, struct vfsmount *);
58884+ str1 = va_arg(ap, char *);
58885+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58886+ break;
58887+ case GR_STR_RBAC:
58888+ str1 = va_arg(ap, char *);
58889+ dentry = va_arg(ap, struct dentry *);
58890+ mnt = va_arg(ap, struct vfsmount *);
58891+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58892+ break;
58893+ case GR_RBAC_MODE2:
58894+ dentry = va_arg(ap, struct dentry *);
58895+ mnt = va_arg(ap, struct vfsmount *);
58896+ str1 = va_arg(ap, char *);
58897+ str2 = va_arg(ap, char *);
58898+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58899+ break;
58900+ case GR_RBAC_MODE3:
58901+ dentry = va_arg(ap, struct dentry *);
58902+ mnt = va_arg(ap, struct vfsmount *);
58903+ str1 = va_arg(ap, char *);
58904+ str2 = va_arg(ap, char *);
58905+ str3 = va_arg(ap, char *);
58906+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58907+ break;
58908+ case GR_FILENAME:
58909+ dentry = va_arg(ap, struct dentry *);
58910+ mnt = va_arg(ap, struct vfsmount *);
58911+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58912+ break;
58913+ case GR_STR_FILENAME:
58914+ str1 = va_arg(ap, char *);
58915+ dentry = va_arg(ap, struct dentry *);
58916+ mnt = va_arg(ap, struct vfsmount *);
58917+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58918+ break;
58919+ case GR_FILENAME_STR:
58920+ dentry = va_arg(ap, struct dentry *);
58921+ mnt = va_arg(ap, struct vfsmount *);
58922+ str1 = va_arg(ap, char *);
58923+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58924+ break;
58925+ case GR_FILENAME_TWO_INT:
58926+ dentry = va_arg(ap, struct dentry *);
58927+ mnt = va_arg(ap, struct vfsmount *);
58928+ num1 = va_arg(ap, int);
58929+ num2 = va_arg(ap, int);
58930+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58931+ break;
58932+ case GR_FILENAME_TWO_INT_STR:
58933+ dentry = va_arg(ap, struct dentry *);
58934+ mnt = va_arg(ap, struct vfsmount *);
58935+ num1 = va_arg(ap, int);
58936+ num2 = va_arg(ap, int);
58937+ str1 = va_arg(ap, char *);
58938+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58939+ break;
58940+ case GR_TEXTREL:
58941+ file = va_arg(ap, struct file *);
58942+ ulong1 = va_arg(ap, unsigned long);
58943+ ulong2 = va_arg(ap, unsigned long);
58944+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58945+ break;
58946+ case GR_PTRACE:
58947+ task = va_arg(ap, struct task_struct *);
58948+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58949+ break;
58950+ case GR_RESOURCE:
58951+ task = va_arg(ap, struct task_struct *);
58952+ cred = __task_cred(task);
58953+ pcred = __task_cred(task->real_parent);
58954+ ulong1 = va_arg(ap, unsigned long);
58955+ str1 = va_arg(ap, char *);
58956+ ulong2 = va_arg(ap, unsigned long);
58957+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58958+ break;
58959+ case GR_CAP:
58960+ task = va_arg(ap, struct task_struct *);
58961+ cred = __task_cred(task);
58962+ pcred = __task_cred(task->real_parent);
58963+ str1 = va_arg(ap, char *);
58964+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58965+ break;
58966+ case GR_SIG:
58967+ str1 = va_arg(ap, char *);
58968+ voidptr = va_arg(ap, void *);
58969+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58970+ break;
58971+ case GR_SIG2:
58972+ task = va_arg(ap, struct task_struct *);
58973+ cred = __task_cred(task);
58974+ pcred = __task_cred(task->real_parent);
58975+ num1 = va_arg(ap, int);
58976+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58977+ break;
58978+ case GR_CRASH1:
58979+ task = va_arg(ap, struct task_struct *);
58980+ cred = __task_cred(task);
58981+ pcred = __task_cred(task->real_parent);
58982+ ulong1 = va_arg(ap, unsigned long);
58983+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58984+ break;
58985+ case GR_CRASH2:
58986+ task = va_arg(ap, struct task_struct *);
58987+ cred = __task_cred(task);
58988+ pcred = __task_cred(task->real_parent);
58989+ ulong1 = va_arg(ap, unsigned long);
58990+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58991+ break;
58992+ case GR_RWXMAP:
58993+ file = va_arg(ap, struct file *);
58994+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58995+ break;
58996+ case GR_PSACCT:
58997+ {
58998+ unsigned int wday, cday;
58999+ __u8 whr, chr;
59000+ __u8 wmin, cmin;
59001+ __u8 wsec, csec;
59002+ char cur_tty[64] = { 0 };
59003+ char parent_tty[64] = { 0 };
59004+
59005+ task = va_arg(ap, struct task_struct *);
59006+ wday = va_arg(ap, unsigned int);
59007+ cday = va_arg(ap, unsigned int);
59008+ whr = va_arg(ap, int);
59009+ chr = va_arg(ap, int);
59010+ wmin = va_arg(ap, int);
59011+ cmin = va_arg(ap, int);
59012+ wsec = va_arg(ap, int);
59013+ csec = va_arg(ap, int);
59014+ ulong1 = va_arg(ap, unsigned long);
59015+ cred = __task_cred(task);
59016+ pcred = __task_cred(task->real_parent);
59017+
59018+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
59019+ }
59020+ break;
59021+ default:
59022+ gr_log_middle(audit, msg, ap);
59023+ }
59024+ va_end(ap);
59025+ // these don't need DEFAULTSECARGS printed on the end
59026+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
59027+ gr_log_end(audit, 0);
59028+ else
59029+ gr_log_end(audit, 1);
59030+ END_LOCKS(audit);
59031+}
59032diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
59033new file mode 100644
59034index 0000000..f536303
59035--- /dev/null
59036+++ b/grsecurity/grsec_mem.c
59037@@ -0,0 +1,40 @@
59038+#include <linux/kernel.h>
59039+#include <linux/sched.h>
59040+#include <linux/mm.h>
59041+#include <linux/mman.h>
59042+#include <linux/grinternal.h>
59043+
59044+void
59045+gr_handle_ioperm(void)
59046+{
59047+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
59048+ return;
59049+}
59050+
59051+void
59052+gr_handle_iopl(void)
59053+{
59054+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
59055+ return;
59056+}
59057+
59058+void
59059+gr_handle_mem_readwrite(u64 from, u64 to)
59060+{
59061+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
59062+ return;
59063+}
59064+
59065+void
59066+gr_handle_vm86(void)
59067+{
59068+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
59069+ return;
59070+}
59071+
59072+void
59073+gr_log_badprocpid(const char *entry)
59074+{
59075+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
59076+ return;
59077+}
59078diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
59079new file mode 100644
59080index 0000000..2131422
59081--- /dev/null
59082+++ b/grsecurity/grsec_mount.c
59083@@ -0,0 +1,62 @@
59084+#include <linux/kernel.h>
59085+#include <linux/sched.h>
59086+#include <linux/mount.h>
59087+#include <linux/grsecurity.h>
59088+#include <linux/grinternal.h>
59089+
59090+void
59091+gr_log_remount(const char *devname, const int retval)
59092+{
59093+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59094+ if (grsec_enable_mount && (retval >= 0))
59095+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
59096+#endif
59097+ return;
59098+}
59099+
59100+void
59101+gr_log_unmount(const char *devname, const int retval)
59102+{
59103+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59104+ if (grsec_enable_mount && (retval >= 0))
59105+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
59106+#endif
59107+ return;
59108+}
59109+
59110+void
59111+gr_log_mount(const char *from, const char *to, const int retval)
59112+{
59113+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59114+ if (grsec_enable_mount && (retval >= 0))
59115+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
59116+#endif
59117+ return;
59118+}
59119+
59120+int
59121+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
59122+{
59123+#ifdef CONFIG_GRKERNSEC_ROFS
59124+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
59125+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
59126+ return -EPERM;
59127+ } else
59128+ return 0;
59129+#endif
59130+ return 0;
59131+}
59132+
59133+int
59134+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
59135+{
59136+#ifdef CONFIG_GRKERNSEC_ROFS
59137+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
59138+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
59139+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
59140+ return -EPERM;
59141+ } else
59142+ return 0;
59143+#endif
59144+ return 0;
59145+}
59146diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
59147new file mode 100644
59148index 0000000..a3b12a0
59149--- /dev/null
59150+++ b/grsecurity/grsec_pax.c
59151@@ -0,0 +1,36 @@
59152+#include <linux/kernel.h>
59153+#include <linux/sched.h>
59154+#include <linux/mm.h>
59155+#include <linux/file.h>
59156+#include <linux/grinternal.h>
59157+#include <linux/grsecurity.h>
59158+
59159+void
59160+gr_log_textrel(struct vm_area_struct * vma)
59161+{
59162+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59163+ if (grsec_enable_audit_textrel)
59164+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
59165+#endif
59166+ return;
59167+}
59168+
59169+void
59170+gr_log_rwxmmap(struct file *file)
59171+{
59172+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59173+ if (grsec_enable_log_rwxmaps)
59174+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
59175+#endif
59176+ return;
59177+}
59178+
59179+void
59180+gr_log_rwxmprotect(struct file *file)
59181+{
59182+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59183+ if (grsec_enable_log_rwxmaps)
59184+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
59185+#endif
59186+ return;
59187+}
59188diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
59189new file mode 100644
59190index 0000000..f7f29aa
59191--- /dev/null
59192+++ b/grsecurity/grsec_ptrace.c
59193@@ -0,0 +1,30 @@
59194+#include <linux/kernel.h>
59195+#include <linux/sched.h>
59196+#include <linux/grinternal.h>
59197+#include <linux/security.h>
59198+
59199+void
59200+gr_audit_ptrace(struct task_struct *task)
59201+{
59202+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59203+ if (grsec_enable_audit_ptrace)
59204+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
59205+#endif
59206+ return;
59207+}
59208+
59209+int
59210+gr_ptrace_readexec(struct file *file, int unsafe_flags)
59211+{
59212+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59213+ const struct dentry *dentry = file->f_path.dentry;
59214+ const struct vfsmount *mnt = file->f_path.mnt;
59215+
59216+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
59217+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
59218+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
59219+ return -EACCES;
59220+ }
59221+#endif
59222+ return 0;
59223+}
59224diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59225new file mode 100644
59226index 0000000..7a5b2de
59227--- /dev/null
59228+++ b/grsecurity/grsec_sig.c
59229@@ -0,0 +1,207 @@
59230+#include <linux/kernel.h>
59231+#include <linux/sched.h>
59232+#include <linux/delay.h>
59233+#include <linux/grsecurity.h>
59234+#include <linux/grinternal.h>
59235+#include <linux/hardirq.h>
59236+
59237+char *signames[] = {
59238+ [SIGSEGV] = "Segmentation fault",
59239+ [SIGILL] = "Illegal instruction",
59240+ [SIGABRT] = "Abort",
59241+ [SIGBUS] = "Invalid alignment/Bus error"
59242+};
59243+
59244+void
59245+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59246+{
59247+#ifdef CONFIG_GRKERNSEC_SIGNAL
59248+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59249+ (sig == SIGABRT) || (sig == SIGBUS))) {
59250+ if (t->pid == current->pid) {
59251+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59252+ } else {
59253+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59254+ }
59255+ }
59256+#endif
59257+ return;
59258+}
59259+
59260+int
59261+gr_handle_signal(const struct task_struct *p, const int sig)
59262+{
59263+#ifdef CONFIG_GRKERNSEC
59264+ /* ignore the 0 signal for protected task checks */
59265+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
59266+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59267+ return -EPERM;
59268+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59269+ return -EPERM;
59270+ }
59271+#endif
59272+ return 0;
59273+}
59274+
59275+#ifdef CONFIG_GRKERNSEC
59276+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59277+
59278+int gr_fake_force_sig(int sig, struct task_struct *t)
59279+{
59280+ unsigned long int flags;
59281+ int ret, blocked, ignored;
59282+ struct k_sigaction *action;
59283+
59284+ spin_lock_irqsave(&t->sighand->siglock, flags);
59285+ action = &t->sighand->action[sig-1];
59286+ ignored = action->sa.sa_handler == SIG_IGN;
59287+ blocked = sigismember(&t->blocked, sig);
59288+ if (blocked || ignored) {
59289+ action->sa.sa_handler = SIG_DFL;
59290+ if (blocked) {
59291+ sigdelset(&t->blocked, sig);
59292+ recalc_sigpending_and_wake(t);
59293+ }
59294+ }
59295+ if (action->sa.sa_handler == SIG_DFL)
59296+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
59297+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59298+
59299+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
59300+
59301+ return ret;
59302+}
59303+#endif
59304+
59305+#ifdef CONFIG_GRKERNSEC_BRUTE
59306+#define GR_USER_BAN_TIME (15 * 60)
59307+
59308+static int __get_dumpable(unsigned long mm_flags)
59309+{
59310+ int ret;
59311+
59312+ ret = mm_flags & MMF_DUMPABLE_MASK;
59313+ return (ret >= 2) ? 2 : ret;
59314+}
59315+#endif
59316+
59317+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
59318+{
59319+#ifdef CONFIG_GRKERNSEC_BRUTE
59320+ uid_t uid = 0;
59321+
59322+ if (!grsec_enable_brute)
59323+ return;
59324+
59325+ rcu_read_lock();
59326+ read_lock(&tasklist_lock);
59327+ read_lock(&grsec_exec_file_lock);
59328+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59329+ p->real_parent->brute = 1;
59330+ else {
59331+ const struct cred *cred = __task_cred(p), *cred2;
59332+ struct task_struct *tsk, *tsk2;
59333+
59334+ if (!__get_dumpable(mm_flags) && cred->uid) {
59335+ struct user_struct *user;
59336+
59337+ uid = cred->uid;
59338+
59339+ /* this is put upon execution past expiration */
59340+ user = find_user(uid);
59341+ if (user == NULL)
59342+ goto unlock;
59343+ user->banned = 1;
59344+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59345+ if (user->ban_expires == ~0UL)
59346+ user->ban_expires--;
59347+
59348+ do_each_thread(tsk2, tsk) {
59349+ cred2 = __task_cred(tsk);
59350+ if (tsk != p && cred2->uid == uid)
59351+ gr_fake_force_sig(SIGKILL, tsk);
59352+ } while_each_thread(tsk2, tsk);
59353+ }
59354+ }
59355+unlock:
59356+ read_unlock(&grsec_exec_file_lock);
59357+ read_unlock(&tasklist_lock);
59358+ rcu_read_unlock();
59359+
59360+ if (uid)
59361+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
59362+
59363+#endif
59364+ return;
59365+}
59366+
59367+void gr_handle_brute_check(void)
59368+{
59369+#ifdef CONFIG_GRKERNSEC_BRUTE
59370+ if (current->brute)
59371+ msleep(30 * 1000);
59372+#endif
59373+ return;
59374+}
59375+
59376+void gr_handle_kernel_exploit(void)
59377+{
59378+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59379+ const struct cred *cred;
59380+ struct task_struct *tsk, *tsk2;
59381+ struct user_struct *user;
59382+ uid_t uid;
59383+
59384+ if (in_irq() || in_serving_softirq() || in_nmi())
59385+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59386+
59387+ uid = current_uid();
59388+
59389+ if (uid == 0)
59390+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
59391+ else {
59392+ /* kill all the processes of this user, hold a reference
59393+ to their creds struct, and prevent them from creating
59394+ another process until system reset
59395+ */
59396+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
59397+ /* we intentionally leak this ref */
59398+ user = get_uid(current->cred->user);
59399+ if (user) {
59400+ user->banned = 1;
59401+ user->ban_expires = ~0UL;
59402+ }
59403+
59404+ read_lock(&tasklist_lock);
59405+ do_each_thread(tsk2, tsk) {
59406+ cred = __task_cred(tsk);
59407+ if (cred->uid == uid)
59408+ gr_fake_force_sig(SIGKILL, tsk);
59409+ } while_each_thread(tsk2, tsk);
59410+ read_unlock(&tasklist_lock);
59411+ }
59412+#endif
59413+}
59414+
59415+int __gr_process_user_ban(struct user_struct *user)
59416+{
59417+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59418+ if (unlikely(user->banned)) {
59419+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59420+ user->banned = 0;
59421+ user->ban_expires = 0;
59422+ free_uid(user);
59423+ } else
59424+ return -EPERM;
59425+ }
59426+#endif
59427+ return 0;
59428+}
59429+
59430+int gr_process_user_ban(void)
59431+{
59432+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59433+ return __gr_process_user_ban(current->cred->user);
59434+#endif
59435+ return 0;
59436+}
59437diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59438new file mode 100644
59439index 0000000..4030d57
59440--- /dev/null
59441+++ b/grsecurity/grsec_sock.c
59442@@ -0,0 +1,244 @@
59443+#include <linux/kernel.h>
59444+#include <linux/module.h>
59445+#include <linux/sched.h>
59446+#include <linux/file.h>
59447+#include <linux/net.h>
59448+#include <linux/in.h>
59449+#include <linux/ip.h>
59450+#include <net/sock.h>
59451+#include <net/inet_sock.h>
59452+#include <linux/grsecurity.h>
59453+#include <linux/grinternal.h>
59454+#include <linux/gracl.h>
59455+
59456+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59457+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59458+
59459+EXPORT_SYMBOL(gr_search_udp_recvmsg);
59460+EXPORT_SYMBOL(gr_search_udp_sendmsg);
59461+
59462+#ifdef CONFIG_UNIX_MODULE
59463+EXPORT_SYMBOL(gr_acl_handle_unix);
59464+EXPORT_SYMBOL(gr_acl_handle_mknod);
59465+EXPORT_SYMBOL(gr_handle_chroot_unix);
59466+EXPORT_SYMBOL(gr_handle_create);
59467+#endif
59468+
59469+#ifdef CONFIG_GRKERNSEC
59470+#define gr_conn_table_size 32749
59471+struct conn_table_entry {
59472+ struct conn_table_entry *next;
59473+ struct signal_struct *sig;
59474+};
59475+
59476+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59477+DEFINE_SPINLOCK(gr_conn_table_lock);
59478+
59479+extern const char * gr_socktype_to_name(unsigned char type);
59480+extern const char * gr_proto_to_name(unsigned char proto);
59481+extern const char * gr_sockfamily_to_name(unsigned char family);
59482+
59483+static __inline__ int
59484+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59485+{
59486+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59487+}
59488+
59489+static __inline__ int
59490+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59491+ __u16 sport, __u16 dport)
59492+{
59493+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59494+ sig->gr_sport == sport && sig->gr_dport == dport))
59495+ return 1;
59496+ else
59497+ return 0;
59498+}
59499+
59500+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59501+{
59502+ struct conn_table_entry **match;
59503+ unsigned int index;
59504+
59505+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59506+ sig->gr_sport, sig->gr_dport,
59507+ gr_conn_table_size);
59508+
59509+ newent->sig = sig;
59510+
59511+ match = &gr_conn_table[index];
59512+ newent->next = *match;
59513+ *match = newent;
59514+
59515+ return;
59516+}
59517+
59518+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59519+{
59520+ struct conn_table_entry *match, *last = NULL;
59521+ unsigned int index;
59522+
59523+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59524+ sig->gr_sport, sig->gr_dport,
59525+ gr_conn_table_size);
59526+
59527+ match = gr_conn_table[index];
59528+ while (match && !conn_match(match->sig,
59529+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59530+ sig->gr_dport)) {
59531+ last = match;
59532+ match = match->next;
59533+ }
59534+
59535+ if (match) {
59536+ if (last)
59537+ last->next = match->next;
59538+ else
59539+ gr_conn_table[index] = NULL;
59540+ kfree(match);
59541+ }
59542+
59543+ return;
59544+}
59545+
59546+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59547+ __u16 sport, __u16 dport)
59548+{
59549+ struct conn_table_entry *match;
59550+ unsigned int index;
59551+
59552+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59553+
59554+ match = gr_conn_table[index];
59555+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59556+ match = match->next;
59557+
59558+ if (match)
59559+ return match->sig;
59560+ else
59561+ return NULL;
59562+}
59563+
59564+#endif
59565+
59566+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59567+{
59568+#ifdef CONFIG_GRKERNSEC
59569+ struct signal_struct *sig = task->signal;
59570+ struct conn_table_entry *newent;
59571+
59572+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59573+ if (newent == NULL)
59574+ return;
59575+ /* no bh lock needed since we are called with bh disabled */
59576+ spin_lock(&gr_conn_table_lock);
59577+ gr_del_task_from_ip_table_nolock(sig);
59578+ sig->gr_saddr = inet->inet_rcv_saddr;
59579+ sig->gr_daddr = inet->inet_daddr;
59580+ sig->gr_sport = inet->inet_sport;
59581+ sig->gr_dport = inet->inet_dport;
59582+ gr_add_to_task_ip_table_nolock(sig, newent);
59583+ spin_unlock(&gr_conn_table_lock);
59584+#endif
59585+ return;
59586+}
59587+
59588+void gr_del_task_from_ip_table(struct task_struct *task)
59589+{
59590+#ifdef CONFIG_GRKERNSEC
59591+ spin_lock_bh(&gr_conn_table_lock);
59592+ gr_del_task_from_ip_table_nolock(task->signal);
59593+ spin_unlock_bh(&gr_conn_table_lock);
59594+#endif
59595+ return;
59596+}
59597+
59598+void
59599+gr_attach_curr_ip(const struct sock *sk)
59600+{
59601+#ifdef CONFIG_GRKERNSEC
59602+ struct signal_struct *p, *set;
59603+ const struct inet_sock *inet = inet_sk(sk);
59604+
59605+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59606+ return;
59607+
59608+ set = current->signal;
59609+
59610+ spin_lock_bh(&gr_conn_table_lock);
59611+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59612+ inet->inet_dport, inet->inet_sport);
59613+ if (unlikely(p != NULL)) {
59614+ set->curr_ip = p->curr_ip;
59615+ set->used_accept = 1;
59616+ gr_del_task_from_ip_table_nolock(p);
59617+ spin_unlock_bh(&gr_conn_table_lock);
59618+ return;
59619+ }
59620+ spin_unlock_bh(&gr_conn_table_lock);
59621+
59622+ set->curr_ip = inet->inet_daddr;
59623+ set->used_accept = 1;
59624+#endif
59625+ return;
59626+}
59627+
59628+int
59629+gr_handle_sock_all(const int family, const int type, const int protocol)
59630+{
59631+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59632+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59633+ (family != AF_UNIX)) {
59634+ if (family == AF_INET)
59635+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59636+ else
59637+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59638+ return -EACCES;
59639+ }
59640+#endif
59641+ return 0;
59642+}
59643+
59644+int
59645+gr_handle_sock_server(const struct sockaddr *sck)
59646+{
59647+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59648+ if (grsec_enable_socket_server &&
59649+ in_group_p(grsec_socket_server_gid) &&
59650+ sck && (sck->sa_family != AF_UNIX) &&
59651+ (sck->sa_family != AF_LOCAL)) {
59652+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59653+ return -EACCES;
59654+ }
59655+#endif
59656+ return 0;
59657+}
59658+
59659+int
59660+gr_handle_sock_server_other(const struct sock *sck)
59661+{
59662+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59663+ if (grsec_enable_socket_server &&
59664+ in_group_p(grsec_socket_server_gid) &&
59665+ sck && (sck->sk_family != AF_UNIX) &&
59666+ (sck->sk_family != AF_LOCAL)) {
59667+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59668+ return -EACCES;
59669+ }
59670+#endif
59671+ return 0;
59672+}
59673+
59674+int
59675+gr_handle_sock_client(const struct sockaddr *sck)
59676+{
59677+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59678+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59679+ sck && (sck->sa_family != AF_UNIX) &&
59680+ (sck->sa_family != AF_LOCAL)) {
59681+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59682+ return -EACCES;
59683+ }
59684+#endif
59685+ return 0;
59686+}
59687diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59688new file mode 100644
59689index 0000000..a1aedd7
59690--- /dev/null
59691+++ b/grsecurity/grsec_sysctl.c
59692@@ -0,0 +1,451 @@
59693+#include <linux/kernel.h>
59694+#include <linux/sched.h>
59695+#include <linux/sysctl.h>
59696+#include <linux/grsecurity.h>
59697+#include <linux/grinternal.h>
59698+
59699+int
59700+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59701+{
59702+#ifdef CONFIG_GRKERNSEC_SYSCTL
59703+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59704+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59705+ return -EACCES;
59706+ }
59707+#endif
59708+ return 0;
59709+}
59710+
59711+#ifdef CONFIG_GRKERNSEC_ROFS
59712+static int __maybe_unused one = 1;
59713+#endif
59714+
59715+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59716+struct ctl_table grsecurity_table[] = {
59717+#ifdef CONFIG_GRKERNSEC_SYSCTL
59718+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59719+#ifdef CONFIG_GRKERNSEC_IO
59720+ {
59721+ .procname = "disable_priv_io",
59722+ .data = &grsec_disable_privio,
59723+ .maxlen = sizeof(int),
59724+ .mode = 0600,
59725+ .proc_handler = &proc_dointvec,
59726+ },
59727+#endif
59728+#endif
59729+#ifdef CONFIG_GRKERNSEC_LINK
59730+ {
59731+ .procname = "linking_restrictions",
59732+ .data = &grsec_enable_link,
59733+ .maxlen = sizeof(int),
59734+ .mode = 0600,
59735+ .proc_handler = &proc_dointvec,
59736+ },
59737+#endif
59738+#ifdef CONFIG_GRKERNSEC_BRUTE
59739+ {
59740+ .procname = "deter_bruteforce",
59741+ .data = &grsec_enable_brute,
59742+ .maxlen = sizeof(int),
59743+ .mode = 0600,
59744+ .proc_handler = &proc_dointvec,
59745+ },
59746+#endif
59747+#ifdef CONFIG_GRKERNSEC_FIFO
59748+ {
59749+ .procname = "fifo_restrictions",
59750+ .data = &grsec_enable_fifo,
59751+ .maxlen = sizeof(int),
59752+ .mode = 0600,
59753+ .proc_handler = &proc_dointvec,
59754+ },
59755+#endif
59756+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59757+ {
59758+ .procname = "ptrace_readexec",
59759+ .data = &grsec_enable_ptrace_readexec,
59760+ .maxlen = sizeof(int),
59761+ .mode = 0600,
59762+ .proc_handler = &proc_dointvec,
59763+ },
59764+#endif
59765+#ifdef CONFIG_GRKERNSEC_SETXID
59766+ {
59767+ .procname = "consistent_setxid",
59768+ .data = &grsec_enable_setxid,
59769+ .maxlen = sizeof(int),
59770+ .mode = 0600,
59771+ .proc_handler = &proc_dointvec,
59772+ },
59773+#endif
59774+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59775+ {
59776+ .procname = "ip_blackhole",
59777+ .data = &grsec_enable_blackhole,
59778+ .maxlen = sizeof(int),
59779+ .mode = 0600,
59780+ .proc_handler = &proc_dointvec,
59781+ },
59782+ {
59783+ .procname = "lastack_retries",
59784+ .data = &grsec_lastack_retries,
59785+ .maxlen = sizeof(int),
59786+ .mode = 0600,
59787+ .proc_handler = &proc_dointvec,
59788+ },
59789+#endif
59790+#ifdef CONFIG_GRKERNSEC_EXECLOG
59791+ {
59792+ .procname = "exec_logging",
59793+ .data = &grsec_enable_execlog,
59794+ .maxlen = sizeof(int),
59795+ .mode = 0600,
59796+ .proc_handler = &proc_dointvec,
59797+ },
59798+#endif
59799+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59800+ {
59801+ .procname = "rwxmap_logging",
59802+ .data = &grsec_enable_log_rwxmaps,
59803+ .maxlen = sizeof(int),
59804+ .mode = 0600,
59805+ .proc_handler = &proc_dointvec,
59806+ },
59807+#endif
59808+#ifdef CONFIG_GRKERNSEC_SIGNAL
59809+ {
59810+ .procname = "signal_logging",
59811+ .data = &grsec_enable_signal,
59812+ .maxlen = sizeof(int),
59813+ .mode = 0600,
59814+ .proc_handler = &proc_dointvec,
59815+ },
59816+#endif
59817+#ifdef CONFIG_GRKERNSEC_FORKFAIL
59818+ {
59819+ .procname = "forkfail_logging",
59820+ .data = &grsec_enable_forkfail,
59821+ .maxlen = sizeof(int),
59822+ .mode = 0600,
59823+ .proc_handler = &proc_dointvec,
59824+ },
59825+#endif
59826+#ifdef CONFIG_GRKERNSEC_TIME
59827+ {
59828+ .procname = "timechange_logging",
59829+ .data = &grsec_enable_time,
59830+ .maxlen = sizeof(int),
59831+ .mode = 0600,
59832+ .proc_handler = &proc_dointvec,
59833+ },
59834+#endif
59835+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59836+ {
59837+ .procname = "chroot_deny_shmat",
59838+ .data = &grsec_enable_chroot_shmat,
59839+ .maxlen = sizeof(int),
59840+ .mode = 0600,
59841+ .proc_handler = &proc_dointvec,
59842+ },
59843+#endif
59844+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59845+ {
59846+ .procname = "chroot_deny_unix",
59847+ .data = &grsec_enable_chroot_unix,
59848+ .maxlen = sizeof(int),
59849+ .mode = 0600,
59850+ .proc_handler = &proc_dointvec,
59851+ },
59852+#endif
59853+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59854+ {
59855+ .procname = "chroot_deny_mount",
59856+ .data = &grsec_enable_chroot_mount,
59857+ .maxlen = sizeof(int),
59858+ .mode = 0600,
59859+ .proc_handler = &proc_dointvec,
59860+ },
59861+#endif
59862+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59863+ {
59864+ .procname = "chroot_deny_fchdir",
59865+ .data = &grsec_enable_chroot_fchdir,
59866+ .maxlen = sizeof(int),
59867+ .mode = 0600,
59868+ .proc_handler = &proc_dointvec,
59869+ },
59870+#endif
59871+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59872+ {
59873+ .procname = "chroot_deny_chroot",
59874+ .data = &grsec_enable_chroot_double,
59875+ .maxlen = sizeof(int),
59876+ .mode = 0600,
59877+ .proc_handler = &proc_dointvec,
59878+ },
59879+#endif
59880+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59881+ {
59882+ .procname = "chroot_deny_pivot",
59883+ .data = &grsec_enable_chroot_pivot,
59884+ .maxlen = sizeof(int),
59885+ .mode = 0600,
59886+ .proc_handler = &proc_dointvec,
59887+ },
59888+#endif
59889+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59890+ {
59891+ .procname = "chroot_enforce_chdir",
59892+ .data = &grsec_enable_chroot_chdir,
59893+ .maxlen = sizeof(int),
59894+ .mode = 0600,
59895+ .proc_handler = &proc_dointvec,
59896+ },
59897+#endif
59898+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59899+ {
59900+ .procname = "chroot_deny_chmod",
59901+ .data = &grsec_enable_chroot_chmod,
59902+ .maxlen = sizeof(int),
59903+ .mode = 0600,
59904+ .proc_handler = &proc_dointvec,
59905+ },
59906+#endif
59907+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59908+ {
59909+ .procname = "chroot_deny_mknod",
59910+ .data = &grsec_enable_chroot_mknod,
59911+ .maxlen = sizeof(int),
59912+ .mode = 0600,
59913+ .proc_handler = &proc_dointvec,
59914+ },
59915+#endif
59916+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59917+ {
59918+ .procname = "chroot_restrict_nice",
59919+ .data = &grsec_enable_chroot_nice,
59920+ .maxlen = sizeof(int),
59921+ .mode = 0600,
59922+ .proc_handler = &proc_dointvec,
59923+ },
59924+#endif
59925+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59926+ {
59927+ .procname = "chroot_execlog",
59928+ .data = &grsec_enable_chroot_execlog,
59929+ .maxlen = sizeof(int),
59930+ .mode = 0600,
59931+ .proc_handler = &proc_dointvec,
59932+ },
59933+#endif
59934+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59935+ {
59936+ .procname = "chroot_caps",
59937+ .data = &grsec_enable_chroot_caps,
59938+ .maxlen = sizeof(int),
59939+ .mode = 0600,
59940+ .proc_handler = &proc_dointvec,
59941+ },
59942+#endif
59943+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59944+ {
59945+ .procname = "chroot_deny_sysctl",
59946+ .data = &grsec_enable_chroot_sysctl,
59947+ .maxlen = sizeof(int),
59948+ .mode = 0600,
59949+ .proc_handler = &proc_dointvec,
59950+ },
59951+#endif
59952+#ifdef CONFIG_GRKERNSEC_TPE
59953+ {
59954+ .procname = "tpe",
59955+ .data = &grsec_enable_tpe,
59956+ .maxlen = sizeof(int),
59957+ .mode = 0600,
59958+ .proc_handler = &proc_dointvec,
59959+ },
59960+ {
59961+ .procname = "tpe_gid",
59962+ .data = &grsec_tpe_gid,
59963+ .maxlen = sizeof(int),
59964+ .mode = 0600,
59965+ .proc_handler = &proc_dointvec,
59966+ },
59967+#endif
59968+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59969+ {
59970+ .procname = "tpe_invert",
59971+ .data = &grsec_enable_tpe_invert,
59972+ .maxlen = sizeof(int),
59973+ .mode = 0600,
59974+ .proc_handler = &proc_dointvec,
59975+ },
59976+#endif
59977+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59978+ {
59979+ .procname = "tpe_restrict_all",
59980+ .data = &grsec_enable_tpe_all,
59981+ .maxlen = sizeof(int),
59982+ .mode = 0600,
59983+ .proc_handler = &proc_dointvec,
59984+ },
59985+#endif
59986+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59987+ {
59988+ .procname = "socket_all",
59989+ .data = &grsec_enable_socket_all,
59990+ .maxlen = sizeof(int),
59991+ .mode = 0600,
59992+ .proc_handler = &proc_dointvec,
59993+ },
59994+ {
59995+ .procname = "socket_all_gid",
59996+ .data = &grsec_socket_all_gid,
59997+ .maxlen = sizeof(int),
59998+ .mode = 0600,
59999+ .proc_handler = &proc_dointvec,
60000+ },
60001+#endif
60002+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60003+ {
60004+ .procname = "socket_client",
60005+ .data = &grsec_enable_socket_client,
60006+ .maxlen = sizeof(int),
60007+ .mode = 0600,
60008+ .proc_handler = &proc_dointvec,
60009+ },
60010+ {
60011+ .procname = "socket_client_gid",
60012+ .data = &grsec_socket_client_gid,
60013+ .maxlen = sizeof(int),
60014+ .mode = 0600,
60015+ .proc_handler = &proc_dointvec,
60016+ },
60017+#endif
60018+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60019+ {
60020+ .procname = "socket_server",
60021+ .data = &grsec_enable_socket_server,
60022+ .maxlen = sizeof(int),
60023+ .mode = 0600,
60024+ .proc_handler = &proc_dointvec,
60025+ },
60026+ {
60027+ .procname = "socket_server_gid",
60028+ .data = &grsec_socket_server_gid,
60029+ .maxlen = sizeof(int),
60030+ .mode = 0600,
60031+ .proc_handler = &proc_dointvec,
60032+ },
60033+#endif
60034+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60035+ {
60036+ .procname = "audit_group",
60037+ .data = &grsec_enable_group,
60038+ .maxlen = sizeof(int),
60039+ .mode = 0600,
60040+ .proc_handler = &proc_dointvec,
60041+ },
60042+ {
60043+ .procname = "audit_gid",
60044+ .data = &grsec_audit_gid,
60045+ .maxlen = sizeof(int),
60046+ .mode = 0600,
60047+ .proc_handler = &proc_dointvec,
60048+ },
60049+#endif
60050+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60051+ {
60052+ .procname = "audit_chdir",
60053+ .data = &grsec_enable_chdir,
60054+ .maxlen = sizeof(int),
60055+ .mode = 0600,
60056+ .proc_handler = &proc_dointvec,
60057+ },
60058+#endif
60059+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60060+ {
60061+ .procname = "audit_mount",
60062+ .data = &grsec_enable_mount,
60063+ .maxlen = sizeof(int),
60064+ .mode = 0600,
60065+ .proc_handler = &proc_dointvec,
60066+ },
60067+#endif
60068+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60069+ {
60070+ .procname = "audit_textrel",
60071+ .data = &grsec_enable_audit_textrel,
60072+ .maxlen = sizeof(int),
60073+ .mode = 0600,
60074+ .proc_handler = &proc_dointvec,
60075+ },
60076+#endif
60077+#ifdef CONFIG_GRKERNSEC_DMESG
60078+ {
60079+ .procname = "dmesg",
60080+ .data = &grsec_enable_dmesg,
60081+ .maxlen = sizeof(int),
60082+ .mode = 0600,
60083+ .proc_handler = &proc_dointvec,
60084+ },
60085+#endif
60086+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60087+ {
60088+ .procname = "chroot_findtask",
60089+ .data = &grsec_enable_chroot_findtask,
60090+ .maxlen = sizeof(int),
60091+ .mode = 0600,
60092+ .proc_handler = &proc_dointvec,
60093+ },
60094+#endif
60095+#ifdef CONFIG_GRKERNSEC_RESLOG
60096+ {
60097+ .procname = "resource_logging",
60098+ .data = &grsec_resource_logging,
60099+ .maxlen = sizeof(int),
60100+ .mode = 0600,
60101+ .proc_handler = &proc_dointvec,
60102+ },
60103+#endif
60104+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60105+ {
60106+ .procname = "audit_ptrace",
60107+ .data = &grsec_enable_audit_ptrace,
60108+ .maxlen = sizeof(int),
60109+ .mode = 0600,
60110+ .proc_handler = &proc_dointvec,
60111+ },
60112+#endif
60113+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60114+ {
60115+ .procname = "harden_ptrace",
60116+ .data = &grsec_enable_harden_ptrace,
60117+ .maxlen = sizeof(int),
60118+ .mode = 0600,
60119+ .proc_handler = &proc_dointvec,
60120+ },
60121+#endif
60122+ {
60123+ .procname = "grsec_lock",
60124+ .data = &grsec_lock,
60125+ .maxlen = sizeof(int),
60126+ .mode = 0600,
60127+ .proc_handler = &proc_dointvec,
60128+ },
60129+#endif
60130+#ifdef CONFIG_GRKERNSEC_ROFS
60131+ {
60132+ .procname = "romount_protect",
60133+ .data = &grsec_enable_rofs,
60134+ .maxlen = sizeof(int),
60135+ .mode = 0600,
60136+ .proc_handler = &proc_dointvec_minmax,
60137+ .extra1 = &one,
60138+ .extra2 = &one,
60139+ },
60140+#endif
60141+ { }
60142+};
60143+#endif
60144diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
60145new file mode 100644
60146index 0000000..0dc13c3
60147--- /dev/null
60148+++ b/grsecurity/grsec_time.c
60149@@ -0,0 +1,16 @@
60150+#include <linux/kernel.h>
60151+#include <linux/sched.h>
60152+#include <linux/grinternal.h>
60153+#include <linux/module.h>
60154+
60155+void
60156+gr_log_timechange(void)
60157+{
60158+#ifdef CONFIG_GRKERNSEC_TIME
60159+ if (grsec_enable_time)
60160+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
60161+#endif
60162+ return;
60163+}
60164+
60165+EXPORT_SYMBOL(gr_log_timechange);
60166diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
60167new file mode 100644
60168index 0000000..07e0dc0
60169--- /dev/null
60170+++ b/grsecurity/grsec_tpe.c
60171@@ -0,0 +1,73 @@
60172+#include <linux/kernel.h>
60173+#include <linux/sched.h>
60174+#include <linux/file.h>
60175+#include <linux/fs.h>
60176+#include <linux/grinternal.h>
60177+
60178+extern int gr_acl_tpe_check(void);
60179+
60180+int
60181+gr_tpe_allow(const struct file *file)
60182+{
60183+#ifdef CONFIG_GRKERNSEC
60184+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
60185+ const struct cred *cred = current_cred();
60186+ char *msg = NULL;
60187+ char *msg2 = NULL;
60188+
60189+ // never restrict root
60190+ if (!cred->uid)
60191+ return 1;
60192+
60193+ if (grsec_enable_tpe) {
60194+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60195+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
60196+ msg = "not being in trusted group";
60197+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
60198+ msg = "being in untrusted group";
60199+#else
60200+ if (in_group_p(grsec_tpe_gid))
60201+ msg = "being in untrusted group";
60202+#endif
60203+ }
60204+ if (!msg && gr_acl_tpe_check())
60205+ msg = "being in untrusted role";
60206+
60207+ // not in any affected group/role
60208+ if (!msg)
60209+ goto next_check;
60210+
60211+ if (inode->i_uid)
60212+ msg2 = "file in non-root-owned directory";
60213+ else if (inode->i_mode & S_IWOTH)
60214+ msg2 = "file in world-writable directory";
60215+ else if (inode->i_mode & S_IWGRP)
60216+ msg2 = "file in group-writable directory";
60217+
60218+ if (msg && msg2) {
60219+ char fullmsg[70] = {0};
60220+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
60221+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
60222+ return 0;
60223+ }
60224+ msg = NULL;
60225+next_check:
60226+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60227+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
60228+ return 1;
60229+
60230+ if (inode->i_uid && (inode->i_uid != cred->uid))
60231+ msg = "directory not owned by user";
60232+ else if (inode->i_mode & S_IWOTH)
60233+ msg = "file in world-writable directory";
60234+ else if (inode->i_mode & S_IWGRP)
60235+ msg = "file in group-writable directory";
60236+
60237+ if (msg) {
60238+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
60239+ return 0;
60240+ }
60241+#endif
60242+#endif
60243+ return 1;
60244+}
60245diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60246new file mode 100644
60247index 0000000..9f7b1ac
60248--- /dev/null
60249+++ b/grsecurity/grsum.c
60250@@ -0,0 +1,61 @@
60251+#include <linux/err.h>
60252+#include <linux/kernel.h>
60253+#include <linux/sched.h>
60254+#include <linux/mm.h>
60255+#include <linux/scatterlist.h>
60256+#include <linux/crypto.h>
60257+#include <linux/gracl.h>
60258+
60259+
60260+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60261+#error "crypto and sha256 must be built into the kernel"
60262+#endif
60263+
60264+int
60265+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60266+{
60267+ char *p;
60268+ struct crypto_hash *tfm;
60269+ struct hash_desc desc;
60270+ struct scatterlist sg;
60271+ unsigned char temp_sum[GR_SHA_LEN];
60272+ volatile int retval = 0;
60273+ volatile int dummy = 0;
60274+ unsigned int i;
60275+
60276+ sg_init_table(&sg, 1);
60277+
60278+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60279+ if (IS_ERR(tfm)) {
60280+ /* should never happen, since sha256 should be built in */
60281+ return 1;
60282+ }
60283+
60284+ desc.tfm = tfm;
60285+ desc.flags = 0;
60286+
60287+ crypto_hash_init(&desc);
60288+
60289+ p = salt;
60290+ sg_set_buf(&sg, p, GR_SALT_LEN);
60291+ crypto_hash_update(&desc, &sg, sg.length);
60292+
60293+ p = entry->pw;
60294+ sg_set_buf(&sg, p, strlen(p));
60295+
60296+ crypto_hash_update(&desc, &sg, sg.length);
60297+
60298+ crypto_hash_final(&desc, temp_sum);
60299+
60300+ memset(entry->pw, 0, GR_PW_LEN);
60301+
60302+ for (i = 0; i < GR_SHA_LEN; i++)
60303+ if (sum[i] != temp_sum[i])
60304+ retval = 1;
60305+ else
60306+ dummy = 1; // waste a cycle
60307+
60308+ crypto_free_hash(tfm);
60309+
60310+ return retval;
60311+}
60312diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60313index 6cd5b64..f620d2d 100644
60314--- a/include/acpi/acpi_bus.h
60315+++ b/include/acpi/acpi_bus.h
60316@@ -107,7 +107,7 @@ struct acpi_device_ops {
60317 acpi_op_bind bind;
60318 acpi_op_unbind unbind;
60319 acpi_op_notify notify;
60320-};
60321+} __no_const;
60322
60323 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60324
60325diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60326index b7babf0..71e4e74 100644
60327--- a/include/asm-generic/atomic-long.h
60328+++ b/include/asm-generic/atomic-long.h
60329@@ -22,6 +22,12 @@
60330
60331 typedef atomic64_t atomic_long_t;
60332
60333+#ifdef CONFIG_PAX_REFCOUNT
60334+typedef atomic64_unchecked_t atomic_long_unchecked_t;
60335+#else
60336+typedef atomic64_t atomic_long_unchecked_t;
60337+#endif
60338+
60339 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60340
60341 static inline long atomic_long_read(atomic_long_t *l)
60342@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60343 return (long)atomic64_read(v);
60344 }
60345
60346+#ifdef CONFIG_PAX_REFCOUNT
60347+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60348+{
60349+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60350+
60351+ return (long)atomic64_read_unchecked(v);
60352+}
60353+#endif
60354+
60355 static inline void atomic_long_set(atomic_long_t *l, long i)
60356 {
60357 atomic64_t *v = (atomic64_t *)l;
60358@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60359 atomic64_set(v, i);
60360 }
60361
60362+#ifdef CONFIG_PAX_REFCOUNT
60363+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60364+{
60365+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60366+
60367+ atomic64_set_unchecked(v, i);
60368+}
60369+#endif
60370+
60371 static inline void atomic_long_inc(atomic_long_t *l)
60372 {
60373 atomic64_t *v = (atomic64_t *)l;
60374@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60375 atomic64_inc(v);
60376 }
60377
60378+#ifdef CONFIG_PAX_REFCOUNT
60379+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60380+{
60381+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60382+
60383+ atomic64_inc_unchecked(v);
60384+}
60385+#endif
60386+
60387 static inline void atomic_long_dec(atomic_long_t *l)
60388 {
60389 atomic64_t *v = (atomic64_t *)l;
60390@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60391 atomic64_dec(v);
60392 }
60393
60394+#ifdef CONFIG_PAX_REFCOUNT
60395+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60396+{
60397+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60398+
60399+ atomic64_dec_unchecked(v);
60400+}
60401+#endif
60402+
60403 static inline void atomic_long_add(long i, atomic_long_t *l)
60404 {
60405 atomic64_t *v = (atomic64_t *)l;
60406@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60407 atomic64_add(i, v);
60408 }
60409
60410+#ifdef CONFIG_PAX_REFCOUNT
60411+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60412+{
60413+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60414+
60415+ atomic64_add_unchecked(i, v);
60416+}
60417+#endif
60418+
60419 static inline void atomic_long_sub(long i, atomic_long_t *l)
60420 {
60421 atomic64_t *v = (atomic64_t *)l;
60422@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60423 atomic64_sub(i, v);
60424 }
60425
60426+#ifdef CONFIG_PAX_REFCOUNT
60427+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60428+{
60429+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60430+
60431+ atomic64_sub_unchecked(i, v);
60432+}
60433+#endif
60434+
60435 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60436 {
60437 atomic64_t *v = (atomic64_t *)l;
60438@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60439 return (long)atomic64_inc_return(v);
60440 }
60441
60442+#ifdef CONFIG_PAX_REFCOUNT
60443+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60444+{
60445+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60446+
60447+ return (long)atomic64_inc_return_unchecked(v);
60448+}
60449+#endif
60450+
60451 static inline long atomic_long_dec_return(atomic_long_t *l)
60452 {
60453 atomic64_t *v = (atomic64_t *)l;
60454@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60455
60456 typedef atomic_t atomic_long_t;
60457
60458+#ifdef CONFIG_PAX_REFCOUNT
60459+typedef atomic_unchecked_t atomic_long_unchecked_t;
60460+#else
60461+typedef atomic_t atomic_long_unchecked_t;
60462+#endif
60463+
60464 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60465 static inline long atomic_long_read(atomic_long_t *l)
60466 {
60467@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60468 return (long)atomic_read(v);
60469 }
60470
60471+#ifdef CONFIG_PAX_REFCOUNT
60472+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60473+{
60474+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60475+
60476+ return (long)atomic_read_unchecked(v);
60477+}
60478+#endif
60479+
60480 static inline void atomic_long_set(atomic_long_t *l, long i)
60481 {
60482 atomic_t *v = (atomic_t *)l;
60483@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60484 atomic_set(v, i);
60485 }
60486
60487+#ifdef CONFIG_PAX_REFCOUNT
60488+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60489+{
60490+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60491+
60492+ atomic_set_unchecked(v, i);
60493+}
60494+#endif
60495+
60496 static inline void atomic_long_inc(atomic_long_t *l)
60497 {
60498 atomic_t *v = (atomic_t *)l;
60499@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60500 atomic_inc(v);
60501 }
60502
60503+#ifdef CONFIG_PAX_REFCOUNT
60504+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60505+{
60506+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60507+
60508+ atomic_inc_unchecked(v);
60509+}
60510+#endif
60511+
60512 static inline void atomic_long_dec(atomic_long_t *l)
60513 {
60514 atomic_t *v = (atomic_t *)l;
60515@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60516 atomic_dec(v);
60517 }
60518
60519+#ifdef CONFIG_PAX_REFCOUNT
60520+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60521+{
60522+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60523+
60524+ atomic_dec_unchecked(v);
60525+}
60526+#endif
60527+
60528 static inline void atomic_long_add(long i, atomic_long_t *l)
60529 {
60530 atomic_t *v = (atomic_t *)l;
60531@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60532 atomic_add(i, v);
60533 }
60534
60535+#ifdef CONFIG_PAX_REFCOUNT
60536+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60537+{
60538+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60539+
60540+ atomic_add_unchecked(i, v);
60541+}
60542+#endif
60543+
60544 static inline void atomic_long_sub(long i, atomic_long_t *l)
60545 {
60546 atomic_t *v = (atomic_t *)l;
60547@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60548 atomic_sub(i, v);
60549 }
60550
60551+#ifdef CONFIG_PAX_REFCOUNT
60552+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60553+{
60554+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60555+
60556+ atomic_sub_unchecked(i, v);
60557+}
60558+#endif
60559+
60560 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60561 {
60562 atomic_t *v = (atomic_t *)l;
60563@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60564 return (long)atomic_inc_return(v);
60565 }
60566
60567+#ifdef CONFIG_PAX_REFCOUNT
60568+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60569+{
60570+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60571+
60572+ return (long)atomic_inc_return_unchecked(v);
60573+}
60574+#endif
60575+
60576 static inline long atomic_long_dec_return(atomic_long_t *l)
60577 {
60578 atomic_t *v = (atomic_t *)l;
60579@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60580
60581 #endif /* BITS_PER_LONG == 64 */
60582
60583+#ifdef CONFIG_PAX_REFCOUNT
60584+static inline void pax_refcount_needs_these_functions(void)
60585+{
60586+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
60587+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60588+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60589+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60590+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60591+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60592+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60593+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60594+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60595+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60596+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60597+
60598+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60599+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60600+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60601+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60602+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60603+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60604+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60605+}
60606+#else
60607+#define atomic_read_unchecked(v) atomic_read(v)
60608+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60609+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60610+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60611+#define atomic_inc_unchecked(v) atomic_inc(v)
60612+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60613+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60614+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60615+#define atomic_dec_unchecked(v) atomic_dec(v)
60616+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60617+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60618+
60619+#define atomic_long_read_unchecked(v) atomic_long_read(v)
60620+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60621+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60622+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60623+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60624+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60625+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60626+#endif
60627+
60628 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60629diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60630index b18ce4f..2ee2843 100644
60631--- a/include/asm-generic/atomic64.h
60632+++ b/include/asm-generic/atomic64.h
60633@@ -16,6 +16,8 @@ typedef struct {
60634 long long counter;
60635 } atomic64_t;
60636
60637+typedef atomic64_t atomic64_unchecked_t;
60638+
60639 #define ATOMIC64_INIT(i) { (i) }
60640
60641 extern long long atomic64_read(const atomic64_t *v);
60642@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60643 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60644 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60645
60646+#define atomic64_read_unchecked(v) atomic64_read(v)
60647+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60648+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60649+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60650+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60651+#define atomic64_inc_unchecked(v) atomic64_inc(v)
60652+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60653+#define atomic64_dec_unchecked(v) atomic64_dec(v)
60654+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60655+
60656 #endif /* _ASM_GENERIC_ATOMIC64_H */
60657diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60658index 1bfcfe5..e04c5c9 100644
60659--- a/include/asm-generic/cache.h
60660+++ b/include/asm-generic/cache.h
60661@@ -6,7 +6,7 @@
60662 * cache lines need to provide their own cache.h.
60663 */
60664
60665-#define L1_CACHE_SHIFT 5
60666-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60667+#define L1_CACHE_SHIFT 5UL
60668+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60669
60670 #endif /* __ASM_GENERIC_CACHE_H */
60671diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60672index 0d68a1e..b74a761 100644
60673--- a/include/asm-generic/emergency-restart.h
60674+++ b/include/asm-generic/emergency-restart.h
60675@@ -1,7 +1,7 @@
60676 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60677 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60678
60679-static inline void machine_emergency_restart(void)
60680+static inline __noreturn void machine_emergency_restart(void)
60681 {
60682 machine_restart(NULL);
60683 }
60684diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60685index 0232ccb..13d9165 100644
60686--- a/include/asm-generic/kmap_types.h
60687+++ b/include/asm-generic/kmap_types.h
60688@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60689 KMAP_D(17) KM_NMI,
60690 KMAP_D(18) KM_NMI_PTE,
60691 KMAP_D(19) KM_KDB,
60692+KMAP_D(20) KM_CLEARPAGE,
60693 /*
60694 * Remember to update debug_kmap_atomic() when adding new kmap types!
60695 */
60696-KMAP_D(20) KM_TYPE_NR
60697+KMAP_D(21) KM_TYPE_NR
60698 };
60699
60700 #undef KMAP_D
60701diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60702index 9ceb03b..2efbcbd 100644
60703--- a/include/asm-generic/local.h
60704+++ b/include/asm-generic/local.h
60705@@ -39,6 +39,7 @@ typedef struct
60706 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60707 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60708 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60709+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60710
60711 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60712 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
60713diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60714index 725612b..9cc513a 100644
60715--- a/include/asm-generic/pgtable-nopmd.h
60716+++ b/include/asm-generic/pgtable-nopmd.h
60717@@ -1,14 +1,19 @@
60718 #ifndef _PGTABLE_NOPMD_H
60719 #define _PGTABLE_NOPMD_H
60720
60721-#ifndef __ASSEMBLY__
60722-
60723 #include <asm-generic/pgtable-nopud.h>
60724
60725-struct mm_struct;
60726-
60727 #define __PAGETABLE_PMD_FOLDED
60728
60729+#define PMD_SHIFT PUD_SHIFT
60730+#define PTRS_PER_PMD 1
60731+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60732+#define PMD_MASK (~(PMD_SIZE-1))
60733+
60734+#ifndef __ASSEMBLY__
60735+
60736+struct mm_struct;
60737+
60738 /*
60739 * Having the pmd type consist of a pud gets the size right, and allows
60740 * us to conceptually access the pud entry that this pmd is folded into
60741@@ -16,11 +21,6 @@ struct mm_struct;
60742 */
60743 typedef struct { pud_t pud; } pmd_t;
60744
60745-#define PMD_SHIFT PUD_SHIFT
60746-#define PTRS_PER_PMD 1
60747-#define PMD_SIZE (1UL << PMD_SHIFT)
60748-#define PMD_MASK (~(PMD_SIZE-1))
60749-
60750 /*
60751 * The "pud_xxx()" functions here are trivial for a folded two-level
60752 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60753diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60754index 810431d..ccc3638 100644
60755--- a/include/asm-generic/pgtable-nopud.h
60756+++ b/include/asm-generic/pgtable-nopud.h
60757@@ -1,10 +1,15 @@
60758 #ifndef _PGTABLE_NOPUD_H
60759 #define _PGTABLE_NOPUD_H
60760
60761-#ifndef __ASSEMBLY__
60762-
60763 #define __PAGETABLE_PUD_FOLDED
60764
60765+#define PUD_SHIFT PGDIR_SHIFT
60766+#define PTRS_PER_PUD 1
60767+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60768+#define PUD_MASK (~(PUD_SIZE-1))
60769+
60770+#ifndef __ASSEMBLY__
60771+
60772 /*
60773 * Having the pud type consist of a pgd gets the size right, and allows
60774 * us to conceptually access the pgd entry that this pud is folded into
60775@@ -12,11 +17,6 @@
60776 */
60777 typedef struct { pgd_t pgd; } pud_t;
60778
60779-#define PUD_SHIFT PGDIR_SHIFT
60780-#define PTRS_PER_PUD 1
60781-#define PUD_SIZE (1UL << PUD_SHIFT)
60782-#define PUD_MASK (~(PUD_SIZE-1))
60783-
60784 /*
60785 * The "pgd_xxx()" functions here are trivial for a folded two-level
60786 * setup: the pud is never bad, and a pud always exists (as it's folded
60787diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60788index a03c098..7e5b223 100644
60789--- a/include/asm-generic/pgtable.h
60790+++ b/include/asm-generic/pgtable.h
60791@@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60792 #endif
60793 }
60794
60795+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60796+static inline unsigned long pax_open_kernel(void) { return 0; }
60797+#endif
60798+
60799+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60800+static inline unsigned long pax_close_kernel(void) { return 0; }
60801+#endif
60802+
60803 #endif /* CONFIG_MMU */
60804
60805 #endif /* !__ASSEMBLY__ */
60806diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
60807index 9788568..510dece 100644
60808--- a/include/asm-generic/uaccess.h
60809+++ b/include/asm-generic/uaccess.h
60810@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
60811 */
60812 #ifndef __copy_from_user
60813 static inline __must_check long __copy_from_user(void *to,
60814+ const void __user * from, unsigned long n) __size_overflow(3);
60815+static inline __must_check long __copy_from_user(void *to,
60816 const void __user * from, unsigned long n)
60817 {
60818 if (__builtin_constant_p(n)) {
60819@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
60820
60821 #ifndef __copy_to_user
60822 static inline __must_check long __copy_to_user(void __user *to,
60823+ const void *from, unsigned long n) __size_overflow(3);
60824+static inline __must_check long __copy_to_user(void __user *to,
60825 const void *from, unsigned long n)
60826 {
60827 if (__builtin_constant_p(n)) {
60828@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
60829 -EFAULT; \
60830 })
60831
60832+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
60833 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
60834 {
60835 size = __copy_from_user(x, ptr, size);
60836@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
60837 #define __copy_to_user_inatomic __copy_to_user
60838 #endif
60839
60840+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
60841 static inline long copy_from_user(void *to,
60842 const void __user * from, unsigned long n)
60843 {
60844@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
60845 return n;
60846 }
60847
60848+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
60849 static inline long copy_to_user(void __user *to,
60850 const void *from, unsigned long n)
60851 {
60852@@ -314,6 +321,8 @@ static inline long strlen_user(const char __user *src)
60853 */
60854 #ifndef __clear_user
60855 static inline __must_check unsigned long
60856+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
60857+static inline __must_check unsigned long
60858 __clear_user(void __user *to, unsigned long n)
60859 {
60860 memset((void __force *)to, 0, n);
60861@@ -322,6 +331,8 @@ __clear_user(void __user *to, unsigned long n)
60862 #endif
60863
60864 static inline __must_check unsigned long
60865+clear_user(void __user *to, unsigned long n) __size_overflow(2);
60866+static inline __must_check unsigned long
60867 clear_user(void __user *to, unsigned long n)
60868 {
60869 might_sleep();
60870diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60871index b5e2e4c..6a5373e 100644
60872--- a/include/asm-generic/vmlinux.lds.h
60873+++ b/include/asm-generic/vmlinux.lds.h
60874@@ -217,6 +217,7 @@
60875 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60876 VMLINUX_SYMBOL(__start_rodata) = .; \
60877 *(.rodata) *(.rodata.*) \
60878+ *(.data..read_only) \
60879 *(__vermagic) /* Kernel version magic */ \
60880 . = ALIGN(8); \
60881 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60882@@ -722,17 +723,18 @@
60883 * section in the linker script will go there too. @phdr should have
60884 * a leading colon.
60885 *
60886- * Note that this macros defines __per_cpu_load as an absolute symbol.
60887+ * Note that this macros defines per_cpu_load as an absolute symbol.
60888 * If there is no need to put the percpu section at a predetermined
60889 * address, use PERCPU_SECTION.
60890 */
60891 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60892- VMLINUX_SYMBOL(__per_cpu_load) = .; \
60893- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60894+ per_cpu_load = .; \
60895+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60896 - LOAD_OFFSET) { \
60897+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60898 PERCPU_INPUT(cacheline) \
60899 } phdr \
60900- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60901+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60902
60903 /**
60904 * PERCPU_SECTION - define output section for percpu area, simple version
60905diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60906index 92f0981..d44a37c 100644
60907--- a/include/drm/drmP.h
60908+++ b/include/drm/drmP.h
60909@@ -72,6 +72,7 @@
60910 #include <linux/workqueue.h>
60911 #include <linux/poll.h>
60912 #include <asm/pgalloc.h>
60913+#include <asm/local.h>
60914 #include "drm.h"
60915
60916 #include <linux/idr.h>
60917@@ -1038,7 +1039,7 @@ struct drm_device {
60918
60919 /** \name Usage Counters */
60920 /*@{ */
60921- int open_count; /**< Outstanding files open */
60922+ local_t open_count; /**< Outstanding files open */
60923 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60924 atomic_t vma_count; /**< Outstanding vma areas open */
60925 int buf_use; /**< Buffers in use -- cannot alloc */
60926@@ -1049,7 +1050,7 @@ struct drm_device {
60927 /*@{ */
60928 unsigned long counters;
60929 enum drm_stat_type types[15];
60930- atomic_t counts[15];
60931+ atomic_unchecked_t counts[15];
60932 /*@} */
60933
60934 struct list_head filelist;
60935diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60936index 37515d1..34fa8b0 100644
60937--- a/include/drm/drm_crtc_helper.h
60938+++ b/include/drm/drm_crtc_helper.h
60939@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60940
60941 /* disable crtc when not in use - more explicit than dpms off */
60942 void (*disable)(struct drm_crtc *crtc);
60943-};
60944+} __no_const;
60945
60946 struct drm_encoder_helper_funcs {
60947 void (*dpms)(struct drm_encoder *encoder, int mode);
60948@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60949 struct drm_connector *connector);
60950 /* disable encoder when not in use - more explicit than dpms off */
60951 void (*disable)(struct drm_encoder *encoder);
60952-};
60953+} __no_const;
60954
60955 struct drm_connector_helper_funcs {
60956 int (*get_modes)(struct drm_connector *connector);
60957diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60958index 26c1f78..6722682 100644
60959--- a/include/drm/ttm/ttm_memory.h
60960+++ b/include/drm/ttm/ttm_memory.h
60961@@ -47,7 +47,7 @@
60962
60963 struct ttm_mem_shrink {
60964 int (*do_shrink) (struct ttm_mem_shrink *);
60965-};
60966+} __no_const;
60967
60968 /**
60969 * struct ttm_mem_global - Global memory accounting structure.
60970diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60971index e86dfca..40cc55f 100644
60972--- a/include/linux/a.out.h
60973+++ b/include/linux/a.out.h
60974@@ -39,6 +39,14 @@ enum machine_type {
60975 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60976 };
60977
60978+/* Constants for the N_FLAGS field */
60979+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60980+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60981+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60982+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60983+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60984+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60985+
60986 #if !defined (N_MAGIC)
60987 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60988 #endif
60989diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60990index f4ff882..84b53a6 100644
60991--- a/include/linux/atmdev.h
60992+++ b/include/linux/atmdev.h
60993@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60994 #endif
60995
60996 struct k_atm_aal_stats {
60997-#define __HANDLE_ITEM(i) atomic_t i
60998+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60999 __AAL_STAT_ITEMS
61000 #undef __HANDLE_ITEM
61001 };
61002diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
61003index 0092102..8a801b4 100644
61004--- a/include/linux/binfmts.h
61005+++ b/include/linux/binfmts.h
61006@@ -89,6 +89,7 @@ struct linux_binfmt {
61007 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
61008 int (*load_shlib)(struct file *);
61009 int (*core_dump)(struct coredump_params *cprm);
61010+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
61011 unsigned long min_coredump; /* minimal dump size */
61012 };
61013
61014diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
61015index 606cf33..b72c577 100644
61016--- a/include/linux/blkdev.h
61017+++ b/include/linux/blkdev.h
61018@@ -1379,7 +1379,7 @@ struct block_device_operations {
61019 /* this callback is with swap_lock and sometimes page table lock held */
61020 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
61021 struct module *owner;
61022-};
61023+} __do_const;
61024
61025 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
61026 unsigned long);
61027diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
61028index 4d1a074..88f929a 100644
61029--- a/include/linux/blktrace_api.h
61030+++ b/include/linux/blktrace_api.h
61031@@ -162,7 +162,7 @@ struct blk_trace {
61032 struct dentry *dir;
61033 struct dentry *dropped_file;
61034 struct dentry *msg_file;
61035- atomic_t dropped;
61036+ atomic_unchecked_t dropped;
61037 };
61038
61039 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
61040diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
61041index 83195fb..0b0f77d 100644
61042--- a/include/linux/byteorder/little_endian.h
61043+++ b/include/linux/byteorder/little_endian.h
61044@@ -42,51 +42,51 @@
61045
61046 static inline __le64 __cpu_to_le64p(const __u64 *p)
61047 {
61048- return (__force __le64)*p;
61049+ return (__force const __le64)*p;
61050 }
61051 static inline __u64 __le64_to_cpup(const __le64 *p)
61052 {
61053- return (__force __u64)*p;
61054+ return (__force const __u64)*p;
61055 }
61056 static inline __le32 __cpu_to_le32p(const __u32 *p)
61057 {
61058- return (__force __le32)*p;
61059+ return (__force const __le32)*p;
61060 }
61061 static inline __u32 __le32_to_cpup(const __le32 *p)
61062 {
61063- return (__force __u32)*p;
61064+ return (__force const __u32)*p;
61065 }
61066 static inline __le16 __cpu_to_le16p(const __u16 *p)
61067 {
61068- return (__force __le16)*p;
61069+ return (__force const __le16)*p;
61070 }
61071 static inline __u16 __le16_to_cpup(const __le16 *p)
61072 {
61073- return (__force __u16)*p;
61074+ return (__force const __u16)*p;
61075 }
61076 static inline __be64 __cpu_to_be64p(const __u64 *p)
61077 {
61078- return (__force __be64)__swab64p(p);
61079+ return (__force const __be64)__swab64p(p);
61080 }
61081 static inline __u64 __be64_to_cpup(const __be64 *p)
61082 {
61083- return __swab64p((__u64 *)p);
61084+ return __swab64p((const __u64 *)p);
61085 }
61086 static inline __be32 __cpu_to_be32p(const __u32 *p)
61087 {
61088- return (__force __be32)__swab32p(p);
61089+ return (__force const __be32)__swab32p(p);
61090 }
61091 static inline __u32 __be32_to_cpup(const __be32 *p)
61092 {
61093- return __swab32p((__u32 *)p);
61094+ return __swab32p((const __u32 *)p);
61095 }
61096 static inline __be16 __cpu_to_be16p(const __u16 *p)
61097 {
61098- return (__force __be16)__swab16p(p);
61099+ return (__force const __be16)__swab16p(p);
61100 }
61101 static inline __u16 __be16_to_cpup(const __be16 *p)
61102 {
61103- return __swab16p((__u16 *)p);
61104+ return __swab16p((const __u16 *)p);
61105 }
61106 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
61107 #define __le64_to_cpus(x) do { (void)(x); } while (0)
61108diff --git a/include/linux/cache.h b/include/linux/cache.h
61109index 4c57065..4307975 100644
61110--- a/include/linux/cache.h
61111+++ b/include/linux/cache.h
61112@@ -16,6 +16,10 @@
61113 #define __read_mostly
61114 #endif
61115
61116+#ifndef __read_only
61117+#define __read_only __read_mostly
61118+#endif
61119+
61120 #ifndef ____cacheline_aligned
61121 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
61122 #endif
61123diff --git a/include/linux/capability.h b/include/linux/capability.h
61124index 12d52de..b5f7fa7 100644
61125--- a/include/linux/capability.h
61126+++ b/include/linux/capability.h
61127@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
61128 extern bool capable(int cap);
61129 extern bool ns_capable(struct user_namespace *ns, int cap);
61130 extern bool nsown_capable(int cap);
61131+extern bool capable_nolog(int cap);
61132+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
61133
61134 /* audit system wants to get cap info from files as well */
61135 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
61136diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
61137index 04ffb2e..6799180 100644
61138--- a/include/linux/cleancache.h
61139+++ b/include/linux/cleancache.h
61140@@ -31,7 +31,7 @@ struct cleancache_ops {
61141 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
61142 void (*flush_inode)(int, struct cleancache_filekey);
61143 void (*flush_fs)(int);
61144-};
61145+} __no_const;
61146
61147 extern struct cleancache_ops
61148 cleancache_register_ops(struct cleancache_ops *ops);
61149diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
61150index 2f40791..567b215 100644
61151--- a/include/linux/compiler-gcc4.h
61152+++ b/include/linux/compiler-gcc4.h
61153@@ -32,6 +32,15 @@
61154 #define __linktime_error(message) __attribute__((__error__(message)))
61155
61156 #if __GNUC_MINOR__ >= 5
61157+
61158+#ifdef CONSTIFY_PLUGIN
61159+#define __no_const __attribute__((no_const))
61160+#define __do_const __attribute__((do_const))
61161+#endif
61162+
61163+#ifdef SIZE_OVERFLOW_PLUGIN
61164+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
61165+#endif
61166 /*
61167 * Mark a position in code as unreachable. This can be used to
61168 * suppress control flow warnings after asm blocks that transfer
61169@@ -47,6 +56,11 @@
61170 #define __noclone __attribute__((__noclone__))
61171
61172 #endif
61173+
61174+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
61175+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
61176+#define __bos0(ptr) __bos((ptr), 0)
61177+#define __bos1(ptr) __bos((ptr), 1)
61178 #endif
61179
61180 #if __GNUC_MINOR__ > 0
61181diff --git a/include/linux/compiler.h b/include/linux/compiler.h
61182index 4a24354..ecaff7a 100644
61183--- a/include/linux/compiler.h
61184+++ b/include/linux/compiler.h
61185@@ -5,31 +5,62 @@
61186
61187 #ifdef __CHECKER__
61188 # define __user __attribute__((noderef, address_space(1)))
61189+# define __force_user __force __user
61190 # define __kernel __attribute__((address_space(0)))
61191+# define __force_kernel __force __kernel
61192 # define __safe __attribute__((safe))
61193 # define __force __attribute__((force))
61194 # define __nocast __attribute__((nocast))
61195 # define __iomem __attribute__((noderef, address_space(2)))
61196+# define __force_iomem __force __iomem
61197 # define __acquires(x) __attribute__((context(x,0,1)))
61198 # define __releases(x) __attribute__((context(x,1,0)))
61199 # define __acquire(x) __context__(x,1)
61200 # define __release(x) __context__(x,-1)
61201 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61202 # define __percpu __attribute__((noderef, address_space(3)))
61203+# define __force_percpu __force __percpu
61204 #ifdef CONFIG_SPARSE_RCU_POINTER
61205 # define __rcu __attribute__((noderef, address_space(4)))
61206+# define __force_rcu __force __rcu
61207 #else
61208 # define __rcu
61209+# define __force_rcu
61210 #endif
61211 extern void __chk_user_ptr(const volatile void __user *);
61212 extern void __chk_io_ptr(const volatile void __iomem *);
61213+#elif defined(CHECKER_PLUGIN)
61214+//# define __user
61215+//# define __force_user
61216+//# define __kernel
61217+//# define __force_kernel
61218+# define __safe
61219+# define __force
61220+# define __nocast
61221+# define __iomem
61222+# define __force_iomem
61223+# define __chk_user_ptr(x) (void)0
61224+# define __chk_io_ptr(x) (void)0
61225+# define __builtin_warning(x, y...) (1)
61226+# define __acquires(x)
61227+# define __releases(x)
61228+# define __acquire(x) (void)0
61229+# define __release(x) (void)0
61230+# define __cond_lock(x,c) (c)
61231+# define __percpu
61232+# define __force_percpu
61233+# define __rcu
61234+# define __force_rcu
61235 #else
61236 # define __user
61237+# define __force_user
61238 # define __kernel
61239+# define __force_kernel
61240 # define __safe
61241 # define __force
61242 # define __nocast
61243 # define __iomem
61244+# define __force_iomem
61245 # define __chk_user_ptr(x) (void)0
61246 # define __chk_io_ptr(x) (void)0
61247 # define __builtin_warning(x, y...) (1)
61248@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
61249 # define __release(x) (void)0
61250 # define __cond_lock(x,c) (c)
61251 # define __percpu
61252+# define __force_percpu
61253 # define __rcu
61254+# define __force_rcu
61255 #endif
61256
61257 #ifdef __KERNEL__
61258@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61259 # define __attribute_const__ /* unimplemented */
61260 #endif
61261
61262+#ifndef __no_const
61263+# define __no_const
61264+#endif
61265+
61266+#ifndef __do_const
61267+# define __do_const
61268+#endif
61269+
61270+#ifndef __size_overflow
61271+# define __size_overflow(...)
61272+#endif
61273 /*
61274 * Tell gcc if a function is cold. The compiler will assume any path
61275 * directly leading to the call is unlikely.
61276@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61277 #define __cold
61278 #endif
61279
61280+#ifndef __alloc_size
61281+#define __alloc_size(...)
61282+#endif
61283+
61284+#ifndef __bos
61285+#define __bos(ptr, arg)
61286+#endif
61287+
61288+#ifndef __bos0
61289+#define __bos0(ptr)
61290+#endif
61291+
61292+#ifndef __bos1
61293+#define __bos1(ptr)
61294+#endif
61295+
61296 /* Simple shorthand for a section definition */
61297 #ifndef __section
61298 # define __section(S) __attribute__ ((__section__(#S)))
61299@@ -308,6 +368,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61300 * use is to mediate communication between process-level code and irq/NMI
61301 * handlers, all running on the same CPU.
61302 */
61303-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61304+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61305+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61306
61307 #endif /* __LINUX_COMPILER_H */
61308diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
61309index e9eaec5..bfeb9bb 100644
61310--- a/include/linux/cpuset.h
61311+++ b/include/linux/cpuset.h
61312@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
61313 * nodemask.
61314 */
61315 smp_mb();
61316- --ACCESS_ONCE(current->mems_allowed_change_disable);
61317+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
61318 }
61319
61320 static inline void set_mems_allowed(nodemask_t nodemask)
61321diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
61322index b936763..48685ee 100644
61323--- a/include/linux/crash_dump.h
61324+++ b/include/linux/crash_dump.h
61325@@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr;
61326 extern unsigned long long elfcorehdr_size;
61327
61328 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
61329- unsigned long, int);
61330+ unsigned long, int) __size_overflow(3);
61331
61332 /* Architecture code defines this if there are other possible ELF
61333 * machine types, e.g. on bi-arch capable hardware. */
61334diff --git a/include/linux/cred.h b/include/linux/cred.h
61335index adadf71..6af5560 100644
61336--- a/include/linux/cred.h
61337+++ b/include/linux/cred.h
61338@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61339 static inline void validate_process_creds(void)
61340 {
61341 }
61342+static inline void validate_task_creds(struct task_struct *task)
61343+{
61344+}
61345 #endif
61346
61347 /**
61348diff --git a/include/linux/crypto.h b/include/linux/crypto.h
61349index 8a94217..15d49e3 100644
61350--- a/include/linux/crypto.h
61351+++ b/include/linux/crypto.h
61352@@ -365,7 +365,7 @@ struct cipher_tfm {
61353 const u8 *key, unsigned int keylen);
61354 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61355 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61356-};
61357+} __no_const;
61358
61359 struct hash_tfm {
61360 int (*init)(struct hash_desc *desc);
61361@@ -386,13 +386,13 @@ struct compress_tfm {
61362 int (*cot_decompress)(struct crypto_tfm *tfm,
61363 const u8 *src, unsigned int slen,
61364 u8 *dst, unsigned int *dlen);
61365-};
61366+} __no_const;
61367
61368 struct rng_tfm {
61369 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61370 unsigned int dlen);
61371 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61372-};
61373+} __no_const;
61374
61375 #define crt_ablkcipher crt_u.ablkcipher
61376 #define crt_aead crt_u.aead
61377diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61378index 7925bf0..d5143d2 100644
61379--- a/include/linux/decompress/mm.h
61380+++ b/include/linux/decompress/mm.h
61381@@ -77,7 +77,7 @@ static void free(void *where)
61382 * warnings when not needed (indeed large_malloc / large_free are not
61383 * needed by inflate */
61384
61385-#define malloc(a) kmalloc(a, GFP_KERNEL)
61386+#define malloc(a) kmalloc((a), GFP_KERNEL)
61387 #define free(a) kfree(a)
61388
61389 #define large_malloc(a) vmalloc(a)
61390diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61391index e13117c..e9fc938 100644
61392--- a/include/linux/dma-mapping.h
61393+++ b/include/linux/dma-mapping.h
61394@@ -46,7 +46,7 @@ struct dma_map_ops {
61395 u64 (*get_required_mask)(struct device *dev);
61396 #endif
61397 int is_phys;
61398-};
61399+} __do_const;
61400
61401 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61402
61403diff --git a/include/linux/efi.h b/include/linux/efi.h
61404index 7cce0ea..c2085e4 100644
61405--- a/include/linux/efi.h
61406+++ b/include/linux/efi.h
61407@@ -591,7 +591,7 @@ struct efivar_operations {
61408 efi_get_variable_t *get_variable;
61409 efi_get_next_variable_t *get_next_variable;
61410 efi_set_variable_t *set_variable;
61411-};
61412+} __no_const;
61413
61414 struct efivars {
61415 /*
61416diff --git a/include/linux/elf.h b/include/linux/elf.h
61417index 999b4f5..57753b4 100644
61418--- a/include/linux/elf.h
61419+++ b/include/linux/elf.h
61420@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
61421 #define PT_GNU_EH_FRAME 0x6474e550
61422
61423 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61424+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61425+
61426+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61427+
61428+/* Constants for the e_flags field */
61429+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61430+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61431+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61432+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61433+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61434+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61435
61436 /*
61437 * Extended Numbering
61438@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
61439 #define DT_DEBUG 21
61440 #define DT_TEXTREL 22
61441 #define DT_JMPREL 23
61442+#define DT_FLAGS 30
61443+ #define DF_TEXTREL 0x00000004
61444 #define DT_ENCODING 32
61445 #define OLD_DT_LOOS 0x60000000
61446 #define DT_LOOS 0x6000000d
61447@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
61448 #define PF_W 0x2
61449 #define PF_X 0x1
61450
61451+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61452+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61453+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61454+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61455+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61456+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61457+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61458+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61459+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61460+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61461+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61462+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61463+
61464 typedef struct elf32_phdr{
61465 Elf32_Word p_type;
61466 Elf32_Off p_offset;
61467@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
61468 #define EI_OSABI 7
61469 #define EI_PAD 8
61470
61471+#define EI_PAX 14
61472+
61473 #define ELFMAG0 0x7f /* EI_MAG */
61474 #define ELFMAG1 'E'
61475 #define ELFMAG2 'L'
61476@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
61477 #define elf_note elf32_note
61478 #define elf_addr_t Elf32_Off
61479 #define Elf_Half Elf32_Half
61480+#define elf_dyn Elf32_Dyn
61481
61482 #else
61483
61484@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
61485 #define elf_note elf64_note
61486 #define elf_addr_t Elf64_Off
61487 #define Elf_Half Elf64_Half
61488+#define elf_dyn Elf64_Dyn
61489
61490 #endif
61491
61492diff --git a/include/linux/filter.h b/include/linux/filter.h
61493index 8eeb205..d59bfa2 100644
61494--- a/include/linux/filter.h
61495+++ b/include/linux/filter.h
61496@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
61497
61498 struct sk_buff;
61499 struct sock;
61500+struct bpf_jit_work;
61501
61502 struct sk_filter
61503 {
61504@@ -141,6 +142,9 @@ struct sk_filter
61505 unsigned int len; /* Number of filter blocks */
61506 unsigned int (*bpf_func)(const struct sk_buff *skb,
61507 const struct sock_filter *filter);
61508+#ifdef CONFIG_BPF_JIT
61509+ struct bpf_jit_work *work;
61510+#endif
61511 struct rcu_head rcu;
61512 struct sock_filter insns[0];
61513 };
61514diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61515index 84ccf8e..2e9b14c 100644
61516--- a/include/linux/firewire.h
61517+++ b/include/linux/firewire.h
61518@@ -428,7 +428,7 @@ struct fw_iso_context {
61519 union {
61520 fw_iso_callback_t sc;
61521 fw_iso_mc_callback_t mc;
61522- } callback;
61523+ } __no_const callback;
61524 void *callback_data;
61525 };
61526
61527diff --git a/include/linux/fs.h b/include/linux/fs.h
61528index f4b6e06..d6ba573 100644
61529--- a/include/linux/fs.h
61530+++ b/include/linux/fs.h
61531@@ -1628,7 +1628,8 @@ struct file_operations {
61532 int (*setlease)(struct file *, long, struct file_lock **);
61533 long (*fallocate)(struct file *file, int mode, loff_t offset,
61534 loff_t len);
61535-};
61536+} __do_const;
61537+typedef struct file_operations __no_const file_operations_no_const;
61538
61539 struct inode_operations {
61540 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61541diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61542index 003dc0f..3c4ea97 100644
61543--- a/include/linux/fs_struct.h
61544+++ b/include/linux/fs_struct.h
61545@@ -6,7 +6,7 @@
61546 #include <linux/seqlock.h>
61547
61548 struct fs_struct {
61549- int users;
61550+ atomic_t users;
61551 spinlock_t lock;
61552 seqcount_t seq;
61553 int umask;
61554diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61555index ce31408..b1ad003 100644
61556--- a/include/linux/fscache-cache.h
61557+++ b/include/linux/fscache-cache.h
61558@@ -102,7 +102,7 @@ struct fscache_operation {
61559 fscache_operation_release_t release;
61560 };
61561
61562-extern atomic_t fscache_op_debug_id;
61563+extern atomic_unchecked_t fscache_op_debug_id;
61564 extern void fscache_op_work_func(struct work_struct *work);
61565
61566 extern void fscache_enqueue_operation(struct fscache_operation *);
61567@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61568 {
61569 INIT_WORK(&op->work, fscache_op_work_func);
61570 atomic_set(&op->usage, 1);
61571- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61572+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61573 op->processor = processor;
61574 op->release = release;
61575 INIT_LIST_HEAD(&op->pend_link);
61576diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61577index 2a53f10..0187fdf 100644
61578--- a/include/linux/fsnotify.h
61579+++ b/include/linux/fsnotify.h
61580@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61581 */
61582 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61583 {
61584- return kstrdup(name, GFP_KERNEL);
61585+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61586 }
61587
61588 /*
61589diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61590index 91d0e0a3..035666b 100644
61591--- a/include/linux/fsnotify_backend.h
61592+++ b/include/linux/fsnotify_backend.h
61593@@ -105,6 +105,7 @@ struct fsnotify_ops {
61594 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61595 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61596 };
61597+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61598
61599 /*
61600 * A group is a "thing" that wants to receive notification about filesystem
61601diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61602index c3da42d..c70e0df 100644
61603--- a/include/linux/ftrace_event.h
61604+++ b/include/linux/ftrace_event.h
61605@@ -97,7 +97,7 @@ struct trace_event_functions {
61606 trace_print_func raw;
61607 trace_print_func hex;
61608 trace_print_func binary;
61609-};
61610+} __no_const;
61611
61612 struct trace_event {
61613 struct hlist_node node;
61614@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61615 extern int trace_add_event_call(struct ftrace_event_call *call);
61616 extern void trace_remove_event_call(struct ftrace_event_call *call);
61617
61618-#define is_signed_type(type) (((type)(-1)) < 0)
61619+#define is_signed_type(type) (((type)(-1)) < (type)1)
61620
61621 int trace_set_clr_event(const char *system, const char *event, int set);
61622
61623diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61624index e61d319..0da8505 100644
61625--- a/include/linux/genhd.h
61626+++ b/include/linux/genhd.h
61627@@ -185,7 +185,7 @@ struct gendisk {
61628 struct kobject *slave_dir;
61629
61630 struct timer_rand_state *random;
61631- atomic_t sync_io; /* RAID */
61632+ atomic_unchecked_t sync_io; /* RAID */
61633 struct disk_events *ev;
61634 #ifdef CONFIG_BLK_DEV_INTEGRITY
61635 struct blk_integrity *integrity;
61636diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61637new file mode 100644
61638index 0000000..8a130b6
61639--- /dev/null
61640+++ b/include/linux/gracl.h
61641@@ -0,0 +1,319 @@
61642+#ifndef GR_ACL_H
61643+#define GR_ACL_H
61644+
61645+#include <linux/grdefs.h>
61646+#include <linux/resource.h>
61647+#include <linux/capability.h>
61648+#include <linux/dcache.h>
61649+#include <asm/resource.h>
61650+
61651+/* Major status information */
61652+
61653+#define GR_VERSION "grsecurity 2.9"
61654+#define GRSECURITY_VERSION 0x2900
61655+
61656+enum {
61657+ GR_SHUTDOWN = 0,
61658+ GR_ENABLE = 1,
61659+ GR_SPROLE = 2,
61660+ GR_RELOAD = 3,
61661+ GR_SEGVMOD = 4,
61662+ GR_STATUS = 5,
61663+ GR_UNSPROLE = 6,
61664+ GR_PASSSET = 7,
61665+ GR_SPROLEPAM = 8,
61666+};
61667+
61668+/* Password setup definitions
61669+ * kernel/grhash.c */
61670+enum {
61671+ GR_PW_LEN = 128,
61672+ GR_SALT_LEN = 16,
61673+ GR_SHA_LEN = 32,
61674+};
61675+
61676+enum {
61677+ GR_SPROLE_LEN = 64,
61678+};
61679+
61680+enum {
61681+ GR_NO_GLOB = 0,
61682+ GR_REG_GLOB,
61683+ GR_CREATE_GLOB
61684+};
61685+
61686+#define GR_NLIMITS 32
61687+
61688+/* Begin Data Structures */
61689+
61690+struct sprole_pw {
61691+ unsigned char *rolename;
61692+ unsigned char salt[GR_SALT_LEN];
61693+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61694+};
61695+
61696+struct name_entry {
61697+ __u32 key;
61698+ ino_t inode;
61699+ dev_t device;
61700+ char *name;
61701+ __u16 len;
61702+ __u8 deleted;
61703+ struct name_entry *prev;
61704+ struct name_entry *next;
61705+};
61706+
61707+struct inodev_entry {
61708+ struct name_entry *nentry;
61709+ struct inodev_entry *prev;
61710+ struct inodev_entry *next;
61711+};
61712+
61713+struct acl_role_db {
61714+ struct acl_role_label **r_hash;
61715+ __u32 r_size;
61716+};
61717+
61718+struct inodev_db {
61719+ struct inodev_entry **i_hash;
61720+ __u32 i_size;
61721+};
61722+
61723+struct name_db {
61724+ struct name_entry **n_hash;
61725+ __u32 n_size;
61726+};
61727+
61728+struct crash_uid {
61729+ uid_t uid;
61730+ unsigned long expires;
61731+};
61732+
61733+struct gr_hash_struct {
61734+ void **table;
61735+ void **nametable;
61736+ void *first;
61737+ __u32 table_size;
61738+ __u32 used_size;
61739+ int type;
61740+};
61741+
61742+/* Userspace Grsecurity ACL data structures */
61743+
61744+struct acl_subject_label {
61745+ char *filename;
61746+ ino_t inode;
61747+ dev_t device;
61748+ __u32 mode;
61749+ kernel_cap_t cap_mask;
61750+ kernel_cap_t cap_lower;
61751+ kernel_cap_t cap_invert_audit;
61752+
61753+ struct rlimit res[GR_NLIMITS];
61754+ __u32 resmask;
61755+
61756+ __u8 user_trans_type;
61757+ __u8 group_trans_type;
61758+ uid_t *user_transitions;
61759+ gid_t *group_transitions;
61760+ __u16 user_trans_num;
61761+ __u16 group_trans_num;
61762+
61763+ __u32 sock_families[2];
61764+ __u32 ip_proto[8];
61765+ __u32 ip_type;
61766+ struct acl_ip_label **ips;
61767+ __u32 ip_num;
61768+ __u32 inaddr_any_override;
61769+
61770+ __u32 crashes;
61771+ unsigned long expires;
61772+
61773+ struct acl_subject_label *parent_subject;
61774+ struct gr_hash_struct *hash;
61775+ struct acl_subject_label *prev;
61776+ struct acl_subject_label *next;
61777+
61778+ struct acl_object_label **obj_hash;
61779+ __u32 obj_hash_size;
61780+ __u16 pax_flags;
61781+};
61782+
61783+struct role_allowed_ip {
61784+ __u32 addr;
61785+ __u32 netmask;
61786+
61787+ struct role_allowed_ip *prev;
61788+ struct role_allowed_ip *next;
61789+};
61790+
61791+struct role_transition {
61792+ char *rolename;
61793+
61794+ struct role_transition *prev;
61795+ struct role_transition *next;
61796+};
61797+
61798+struct acl_role_label {
61799+ char *rolename;
61800+ uid_t uidgid;
61801+ __u16 roletype;
61802+
61803+ __u16 auth_attempts;
61804+ unsigned long expires;
61805+
61806+ struct acl_subject_label *root_label;
61807+ struct gr_hash_struct *hash;
61808+
61809+ struct acl_role_label *prev;
61810+ struct acl_role_label *next;
61811+
61812+ struct role_transition *transitions;
61813+ struct role_allowed_ip *allowed_ips;
61814+ uid_t *domain_children;
61815+ __u16 domain_child_num;
61816+
61817+ umode_t umask;
61818+
61819+ struct acl_subject_label **subj_hash;
61820+ __u32 subj_hash_size;
61821+};
61822+
61823+struct user_acl_role_db {
61824+ struct acl_role_label **r_table;
61825+ __u32 num_pointers; /* Number of allocations to track */
61826+ __u32 num_roles; /* Number of roles */
61827+ __u32 num_domain_children; /* Number of domain children */
61828+ __u32 num_subjects; /* Number of subjects */
61829+ __u32 num_objects; /* Number of objects */
61830+};
61831+
61832+struct acl_object_label {
61833+ char *filename;
61834+ ino_t inode;
61835+ dev_t device;
61836+ __u32 mode;
61837+
61838+ struct acl_subject_label *nested;
61839+ struct acl_object_label *globbed;
61840+
61841+ /* next two structures not used */
61842+
61843+ struct acl_object_label *prev;
61844+ struct acl_object_label *next;
61845+};
61846+
61847+struct acl_ip_label {
61848+ char *iface;
61849+ __u32 addr;
61850+ __u32 netmask;
61851+ __u16 low, high;
61852+ __u8 mode;
61853+ __u32 type;
61854+ __u32 proto[8];
61855+
61856+ /* next two structures not used */
61857+
61858+ struct acl_ip_label *prev;
61859+ struct acl_ip_label *next;
61860+};
61861+
61862+struct gr_arg {
61863+ struct user_acl_role_db role_db;
61864+ unsigned char pw[GR_PW_LEN];
61865+ unsigned char salt[GR_SALT_LEN];
61866+ unsigned char sum[GR_SHA_LEN];
61867+ unsigned char sp_role[GR_SPROLE_LEN];
61868+ struct sprole_pw *sprole_pws;
61869+ dev_t segv_device;
61870+ ino_t segv_inode;
61871+ uid_t segv_uid;
61872+ __u16 num_sprole_pws;
61873+ __u16 mode;
61874+};
61875+
61876+struct gr_arg_wrapper {
61877+ struct gr_arg *arg;
61878+ __u32 version;
61879+ __u32 size;
61880+};
61881+
61882+struct subject_map {
61883+ struct acl_subject_label *user;
61884+ struct acl_subject_label *kernel;
61885+ struct subject_map *prev;
61886+ struct subject_map *next;
61887+};
61888+
61889+struct acl_subj_map_db {
61890+ struct subject_map **s_hash;
61891+ __u32 s_size;
61892+};
61893+
61894+/* End Data Structures Section */
61895+
61896+/* Hash functions generated by empirical testing by Brad Spengler
61897+ Makes good use of the low bits of the inode. Generally 0-1 times
61898+ in loop for successful match. 0-3 for unsuccessful match.
61899+ Shift/add algorithm with modulus of table size and an XOR*/
61900+
61901+static __inline__ unsigned int
61902+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61903+{
61904+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
61905+}
61906+
61907+ static __inline__ unsigned int
61908+shash(const struct acl_subject_label *userp, const unsigned int sz)
61909+{
61910+ return ((const unsigned long)userp % sz);
61911+}
61912+
61913+static __inline__ unsigned int
61914+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61915+{
61916+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61917+}
61918+
61919+static __inline__ unsigned int
61920+nhash(const char *name, const __u16 len, const unsigned int sz)
61921+{
61922+ return full_name_hash((const unsigned char *)name, len) % sz;
61923+}
61924+
61925+#define FOR_EACH_ROLE_START(role) \
61926+ role = role_list; \
61927+ while (role) {
61928+
61929+#define FOR_EACH_ROLE_END(role) \
61930+ role = role->prev; \
61931+ }
61932+
61933+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61934+ subj = NULL; \
61935+ iter = 0; \
61936+ while (iter < role->subj_hash_size) { \
61937+ if (subj == NULL) \
61938+ subj = role->subj_hash[iter]; \
61939+ if (subj == NULL) { \
61940+ iter++; \
61941+ continue; \
61942+ }
61943+
61944+#define FOR_EACH_SUBJECT_END(subj,iter) \
61945+ subj = subj->next; \
61946+ if (subj == NULL) \
61947+ iter++; \
61948+ }
61949+
61950+
61951+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61952+ subj = role->hash->first; \
61953+ while (subj != NULL) {
61954+
61955+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61956+ subj = subj->next; \
61957+ }
61958+
61959+#endif
61960+
61961diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61962new file mode 100644
61963index 0000000..323ecf2
61964--- /dev/null
61965+++ b/include/linux/gralloc.h
61966@@ -0,0 +1,9 @@
61967+#ifndef __GRALLOC_H
61968+#define __GRALLOC_H
61969+
61970+void acl_free_all(void);
61971+int acl_alloc_stack_init(unsigned long size);
61972+void *acl_alloc(unsigned long len);
61973+void *acl_alloc_num(unsigned long num, unsigned long len);
61974+
61975+#endif
61976diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61977new file mode 100644
61978index 0000000..b30e9bc
61979--- /dev/null
61980+++ b/include/linux/grdefs.h
61981@@ -0,0 +1,140 @@
61982+#ifndef GRDEFS_H
61983+#define GRDEFS_H
61984+
61985+/* Begin grsecurity status declarations */
61986+
61987+enum {
61988+ GR_READY = 0x01,
61989+ GR_STATUS_INIT = 0x00 // disabled state
61990+};
61991+
61992+/* Begin ACL declarations */
61993+
61994+/* Role flags */
61995+
61996+enum {
61997+ GR_ROLE_USER = 0x0001,
61998+ GR_ROLE_GROUP = 0x0002,
61999+ GR_ROLE_DEFAULT = 0x0004,
62000+ GR_ROLE_SPECIAL = 0x0008,
62001+ GR_ROLE_AUTH = 0x0010,
62002+ GR_ROLE_NOPW = 0x0020,
62003+ GR_ROLE_GOD = 0x0040,
62004+ GR_ROLE_LEARN = 0x0080,
62005+ GR_ROLE_TPE = 0x0100,
62006+ GR_ROLE_DOMAIN = 0x0200,
62007+ GR_ROLE_PAM = 0x0400,
62008+ GR_ROLE_PERSIST = 0x0800
62009+};
62010+
62011+/* ACL Subject and Object mode flags */
62012+enum {
62013+ GR_DELETED = 0x80000000
62014+};
62015+
62016+/* ACL Object-only mode flags */
62017+enum {
62018+ GR_READ = 0x00000001,
62019+ GR_APPEND = 0x00000002,
62020+ GR_WRITE = 0x00000004,
62021+ GR_EXEC = 0x00000008,
62022+ GR_FIND = 0x00000010,
62023+ GR_INHERIT = 0x00000020,
62024+ GR_SETID = 0x00000040,
62025+ GR_CREATE = 0x00000080,
62026+ GR_DELETE = 0x00000100,
62027+ GR_LINK = 0x00000200,
62028+ GR_AUDIT_READ = 0x00000400,
62029+ GR_AUDIT_APPEND = 0x00000800,
62030+ GR_AUDIT_WRITE = 0x00001000,
62031+ GR_AUDIT_EXEC = 0x00002000,
62032+ GR_AUDIT_FIND = 0x00004000,
62033+ GR_AUDIT_INHERIT= 0x00008000,
62034+ GR_AUDIT_SETID = 0x00010000,
62035+ GR_AUDIT_CREATE = 0x00020000,
62036+ GR_AUDIT_DELETE = 0x00040000,
62037+ GR_AUDIT_LINK = 0x00080000,
62038+ GR_PTRACERD = 0x00100000,
62039+ GR_NOPTRACE = 0x00200000,
62040+ GR_SUPPRESS = 0x00400000,
62041+ GR_NOLEARN = 0x00800000,
62042+ GR_INIT_TRANSFER= 0x01000000
62043+};
62044+
62045+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
62046+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
62047+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
62048+
62049+/* ACL subject-only mode flags */
62050+enum {
62051+ GR_KILL = 0x00000001,
62052+ GR_VIEW = 0x00000002,
62053+ GR_PROTECTED = 0x00000004,
62054+ GR_LEARN = 0x00000008,
62055+ GR_OVERRIDE = 0x00000010,
62056+ /* just a placeholder, this mode is only used in userspace */
62057+ GR_DUMMY = 0x00000020,
62058+ GR_PROTSHM = 0x00000040,
62059+ GR_KILLPROC = 0x00000080,
62060+ GR_KILLIPPROC = 0x00000100,
62061+ /* just a placeholder, this mode is only used in userspace */
62062+ GR_NOTROJAN = 0x00000200,
62063+ GR_PROTPROCFD = 0x00000400,
62064+ GR_PROCACCT = 0x00000800,
62065+ GR_RELAXPTRACE = 0x00001000,
62066+ GR_NESTED = 0x00002000,
62067+ GR_INHERITLEARN = 0x00004000,
62068+ GR_PROCFIND = 0x00008000,
62069+ GR_POVERRIDE = 0x00010000,
62070+ GR_KERNELAUTH = 0x00020000,
62071+ GR_ATSECURE = 0x00040000,
62072+ GR_SHMEXEC = 0x00080000
62073+};
62074+
62075+enum {
62076+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
62077+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
62078+ GR_PAX_ENABLE_MPROTECT = 0x0004,
62079+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
62080+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
62081+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
62082+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
62083+ GR_PAX_DISABLE_MPROTECT = 0x0400,
62084+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
62085+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
62086+};
62087+
62088+enum {
62089+ GR_ID_USER = 0x01,
62090+ GR_ID_GROUP = 0x02,
62091+};
62092+
62093+enum {
62094+ GR_ID_ALLOW = 0x01,
62095+ GR_ID_DENY = 0x02,
62096+};
62097+
62098+#define GR_CRASH_RES 31
62099+#define GR_UIDTABLE_MAX 500
62100+
62101+/* begin resource learning section */
62102+enum {
62103+ GR_RLIM_CPU_BUMP = 60,
62104+ GR_RLIM_FSIZE_BUMP = 50000,
62105+ GR_RLIM_DATA_BUMP = 10000,
62106+ GR_RLIM_STACK_BUMP = 1000,
62107+ GR_RLIM_CORE_BUMP = 10000,
62108+ GR_RLIM_RSS_BUMP = 500000,
62109+ GR_RLIM_NPROC_BUMP = 1,
62110+ GR_RLIM_NOFILE_BUMP = 5,
62111+ GR_RLIM_MEMLOCK_BUMP = 50000,
62112+ GR_RLIM_AS_BUMP = 500000,
62113+ GR_RLIM_LOCKS_BUMP = 2,
62114+ GR_RLIM_SIGPENDING_BUMP = 5,
62115+ GR_RLIM_MSGQUEUE_BUMP = 10000,
62116+ GR_RLIM_NICE_BUMP = 1,
62117+ GR_RLIM_RTPRIO_BUMP = 1,
62118+ GR_RLIM_RTTIME_BUMP = 1000000
62119+};
62120+
62121+#endif
62122diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
62123new file mode 100644
62124index 0000000..da390f1
62125--- /dev/null
62126+++ b/include/linux/grinternal.h
62127@@ -0,0 +1,221 @@
62128+#ifndef __GRINTERNAL_H
62129+#define __GRINTERNAL_H
62130+
62131+#ifdef CONFIG_GRKERNSEC
62132+
62133+#include <linux/fs.h>
62134+#include <linux/mnt_namespace.h>
62135+#include <linux/nsproxy.h>
62136+#include <linux/gracl.h>
62137+#include <linux/grdefs.h>
62138+#include <linux/grmsg.h>
62139+
62140+void gr_add_learn_entry(const char *fmt, ...)
62141+ __attribute__ ((format (printf, 1, 2)));
62142+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62143+ const struct vfsmount *mnt);
62144+__u32 gr_check_create(const struct dentry *new_dentry,
62145+ const struct dentry *parent,
62146+ const struct vfsmount *mnt, const __u32 mode);
62147+int gr_check_protected_task(const struct task_struct *task);
62148+__u32 to_gr_audit(const __u32 reqmode);
62149+int gr_set_acls(const int type);
62150+int gr_apply_subject_to_task(struct task_struct *task);
62151+int gr_acl_is_enabled(void);
62152+char gr_roletype_to_char(void);
62153+
62154+void gr_handle_alertkill(struct task_struct *task);
62155+char *gr_to_filename(const struct dentry *dentry,
62156+ const struct vfsmount *mnt);
62157+char *gr_to_filename1(const struct dentry *dentry,
62158+ const struct vfsmount *mnt);
62159+char *gr_to_filename2(const struct dentry *dentry,
62160+ const struct vfsmount *mnt);
62161+char *gr_to_filename3(const struct dentry *dentry,
62162+ const struct vfsmount *mnt);
62163+
62164+extern int grsec_enable_ptrace_readexec;
62165+extern int grsec_enable_harden_ptrace;
62166+extern int grsec_enable_link;
62167+extern int grsec_enable_fifo;
62168+extern int grsec_enable_execve;
62169+extern int grsec_enable_shm;
62170+extern int grsec_enable_execlog;
62171+extern int grsec_enable_signal;
62172+extern int grsec_enable_audit_ptrace;
62173+extern int grsec_enable_forkfail;
62174+extern int grsec_enable_time;
62175+extern int grsec_enable_rofs;
62176+extern int grsec_enable_chroot_shmat;
62177+extern int grsec_enable_chroot_mount;
62178+extern int grsec_enable_chroot_double;
62179+extern int grsec_enable_chroot_pivot;
62180+extern int grsec_enable_chroot_chdir;
62181+extern int grsec_enable_chroot_chmod;
62182+extern int grsec_enable_chroot_mknod;
62183+extern int grsec_enable_chroot_fchdir;
62184+extern int grsec_enable_chroot_nice;
62185+extern int grsec_enable_chroot_execlog;
62186+extern int grsec_enable_chroot_caps;
62187+extern int grsec_enable_chroot_sysctl;
62188+extern int grsec_enable_chroot_unix;
62189+extern int grsec_enable_tpe;
62190+extern int grsec_tpe_gid;
62191+extern int grsec_enable_tpe_all;
62192+extern int grsec_enable_tpe_invert;
62193+extern int grsec_enable_socket_all;
62194+extern int grsec_socket_all_gid;
62195+extern int grsec_enable_socket_client;
62196+extern int grsec_socket_client_gid;
62197+extern int grsec_enable_socket_server;
62198+extern int grsec_socket_server_gid;
62199+extern int grsec_audit_gid;
62200+extern int grsec_enable_group;
62201+extern int grsec_enable_audit_textrel;
62202+extern int grsec_enable_log_rwxmaps;
62203+extern int grsec_enable_mount;
62204+extern int grsec_enable_chdir;
62205+extern int grsec_resource_logging;
62206+extern int grsec_enable_blackhole;
62207+extern int grsec_lastack_retries;
62208+extern int grsec_enable_brute;
62209+extern int grsec_lock;
62210+
62211+extern spinlock_t grsec_alert_lock;
62212+extern unsigned long grsec_alert_wtime;
62213+extern unsigned long grsec_alert_fyet;
62214+
62215+extern spinlock_t grsec_audit_lock;
62216+
62217+extern rwlock_t grsec_exec_file_lock;
62218+
62219+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62220+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62221+ (tsk)->exec_file->f_vfsmnt) : "/")
62222+
62223+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62224+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62225+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62226+
62227+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62228+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
62229+ (tsk)->exec_file->f_vfsmnt) : "/")
62230+
62231+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62232+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62233+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62234+
62235+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62236+
62237+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62238+
62239+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62240+ (task)->pid, (cred)->uid, \
62241+ (cred)->euid, (cred)->gid, (cred)->egid, \
62242+ gr_parent_task_fullpath(task), \
62243+ (task)->real_parent->comm, (task)->real_parent->pid, \
62244+ (pcred)->uid, (pcred)->euid, \
62245+ (pcred)->gid, (pcred)->egid
62246+
62247+#define GR_CHROOT_CAPS {{ \
62248+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62249+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62250+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62251+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62252+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62253+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62254+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
62255+
62256+#define security_learn(normal_msg,args...) \
62257+({ \
62258+ read_lock(&grsec_exec_file_lock); \
62259+ gr_add_learn_entry(normal_msg "\n", ## args); \
62260+ read_unlock(&grsec_exec_file_lock); \
62261+})
62262+
62263+enum {
62264+ GR_DO_AUDIT,
62265+ GR_DONT_AUDIT,
62266+ /* used for non-audit messages that we shouldn't kill the task on */
62267+ GR_DONT_AUDIT_GOOD
62268+};
62269+
62270+enum {
62271+ GR_TTYSNIFF,
62272+ GR_RBAC,
62273+ GR_RBAC_STR,
62274+ GR_STR_RBAC,
62275+ GR_RBAC_MODE2,
62276+ GR_RBAC_MODE3,
62277+ GR_FILENAME,
62278+ GR_SYSCTL_HIDDEN,
62279+ GR_NOARGS,
62280+ GR_ONE_INT,
62281+ GR_ONE_INT_TWO_STR,
62282+ GR_ONE_STR,
62283+ GR_STR_INT,
62284+ GR_TWO_STR_INT,
62285+ GR_TWO_INT,
62286+ GR_TWO_U64,
62287+ GR_THREE_INT,
62288+ GR_FIVE_INT_TWO_STR,
62289+ GR_TWO_STR,
62290+ GR_THREE_STR,
62291+ GR_FOUR_STR,
62292+ GR_STR_FILENAME,
62293+ GR_FILENAME_STR,
62294+ GR_FILENAME_TWO_INT,
62295+ GR_FILENAME_TWO_INT_STR,
62296+ GR_TEXTREL,
62297+ GR_PTRACE,
62298+ GR_RESOURCE,
62299+ GR_CAP,
62300+ GR_SIG,
62301+ GR_SIG2,
62302+ GR_CRASH1,
62303+ GR_CRASH2,
62304+ GR_PSACCT,
62305+ GR_RWXMAP
62306+};
62307+
62308+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62309+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62310+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62311+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62312+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62313+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62314+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62315+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62316+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62317+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62318+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62319+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62320+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62321+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62322+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62323+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62324+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62325+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62326+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62327+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62328+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62329+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62330+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62331+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62332+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62333+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62334+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62335+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62336+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62337+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62338+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62339+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62340+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62341+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62342+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62343+
62344+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62345+
62346+#endif
62347+
62348+#endif
62349diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62350new file mode 100644
62351index 0000000..ae576a1
62352--- /dev/null
62353+++ b/include/linux/grmsg.h
62354@@ -0,0 +1,109 @@
62355+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62356+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62357+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62358+#define GR_STOPMOD_MSG "denied modification of module state by "
62359+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62360+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62361+#define GR_IOPERM_MSG "denied use of ioperm() by "
62362+#define GR_IOPL_MSG "denied use of iopl() by "
62363+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62364+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62365+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62366+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62367+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62368+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62369+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62370+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62371+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62372+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62373+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62374+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62375+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62376+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62377+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62378+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62379+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62380+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62381+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62382+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62383+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62384+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62385+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62386+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62387+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62388+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62389+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
62390+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62391+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62392+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62393+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62394+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62395+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62396+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62397+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62398+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62399+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62400+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62401+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62402+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62403+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62404+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62405+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62406+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62407+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62408+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62409+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62410+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62411+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62412+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62413+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62414+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62415+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62416+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62417+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62418+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62419+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62420+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62421+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62422+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62423+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62424+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62425+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62426+#define GR_FAILFORK_MSG "failed fork with errno %s by "
62427+#define GR_NICE_CHROOT_MSG "denied priority change by "
62428+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62429+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62430+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62431+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62432+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62433+#define GR_TIME_MSG "time set by "
62434+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62435+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62436+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62437+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62438+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62439+#define GR_BIND_MSG "denied bind() by "
62440+#define GR_CONNECT_MSG "denied connect() by "
62441+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62442+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62443+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62444+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62445+#define GR_CAP_ACL_MSG "use of %s denied for "
62446+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62447+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62448+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62449+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62450+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62451+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62452+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62453+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62454+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62455+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62456+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62457+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62458+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62459+#define GR_VM86_MSG "denied use of vm86 by "
62460+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62461+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
62462+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62463+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
62464diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62465new file mode 100644
62466index 0000000..acd05db
62467--- /dev/null
62468+++ b/include/linux/grsecurity.h
62469@@ -0,0 +1,232 @@
62470+#ifndef GR_SECURITY_H
62471+#define GR_SECURITY_H
62472+#include <linux/fs.h>
62473+#include <linux/fs_struct.h>
62474+#include <linux/binfmts.h>
62475+#include <linux/gracl.h>
62476+
62477+/* notify of brain-dead configs */
62478+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62479+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62480+#endif
62481+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62482+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62483+#endif
62484+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62485+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62486+#endif
62487+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62488+#error "CONFIG_PAX enabled, but no PaX options are enabled."
62489+#endif
62490+
62491+#include <linux/compat.h>
62492+
62493+struct user_arg_ptr {
62494+#ifdef CONFIG_COMPAT
62495+ bool is_compat;
62496+#endif
62497+ union {
62498+ const char __user *const __user *native;
62499+#ifdef CONFIG_COMPAT
62500+ compat_uptr_t __user *compat;
62501+#endif
62502+ } ptr;
62503+};
62504+
62505+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62506+void gr_handle_brute_check(void);
62507+void gr_handle_kernel_exploit(void);
62508+int gr_process_user_ban(void);
62509+
62510+char gr_roletype_to_char(void);
62511+
62512+int gr_acl_enable_at_secure(void);
62513+
62514+int gr_check_user_change(int real, int effective, int fs);
62515+int gr_check_group_change(int real, int effective, int fs);
62516+
62517+void gr_del_task_from_ip_table(struct task_struct *p);
62518+
62519+int gr_pid_is_chrooted(struct task_struct *p);
62520+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62521+int gr_handle_chroot_nice(void);
62522+int gr_handle_chroot_sysctl(const int op);
62523+int gr_handle_chroot_setpriority(struct task_struct *p,
62524+ const int niceval);
62525+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62526+int gr_handle_chroot_chroot(const struct dentry *dentry,
62527+ const struct vfsmount *mnt);
62528+void gr_handle_chroot_chdir(struct path *path);
62529+int gr_handle_chroot_chmod(const struct dentry *dentry,
62530+ const struct vfsmount *mnt, const int mode);
62531+int gr_handle_chroot_mknod(const struct dentry *dentry,
62532+ const struct vfsmount *mnt, const int mode);
62533+int gr_handle_chroot_mount(const struct dentry *dentry,
62534+ const struct vfsmount *mnt,
62535+ const char *dev_name);
62536+int gr_handle_chroot_pivot(void);
62537+int gr_handle_chroot_unix(const pid_t pid);
62538+
62539+int gr_handle_rawio(const struct inode *inode);
62540+
62541+void gr_handle_ioperm(void);
62542+void gr_handle_iopl(void);
62543+
62544+umode_t gr_acl_umask(void);
62545+
62546+int gr_tpe_allow(const struct file *file);
62547+
62548+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62549+void gr_clear_chroot_entries(struct task_struct *task);
62550+
62551+void gr_log_forkfail(const int retval);
62552+void gr_log_timechange(void);
62553+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62554+void gr_log_chdir(const struct dentry *dentry,
62555+ const struct vfsmount *mnt);
62556+void gr_log_chroot_exec(const struct dentry *dentry,
62557+ const struct vfsmount *mnt);
62558+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62559+void gr_log_remount(const char *devname, const int retval);
62560+void gr_log_unmount(const char *devname, const int retval);
62561+void gr_log_mount(const char *from, const char *to, const int retval);
62562+void gr_log_textrel(struct vm_area_struct *vma);
62563+void gr_log_rwxmmap(struct file *file);
62564+void gr_log_rwxmprotect(struct file *file);
62565+
62566+int gr_handle_follow_link(const struct inode *parent,
62567+ const struct inode *inode,
62568+ const struct dentry *dentry,
62569+ const struct vfsmount *mnt);
62570+int gr_handle_fifo(const struct dentry *dentry,
62571+ const struct vfsmount *mnt,
62572+ const struct dentry *dir, const int flag,
62573+ const int acc_mode);
62574+int gr_handle_hardlink(const struct dentry *dentry,
62575+ const struct vfsmount *mnt,
62576+ struct inode *inode,
62577+ const int mode, const char *to);
62578+
62579+int gr_is_capable(const int cap);
62580+int gr_is_capable_nolog(const int cap);
62581+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62582+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62583+
62584+void gr_learn_resource(const struct task_struct *task, const int limit,
62585+ const unsigned long wanted, const int gt);
62586+void gr_copy_label(struct task_struct *tsk);
62587+void gr_handle_crash(struct task_struct *task, const int sig);
62588+int gr_handle_signal(const struct task_struct *p, const int sig);
62589+int gr_check_crash_uid(const uid_t uid);
62590+int gr_check_protected_task(const struct task_struct *task);
62591+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62592+int gr_acl_handle_mmap(const struct file *file,
62593+ const unsigned long prot);
62594+int gr_acl_handle_mprotect(const struct file *file,
62595+ const unsigned long prot);
62596+int gr_check_hidden_task(const struct task_struct *tsk);
62597+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62598+ const struct vfsmount *mnt);
62599+__u32 gr_acl_handle_utime(const struct dentry *dentry,
62600+ const struct vfsmount *mnt);
62601+__u32 gr_acl_handle_access(const struct dentry *dentry,
62602+ const struct vfsmount *mnt, const int fmode);
62603+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62604+ const struct vfsmount *mnt, umode_t *mode);
62605+__u32 gr_acl_handle_chown(const struct dentry *dentry,
62606+ const struct vfsmount *mnt);
62607+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62608+ const struct vfsmount *mnt);
62609+int gr_handle_ptrace(struct task_struct *task, const long request);
62610+int gr_handle_proc_ptrace(struct task_struct *task);
62611+__u32 gr_acl_handle_execve(const struct dentry *dentry,
62612+ const struct vfsmount *mnt);
62613+int gr_check_crash_exec(const struct file *filp);
62614+int gr_acl_is_enabled(void);
62615+void gr_set_kernel_label(struct task_struct *task);
62616+void gr_set_role_label(struct task_struct *task, const uid_t uid,
62617+ const gid_t gid);
62618+int gr_set_proc_label(const struct dentry *dentry,
62619+ const struct vfsmount *mnt,
62620+ const int unsafe_flags);
62621+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62622+ const struct vfsmount *mnt);
62623+__u32 gr_acl_handle_open(const struct dentry *dentry,
62624+ const struct vfsmount *mnt, int acc_mode);
62625+__u32 gr_acl_handle_creat(const struct dentry *dentry,
62626+ const struct dentry *p_dentry,
62627+ const struct vfsmount *p_mnt,
62628+ int open_flags, int acc_mode, const int imode);
62629+void gr_handle_create(const struct dentry *dentry,
62630+ const struct vfsmount *mnt);
62631+void gr_handle_proc_create(const struct dentry *dentry,
62632+ const struct inode *inode);
62633+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62634+ const struct dentry *parent_dentry,
62635+ const struct vfsmount *parent_mnt,
62636+ const int mode);
62637+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62638+ const struct dentry *parent_dentry,
62639+ const struct vfsmount *parent_mnt);
62640+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62641+ const struct vfsmount *mnt);
62642+void gr_handle_delete(const ino_t ino, const dev_t dev);
62643+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62644+ const struct vfsmount *mnt);
62645+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62646+ const struct dentry *parent_dentry,
62647+ const struct vfsmount *parent_mnt,
62648+ const char *from);
62649+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62650+ const struct dentry *parent_dentry,
62651+ const struct vfsmount *parent_mnt,
62652+ const struct dentry *old_dentry,
62653+ const struct vfsmount *old_mnt, const char *to);
62654+int gr_acl_handle_rename(struct dentry *new_dentry,
62655+ struct dentry *parent_dentry,
62656+ const struct vfsmount *parent_mnt,
62657+ struct dentry *old_dentry,
62658+ struct inode *old_parent_inode,
62659+ struct vfsmount *old_mnt, const char *newname);
62660+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62661+ struct dentry *old_dentry,
62662+ struct dentry *new_dentry,
62663+ struct vfsmount *mnt, const __u8 replace);
62664+__u32 gr_check_link(const struct dentry *new_dentry,
62665+ const struct dentry *parent_dentry,
62666+ const struct vfsmount *parent_mnt,
62667+ const struct dentry *old_dentry,
62668+ const struct vfsmount *old_mnt);
62669+int gr_acl_handle_filldir(const struct file *file, const char *name,
62670+ const unsigned int namelen, const ino_t ino);
62671+
62672+__u32 gr_acl_handle_unix(const struct dentry *dentry,
62673+ const struct vfsmount *mnt);
62674+void gr_acl_handle_exit(void);
62675+void gr_acl_handle_psacct(struct task_struct *task, const long code);
62676+int gr_acl_handle_procpidmem(const struct task_struct *task);
62677+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62678+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62679+void gr_audit_ptrace(struct task_struct *task);
62680+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62681+
62682+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62683+
62684+#ifdef CONFIG_GRKERNSEC
62685+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62686+void gr_handle_vm86(void);
62687+void gr_handle_mem_readwrite(u64 from, u64 to);
62688+
62689+void gr_log_badprocpid(const char *entry);
62690+
62691+extern int grsec_enable_dmesg;
62692+extern int grsec_disable_privio;
62693+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62694+extern int grsec_enable_chroot_findtask;
62695+#endif
62696+#ifdef CONFIG_GRKERNSEC_SETXID
62697+extern int grsec_enable_setxid;
62698+#endif
62699+#endif
62700+
62701+#endif
62702diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62703new file mode 100644
62704index 0000000..e7ffaaf
62705--- /dev/null
62706+++ b/include/linux/grsock.h
62707@@ -0,0 +1,19 @@
62708+#ifndef __GRSOCK_H
62709+#define __GRSOCK_H
62710+
62711+extern void gr_attach_curr_ip(const struct sock *sk);
62712+extern int gr_handle_sock_all(const int family, const int type,
62713+ const int protocol);
62714+extern int gr_handle_sock_server(const struct sockaddr *sck);
62715+extern int gr_handle_sock_server_other(const struct sock *sck);
62716+extern int gr_handle_sock_client(const struct sockaddr *sck);
62717+extern int gr_search_connect(struct socket * sock,
62718+ struct sockaddr_in * addr);
62719+extern int gr_search_bind(struct socket * sock,
62720+ struct sockaddr_in * addr);
62721+extern int gr_search_listen(struct socket * sock);
62722+extern int gr_search_accept(struct socket * sock);
62723+extern int gr_search_socket(const int domain, const int type,
62724+ const int protocol);
62725+
62726+#endif
62727diff --git a/include/linux/hid.h b/include/linux/hid.h
62728index 3a95da6..51986f1 100644
62729--- a/include/linux/hid.h
62730+++ b/include/linux/hid.h
62731@@ -696,7 +696,7 @@ struct hid_ll_driver {
62732 unsigned int code, int value);
62733
62734 int (*parse)(struct hid_device *hdev);
62735-};
62736+} __no_const;
62737
62738 #define PM_HINT_FULLON 1<<5
62739 #define PM_HINT_NORMAL 1<<1
62740diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62741index 3a93f73..b19d0b3 100644
62742--- a/include/linux/highmem.h
62743+++ b/include/linux/highmem.h
62744@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
62745 kunmap_atomic(kaddr, KM_USER0);
62746 }
62747
62748+static inline void sanitize_highpage(struct page *page)
62749+{
62750+ void *kaddr;
62751+ unsigned long flags;
62752+
62753+ local_irq_save(flags);
62754+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
62755+ clear_page(kaddr);
62756+ kunmap_atomic(kaddr, KM_CLEARPAGE);
62757+ local_irq_restore(flags);
62758+}
62759+
62760 static inline void zero_user_segments(struct page *page,
62761 unsigned start1, unsigned end1,
62762 unsigned start2, unsigned end2)
62763diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62764index 8e25a91..551b161 100644
62765--- a/include/linux/i2c.h
62766+++ b/include/linux/i2c.h
62767@@ -364,6 +364,7 @@ struct i2c_algorithm {
62768 /* To determine what the adapter supports */
62769 u32 (*functionality) (struct i2c_adapter *);
62770 };
62771+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62772
62773 /*
62774 * i2c_adapter is the structure used to identify a physical i2c bus along
62775diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62776index a6deef4..c56a7f2 100644
62777--- a/include/linux/i2o.h
62778+++ b/include/linux/i2o.h
62779@@ -564,7 +564,7 @@ struct i2o_controller {
62780 struct i2o_device *exec; /* Executive */
62781 #if BITS_PER_LONG == 64
62782 spinlock_t context_list_lock; /* lock for context_list */
62783- atomic_t context_list_counter; /* needed for unique contexts */
62784+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62785 struct list_head context_list; /* list of context id's
62786 and pointers */
62787 #endif
62788diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62789index 58404b0..439ed95 100644
62790--- a/include/linux/if_team.h
62791+++ b/include/linux/if_team.h
62792@@ -64,6 +64,7 @@ struct team_mode_ops {
62793 void (*port_leave)(struct team *team, struct team_port *port);
62794 void (*port_change_mac)(struct team *team, struct team_port *port);
62795 };
62796+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62797
62798 enum team_option_type {
62799 TEAM_OPTION_TYPE_U32,
62800@@ -112,7 +113,7 @@ struct team {
62801 struct list_head option_list;
62802
62803 const struct team_mode *mode;
62804- struct team_mode_ops ops;
62805+ team_mode_ops_no_const ops;
62806 long mode_priv[TEAM_MODE_PRIV_LONGS];
62807 };
62808
62809diff --git a/include/linux/init.h b/include/linux/init.h
62810index 6b95109..4aca62c 100644
62811--- a/include/linux/init.h
62812+++ b/include/linux/init.h
62813@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
62814
62815 /* Each module must use one module_init(). */
62816 #define module_init(initfn) \
62817- static inline initcall_t __inittest(void) \
62818+ static inline __used initcall_t __inittest(void) \
62819 { return initfn; } \
62820 int init_module(void) __attribute__((alias(#initfn)));
62821
62822 /* This is only required if you want to be unloadable. */
62823 #define module_exit(exitfn) \
62824- static inline exitcall_t __exittest(void) \
62825+ static inline __used exitcall_t __exittest(void) \
62826 { return exitfn; } \
62827 void cleanup_module(void) __attribute__((alias(#exitfn)));
62828
62829diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62830index 9c66b1a..a3fdded 100644
62831--- a/include/linux/init_task.h
62832+++ b/include/linux/init_task.h
62833@@ -127,6 +127,12 @@ extern struct cred init_cred;
62834
62835 #define INIT_TASK_COMM "swapper"
62836
62837+#ifdef CONFIG_X86
62838+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62839+#else
62840+#define INIT_TASK_THREAD_INFO
62841+#endif
62842+
62843 /*
62844 * INIT_TASK is used to set up the first task table, touch at
62845 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62846@@ -165,6 +171,7 @@ extern struct cred init_cred;
62847 RCU_INIT_POINTER(.cred, &init_cred), \
62848 .comm = INIT_TASK_COMM, \
62849 .thread = INIT_THREAD, \
62850+ INIT_TASK_THREAD_INFO \
62851 .fs = &init_fs, \
62852 .files = &init_files, \
62853 .signal = &init_signals, \
62854diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62855index e6ca56d..8583707 100644
62856--- a/include/linux/intel-iommu.h
62857+++ b/include/linux/intel-iommu.h
62858@@ -296,7 +296,7 @@ struct iommu_flush {
62859 u8 fm, u64 type);
62860 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62861 unsigned int size_order, u64 type);
62862-};
62863+} __no_const;
62864
62865 enum {
62866 SR_DMAR_FECTL_REG,
62867diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62868index a64b00e..464d8bc 100644
62869--- a/include/linux/interrupt.h
62870+++ b/include/linux/interrupt.h
62871@@ -441,7 +441,7 @@ enum
62872 /* map softirq index to softirq name. update 'softirq_to_name' in
62873 * kernel/softirq.c when adding a new softirq.
62874 */
62875-extern char *softirq_to_name[NR_SOFTIRQS];
62876+extern const char * const softirq_to_name[NR_SOFTIRQS];
62877
62878 /* softirq mask and active fields moved to irq_cpustat_t in
62879 * asm/hardirq.h to get better cache usage. KAO
62880@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62881
62882 struct softirq_action
62883 {
62884- void (*action)(struct softirq_action *);
62885+ void (*action)(void);
62886 };
62887
62888 asmlinkage void do_softirq(void);
62889 asmlinkage void __do_softirq(void);
62890-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62891+extern void open_softirq(int nr, void (*action)(void));
62892 extern void softirq_init(void);
62893 static inline void __raise_softirq_irqoff(unsigned int nr)
62894 {
62895diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62896index 3875719..4cd454c 100644
62897--- a/include/linux/kallsyms.h
62898+++ b/include/linux/kallsyms.h
62899@@ -15,7 +15,8 @@
62900
62901 struct module;
62902
62903-#ifdef CONFIG_KALLSYMS
62904+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62905+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62906 /* Lookup the address for a symbol. Returns 0 if not found. */
62907 unsigned long kallsyms_lookup_name(const char *name);
62908
62909@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62910 /* Stupid that this does nothing, but I didn't create this mess. */
62911 #define __print_symbol(fmt, addr)
62912 #endif /*CONFIG_KALLSYMS*/
62913+#else /* when included by kallsyms.c, vsnprintf.c, or
62914+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62915+extern void __print_symbol(const char *fmt, unsigned long address);
62916+extern int sprint_backtrace(char *buffer, unsigned long address);
62917+extern int sprint_symbol(char *buffer, unsigned long address);
62918+const char *kallsyms_lookup(unsigned long addr,
62919+ unsigned long *symbolsize,
62920+ unsigned long *offset,
62921+ char **modname, char *namebuf);
62922+#endif
62923
62924 /* This macro allows us to keep printk typechecking */
62925 static __printf(1, 2)
62926diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62927index c4d2fc1..5df9c19 100644
62928--- a/include/linux/kgdb.h
62929+++ b/include/linux/kgdb.h
62930@@ -53,7 +53,7 @@ extern int kgdb_connected;
62931 extern int kgdb_io_module_registered;
62932
62933 extern atomic_t kgdb_setting_breakpoint;
62934-extern atomic_t kgdb_cpu_doing_single_step;
62935+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62936
62937 extern struct task_struct *kgdb_usethread;
62938 extern struct task_struct *kgdb_contthread;
62939@@ -252,7 +252,7 @@ struct kgdb_arch {
62940 void (*disable_hw_break)(struct pt_regs *regs);
62941 void (*remove_all_hw_break)(void);
62942 void (*correct_hw_break)(void);
62943-};
62944+} __do_const;
62945
62946 /**
62947 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62948@@ -277,7 +277,7 @@ struct kgdb_io {
62949 void (*pre_exception) (void);
62950 void (*post_exception) (void);
62951 int is_console;
62952-};
62953+} __do_const;
62954
62955 extern struct kgdb_arch arch_kgdb_ops;
62956
62957diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62958index 0fb48ef..1b680b2 100644
62959--- a/include/linux/kmod.h
62960+++ b/include/linux/kmod.h
62961@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62962 * usually useless though. */
62963 extern __printf(2, 3)
62964 int __request_module(bool wait, const char *name, ...);
62965+extern __printf(3, 4)
62966+int ___request_module(bool wait, char *param_name, const char *name, ...);
62967 #define request_module(mod...) __request_module(true, mod)
62968 #define request_module_nowait(mod...) __request_module(false, mod)
62969 #define try_then_request_module(x, mod...) \
62970diff --git a/include/linux/kref.h b/include/linux/kref.h
62971index 9c07dce..a92fa71 100644
62972--- a/include/linux/kref.h
62973+++ b/include/linux/kref.h
62974@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62975 static inline int kref_sub(struct kref *kref, unsigned int count,
62976 void (*release)(struct kref *kref))
62977 {
62978- WARN_ON(release == NULL);
62979+ BUG_ON(release == NULL);
62980
62981 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62982 release(kref);
62983diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62984index 4c4e83d..5f16617 100644
62985--- a/include/linux/kvm_host.h
62986+++ b/include/linux/kvm_host.h
62987@@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62988 void vcpu_load(struct kvm_vcpu *vcpu);
62989 void vcpu_put(struct kvm_vcpu *vcpu);
62990
62991-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62992+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62993 struct module *module);
62994 void kvm_exit(void);
62995
62996@@ -416,20 +416,20 @@ void kvm_get_pfn(pfn_t pfn);
62997 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
62998 int len);
62999 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
63000- unsigned long len);
63001-int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
63002+ unsigned long len) __size_overflow(4);
63003+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4);
63004 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63005- void *data, unsigned long len);
63006+ void *data, unsigned long len) __size_overflow(4);
63007 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
63008 int offset, int len);
63009 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
63010- unsigned long len);
63011+ unsigned long len) __size_overflow(2,4);
63012 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63013- void *data, unsigned long len);
63014+ void *data, unsigned long len) __size_overflow(4);
63015 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
63016 gpa_t gpa);
63017 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
63018-int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
63019+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3);
63020 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
63021 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
63022 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
63023@@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
63024 struct kvm_guest_debug *dbg);
63025 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
63026
63027-int kvm_arch_init(void *opaque);
63028+int kvm_arch_init(const void *opaque);
63029 void kvm_arch_exit(void);
63030
63031 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
63032@@ -727,7 +727,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
63033 int kvm_set_irq_routing(struct kvm *kvm,
63034 const struct kvm_irq_routing_entry *entries,
63035 unsigned nr,
63036- unsigned flags);
63037+ unsigned flags) __size_overflow(3);
63038 void kvm_free_irq_routing(struct kvm *kvm);
63039
63040 #else
63041diff --git a/include/linux/libata.h b/include/linux/libata.h
63042index cafc09a..d7e7829 100644
63043--- a/include/linux/libata.h
63044+++ b/include/linux/libata.h
63045@@ -909,7 +909,7 @@ struct ata_port_operations {
63046 * fields must be pointers.
63047 */
63048 const struct ata_port_operations *inherits;
63049-};
63050+} __do_const;
63051
63052 struct ata_port_info {
63053 unsigned long flags;
63054diff --git a/include/linux/mca.h b/include/linux/mca.h
63055index 3797270..7765ede 100644
63056--- a/include/linux/mca.h
63057+++ b/include/linux/mca.h
63058@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
63059 int region);
63060 void * (*mca_transform_memory)(struct mca_device *,
63061 void *memory);
63062-};
63063+} __no_const;
63064
63065 struct mca_bus {
63066 u64 default_dma_mask;
63067diff --git a/include/linux/memory.h b/include/linux/memory.h
63068index 1ac7f6e..a5794d0 100644
63069--- a/include/linux/memory.h
63070+++ b/include/linux/memory.h
63071@@ -143,7 +143,7 @@ struct memory_accessor {
63072 size_t count);
63073 ssize_t (*write)(struct memory_accessor *, const char *buf,
63074 off_t offset, size_t count);
63075-};
63076+} __no_const;
63077
63078 /*
63079 * Kernel text modification mutex, used for code patching. Users of this lock
63080diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
63081index 9970337..9444122 100644
63082--- a/include/linux/mfd/abx500.h
63083+++ b/include/linux/mfd/abx500.h
63084@@ -188,6 +188,7 @@ struct abx500_ops {
63085 int (*event_registers_startup_state_get) (struct device *, u8 *);
63086 int (*startup_irq_enabled) (struct device *, unsigned int);
63087 };
63088+typedef struct abx500_ops __no_const abx500_ops_no_const;
63089
63090 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
63091 void abx500_remove_ops(struct device *dev);
63092diff --git a/include/linux/mm.h b/include/linux/mm.h
63093index 17b27cd..baea141 100644
63094--- a/include/linux/mm.h
63095+++ b/include/linux/mm.h
63096@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
63097
63098 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
63099 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
63100+
63101+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63102+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
63103+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
63104+#else
63105 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
63106+#endif
63107+
63108 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
63109 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
63110
63111@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
63112 int set_page_dirty_lock(struct page *page);
63113 int clear_page_dirty_for_io(struct page *page);
63114
63115-/* Is the vma a continuation of the stack vma above it? */
63116-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
63117-{
63118- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
63119-}
63120-
63121-static inline int stack_guard_page_start(struct vm_area_struct *vma,
63122- unsigned long addr)
63123-{
63124- return (vma->vm_flags & VM_GROWSDOWN) &&
63125- (vma->vm_start == addr) &&
63126- !vma_growsdown(vma->vm_prev, addr);
63127-}
63128-
63129-/* Is the vma a continuation of the stack vma below it? */
63130-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
63131-{
63132- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
63133-}
63134-
63135-static inline int stack_guard_page_end(struct vm_area_struct *vma,
63136- unsigned long addr)
63137-{
63138- return (vma->vm_flags & VM_GROWSUP) &&
63139- (vma->vm_end == addr) &&
63140- !vma_growsup(vma->vm_next, addr);
63141-}
63142-
63143 extern unsigned long move_page_tables(struct vm_area_struct *vma,
63144 unsigned long old_addr, struct vm_area_struct *new_vma,
63145 unsigned long new_addr, unsigned long len);
63146@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
63147 }
63148 #endif
63149
63150+#ifdef CONFIG_MMU
63151+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
63152+#else
63153+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
63154+{
63155+ return __pgprot(0);
63156+}
63157+#endif
63158+
63159 int vma_wants_writenotify(struct vm_area_struct *vma);
63160
63161 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
63162@@ -1152,8 +1140,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
63163 {
63164 return 0;
63165 }
63166+
63167+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
63168+ unsigned long address)
63169+{
63170+ return 0;
63171+}
63172 #else
63173 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63174+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63175 #endif
63176
63177 #ifdef __PAGETABLE_PMD_FOLDED
63178@@ -1162,8 +1157,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
63179 {
63180 return 0;
63181 }
63182+
63183+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
63184+ unsigned long address)
63185+{
63186+ return 0;
63187+}
63188 #else
63189 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
63190+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
63191 #endif
63192
63193 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
63194@@ -1181,11 +1183,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
63195 NULL: pud_offset(pgd, address);
63196 }
63197
63198+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
63199+{
63200+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
63201+ NULL: pud_offset(pgd, address);
63202+}
63203+
63204 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
63205 {
63206 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
63207 NULL: pmd_offset(pud, address);
63208 }
63209+
63210+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
63211+{
63212+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
63213+ NULL: pmd_offset(pud, address);
63214+}
63215 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
63216
63217 #if USE_SPLIT_PTLOCKS
63218@@ -1409,6 +1423,7 @@ out:
63219 }
63220
63221 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63222+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63223
63224 extern unsigned long do_brk(unsigned long, unsigned long);
63225
63226@@ -1466,6 +1481,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
63227 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63228 struct vm_area_struct **pprev);
63229
63230+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
63231+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
63232+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63233+
63234 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63235 NULL if none. Assume start_addr < end_addr. */
63236 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
63237@@ -1494,15 +1513,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
63238 return vma;
63239 }
63240
63241-#ifdef CONFIG_MMU
63242-pgprot_t vm_get_page_prot(unsigned long vm_flags);
63243-#else
63244-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
63245-{
63246- return __pgprot(0);
63247-}
63248-#endif
63249-
63250 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63251 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63252 unsigned long pfn, unsigned long size, pgprot_t);
63253@@ -1606,7 +1616,7 @@ extern int unpoison_memory(unsigned long pfn);
63254 extern int sysctl_memory_failure_early_kill;
63255 extern int sysctl_memory_failure_recovery;
63256 extern void shake_page(struct page *p, int access);
63257-extern atomic_long_t mce_bad_pages;
63258+extern atomic_long_unchecked_t mce_bad_pages;
63259 extern int soft_offline_page(struct page *page, int flags);
63260
63261 extern void dump_page(struct page *page);
63262@@ -1637,5 +1647,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
63263 static inline bool page_is_guard(struct page *page) { return false; }
63264 #endif /* CONFIG_DEBUG_PAGEALLOC */
63265
63266+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63267+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63268+#else
63269+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63270+#endif
63271+
63272 #endif /* __KERNEL__ */
63273 #endif /* _LINUX_MM_H */
63274diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
63275index 3cc3062..efeaeb7 100644
63276--- a/include/linux/mm_types.h
63277+++ b/include/linux/mm_types.h
63278@@ -252,6 +252,8 @@ struct vm_area_struct {
63279 #ifdef CONFIG_NUMA
63280 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63281 #endif
63282+
63283+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63284 };
63285
63286 struct core_thread {
63287@@ -326,7 +328,7 @@ struct mm_struct {
63288 unsigned long def_flags;
63289 unsigned long nr_ptes; /* Page table pages */
63290 unsigned long start_code, end_code, start_data, end_data;
63291- unsigned long start_brk, brk, start_stack;
63292+ unsigned long brk_gap, start_brk, brk, start_stack;
63293 unsigned long arg_start, arg_end, env_start, env_end;
63294
63295 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
63296@@ -388,6 +390,24 @@ struct mm_struct {
63297 #ifdef CONFIG_CPUMASK_OFFSTACK
63298 struct cpumask cpumask_allocation;
63299 #endif
63300+
63301+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63302+ unsigned long pax_flags;
63303+#endif
63304+
63305+#ifdef CONFIG_PAX_DLRESOLVE
63306+ unsigned long call_dl_resolve;
63307+#endif
63308+
63309+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63310+ unsigned long call_syscall;
63311+#endif
63312+
63313+#ifdef CONFIG_PAX_ASLR
63314+ unsigned long delta_mmap; /* randomized offset */
63315+ unsigned long delta_stack; /* randomized offset */
63316+#endif
63317+
63318 };
63319
63320 static inline void mm_init_cpumask(struct mm_struct *mm)
63321diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
63322index 1d1b1e1..2a13c78 100644
63323--- a/include/linux/mmu_notifier.h
63324+++ b/include/linux/mmu_notifier.h
63325@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
63326 */
63327 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63328 ({ \
63329- pte_t __pte; \
63330+ pte_t ___pte; \
63331 struct vm_area_struct *___vma = __vma; \
63332 unsigned long ___address = __address; \
63333- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63334+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63335 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63336- __pte; \
63337+ ___pte; \
63338 })
63339
63340 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
63341diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
63342index 650ba2f..af0a58c 100644
63343--- a/include/linux/mmzone.h
63344+++ b/include/linux/mmzone.h
63345@@ -379,7 +379,7 @@ struct zone {
63346 unsigned long flags; /* zone flags, see below */
63347
63348 /* Zone statistics */
63349- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63350+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63351
63352 /*
63353 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
63354diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
63355index 83ac071..2656e0e 100644
63356--- a/include/linux/mod_devicetable.h
63357+++ b/include/linux/mod_devicetable.h
63358@@ -12,7 +12,7 @@
63359 typedef unsigned long kernel_ulong_t;
63360 #endif
63361
63362-#define PCI_ANY_ID (~0)
63363+#define PCI_ANY_ID ((__u16)~0)
63364
63365 struct pci_device_id {
63366 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63367@@ -131,7 +131,7 @@ struct usb_device_id {
63368 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63369 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63370
63371-#define HID_ANY_ID (~0)
63372+#define HID_ANY_ID (~0U)
63373
63374 struct hid_device_id {
63375 __u16 bus;
63376diff --git a/include/linux/module.h b/include/linux/module.h
63377index 4598bf0..e069d7f 100644
63378--- a/include/linux/module.h
63379+++ b/include/linux/module.h
63380@@ -17,6 +17,7 @@
63381 #include <linux/moduleparam.h>
63382 #include <linux/tracepoint.h>
63383 #include <linux/export.h>
63384+#include <linux/fs.h>
63385
63386 #include <linux/percpu.h>
63387 #include <asm/module.h>
63388@@ -275,19 +276,16 @@ struct module
63389 int (*init)(void);
63390
63391 /* If this is non-NULL, vfree after init() returns */
63392- void *module_init;
63393+ void *module_init_rx, *module_init_rw;
63394
63395 /* Here is the actual code + data, vfree'd on unload. */
63396- void *module_core;
63397+ void *module_core_rx, *module_core_rw;
63398
63399 /* Here are the sizes of the init and core sections */
63400- unsigned int init_size, core_size;
63401+ unsigned int init_size_rw, core_size_rw;
63402
63403 /* The size of the executable code in each section. */
63404- unsigned int init_text_size, core_text_size;
63405-
63406- /* Size of RO sections of the module (text+rodata) */
63407- unsigned int init_ro_size, core_ro_size;
63408+ unsigned int init_size_rx, core_size_rx;
63409
63410 /* Arch-specific module values */
63411 struct mod_arch_specific arch;
63412@@ -343,6 +341,10 @@ struct module
63413 #ifdef CONFIG_EVENT_TRACING
63414 struct ftrace_event_call **trace_events;
63415 unsigned int num_trace_events;
63416+ struct file_operations trace_id;
63417+ struct file_operations trace_enable;
63418+ struct file_operations trace_format;
63419+ struct file_operations trace_filter;
63420 #endif
63421 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63422 unsigned int num_ftrace_callsites;
63423@@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
63424 bool is_module_percpu_address(unsigned long addr);
63425 bool is_module_text_address(unsigned long addr);
63426
63427+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63428+{
63429+
63430+#ifdef CONFIG_PAX_KERNEXEC
63431+ if (ktla_ktva(addr) >= (unsigned long)start &&
63432+ ktla_ktva(addr) < (unsigned long)start + size)
63433+ return 1;
63434+#endif
63435+
63436+ return ((void *)addr >= start && (void *)addr < start + size);
63437+}
63438+
63439+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63440+{
63441+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63442+}
63443+
63444+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63445+{
63446+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63447+}
63448+
63449+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63450+{
63451+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63452+}
63453+
63454+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63455+{
63456+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63457+}
63458+
63459 static inline int within_module_core(unsigned long addr, struct module *mod)
63460 {
63461- return (unsigned long)mod->module_core <= addr &&
63462- addr < (unsigned long)mod->module_core + mod->core_size;
63463+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63464 }
63465
63466 static inline int within_module_init(unsigned long addr, struct module *mod)
63467 {
63468- return (unsigned long)mod->module_init <= addr &&
63469- addr < (unsigned long)mod->module_init + mod->init_size;
63470+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63471 }
63472
63473 /* Search for module by name: must hold module_mutex. */
63474diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63475index b2be02e..72d2f78 100644
63476--- a/include/linux/moduleloader.h
63477+++ b/include/linux/moduleloader.h
63478@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63479
63480 /* Allocator used for allocating struct module, core sections and init
63481 sections. Returns NULL on failure. */
63482-void *module_alloc(unsigned long size);
63483+void *module_alloc(unsigned long size) __size_overflow(1);
63484+
63485+#ifdef CONFIG_PAX_KERNEXEC
63486+void *module_alloc_exec(unsigned long size) __size_overflow(1);
63487+#else
63488+#define module_alloc_exec(x) module_alloc(x)
63489+#endif
63490
63491 /* Free memory returned from module_alloc. */
63492 void module_free(struct module *mod, void *module_region);
63493
63494+#ifdef CONFIG_PAX_KERNEXEC
63495+void module_free_exec(struct module *mod, void *module_region);
63496+#else
63497+#define module_free_exec(x, y) module_free((x), (y))
63498+#endif
63499+
63500 /* Apply the given relocation to the (simplified) ELF. Return -error
63501 or 0. */
63502 int apply_relocate(Elf_Shdr *sechdrs,
63503diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63504index c47f4d6..23f9bdb 100644
63505--- a/include/linux/moduleparam.h
63506+++ b/include/linux/moduleparam.h
63507@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
63508 * @len is usually just sizeof(string).
63509 */
63510 #define module_param_string(name, string, len, perm) \
63511- static const struct kparam_string __param_string_##name \
63512+ static const struct kparam_string __param_string_##name __used \
63513 = { len, string }; \
63514 __module_param_call(MODULE_PARAM_PREFIX, name, \
63515 &param_ops_string, \
63516@@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63517 */
63518 #define module_param_array_named(name, array, type, nump, perm) \
63519 param_check_##type(name, &(array)[0]); \
63520- static const struct kparam_array __param_arr_##name \
63521+ static const struct kparam_array __param_arr_##name __used \
63522 = { .max = ARRAY_SIZE(array), .num = nump, \
63523 .ops = &param_ops_##type, \
63524 .elemsize = sizeof(array[0]), .elem = array }; \
63525diff --git a/include/linux/namei.h b/include/linux/namei.h
63526index ffc0213..2c1f2cb 100644
63527--- a/include/linux/namei.h
63528+++ b/include/linux/namei.h
63529@@ -24,7 +24,7 @@ struct nameidata {
63530 unsigned seq;
63531 int last_type;
63532 unsigned depth;
63533- char *saved_names[MAX_NESTED_LINKS + 1];
63534+ const char *saved_names[MAX_NESTED_LINKS + 1];
63535
63536 /* Intent data */
63537 union {
63538@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63539 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63540 extern void unlock_rename(struct dentry *, struct dentry *);
63541
63542-static inline void nd_set_link(struct nameidata *nd, char *path)
63543+static inline void nd_set_link(struct nameidata *nd, const char *path)
63544 {
63545 nd->saved_names[nd->depth] = path;
63546 }
63547
63548-static inline char *nd_get_link(struct nameidata *nd)
63549+static inline const char *nd_get_link(const struct nameidata *nd)
63550 {
63551 return nd->saved_names[nd->depth];
63552 }
63553diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63554index 7e472b7..212d381 100644
63555--- a/include/linux/netdevice.h
63556+++ b/include/linux/netdevice.h
63557@@ -1002,6 +1002,7 @@ struct net_device_ops {
63558 int (*ndo_neigh_construct)(struct neighbour *n);
63559 void (*ndo_neigh_destroy)(struct neighbour *n);
63560 };
63561+typedef struct net_device_ops __no_const net_device_ops_no_const;
63562
63563 /*
63564 * The DEVICE structure.
63565@@ -1063,7 +1064,7 @@ struct net_device {
63566 int iflink;
63567
63568 struct net_device_stats stats;
63569- atomic_long_t rx_dropped; /* dropped packets by core network
63570+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63571 * Do not use this in drivers.
63572 */
63573
63574diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63575new file mode 100644
63576index 0000000..33f4af8
63577--- /dev/null
63578+++ b/include/linux/netfilter/xt_gradm.h
63579@@ -0,0 +1,9 @@
63580+#ifndef _LINUX_NETFILTER_XT_GRADM_H
63581+#define _LINUX_NETFILTER_XT_GRADM_H 1
63582+
63583+struct xt_gradm_mtinfo {
63584+ __u16 flags;
63585+ __u16 invflags;
63586+};
63587+
63588+#endif
63589diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63590index c65a18a..0c05f3a 100644
63591--- a/include/linux/of_pdt.h
63592+++ b/include/linux/of_pdt.h
63593@@ -32,7 +32,7 @@ struct of_pdt_ops {
63594
63595 /* return 0 on success; fill in 'len' with number of bytes in path */
63596 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63597-};
63598+} __no_const;
63599
63600 extern void *prom_early_alloc(unsigned long size);
63601
63602diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63603index a4c5624..2dabfb7 100644
63604--- a/include/linux/oprofile.h
63605+++ b/include/linux/oprofile.h
63606@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63607 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63608 char const * name, ulong * val);
63609
63610-/** Create a file for read-only access to an atomic_t. */
63611+/** Create a file for read-only access to an atomic_unchecked_t. */
63612 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63613- char const * name, atomic_t * val);
63614+ char const * name, atomic_unchecked_t * val);
63615
63616 /** create a directory */
63617 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63618@@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
63619 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
63620 * Returns 0 on success, < 0 on error.
63621 */
63622-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
63623+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
63624
63625 /** lock for read/write safety */
63626 extern raw_spinlock_t oprofilefs_lock;
63627diff --git a/include/linux/padata.h b/include/linux/padata.h
63628index 4633b2f..988bc08 100644
63629--- a/include/linux/padata.h
63630+++ b/include/linux/padata.h
63631@@ -129,7 +129,7 @@ struct parallel_data {
63632 struct padata_instance *pinst;
63633 struct padata_parallel_queue __percpu *pqueue;
63634 struct padata_serial_queue __percpu *squeue;
63635- atomic_t seq_nr;
63636+ atomic_unchecked_t seq_nr;
63637 atomic_t reorder_objects;
63638 atomic_t refcnt;
63639 unsigned int max_seq_nr;
63640diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63641index abb2776..d8b8e15 100644
63642--- a/include/linux/perf_event.h
63643+++ b/include/linux/perf_event.h
63644@@ -750,8 +750,8 @@ struct perf_event {
63645
63646 enum perf_event_active_state state;
63647 unsigned int attach_state;
63648- local64_t count;
63649- atomic64_t child_count;
63650+ local64_t count; /* PaX: fix it one day */
63651+ atomic64_unchecked_t child_count;
63652
63653 /*
63654 * These are the total time in nanoseconds that the event
63655@@ -802,8 +802,8 @@ struct perf_event {
63656 * These accumulate total time (in nanoseconds) that children
63657 * events have been enabled and running, respectively.
63658 */
63659- atomic64_t child_total_time_enabled;
63660- atomic64_t child_total_time_running;
63661+ atomic64_unchecked_t child_total_time_enabled;
63662+ atomic64_unchecked_t child_total_time_running;
63663
63664 /*
63665 * Protect attach/detach and child_list:
63666diff --git a/include/linux/personality.h b/include/linux/personality.h
63667index 8fc7dd1a..c19d89e 100644
63668--- a/include/linux/personality.h
63669+++ b/include/linux/personality.h
63670@@ -44,6 +44,7 @@ enum {
63671 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63672 ADDR_NO_RANDOMIZE | \
63673 ADDR_COMPAT_LAYOUT | \
63674+ ADDR_LIMIT_3GB | \
63675 MMAP_PAGE_ZERO)
63676
63677 /*
63678diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63679index 0072a53..c5dcca5 100644
63680--- a/include/linux/pipe_fs_i.h
63681+++ b/include/linux/pipe_fs_i.h
63682@@ -47,9 +47,9 @@ struct pipe_buffer {
63683 struct pipe_inode_info {
63684 wait_queue_head_t wait;
63685 unsigned int nrbufs, curbuf, buffers;
63686- unsigned int readers;
63687- unsigned int writers;
63688- unsigned int waiting_writers;
63689+ atomic_t readers;
63690+ atomic_t writers;
63691+ atomic_t waiting_writers;
63692 unsigned int r_counter;
63693 unsigned int w_counter;
63694 struct page *tmp_page;
63695diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63696index 609daae..5392427 100644
63697--- a/include/linux/pm_runtime.h
63698+++ b/include/linux/pm_runtime.h
63699@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63700
63701 static inline void pm_runtime_mark_last_busy(struct device *dev)
63702 {
63703- ACCESS_ONCE(dev->power.last_busy) = jiffies;
63704+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63705 }
63706
63707 #else /* !CONFIG_PM_RUNTIME */
63708diff --git a/include/linux/poison.h b/include/linux/poison.h
63709index 2110a81..13a11bb 100644
63710--- a/include/linux/poison.h
63711+++ b/include/linux/poison.h
63712@@ -19,8 +19,8 @@
63713 * under normal circumstances, used to verify that nobody uses
63714 * non-initialized list entries.
63715 */
63716-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63717-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63718+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63719+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63720
63721 /********** include/linux/timer.h **********/
63722 /*
63723diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63724index 58969b2..ead129b 100644
63725--- a/include/linux/preempt.h
63726+++ b/include/linux/preempt.h
63727@@ -123,7 +123,7 @@ struct preempt_ops {
63728 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63729 void (*sched_out)(struct preempt_notifier *notifier,
63730 struct task_struct *next);
63731-};
63732+} __no_const;
63733
63734 /**
63735 * preempt_notifier - key for installing preemption notifiers
63736diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63737index 85c5073..51fac8b 100644
63738--- a/include/linux/proc_fs.h
63739+++ b/include/linux/proc_fs.h
63740@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63741 return proc_create_data(name, mode, parent, proc_fops, NULL);
63742 }
63743
63744+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63745+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63746+{
63747+#ifdef CONFIG_GRKERNSEC_PROC_USER
63748+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63749+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63750+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63751+#else
63752+ return proc_create_data(name, mode, parent, proc_fops, NULL);
63753+#endif
63754+}
63755+
63756 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63757 umode_t mode, struct proc_dir_entry *base,
63758 read_proc_t *read_proc, void * data)
63759@@ -258,7 +270,7 @@ union proc_op {
63760 int (*proc_show)(struct seq_file *m,
63761 struct pid_namespace *ns, struct pid *pid,
63762 struct task_struct *task);
63763-};
63764+} __no_const;
63765
63766 struct ctl_table_header;
63767 struct ctl_table;
63768diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
63769index c2f1f6a..6fdb196 100644
63770--- a/include/linux/ptrace.h
63771+++ b/include/linux/ptrace.h
63772@@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
63773 if (unlikely(ptrace_event_enabled(current, event))) {
63774 current->ptrace_message = message;
63775 ptrace_notify((event << 8) | SIGTRAP);
63776- } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
63777+ } else if (event == PTRACE_EVENT_EXEC) {
63778 /* legacy EXEC report via SIGTRAP */
63779- send_sig(SIGTRAP, current, 0);
63780+ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
63781+ send_sig(SIGTRAP, current, 0);
63782 }
63783 }
63784
63785diff --git a/include/linux/random.h b/include/linux/random.h
63786index 8f74538..02a1012 100644
63787--- a/include/linux/random.h
63788+++ b/include/linux/random.h
63789@@ -69,12 +69,17 @@ void srandom32(u32 seed);
63790
63791 u32 prandom32(struct rnd_state *);
63792
63793+static inline unsigned long pax_get_random_long(void)
63794+{
63795+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63796+}
63797+
63798 /*
63799 * Handle minimum values for seeds
63800 */
63801 static inline u32 __seed(u32 x, u32 m)
63802 {
63803- return (x < m) ? x + m : x;
63804+ return (x <= m) ? x + m + 1 : x;
63805 }
63806
63807 /**
63808diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63809index e0879a7..a12f962 100644
63810--- a/include/linux/reboot.h
63811+++ b/include/linux/reboot.h
63812@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
63813 * Architecture-specific implementations of sys_reboot commands.
63814 */
63815
63816-extern void machine_restart(char *cmd);
63817-extern void machine_halt(void);
63818-extern void machine_power_off(void);
63819+extern void machine_restart(char *cmd) __noreturn;
63820+extern void machine_halt(void) __noreturn;
63821+extern void machine_power_off(void) __noreturn;
63822
63823 extern void machine_shutdown(void);
63824 struct pt_regs;
63825@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63826 */
63827
63828 extern void kernel_restart_prepare(char *cmd);
63829-extern void kernel_restart(char *cmd);
63830-extern void kernel_halt(void);
63831-extern void kernel_power_off(void);
63832+extern void kernel_restart(char *cmd) __noreturn;
63833+extern void kernel_halt(void) __noreturn;
63834+extern void kernel_power_off(void) __noreturn;
63835
63836 extern int C_A_D; /* for sysctl */
63837 void ctrl_alt_del(void);
63838@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63839 * Emergency restart, callable from an interrupt handler.
63840 */
63841
63842-extern void emergency_restart(void);
63843+extern void emergency_restart(void) __noreturn;
63844 #include <asm/emergency-restart.h>
63845
63846 #endif
63847diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
63848index 2213ddc..650212a 100644
63849--- a/include/linux/reiserfs_fs.h
63850+++ b/include/linux/reiserfs_fs.h
63851@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63852 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63853
63854 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63855-#define get_generation(s) atomic_read (&fs_generation(s))
63856+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63857 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63858 #define __fs_changed(gen,s) (gen != get_generation (s))
63859 #define fs_changed(gen,s) \
63860diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
63861index 8c9e85c..1698e9a 100644
63862--- a/include/linux/reiserfs_fs_sb.h
63863+++ b/include/linux/reiserfs_fs_sb.h
63864@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
63865 /* Comment? -Hans */
63866 wait_queue_head_t s_wait;
63867 /* To be obsoleted soon by per buffer seals.. -Hans */
63868- atomic_t s_generation_counter; // increased by one every time the
63869+ atomic_unchecked_t s_generation_counter; // increased by one every time the
63870 // tree gets re-balanced
63871 unsigned long s_properties; /* File system properties. Currently holds
63872 on-disk FS format */
63873diff --git a/include/linux/relay.h b/include/linux/relay.h
63874index a822fd7..62b70f6 100644
63875--- a/include/linux/relay.h
63876+++ b/include/linux/relay.h
63877@@ -159,7 +159,7 @@ struct rchan_callbacks
63878 * The callback should return 0 if successful, negative if not.
63879 */
63880 int (*remove_buf_file)(struct dentry *dentry);
63881-};
63882+} __no_const;
63883
63884 /*
63885 * CONFIG_RELAY kernel API, kernel/relay.c
63886diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63887index c6c6084..5bf1212 100644
63888--- a/include/linux/rfkill.h
63889+++ b/include/linux/rfkill.h
63890@@ -147,6 +147,7 @@ struct rfkill_ops {
63891 void (*query)(struct rfkill *rfkill, void *data);
63892 int (*set_block)(void *data, bool blocked);
63893 };
63894+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63895
63896 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63897 /**
63898diff --git a/include/linux/rio.h b/include/linux/rio.h
63899index 4d50611..c6858a2 100644
63900--- a/include/linux/rio.h
63901+++ b/include/linux/rio.h
63902@@ -315,7 +315,7 @@ struct rio_ops {
63903 int mbox, void *buffer, size_t len);
63904 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63905 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63906-};
63907+} __no_const;
63908
63909 #define RIO_RESOURCE_MEM 0x00000100
63910 #define RIO_RESOURCE_DOORBELL 0x00000200
63911diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63912index 1cdd62a..e399f0d 100644
63913--- a/include/linux/rmap.h
63914+++ b/include/linux/rmap.h
63915@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63916 void anon_vma_init(void); /* create anon_vma_cachep */
63917 int anon_vma_prepare(struct vm_area_struct *);
63918 void unlink_anon_vmas(struct vm_area_struct *);
63919-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63920+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63921 void anon_vma_moveto_tail(struct vm_area_struct *);
63922-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63923+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63924 void __anon_vma_link(struct vm_area_struct *);
63925
63926 static inline void anon_vma_merge(struct vm_area_struct *vma,
63927diff --git a/include/linux/sched.h b/include/linux/sched.h
63928index 0657368..765f70f 100644
63929--- a/include/linux/sched.h
63930+++ b/include/linux/sched.h
63931@@ -101,6 +101,7 @@ struct bio_list;
63932 struct fs_struct;
63933 struct perf_event_context;
63934 struct blk_plug;
63935+struct linux_binprm;
63936
63937 /*
63938 * List of flags we want to share for kernel threads,
63939@@ -382,10 +383,13 @@ struct user_namespace;
63940 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63941
63942 extern int sysctl_max_map_count;
63943+extern unsigned long sysctl_heap_stack_gap;
63944
63945 #include <linux/aio.h>
63946
63947 #ifdef CONFIG_MMU
63948+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63949+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63950 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63951 extern unsigned long
63952 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63953@@ -631,6 +635,17 @@ struct signal_struct {
63954 #ifdef CONFIG_TASKSTATS
63955 struct taskstats *stats;
63956 #endif
63957+
63958+#ifdef CONFIG_GRKERNSEC
63959+ u32 curr_ip;
63960+ u32 saved_ip;
63961+ u32 gr_saddr;
63962+ u32 gr_daddr;
63963+ u16 gr_sport;
63964+ u16 gr_dport;
63965+ u8 used_accept:1;
63966+#endif
63967+
63968 #ifdef CONFIG_AUDIT
63969 unsigned audit_tty;
63970 struct tty_audit_buf *tty_audit_buf;
63971@@ -714,6 +729,11 @@ struct user_struct {
63972 struct key *session_keyring; /* UID's default session keyring */
63973 #endif
63974
63975+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63976+ unsigned int banned;
63977+ unsigned long ban_expires;
63978+#endif
63979+
63980 /* Hash table maintenance information */
63981 struct hlist_node uidhash_node;
63982 uid_t uid;
63983@@ -1354,8 +1374,8 @@ struct task_struct {
63984 struct list_head thread_group;
63985
63986 struct completion *vfork_done; /* for vfork() */
63987- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63988- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63989+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63990+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63991
63992 cputime_t utime, stime, utimescaled, stimescaled;
63993 cputime_t gtime;
63994@@ -1371,13 +1391,6 @@ struct task_struct {
63995 struct task_cputime cputime_expires;
63996 struct list_head cpu_timers[3];
63997
63998-/* process credentials */
63999- const struct cred __rcu *real_cred; /* objective and real subjective task
64000- * credentials (COW) */
64001- const struct cred __rcu *cred; /* effective (overridable) subjective task
64002- * credentials (COW) */
64003- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64004-
64005 char comm[TASK_COMM_LEN]; /* executable name excluding path
64006 - access with [gs]et_task_comm (which lock
64007 it with task_lock())
64008@@ -1394,8 +1407,16 @@ struct task_struct {
64009 #endif
64010 /* CPU-specific state of this task */
64011 struct thread_struct thread;
64012+/* thread_info moved to task_struct */
64013+#ifdef CONFIG_X86
64014+ struct thread_info tinfo;
64015+#endif
64016 /* filesystem information */
64017 struct fs_struct *fs;
64018+
64019+ const struct cred __rcu *cred; /* effective (overridable) subjective task
64020+ * credentials (COW) */
64021+
64022 /* open file information */
64023 struct files_struct *files;
64024 /* namespaces */
64025@@ -1442,6 +1463,11 @@ struct task_struct {
64026 struct rt_mutex_waiter *pi_blocked_on;
64027 #endif
64028
64029+/* process credentials */
64030+ const struct cred __rcu *real_cred; /* objective and real subjective task
64031+ * credentials (COW) */
64032+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64033+
64034 #ifdef CONFIG_DEBUG_MUTEXES
64035 /* mutex deadlock detection */
64036 struct mutex_waiter *blocked_on;
64037@@ -1558,6 +1584,27 @@ struct task_struct {
64038 unsigned long default_timer_slack_ns;
64039
64040 struct list_head *scm_work_list;
64041+
64042+#ifdef CONFIG_GRKERNSEC
64043+ /* grsecurity */
64044+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64045+ u64 exec_id;
64046+#endif
64047+#ifdef CONFIG_GRKERNSEC_SETXID
64048+ const struct cred *delayed_cred;
64049+#endif
64050+ struct dentry *gr_chroot_dentry;
64051+ struct acl_subject_label *acl;
64052+ struct acl_role_label *role;
64053+ struct file *exec_file;
64054+ u16 acl_role_id;
64055+ /* is this the task that authenticated to the special role */
64056+ u8 acl_sp_role;
64057+ u8 is_writable;
64058+ u8 brute;
64059+ u8 gr_is_chrooted;
64060+#endif
64061+
64062 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
64063 /* Index of current stored address in ret_stack */
64064 int curr_ret_stack;
64065@@ -1592,6 +1639,51 @@ struct task_struct {
64066 #endif
64067 };
64068
64069+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
64070+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
64071+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
64072+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
64073+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
64074+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
64075+
64076+#ifdef CONFIG_PAX_SOFTMODE
64077+extern int pax_softmode;
64078+#endif
64079+
64080+extern int pax_check_flags(unsigned long *);
64081+
64082+/* if tsk != current then task_lock must be held on it */
64083+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64084+static inline unsigned long pax_get_flags(struct task_struct *tsk)
64085+{
64086+ if (likely(tsk->mm))
64087+ return tsk->mm->pax_flags;
64088+ else
64089+ return 0UL;
64090+}
64091+
64092+/* if tsk != current then task_lock must be held on it */
64093+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
64094+{
64095+ if (likely(tsk->mm)) {
64096+ tsk->mm->pax_flags = flags;
64097+ return 0;
64098+ }
64099+ return -EINVAL;
64100+}
64101+#endif
64102+
64103+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64104+extern void pax_set_initial_flags(struct linux_binprm *bprm);
64105+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64106+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
64107+#endif
64108+
64109+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
64110+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
64111+extern void pax_report_refcount_overflow(struct pt_regs *regs);
64112+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
64113+
64114 /* Future-safe accessor for struct task_struct's cpus_allowed. */
64115 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
64116
64117@@ -2104,7 +2196,9 @@ void yield(void);
64118 extern struct exec_domain default_exec_domain;
64119
64120 union thread_union {
64121+#ifndef CONFIG_X86
64122 struct thread_info thread_info;
64123+#endif
64124 unsigned long stack[THREAD_SIZE/sizeof(long)];
64125 };
64126
64127@@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
64128 */
64129
64130 extern struct task_struct *find_task_by_vpid(pid_t nr);
64131+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
64132 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
64133 struct pid_namespace *ns);
64134
64135@@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
64136 extern void exit_itimers(struct signal_struct *);
64137 extern void flush_itimer_signals(void);
64138
64139-extern void do_group_exit(int);
64140+extern __noreturn void do_group_exit(int);
64141
64142 extern void daemonize(const char *, ...);
64143 extern int allow_signal(int);
64144@@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
64145
64146 #endif
64147
64148-static inline int object_is_on_stack(void *obj)
64149+static inline int object_starts_on_stack(void *obj)
64150 {
64151- void *stack = task_stack_page(current);
64152+ const void *stack = task_stack_page(current);
64153
64154 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64155 }
64156
64157+#ifdef CONFIG_PAX_USERCOPY
64158+extern int object_is_on_stack(const void *obj, unsigned long len);
64159+#endif
64160+
64161 extern void thread_info_cache_init(void);
64162
64163 #ifdef CONFIG_DEBUG_STACK_USAGE
64164diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
64165index 899fbb4..1cb4138 100644
64166--- a/include/linux/screen_info.h
64167+++ b/include/linux/screen_info.h
64168@@ -43,7 +43,8 @@ struct screen_info {
64169 __u16 pages; /* 0x32 */
64170 __u16 vesa_attributes; /* 0x34 */
64171 __u32 capabilities; /* 0x36 */
64172- __u8 _reserved[6]; /* 0x3a */
64173+ __u16 vesapm_size; /* 0x3a */
64174+ __u8 _reserved[4]; /* 0x3c */
64175 } __attribute__((packed));
64176
64177 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
64178diff --git a/include/linux/security.h b/include/linux/security.h
64179index 83c18e8..2d98860 100644
64180--- a/include/linux/security.h
64181+++ b/include/linux/security.h
64182@@ -37,6 +37,7 @@
64183 #include <linux/xfrm.h>
64184 #include <linux/slab.h>
64185 #include <linux/xattr.h>
64186+#include <linux/grsecurity.h>
64187 #include <net/flow.h>
64188
64189 /* Maximum number of letters for an LSM name string */
64190diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
64191index 44f1514..2bbf6c1 100644
64192--- a/include/linux/seq_file.h
64193+++ b/include/linux/seq_file.h
64194@@ -24,6 +24,9 @@ struct seq_file {
64195 struct mutex lock;
64196 const struct seq_operations *op;
64197 int poll_event;
64198+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64199+ u64 exec_id;
64200+#endif
64201 void *private;
64202 };
64203
64204@@ -33,6 +36,7 @@ struct seq_operations {
64205 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64206 int (*show) (struct seq_file *m, void *v);
64207 };
64208+typedef struct seq_operations __no_const seq_operations_no_const;
64209
64210 #define SEQ_SKIP 1
64211
64212diff --git a/include/linux/shm.h b/include/linux/shm.h
64213index 92808b8..c28cac4 100644
64214--- a/include/linux/shm.h
64215+++ b/include/linux/shm.h
64216@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
64217
64218 /* The task created the shm object. NULL if the task is dead. */
64219 struct task_struct *shm_creator;
64220+#ifdef CONFIG_GRKERNSEC
64221+ time_t shm_createtime;
64222+ pid_t shm_lapid;
64223+#endif
64224 };
64225
64226 /* shm_mode upper byte flags */
64227diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
64228index 42854ce..3b7d3c8 100644
64229--- a/include/linux/skbuff.h
64230+++ b/include/linux/skbuff.h
64231@@ -655,7 +655,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
64232 */
64233 static inline int skb_queue_empty(const struct sk_buff_head *list)
64234 {
64235- return list->next == (struct sk_buff *)list;
64236+ return list->next == (const struct sk_buff *)list;
64237 }
64238
64239 /**
64240@@ -668,7 +668,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
64241 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64242 const struct sk_buff *skb)
64243 {
64244- return skb->next == (struct sk_buff *)list;
64245+ return skb->next == (const struct sk_buff *)list;
64246 }
64247
64248 /**
64249@@ -681,7 +681,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64250 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64251 const struct sk_buff *skb)
64252 {
64253- return skb->prev == (struct sk_buff *)list;
64254+ return skb->prev == (const struct sk_buff *)list;
64255 }
64256
64257 /**
64258@@ -1558,7 +1558,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
64259 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
64260 */
64261 #ifndef NET_SKB_PAD
64262-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
64263+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
64264 #endif
64265
64266 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
64267diff --git a/include/linux/slab.h b/include/linux/slab.h
64268index 573c809..07e1f43 100644
64269--- a/include/linux/slab.h
64270+++ b/include/linux/slab.h
64271@@ -11,12 +11,20 @@
64272
64273 #include <linux/gfp.h>
64274 #include <linux/types.h>
64275+#include <linux/err.h>
64276
64277 /*
64278 * Flags to pass to kmem_cache_create().
64279 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64280 */
64281 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64282+
64283+#ifdef CONFIG_PAX_USERCOPY
64284+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64285+#else
64286+#define SLAB_USERCOPY 0x00000000UL
64287+#endif
64288+
64289 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64290 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64291 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64292@@ -87,10 +95,13 @@
64293 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64294 * Both make kfree a no-op.
64295 */
64296-#define ZERO_SIZE_PTR ((void *)16)
64297+#define ZERO_SIZE_PTR \
64298+({ \
64299+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64300+ (void *)(-MAX_ERRNO-1L); \
64301+})
64302
64303-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64304- (unsigned long)ZERO_SIZE_PTR)
64305+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64306
64307 /*
64308 * struct kmem_cache related prototypes
64309@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
64310 /*
64311 * Common kmalloc functions provided by all allocators
64312 */
64313-void * __must_check __krealloc(const void *, size_t, gfp_t);
64314-void * __must_check krealloc(const void *, size_t, gfp_t);
64315+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
64316+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
64317 void kfree(const void *);
64318 void kzfree(const void *);
64319 size_t ksize(const void *);
64320+void check_object_size(const void *ptr, unsigned long n, bool to);
64321
64322 /*
64323 * Allocator specific definitions. These are mainly used to establish optimized
64324@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64325 */
64326 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64327 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64328-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64329+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
64330 #define kmalloc_track_caller(size, flags) \
64331 __kmalloc_track_caller(size, flags, _RET_IP_)
64332 #else
64333@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64334 */
64335 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64336 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64337-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
64338+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
64339 #define kmalloc_node_track_caller(size, flags, node) \
64340 __kmalloc_node_track_caller(size, flags, node, \
64341 _RET_IP_)
64342diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
64343index fbd1117..c0bd874 100644
64344--- a/include/linux/slab_def.h
64345+++ b/include/linux/slab_def.h
64346@@ -66,10 +66,10 @@ struct kmem_cache {
64347 unsigned long node_allocs;
64348 unsigned long node_frees;
64349 unsigned long node_overflow;
64350- atomic_t allochit;
64351- atomic_t allocmiss;
64352- atomic_t freehit;
64353- atomic_t freemiss;
64354+ atomic_unchecked_t allochit;
64355+ atomic_unchecked_t allocmiss;
64356+ atomic_unchecked_t freehit;
64357+ atomic_unchecked_t freemiss;
64358
64359 /*
64360 * If debugging is enabled, then the allocator can add additional
64361@@ -107,7 +107,7 @@ struct cache_sizes {
64362 extern struct cache_sizes malloc_sizes[];
64363
64364 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64365-void *__kmalloc(size_t size, gfp_t flags);
64366+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64367
64368 #ifdef CONFIG_TRACING
64369 extern void *kmem_cache_alloc_trace(size_t size,
64370@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
64371 }
64372 #endif
64373
64374+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64375 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64376 {
64377 struct kmem_cache *cachep;
64378@@ -160,7 +161,7 @@ found:
64379 }
64380
64381 #ifdef CONFIG_NUMA
64382-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64383+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64384 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64385
64386 #ifdef CONFIG_TRACING
64387@@ -179,6 +180,7 @@ kmem_cache_alloc_node_trace(size_t size,
64388 }
64389 #endif
64390
64391+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64392 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64393 {
64394 struct kmem_cache *cachep;
64395diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64396index 0ec00b3..65e7e0e 100644
64397--- a/include/linux/slob_def.h
64398+++ b/include/linux/slob_def.h
64399@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64400 return kmem_cache_alloc_node(cachep, flags, -1);
64401 }
64402
64403-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64404+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64405
64406+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64407 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64408 {
64409 return __kmalloc_node(size, flags, node);
64410@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64411 * kmalloc is the normal method of allocating memory
64412 * in the kernel.
64413 */
64414+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64415 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64416 {
64417 return __kmalloc_node(size, flags, -1);
64418 }
64419
64420+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64421 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64422 {
64423 return kmalloc(size, flags);
64424diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
64425index a32bcfd..d26bd6e 100644
64426--- a/include/linux/slub_def.h
64427+++ b/include/linux/slub_def.h
64428@@ -89,7 +89,7 @@ struct kmem_cache {
64429 struct kmem_cache_order_objects max;
64430 struct kmem_cache_order_objects min;
64431 gfp_t allocflags; /* gfp flags to use on each alloc */
64432- int refcount; /* Refcount for slab cache destroy */
64433+ atomic_t refcount; /* Refcount for slab cache destroy */
64434 void (*ctor)(void *);
64435 int inuse; /* Offset to metadata */
64436 int align; /* Alignment */
64437@@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size)
64438 * This ought to end up with a global pointer to the right cache
64439 * in kmalloc_caches.
64440 */
64441+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
64442 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64443 {
64444 int index = kmalloc_index(size);
64445@@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64446 }
64447
64448 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64449-void *__kmalloc(size_t size, gfp_t flags);
64450+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64451
64452 static __always_inline void *
64453+kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
64454+static __always_inline void *
64455 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64456 {
64457 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
64458@@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64459 }
64460 #endif
64461
64462+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
64463 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64464 {
64465 unsigned int order = get_order(size);
64466 return kmalloc_order_trace(size, flags, order);
64467 }
64468
64469+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64470 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64471 {
64472 if (__builtin_constant_p(size)) {
64473@@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64474 }
64475
64476 #ifdef CONFIG_NUMA
64477-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64478+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64479 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64480
64481 #ifdef CONFIG_TRACING
64482@@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
64483 }
64484 #endif
64485
64486+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64487 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64488 {
64489 if (__builtin_constant_p(size) &&
64490diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64491index de8832d..0147b46 100644
64492--- a/include/linux/sonet.h
64493+++ b/include/linux/sonet.h
64494@@ -61,7 +61,7 @@ struct sonet_stats {
64495 #include <linux/atomic.h>
64496
64497 struct k_sonet_stats {
64498-#define __HANDLE_ITEM(i) atomic_t i
64499+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64500 __SONET_ITEMS
64501 #undef __HANDLE_ITEM
64502 };
64503diff --git a/include/linux/stddef.h b/include/linux/stddef.h
64504index 6a40c76..1747b67 100644
64505--- a/include/linux/stddef.h
64506+++ b/include/linux/stddef.h
64507@@ -3,14 +3,10 @@
64508
64509 #include <linux/compiler.h>
64510
64511+#ifdef __KERNEL__
64512+
64513 #undef NULL
64514-#if defined(__cplusplus)
64515-#define NULL 0
64516-#else
64517 #define NULL ((void *)0)
64518-#endif
64519-
64520-#ifdef __KERNEL__
64521
64522 enum {
64523 false = 0,
64524diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64525index 2c5993a..b0e79f0 100644
64526--- a/include/linux/sunrpc/clnt.h
64527+++ b/include/linux/sunrpc/clnt.h
64528@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64529 {
64530 switch (sap->sa_family) {
64531 case AF_INET:
64532- return ntohs(((struct sockaddr_in *)sap)->sin_port);
64533+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64534 case AF_INET6:
64535- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64536+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64537 }
64538 return 0;
64539 }
64540@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64541 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64542 const struct sockaddr *src)
64543 {
64544- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64545+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64546 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64547
64548 dsin->sin_family = ssin->sin_family;
64549@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64550 if (sa->sa_family != AF_INET6)
64551 return 0;
64552
64553- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64554+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64555 }
64556
64557 #endif /* __KERNEL__ */
64558diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64559index e775689..9e206d9 100644
64560--- a/include/linux/sunrpc/sched.h
64561+++ b/include/linux/sunrpc/sched.h
64562@@ -105,6 +105,7 @@ struct rpc_call_ops {
64563 void (*rpc_call_done)(struct rpc_task *, void *);
64564 void (*rpc_release)(void *);
64565 };
64566+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64567
64568 struct rpc_task_setup {
64569 struct rpc_task *task;
64570diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64571index c14fe86..393245e 100644
64572--- a/include/linux/sunrpc/svc_rdma.h
64573+++ b/include/linux/sunrpc/svc_rdma.h
64574@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64575 extern unsigned int svcrdma_max_requests;
64576 extern unsigned int svcrdma_max_req_size;
64577
64578-extern atomic_t rdma_stat_recv;
64579-extern atomic_t rdma_stat_read;
64580-extern atomic_t rdma_stat_write;
64581-extern atomic_t rdma_stat_sq_starve;
64582-extern atomic_t rdma_stat_rq_starve;
64583-extern atomic_t rdma_stat_rq_poll;
64584-extern atomic_t rdma_stat_rq_prod;
64585-extern atomic_t rdma_stat_sq_poll;
64586-extern atomic_t rdma_stat_sq_prod;
64587+extern atomic_unchecked_t rdma_stat_recv;
64588+extern atomic_unchecked_t rdma_stat_read;
64589+extern atomic_unchecked_t rdma_stat_write;
64590+extern atomic_unchecked_t rdma_stat_sq_starve;
64591+extern atomic_unchecked_t rdma_stat_rq_starve;
64592+extern atomic_unchecked_t rdma_stat_rq_poll;
64593+extern atomic_unchecked_t rdma_stat_rq_prod;
64594+extern atomic_unchecked_t rdma_stat_sq_poll;
64595+extern atomic_unchecked_t rdma_stat_sq_prod;
64596
64597 #define RPCRDMA_VERSION 1
64598
64599diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64600index bb9127d..34ab358 100644
64601--- a/include/linux/sysctl.h
64602+++ b/include/linux/sysctl.h
64603@@ -155,7 +155,11 @@ enum
64604 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64605 };
64606
64607-
64608+#ifdef CONFIG_PAX_SOFTMODE
64609+enum {
64610+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64611+};
64612+#endif
64613
64614 /* CTL_VM names: */
64615 enum
64616@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64617
64618 extern int proc_dostring(struct ctl_table *, int,
64619 void __user *, size_t *, loff_t *);
64620+extern int proc_dostring_modpriv(struct ctl_table *, int,
64621+ void __user *, size_t *, loff_t *);
64622 extern int proc_dointvec(struct ctl_table *, int,
64623 void __user *, size_t *, loff_t *);
64624 extern int proc_dointvec_minmax(struct ctl_table *, int,
64625diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
64626index a71a292..51bd91d 100644
64627--- a/include/linux/tracehook.h
64628+++ b/include/linux/tracehook.h
64629@@ -54,12 +54,12 @@ struct linux_binprm;
64630 /*
64631 * ptrace report for syscall entry and exit looks identical.
64632 */
64633-static inline void ptrace_report_syscall(struct pt_regs *regs)
64634+static inline int ptrace_report_syscall(struct pt_regs *regs)
64635 {
64636 int ptrace = current->ptrace;
64637
64638 if (!(ptrace & PT_PTRACED))
64639- return;
64640+ return 0;
64641
64642 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
64643
64644@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64645 send_sig(current->exit_code, current, 1);
64646 current->exit_code = 0;
64647 }
64648+
64649+ return fatal_signal_pending(current);
64650 }
64651
64652 /**
64653@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64654 static inline __must_check int tracehook_report_syscall_entry(
64655 struct pt_regs *regs)
64656 {
64657- ptrace_report_syscall(regs);
64658- return 0;
64659+ return ptrace_report_syscall(regs);
64660 }
64661
64662 /**
64663diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64664index ff7dc08..893e1bd 100644
64665--- a/include/linux/tty_ldisc.h
64666+++ b/include/linux/tty_ldisc.h
64667@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
64668
64669 struct module *owner;
64670
64671- int refcount;
64672+ atomic_t refcount;
64673 };
64674
64675 struct tty_ldisc {
64676diff --git a/include/linux/types.h b/include/linux/types.h
64677index e5fa503..df6e8a4 100644
64678--- a/include/linux/types.h
64679+++ b/include/linux/types.h
64680@@ -214,10 +214,26 @@ typedef struct {
64681 int counter;
64682 } atomic_t;
64683
64684+#ifdef CONFIG_PAX_REFCOUNT
64685+typedef struct {
64686+ int counter;
64687+} atomic_unchecked_t;
64688+#else
64689+typedef atomic_t atomic_unchecked_t;
64690+#endif
64691+
64692 #ifdef CONFIG_64BIT
64693 typedef struct {
64694 long counter;
64695 } atomic64_t;
64696+
64697+#ifdef CONFIG_PAX_REFCOUNT
64698+typedef struct {
64699+ long counter;
64700+} atomic64_unchecked_t;
64701+#else
64702+typedef atomic64_t atomic64_unchecked_t;
64703+#endif
64704 #endif
64705
64706 struct list_head {
64707diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64708index 5ca0951..53a2fff 100644
64709--- a/include/linux/uaccess.h
64710+++ b/include/linux/uaccess.h
64711@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64712 long ret; \
64713 mm_segment_t old_fs = get_fs(); \
64714 \
64715- set_fs(KERNEL_DS); \
64716 pagefault_disable(); \
64717- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64718- pagefault_enable(); \
64719+ set_fs(KERNEL_DS); \
64720+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64721 set_fs(old_fs); \
64722+ pagefault_enable(); \
64723 ret; \
64724 })
64725
64726@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
64727 * Safely write to address @dst from the buffer at @src. If a kernel fault
64728 * happens, handle that and return -EFAULT.
64729 */
64730-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
64731+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
64732 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
64733
64734 #endif /* __LINUX_UACCESS_H__ */
64735diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64736index 99c1b4d..bb94261 100644
64737--- a/include/linux/unaligned/access_ok.h
64738+++ b/include/linux/unaligned/access_ok.h
64739@@ -6,32 +6,32 @@
64740
64741 static inline u16 get_unaligned_le16(const void *p)
64742 {
64743- return le16_to_cpup((__le16 *)p);
64744+ return le16_to_cpup((const __le16 *)p);
64745 }
64746
64747 static inline u32 get_unaligned_le32(const void *p)
64748 {
64749- return le32_to_cpup((__le32 *)p);
64750+ return le32_to_cpup((const __le32 *)p);
64751 }
64752
64753 static inline u64 get_unaligned_le64(const void *p)
64754 {
64755- return le64_to_cpup((__le64 *)p);
64756+ return le64_to_cpup((const __le64 *)p);
64757 }
64758
64759 static inline u16 get_unaligned_be16(const void *p)
64760 {
64761- return be16_to_cpup((__be16 *)p);
64762+ return be16_to_cpup((const __be16 *)p);
64763 }
64764
64765 static inline u32 get_unaligned_be32(const void *p)
64766 {
64767- return be32_to_cpup((__be32 *)p);
64768+ return be32_to_cpup((const __be32 *)p);
64769 }
64770
64771 static inline u64 get_unaligned_be64(const void *p)
64772 {
64773- return be64_to_cpup((__be64 *)p);
64774+ return be64_to_cpup((const __be64 *)p);
64775 }
64776
64777 static inline void put_unaligned_le16(u16 val, void *p)
64778diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
64779index 0d3f988..000f101 100644
64780--- a/include/linux/usb/renesas_usbhs.h
64781+++ b/include/linux/usb/renesas_usbhs.h
64782@@ -39,7 +39,7 @@ enum {
64783 */
64784 struct renesas_usbhs_driver_callback {
64785 int (*notify_hotplug)(struct platform_device *pdev);
64786-};
64787+} __no_const;
64788
64789 /*
64790 * callback functions for platform
64791@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
64792 * VBUS control is needed for Host
64793 */
64794 int (*set_vbus)(struct platform_device *pdev, int enable);
64795-};
64796+} __no_const;
64797
64798 /*
64799 * parameters for renesas usbhs
64800diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64801index 6f8fbcf..8259001 100644
64802--- a/include/linux/vermagic.h
64803+++ b/include/linux/vermagic.h
64804@@ -25,9 +25,35 @@
64805 #define MODULE_ARCH_VERMAGIC ""
64806 #endif
64807
64808+#ifdef CONFIG_PAX_REFCOUNT
64809+#define MODULE_PAX_REFCOUNT "REFCOUNT "
64810+#else
64811+#define MODULE_PAX_REFCOUNT ""
64812+#endif
64813+
64814+#ifdef CONSTIFY_PLUGIN
64815+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64816+#else
64817+#define MODULE_CONSTIFY_PLUGIN ""
64818+#endif
64819+
64820+#ifdef STACKLEAK_PLUGIN
64821+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64822+#else
64823+#define MODULE_STACKLEAK_PLUGIN ""
64824+#endif
64825+
64826+#ifdef CONFIG_GRKERNSEC
64827+#define MODULE_GRSEC "GRSEC "
64828+#else
64829+#define MODULE_GRSEC ""
64830+#endif
64831+
64832 #define VERMAGIC_STRING \
64833 UTS_RELEASE " " \
64834 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64835 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64836- MODULE_ARCH_VERMAGIC
64837+ MODULE_ARCH_VERMAGIC \
64838+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64839+ MODULE_GRSEC
64840
64841diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64842index dcdfc2b..cce598d 100644
64843--- a/include/linux/vmalloc.h
64844+++ b/include/linux/vmalloc.h
64845@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64846 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64847 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64848 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64849+
64850+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64851+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64852+#endif
64853+
64854 /* bits [20..32] reserved for arch specific ioremap internals */
64855
64856 /*
64857@@ -51,18 +56,18 @@ static inline void vmalloc_init(void)
64858 }
64859 #endif
64860
64861-extern void *vmalloc(unsigned long size);
64862-extern void *vzalloc(unsigned long size);
64863-extern void *vmalloc_user(unsigned long size);
64864-extern void *vmalloc_node(unsigned long size, int node);
64865-extern void *vzalloc_node(unsigned long size, int node);
64866-extern void *vmalloc_exec(unsigned long size);
64867-extern void *vmalloc_32(unsigned long size);
64868-extern void *vmalloc_32_user(unsigned long size);
64869-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64870+extern void *vmalloc(unsigned long size) __size_overflow(1);
64871+extern void *vzalloc(unsigned long size) __size_overflow(1);
64872+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
64873+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
64874+extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1);
64875+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
64876+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
64877+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
64878+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
64879 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64880 unsigned long start, unsigned long end, gfp_t gfp_mask,
64881- pgprot_t prot, int node, void *caller);
64882+ pgprot_t prot, int node, void *caller) __size_overflow(1);
64883 extern void vfree(const void *addr);
64884
64885 extern void *vmap(struct page **pages, unsigned int count,
64886@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64887 extern void free_vm_area(struct vm_struct *area);
64888
64889 /* for /dev/kmem */
64890-extern long vread(char *buf, char *addr, unsigned long count);
64891-extern long vwrite(char *buf, char *addr, unsigned long count);
64892+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64893+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64894
64895 /*
64896 * Internals. Dont't use..
64897diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64898index 65efb92..137adbb 100644
64899--- a/include/linux/vmstat.h
64900+++ b/include/linux/vmstat.h
64901@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64902 /*
64903 * Zone based page accounting with per cpu differentials.
64904 */
64905-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64906+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64907
64908 static inline void zone_page_state_add(long x, struct zone *zone,
64909 enum zone_stat_item item)
64910 {
64911- atomic_long_add(x, &zone->vm_stat[item]);
64912- atomic_long_add(x, &vm_stat[item]);
64913+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64914+ atomic_long_add_unchecked(x, &vm_stat[item]);
64915 }
64916
64917 static inline unsigned long global_page_state(enum zone_stat_item item)
64918 {
64919- long x = atomic_long_read(&vm_stat[item]);
64920+ long x = atomic_long_read_unchecked(&vm_stat[item]);
64921 #ifdef CONFIG_SMP
64922 if (x < 0)
64923 x = 0;
64924@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64925 static inline unsigned long zone_page_state(struct zone *zone,
64926 enum zone_stat_item item)
64927 {
64928- long x = atomic_long_read(&zone->vm_stat[item]);
64929+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64930 #ifdef CONFIG_SMP
64931 if (x < 0)
64932 x = 0;
64933@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64934 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64935 enum zone_stat_item item)
64936 {
64937- long x = atomic_long_read(&zone->vm_stat[item]);
64938+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64939
64940 #ifdef CONFIG_SMP
64941 int cpu;
64942@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64943
64944 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64945 {
64946- atomic_long_inc(&zone->vm_stat[item]);
64947- atomic_long_inc(&vm_stat[item]);
64948+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
64949+ atomic_long_inc_unchecked(&vm_stat[item]);
64950 }
64951
64952 static inline void __inc_zone_page_state(struct page *page,
64953@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64954
64955 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64956 {
64957- atomic_long_dec(&zone->vm_stat[item]);
64958- atomic_long_dec(&vm_stat[item]);
64959+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
64960+ atomic_long_dec_unchecked(&vm_stat[item]);
64961 }
64962
64963 static inline void __dec_zone_page_state(struct page *page,
64964diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64965index e5d1220..ef6e406 100644
64966--- a/include/linux/xattr.h
64967+++ b/include/linux/xattr.h
64968@@ -57,6 +57,11 @@
64969 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
64970 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
64971
64972+/* User namespace */
64973+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64974+#define XATTR_PAX_FLAGS_SUFFIX "flags"
64975+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64976+
64977 #ifdef __KERNEL__
64978
64979 #include <linux/types.h>
64980diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64981index 4aeff96..b378cdc 100644
64982--- a/include/media/saa7146_vv.h
64983+++ b/include/media/saa7146_vv.h
64984@@ -163,7 +163,7 @@ struct saa7146_ext_vv
64985 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64986
64987 /* the extension can override this */
64988- struct v4l2_ioctl_ops ops;
64989+ v4l2_ioctl_ops_no_const ops;
64990 /* pointer to the saa7146 core ops */
64991 const struct v4l2_ioctl_ops *core_ops;
64992
64993diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64994index c7c40f1..4f01585 100644
64995--- a/include/media/v4l2-dev.h
64996+++ b/include/media/v4l2-dev.h
64997@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
64998
64999
65000 struct v4l2_file_operations {
65001- struct module *owner;
65002+ struct module * const owner;
65003 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
65004 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
65005 unsigned int (*poll) (struct file *, struct poll_table_struct *);
65006@@ -68,6 +68,7 @@ struct v4l2_file_operations {
65007 int (*open) (struct file *);
65008 int (*release) (struct file *);
65009 };
65010+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
65011
65012 /*
65013 * Newer version of video_device, handled by videodev2.c
65014diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
65015index 3f5d60f..44210ed 100644
65016--- a/include/media/v4l2-ioctl.h
65017+++ b/include/media/v4l2-ioctl.h
65018@@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
65019 long (*vidioc_default) (struct file *file, void *fh,
65020 bool valid_prio, int cmd, void *arg);
65021 };
65022-
65023+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
65024
65025 /* v4l debugging and diagnostics */
65026
65027diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
65028index 8d55251..dfe5b0a 100644
65029--- a/include/net/caif/caif_hsi.h
65030+++ b/include/net/caif/caif_hsi.h
65031@@ -98,7 +98,7 @@ struct cfhsi_drv {
65032 void (*rx_done_cb) (struct cfhsi_drv *drv);
65033 void (*wake_up_cb) (struct cfhsi_drv *drv);
65034 void (*wake_down_cb) (struct cfhsi_drv *drv);
65035-};
65036+} __no_const;
65037
65038 /* Structure implemented by HSI device. */
65039 struct cfhsi_dev {
65040diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
65041index 9e5425b..8136ffc 100644
65042--- a/include/net/caif/cfctrl.h
65043+++ b/include/net/caif/cfctrl.h
65044@@ -52,7 +52,7 @@ struct cfctrl_rsp {
65045 void (*radioset_rsp)(void);
65046 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
65047 struct cflayer *client_layer);
65048-};
65049+} __no_const;
65050
65051 /* Link Setup Parameters for CAIF-Links. */
65052 struct cfctrl_link_param {
65053@@ -101,8 +101,8 @@ struct cfctrl_request_info {
65054 struct cfctrl {
65055 struct cfsrvl serv;
65056 struct cfctrl_rsp res;
65057- atomic_t req_seq_no;
65058- atomic_t rsp_seq_no;
65059+ atomic_unchecked_t req_seq_no;
65060+ atomic_unchecked_t rsp_seq_no;
65061 struct list_head list;
65062 /* Protects from simultaneous access to first_req list */
65063 spinlock_t info_list_lock;
65064diff --git a/include/net/flow.h b/include/net/flow.h
65065index 6c469db..7743b8e 100644
65066--- a/include/net/flow.h
65067+++ b/include/net/flow.h
65068@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
65069
65070 extern void flow_cache_flush(void);
65071 extern void flow_cache_flush_deferred(void);
65072-extern atomic_t flow_cache_genid;
65073+extern atomic_unchecked_t flow_cache_genid;
65074
65075 #endif
65076diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
65077index b94765e..053f68b 100644
65078--- a/include/net/inetpeer.h
65079+++ b/include/net/inetpeer.h
65080@@ -48,8 +48,8 @@ struct inet_peer {
65081 */
65082 union {
65083 struct {
65084- atomic_t rid; /* Frag reception counter */
65085- atomic_t ip_id_count; /* IP ID for the next packet */
65086+ atomic_unchecked_t rid; /* Frag reception counter */
65087+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
65088 __u32 tcp_ts;
65089 __u32 tcp_ts_stamp;
65090 };
65091@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
65092 more++;
65093 inet_peer_refcheck(p);
65094 do {
65095- old = atomic_read(&p->ip_id_count);
65096+ old = atomic_read_unchecked(&p->ip_id_count);
65097 new = old + more;
65098 if (!new)
65099 new = 1;
65100- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
65101+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
65102 return new;
65103 }
65104
65105diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
65106index 10422ef..662570f 100644
65107--- a/include/net/ip_fib.h
65108+++ b/include/net/ip_fib.h
65109@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
65110
65111 #define FIB_RES_SADDR(net, res) \
65112 ((FIB_RES_NH(res).nh_saddr_genid == \
65113- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
65114+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
65115 FIB_RES_NH(res).nh_saddr : \
65116 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
65117 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
65118diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
65119index ebe517f..1bd286b 100644
65120--- a/include/net/ip_vs.h
65121+++ b/include/net/ip_vs.h
65122@@ -509,7 +509,7 @@ struct ip_vs_conn {
65123 struct ip_vs_conn *control; /* Master control connection */
65124 atomic_t n_control; /* Number of controlled ones */
65125 struct ip_vs_dest *dest; /* real server */
65126- atomic_t in_pkts; /* incoming packet counter */
65127+ atomic_unchecked_t in_pkts; /* incoming packet counter */
65128
65129 /* packet transmitter for different forwarding methods. If it
65130 mangles the packet, it must return NF_DROP or better NF_STOLEN,
65131@@ -647,7 +647,7 @@ struct ip_vs_dest {
65132 __be16 port; /* port number of the server */
65133 union nf_inet_addr addr; /* IP address of the server */
65134 volatile unsigned flags; /* dest status flags */
65135- atomic_t conn_flags; /* flags to copy to conn */
65136+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
65137 atomic_t weight; /* server weight */
65138
65139 atomic_t refcnt; /* reference counter */
65140diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
65141index 69b610a..fe3962c 100644
65142--- a/include/net/irda/ircomm_core.h
65143+++ b/include/net/irda/ircomm_core.h
65144@@ -51,7 +51,7 @@ typedef struct {
65145 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
65146 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
65147 struct ircomm_info *);
65148-} call_t;
65149+} __no_const call_t;
65150
65151 struct ircomm_cb {
65152 irda_queue_t queue;
65153diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
65154index 59ba38bc..d515662 100644
65155--- a/include/net/irda/ircomm_tty.h
65156+++ b/include/net/irda/ircomm_tty.h
65157@@ -35,6 +35,7 @@
65158 #include <linux/termios.h>
65159 #include <linux/timer.h>
65160 #include <linux/tty.h> /* struct tty_struct */
65161+#include <asm/local.h>
65162
65163 #include <net/irda/irias_object.h>
65164 #include <net/irda/ircomm_core.h>
65165@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
65166 unsigned short close_delay;
65167 unsigned short closing_wait; /* time to wait before closing */
65168
65169- int open_count;
65170- int blocked_open; /* # of blocked opens */
65171+ local_t open_count;
65172+ local_t blocked_open; /* # of blocked opens */
65173
65174 /* Protect concurent access to :
65175 * o self->open_count
65176diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
65177index 0954ec9..7413562 100644
65178--- a/include/net/iucv/af_iucv.h
65179+++ b/include/net/iucv/af_iucv.h
65180@@ -138,7 +138,7 @@ struct iucv_sock {
65181 struct iucv_sock_list {
65182 struct hlist_head head;
65183 rwlock_t lock;
65184- atomic_t autobind_name;
65185+ atomic_unchecked_t autobind_name;
65186 };
65187
65188 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
65189diff --git a/include/net/neighbour.h b/include/net/neighbour.h
65190index 34c996f..bb3b4d4 100644
65191--- a/include/net/neighbour.h
65192+++ b/include/net/neighbour.h
65193@@ -123,7 +123,7 @@ struct neigh_ops {
65194 void (*error_report)(struct neighbour *, struct sk_buff *);
65195 int (*output)(struct neighbour *, struct sk_buff *);
65196 int (*connected_output)(struct neighbour *, struct sk_buff *);
65197-};
65198+} __do_const;
65199
65200 struct pneigh_entry {
65201 struct pneigh_entry *next;
65202diff --git a/include/net/netlink.h b/include/net/netlink.h
65203index cb1f350..3279d2c 100644
65204--- a/include/net/netlink.h
65205+++ b/include/net/netlink.h
65206@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
65207 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65208 {
65209 if (mark)
65210- skb_trim(skb, (unsigned char *) mark - skb->data);
65211+ skb_trim(skb, (const unsigned char *) mark - skb->data);
65212 }
65213
65214 /**
65215diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
65216index bbd023a..97c6d0d 100644
65217--- a/include/net/netns/ipv4.h
65218+++ b/include/net/netns/ipv4.h
65219@@ -57,8 +57,8 @@ struct netns_ipv4 {
65220 unsigned int sysctl_ping_group_range[2];
65221 long sysctl_tcp_mem[3];
65222
65223- atomic_t rt_genid;
65224- atomic_t dev_addr_genid;
65225+ atomic_unchecked_t rt_genid;
65226+ atomic_unchecked_t dev_addr_genid;
65227
65228 #ifdef CONFIG_IP_MROUTE
65229 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
65230diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
65231index d368561..96aaa17 100644
65232--- a/include/net/sctp/sctp.h
65233+++ b/include/net/sctp/sctp.h
65234@@ -318,9 +318,9 @@ do { \
65235
65236 #else /* SCTP_DEBUG */
65237
65238-#define SCTP_DEBUG_PRINTK(whatever...)
65239-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
65240-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65241+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
65242+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
65243+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65244 #define SCTP_ENABLE_DEBUG
65245 #define SCTP_DISABLE_DEBUG
65246 #define SCTP_ASSERT(expr, str, func)
65247diff --git a/include/net/sock.h b/include/net/sock.h
65248index 91c1c8b..15ae923 100644
65249--- a/include/net/sock.h
65250+++ b/include/net/sock.h
65251@@ -299,7 +299,7 @@ struct sock {
65252 #ifdef CONFIG_RPS
65253 __u32 sk_rxhash;
65254 #endif
65255- atomic_t sk_drops;
65256+ atomic_unchecked_t sk_drops;
65257 int sk_rcvbuf;
65258
65259 struct sk_filter __rcu *sk_filter;
65260@@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
65261 }
65262
65263 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
65264- char __user *from, char *to,
65265+ char __user *from, unsigned char *to,
65266 int copy, int offset)
65267 {
65268 if (skb->ip_summed == CHECKSUM_NONE) {
65269diff --git a/include/net/tcp.h b/include/net/tcp.h
65270index 2d80c29..aa07caf 100644
65271--- a/include/net/tcp.h
65272+++ b/include/net/tcp.h
65273@@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
65274 char *name;
65275 sa_family_t family;
65276 const struct file_operations *seq_fops;
65277- struct seq_operations seq_ops;
65278+ seq_operations_no_const seq_ops;
65279 };
65280
65281 struct tcp_iter_state {
65282diff --git a/include/net/udp.h b/include/net/udp.h
65283index e39592f..fef9680 100644
65284--- a/include/net/udp.h
65285+++ b/include/net/udp.h
65286@@ -243,7 +243,7 @@ struct udp_seq_afinfo {
65287 sa_family_t family;
65288 struct udp_table *udp_table;
65289 const struct file_operations *seq_fops;
65290- struct seq_operations seq_ops;
65291+ seq_operations_no_const seq_ops;
65292 };
65293
65294 struct udp_iter_state {
65295diff --git a/include/net/xfrm.h b/include/net/xfrm.h
65296index 89174e2..1f82598 100644
65297--- a/include/net/xfrm.h
65298+++ b/include/net/xfrm.h
65299@@ -505,7 +505,7 @@ struct xfrm_policy {
65300 struct timer_list timer;
65301
65302 struct flow_cache_object flo;
65303- atomic_t genid;
65304+ atomic_unchecked_t genid;
65305 u32 priority;
65306 u32 index;
65307 struct xfrm_mark mark;
65308diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
65309index 1a046b1..ee0bef0 100644
65310--- a/include/rdma/iw_cm.h
65311+++ b/include/rdma/iw_cm.h
65312@@ -122,7 +122,7 @@ struct iw_cm_verbs {
65313 int backlog);
65314
65315 int (*destroy_listen)(struct iw_cm_id *cm_id);
65316-};
65317+} __no_const;
65318
65319 /**
65320 * iw_create_cm_id - Create an IW CM identifier.
65321diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
65322index 6a3922f..0b73022 100644
65323--- a/include/scsi/libfc.h
65324+++ b/include/scsi/libfc.h
65325@@ -748,6 +748,7 @@ struct libfc_function_template {
65326 */
65327 void (*disc_stop_final) (struct fc_lport *);
65328 };
65329+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65330
65331 /**
65332 * struct fc_disc - Discovery context
65333@@ -851,7 +852,7 @@ struct fc_lport {
65334 struct fc_vport *vport;
65335
65336 /* Operational Information */
65337- struct libfc_function_template tt;
65338+ libfc_function_template_no_const tt;
65339 u8 link_up;
65340 u8 qfull;
65341 enum fc_lport_state state;
65342diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
65343index 77273f2..dd4031f 100644
65344--- a/include/scsi/scsi_device.h
65345+++ b/include/scsi/scsi_device.h
65346@@ -161,9 +161,9 @@ struct scsi_device {
65347 unsigned int max_device_blocked; /* what device_blocked counts down from */
65348 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65349
65350- atomic_t iorequest_cnt;
65351- atomic_t iodone_cnt;
65352- atomic_t ioerr_cnt;
65353+ atomic_unchecked_t iorequest_cnt;
65354+ atomic_unchecked_t iodone_cnt;
65355+ atomic_unchecked_t ioerr_cnt;
65356
65357 struct device sdev_gendev,
65358 sdev_dev;
65359diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
65360index 2a65167..91e01f8 100644
65361--- a/include/scsi/scsi_transport_fc.h
65362+++ b/include/scsi/scsi_transport_fc.h
65363@@ -711,7 +711,7 @@ struct fc_function_template {
65364 unsigned long show_host_system_hostname:1;
65365
65366 unsigned long disable_target_scan:1;
65367-};
65368+} __do_const;
65369
65370
65371 /**
65372diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65373index 030b87c..98a6954 100644
65374--- a/include/sound/ak4xxx-adda.h
65375+++ b/include/sound/ak4xxx-adda.h
65376@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65377 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65378 unsigned char val);
65379 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65380-};
65381+} __no_const;
65382
65383 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65384
65385diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65386index 8c05e47..2b5df97 100644
65387--- a/include/sound/hwdep.h
65388+++ b/include/sound/hwdep.h
65389@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65390 struct snd_hwdep_dsp_status *status);
65391 int (*dsp_load)(struct snd_hwdep *hw,
65392 struct snd_hwdep_dsp_image *image);
65393-};
65394+} __no_const;
65395
65396 struct snd_hwdep {
65397 struct snd_card *card;
65398diff --git a/include/sound/info.h b/include/sound/info.h
65399index 9ca1a49..aba1728 100644
65400--- a/include/sound/info.h
65401+++ b/include/sound/info.h
65402@@ -44,7 +44,7 @@ struct snd_info_entry_text {
65403 struct snd_info_buffer *buffer);
65404 void (*write)(struct snd_info_entry *entry,
65405 struct snd_info_buffer *buffer);
65406-};
65407+} __no_const;
65408
65409 struct snd_info_entry_ops {
65410 int (*open)(struct snd_info_entry *entry,
65411diff --git a/include/sound/pcm.h b/include/sound/pcm.h
65412index 0cf91b2..b70cae4 100644
65413--- a/include/sound/pcm.h
65414+++ b/include/sound/pcm.h
65415@@ -81,6 +81,7 @@ struct snd_pcm_ops {
65416 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65417 int (*ack)(struct snd_pcm_substream *substream);
65418 };
65419+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65420
65421 /*
65422 *
65423diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65424index af1b49e..a5d55a5 100644
65425--- a/include/sound/sb16_csp.h
65426+++ b/include/sound/sb16_csp.h
65427@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65428 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65429 int (*csp_stop) (struct snd_sb_csp * p);
65430 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65431-};
65432+} __no_const;
65433
65434 /*
65435 * CSP private data
65436diff --git a/include/sound/soc.h b/include/sound/soc.h
65437index 0992dff..bb366fe 100644
65438--- a/include/sound/soc.h
65439+++ b/include/sound/soc.h
65440@@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
65441 /* platform IO - used for platform DAPM */
65442 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65443 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
65444-};
65445+} __do_const;
65446
65447 struct snd_soc_platform {
65448 const char *name;
65449@@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
65450 struct snd_soc_dai_link *dai_link;
65451 struct mutex pcm_mutex;
65452 enum snd_soc_pcm_subclass pcm_subclass;
65453- struct snd_pcm_ops ops;
65454+ snd_pcm_ops_no_const ops;
65455
65456 unsigned int complete:1;
65457 unsigned int dev_registered:1;
65458diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65459index 444cd6b..3327cc5 100644
65460--- a/include/sound/ymfpci.h
65461+++ b/include/sound/ymfpci.h
65462@@ -358,7 +358,7 @@ struct snd_ymfpci {
65463 spinlock_t reg_lock;
65464 spinlock_t voice_lock;
65465 wait_queue_head_t interrupt_sleep;
65466- atomic_t interrupt_sleep_count;
65467+ atomic_unchecked_t interrupt_sleep_count;
65468 struct snd_info_entry *proc_entry;
65469 const struct firmware *dsp_microcode;
65470 const struct firmware *controller_microcode;
65471diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
65472index fe73eb8..56388b1 100644
65473--- a/include/target/target_core_base.h
65474+++ b/include/target/target_core_base.h
65475@@ -443,7 +443,7 @@ struct t10_reservation_ops {
65476 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65477 int (*t10_pr_register)(struct se_cmd *);
65478 int (*t10_pr_clear)(struct se_cmd *);
65479-};
65480+} __no_const;
65481
65482 struct t10_reservation {
65483 /* Reservation effects all target ports */
65484@@ -561,8 +561,8 @@ struct se_cmd {
65485 atomic_t t_se_count;
65486 atomic_t t_task_cdbs_left;
65487 atomic_t t_task_cdbs_ex_left;
65488- atomic_t t_task_cdbs_sent;
65489- atomic_t t_transport_aborted;
65490+ atomic_unchecked_t t_task_cdbs_sent;
65491+ atomic_unchecked_t t_transport_aborted;
65492 atomic_t t_transport_active;
65493 atomic_t t_transport_complete;
65494 atomic_t t_transport_queue_active;
65495@@ -799,7 +799,7 @@ struct se_device {
65496 spinlock_t stats_lock;
65497 /* Active commands on this virtual SE device */
65498 atomic_t simple_cmds;
65499- atomic_t dev_ordered_id;
65500+ atomic_unchecked_t dev_ordered_id;
65501 atomic_t execute_tasks;
65502 atomic_t dev_ordered_sync;
65503 atomic_t dev_qf_count;
65504diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65505index 1c09820..7f5ec79 100644
65506--- a/include/trace/events/irq.h
65507+++ b/include/trace/events/irq.h
65508@@ -36,7 +36,7 @@ struct softirq_action;
65509 */
65510 TRACE_EVENT(irq_handler_entry,
65511
65512- TP_PROTO(int irq, struct irqaction *action),
65513+ TP_PROTO(int irq, const struct irqaction *action),
65514
65515 TP_ARGS(irq, action),
65516
65517@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65518 */
65519 TRACE_EVENT(irq_handler_exit,
65520
65521- TP_PROTO(int irq, struct irqaction *action, int ret),
65522+ TP_PROTO(int irq, const struct irqaction *action, int ret),
65523
65524 TP_ARGS(irq, action, ret),
65525
65526diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65527index c41f308..6918de3 100644
65528--- a/include/video/udlfb.h
65529+++ b/include/video/udlfb.h
65530@@ -52,10 +52,10 @@ struct dlfb_data {
65531 u32 pseudo_palette[256];
65532 int blank_mode; /*one of FB_BLANK_ */
65533 /* blit-only rendering path metrics, exposed through sysfs */
65534- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65535- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65536- atomic_t bytes_sent; /* to usb, after compression including overhead */
65537- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65538+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65539+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65540+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65541+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65542 };
65543
65544 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65545diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65546index 0993a22..32ba2fe 100644
65547--- a/include/video/uvesafb.h
65548+++ b/include/video/uvesafb.h
65549@@ -177,6 +177,7 @@ struct uvesafb_par {
65550 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65551 u8 pmi_setpal; /* PMI for palette changes */
65552 u16 *pmi_base; /* protected mode interface location */
65553+ u8 *pmi_code; /* protected mode code location */
65554 void *pmi_start;
65555 void *pmi_pal;
65556 u8 *vbe_state_orig; /*
65557diff --git a/init/Kconfig b/init/Kconfig
65558index 3f42cd6..613f41d 100644
65559--- a/init/Kconfig
65560+++ b/init/Kconfig
65561@@ -799,6 +799,7 @@ endif # CGROUPS
65562
65563 config CHECKPOINT_RESTORE
65564 bool "Checkpoint/restore support" if EXPERT
65565+ depends on !GRKERNSEC
65566 default n
65567 help
65568 Enables additional kernel features in a sake of checkpoint/restore.
65569@@ -1249,7 +1250,7 @@ config SLUB_DEBUG
65570
65571 config COMPAT_BRK
65572 bool "Disable heap randomization"
65573- default y
65574+ default n
65575 help
65576 Randomizing heap placement makes heap exploits harder, but it
65577 also breaks ancient binaries (including anything libc5 based).
65578diff --git a/init/do_mounts.c b/init/do_mounts.c
65579index 2974c8b..0b863ae 100644
65580--- a/init/do_mounts.c
65581+++ b/init/do_mounts.c
65582@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
65583 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65584 {
65585 struct super_block *s;
65586- int err = sys_mount(name, "/root", fs, flags, data);
65587+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65588 if (err)
65589 return err;
65590
65591- sys_chdir((const char __user __force *)"/root");
65592+ sys_chdir((const char __force_user *)"/root");
65593 s = current->fs->pwd.dentry->d_sb;
65594 ROOT_DEV = s->s_dev;
65595 printk(KERN_INFO
65596@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
65597 va_start(args, fmt);
65598 vsprintf(buf, fmt, args);
65599 va_end(args);
65600- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65601+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65602 if (fd >= 0) {
65603 sys_ioctl(fd, FDEJECT, 0);
65604 sys_close(fd);
65605 }
65606 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65607- fd = sys_open("/dev/console", O_RDWR, 0);
65608+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65609 if (fd >= 0) {
65610 sys_ioctl(fd, TCGETS, (long)&termios);
65611 termios.c_lflag &= ~ICANON;
65612 sys_ioctl(fd, TCSETSF, (long)&termios);
65613- sys_read(fd, &c, 1);
65614+ sys_read(fd, (char __user *)&c, 1);
65615 termios.c_lflag |= ICANON;
65616 sys_ioctl(fd, TCSETSF, (long)&termios);
65617 sys_close(fd);
65618@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
65619 mount_root();
65620 out:
65621 devtmpfs_mount("dev");
65622- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65623- sys_chroot((const char __user __force *)".");
65624+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65625+ sys_chroot((const char __force_user *)".");
65626 }
65627diff --git a/init/do_mounts.h b/init/do_mounts.h
65628index f5b978a..69dbfe8 100644
65629--- a/init/do_mounts.h
65630+++ b/init/do_mounts.h
65631@@ -15,15 +15,15 @@ extern int root_mountflags;
65632
65633 static inline int create_dev(char *name, dev_t dev)
65634 {
65635- sys_unlink(name);
65636- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65637+ sys_unlink((char __force_user *)name);
65638+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65639 }
65640
65641 #if BITS_PER_LONG == 32
65642 static inline u32 bstat(char *name)
65643 {
65644 struct stat64 stat;
65645- if (sys_stat64(name, &stat) != 0)
65646+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65647 return 0;
65648 if (!S_ISBLK(stat.st_mode))
65649 return 0;
65650@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65651 static inline u32 bstat(char *name)
65652 {
65653 struct stat stat;
65654- if (sys_newstat(name, &stat) != 0)
65655+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65656 return 0;
65657 if (!S_ISBLK(stat.st_mode))
65658 return 0;
65659diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65660index 3098a38..253064e 100644
65661--- a/init/do_mounts_initrd.c
65662+++ b/init/do_mounts_initrd.c
65663@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
65664 create_dev("/dev/root.old", Root_RAM0);
65665 /* mount initrd on rootfs' /root */
65666 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65667- sys_mkdir("/old", 0700);
65668- root_fd = sys_open("/", 0, 0);
65669- old_fd = sys_open("/old", 0, 0);
65670+ sys_mkdir((const char __force_user *)"/old", 0700);
65671+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
65672+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65673 /* move initrd over / and chdir/chroot in initrd root */
65674- sys_chdir("/root");
65675- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65676- sys_chroot(".");
65677+ sys_chdir((const char __force_user *)"/root");
65678+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65679+ sys_chroot((const char __force_user *)".");
65680
65681 /*
65682 * In case that a resume from disk is carried out by linuxrc or one of
65683@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
65684
65685 /* move initrd to rootfs' /old */
65686 sys_fchdir(old_fd);
65687- sys_mount("/", ".", NULL, MS_MOVE, NULL);
65688+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65689 /* switch root and cwd back to / of rootfs */
65690 sys_fchdir(root_fd);
65691- sys_chroot(".");
65692+ sys_chroot((const char __force_user *)".");
65693 sys_close(old_fd);
65694 sys_close(root_fd);
65695
65696 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65697- sys_chdir("/old");
65698+ sys_chdir((const char __force_user *)"/old");
65699 return;
65700 }
65701
65702@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
65703 mount_root();
65704
65705 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65706- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65707+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65708 if (!error)
65709 printk("okay\n");
65710 else {
65711- int fd = sys_open("/dev/root.old", O_RDWR, 0);
65712+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65713 if (error == -ENOENT)
65714 printk("/initrd does not exist. Ignored.\n");
65715 else
65716 printk("failed\n");
65717 printk(KERN_NOTICE "Unmounting old root\n");
65718- sys_umount("/old", MNT_DETACH);
65719+ sys_umount((char __force_user *)"/old", MNT_DETACH);
65720 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65721 if (fd < 0) {
65722 error = fd;
65723@@ -116,11 +116,11 @@ int __init initrd_load(void)
65724 * mounted in the normal path.
65725 */
65726 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65727- sys_unlink("/initrd.image");
65728+ sys_unlink((const char __force_user *)"/initrd.image");
65729 handle_initrd();
65730 return 1;
65731 }
65732 }
65733- sys_unlink("/initrd.image");
65734+ sys_unlink((const char __force_user *)"/initrd.image");
65735 return 0;
65736 }
65737diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65738index 32c4799..c27ee74 100644
65739--- a/init/do_mounts_md.c
65740+++ b/init/do_mounts_md.c
65741@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
65742 partitioned ? "_d" : "", minor,
65743 md_setup_args[ent].device_names);
65744
65745- fd = sys_open(name, 0, 0);
65746+ fd = sys_open((char __force_user *)name, 0, 0);
65747 if (fd < 0) {
65748 printk(KERN_ERR "md: open failed - cannot start "
65749 "array %s\n", name);
65750@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
65751 * array without it
65752 */
65753 sys_close(fd);
65754- fd = sys_open(name, 0, 0);
65755+ fd = sys_open((char __force_user *)name, 0, 0);
65756 sys_ioctl(fd, BLKRRPART, 0);
65757 }
65758 sys_close(fd);
65759@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
65760
65761 wait_for_device_probe();
65762
65763- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
65764+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65765 if (fd >= 0) {
65766 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65767 sys_close(fd);
65768diff --git a/init/initramfs.c b/init/initramfs.c
65769index 8216c30..25e8e32 100644
65770--- a/init/initramfs.c
65771+++ b/init/initramfs.c
65772@@ -74,7 +74,7 @@ static void __init free_hash(void)
65773 }
65774 }
65775
65776-static long __init do_utime(char __user *filename, time_t mtime)
65777+static long __init do_utime(__force char __user *filename, time_t mtime)
65778 {
65779 struct timespec t[2];
65780
65781@@ -109,7 +109,7 @@ static void __init dir_utime(void)
65782 struct dir_entry *de, *tmp;
65783 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65784 list_del(&de->list);
65785- do_utime(de->name, de->mtime);
65786+ do_utime((char __force_user *)de->name, de->mtime);
65787 kfree(de->name);
65788 kfree(de);
65789 }
65790@@ -271,7 +271,7 @@ static int __init maybe_link(void)
65791 if (nlink >= 2) {
65792 char *old = find_link(major, minor, ino, mode, collected);
65793 if (old)
65794- return (sys_link(old, collected) < 0) ? -1 : 1;
65795+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
65796 }
65797 return 0;
65798 }
65799@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
65800 {
65801 struct stat st;
65802
65803- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
65804+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
65805 if (S_ISDIR(st.st_mode))
65806- sys_rmdir(path);
65807+ sys_rmdir((char __force_user *)path);
65808 else
65809- sys_unlink(path);
65810+ sys_unlink((char __force_user *)path);
65811 }
65812 }
65813
65814@@ -305,7 +305,7 @@ static int __init do_name(void)
65815 int openflags = O_WRONLY|O_CREAT;
65816 if (ml != 1)
65817 openflags |= O_TRUNC;
65818- wfd = sys_open(collected, openflags, mode);
65819+ wfd = sys_open((char __force_user *)collected, openflags, mode);
65820
65821 if (wfd >= 0) {
65822 sys_fchown(wfd, uid, gid);
65823@@ -317,17 +317,17 @@ static int __init do_name(void)
65824 }
65825 }
65826 } else if (S_ISDIR(mode)) {
65827- sys_mkdir(collected, mode);
65828- sys_chown(collected, uid, gid);
65829- sys_chmod(collected, mode);
65830+ sys_mkdir((char __force_user *)collected, mode);
65831+ sys_chown((char __force_user *)collected, uid, gid);
65832+ sys_chmod((char __force_user *)collected, mode);
65833 dir_add(collected, mtime);
65834 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65835 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65836 if (maybe_link() == 0) {
65837- sys_mknod(collected, mode, rdev);
65838- sys_chown(collected, uid, gid);
65839- sys_chmod(collected, mode);
65840- do_utime(collected, mtime);
65841+ sys_mknod((char __force_user *)collected, mode, rdev);
65842+ sys_chown((char __force_user *)collected, uid, gid);
65843+ sys_chmod((char __force_user *)collected, mode);
65844+ do_utime((char __force_user *)collected, mtime);
65845 }
65846 }
65847 return 0;
65848@@ -336,15 +336,15 @@ static int __init do_name(void)
65849 static int __init do_copy(void)
65850 {
65851 if (count >= body_len) {
65852- sys_write(wfd, victim, body_len);
65853+ sys_write(wfd, (char __force_user *)victim, body_len);
65854 sys_close(wfd);
65855- do_utime(vcollected, mtime);
65856+ do_utime((char __force_user *)vcollected, mtime);
65857 kfree(vcollected);
65858 eat(body_len);
65859 state = SkipIt;
65860 return 0;
65861 } else {
65862- sys_write(wfd, victim, count);
65863+ sys_write(wfd, (char __force_user *)victim, count);
65864 body_len -= count;
65865 eat(count);
65866 return 1;
65867@@ -355,9 +355,9 @@ static int __init do_symlink(void)
65868 {
65869 collected[N_ALIGN(name_len) + body_len] = '\0';
65870 clean_path(collected, 0);
65871- sys_symlink(collected + N_ALIGN(name_len), collected);
65872- sys_lchown(collected, uid, gid);
65873- do_utime(collected, mtime);
65874+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65875+ sys_lchown((char __force_user *)collected, uid, gid);
65876+ do_utime((char __force_user *)collected, mtime);
65877 state = SkipIt;
65878 next_state = Reset;
65879 return 0;
65880diff --git a/init/main.c b/init/main.c
65881index ff49a6d..5fa0429 100644
65882--- a/init/main.c
65883+++ b/init/main.c
65884@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
65885 extern void tc_init(void);
65886 #endif
65887
65888+extern void grsecurity_init(void);
65889+
65890 /*
65891 * Debug helper: via this flag we know that we are in 'early bootup code'
65892 * where only the boot processor is running with IRQ disabled. This means
65893@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
65894
65895 __setup("reset_devices", set_reset_devices);
65896
65897+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65898+extern char pax_enter_kernel_user[];
65899+extern char pax_exit_kernel_user[];
65900+extern pgdval_t clone_pgd_mask;
65901+#endif
65902+
65903+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65904+static int __init setup_pax_nouderef(char *str)
65905+{
65906+#ifdef CONFIG_X86_32
65907+ unsigned int cpu;
65908+ struct desc_struct *gdt;
65909+
65910+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
65911+ gdt = get_cpu_gdt_table(cpu);
65912+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65913+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65914+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65915+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65916+ }
65917+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65918+#else
65919+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65920+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65921+ clone_pgd_mask = ~(pgdval_t)0UL;
65922+#endif
65923+
65924+ return 0;
65925+}
65926+early_param("pax_nouderef", setup_pax_nouderef);
65927+#endif
65928+
65929+#ifdef CONFIG_PAX_SOFTMODE
65930+int pax_softmode;
65931+
65932+static int __init setup_pax_softmode(char *str)
65933+{
65934+ get_option(&str, &pax_softmode);
65935+ return 1;
65936+}
65937+__setup("pax_softmode=", setup_pax_softmode);
65938+#endif
65939+
65940 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65941 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65942 static const char *panic_later, *panic_param;
65943@@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65944 {
65945 int count = preempt_count();
65946 int ret;
65947+ const char *msg1 = "", *msg2 = "";
65948
65949 if (initcall_debug)
65950 ret = do_one_initcall_debug(fn);
65951@@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65952 sprintf(msgbuf, "error code %d ", ret);
65953
65954 if (preempt_count() != count) {
65955- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65956+ msg1 = " preemption imbalance";
65957 preempt_count() = count;
65958 }
65959 if (irqs_disabled()) {
65960- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65961+ msg2 = " disabled interrupts";
65962 local_irq_enable();
65963 }
65964- if (msgbuf[0]) {
65965- printk("initcall %pF returned with %s\n", fn, msgbuf);
65966+ if (msgbuf[0] || *msg1 || *msg2) {
65967+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65968 }
65969
65970 return ret;
65971@@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
65972 do_basic_setup();
65973
65974 /* Open the /dev/console on the rootfs, this should never fail */
65975- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65976+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65977 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65978
65979 (void) sys_dup(0);
65980@@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
65981 if (!ramdisk_execute_command)
65982 ramdisk_execute_command = "/init";
65983
65984- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65985+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65986 ramdisk_execute_command = NULL;
65987 prepare_namespace();
65988 }
65989
65990+ grsecurity_init();
65991+
65992 /*
65993 * Ok, we have completed the initial bootup, and
65994 * we're essentially up and running. Get rid of the
65995diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65996index 86ee272..773d937 100644
65997--- a/ipc/mqueue.c
65998+++ b/ipc/mqueue.c
65999@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
66000 mq_bytes = (mq_msg_tblsz +
66001 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
66002
66003+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66004 spin_lock(&mq_lock);
66005 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
66006 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
66007diff --git a/ipc/msg.c b/ipc/msg.c
66008index 7385de2..a8180e08 100644
66009--- a/ipc/msg.c
66010+++ b/ipc/msg.c
66011@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
66012 return security_msg_queue_associate(msq, msgflg);
66013 }
66014
66015+static struct ipc_ops msg_ops = {
66016+ .getnew = newque,
66017+ .associate = msg_security,
66018+ .more_checks = NULL
66019+};
66020+
66021 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66022 {
66023 struct ipc_namespace *ns;
66024- struct ipc_ops msg_ops;
66025 struct ipc_params msg_params;
66026
66027 ns = current->nsproxy->ipc_ns;
66028
66029- msg_ops.getnew = newque;
66030- msg_ops.associate = msg_security;
66031- msg_ops.more_checks = NULL;
66032-
66033 msg_params.key = key;
66034 msg_params.flg = msgflg;
66035
66036diff --git a/ipc/sem.c b/ipc/sem.c
66037index 5215a81..cfc0cac 100644
66038--- a/ipc/sem.c
66039+++ b/ipc/sem.c
66040@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
66041 return 0;
66042 }
66043
66044+static struct ipc_ops sem_ops = {
66045+ .getnew = newary,
66046+ .associate = sem_security,
66047+ .more_checks = sem_more_checks
66048+};
66049+
66050 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66051 {
66052 struct ipc_namespace *ns;
66053- struct ipc_ops sem_ops;
66054 struct ipc_params sem_params;
66055
66056 ns = current->nsproxy->ipc_ns;
66057@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66058 if (nsems < 0 || nsems > ns->sc_semmsl)
66059 return -EINVAL;
66060
66061- sem_ops.getnew = newary;
66062- sem_ops.associate = sem_security;
66063- sem_ops.more_checks = sem_more_checks;
66064-
66065 sem_params.key = key;
66066 sem_params.flg = semflg;
66067 sem_params.u.nsems = nsems;
66068diff --git a/ipc/shm.c b/ipc/shm.c
66069index b76be5b..859e750 100644
66070--- a/ipc/shm.c
66071+++ b/ipc/shm.c
66072@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
66073 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66074 #endif
66075
66076+#ifdef CONFIG_GRKERNSEC
66077+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66078+ const time_t shm_createtime, const uid_t cuid,
66079+ const int shmid);
66080+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66081+ const time_t shm_createtime);
66082+#endif
66083+
66084 void shm_init_ns(struct ipc_namespace *ns)
66085 {
66086 ns->shm_ctlmax = SHMMAX;
66087@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
66088 shp->shm_lprid = 0;
66089 shp->shm_atim = shp->shm_dtim = 0;
66090 shp->shm_ctim = get_seconds();
66091+#ifdef CONFIG_GRKERNSEC
66092+ {
66093+ struct timespec timeval;
66094+ do_posix_clock_monotonic_gettime(&timeval);
66095+
66096+ shp->shm_createtime = timeval.tv_sec;
66097+ }
66098+#endif
66099 shp->shm_segsz = size;
66100 shp->shm_nattch = 0;
66101 shp->shm_file = file;
66102@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
66103 return 0;
66104 }
66105
66106+static struct ipc_ops shm_ops = {
66107+ .getnew = newseg,
66108+ .associate = shm_security,
66109+ .more_checks = shm_more_checks
66110+};
66111+
66112 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66113 {
66114 struct ipc_namespace *ns;
66115- struct ipc_ops shm_ops;
66116 struct ipc_params shm_params;
66117
66118 ns = current->nsproxy->ipc_ns;
66119
66120- shm_ops.getnew = newseg;
66121- shm_ops.associate = shm_security;
66122- shm_ops.more_checks = shm_more_checks;
66123-
66124 shm_params.key = key;
66125 shm_params.flg = shmflg;
66126 shm_params.u.size = size;
66127@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66128 f_mode = FMODE_READ | FMODE_WRITE;
66129 }
66130 if (shmflg & SHM_EXEC) {
66131+
66132+#ifdef CONFIG_PAX_MPROTECT
66133+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
66134+ goto out;
66135+#endif
66136+
66137 prot |= PROT_EXEC;
66138 acc_mode |= S_IXUGO;
66139 }
66140@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66141 if (err)
66142 goto out_unlock;
66143
66144+#ifdef CONFIG_GRKERNSEC
66145+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66146+ shp->shm_perm.cuid, shmid) ||
66147+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66148+ err = -EACCES;
66149+ goto out_unlock;
66150+ }
66151+#endif
66152+
66153 path = shp->shm_file->f_path;
66154 path_get(&path);
66155 shp->shm_nattch++;
66156+#ifdef CONFIG_GRKERNSEC
66157+ shp->shm_lapid = current->pid;
66158+#endif
66159 size = i_size_read(path.dentry->d_inode);
66160 shm_unlock(shp);
66161
66162diff --git a/kernel/acct.c b/kernel/acct.c
66163index 02e6167..54824f7 100644
66164--- a/kernel/acct.c
66165+++ b/kernel/acct.c
66166@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
66167 */
66168 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66169 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66170- file->f_op->write(file, (char *)&ac,
66171+ file->f_op->write(file, (char __force_user *)&ac,
66172 sizeof(acct_t), &file->f_pos);
66173 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66174 set_fs(fs);
66175diff --git a/kernel/audit.c b/kernel/audit.c
66176index bb0eb5b..cf2a03a 100644
66177--- a/kernel/audit.c
66178+++ b/kernel/audit.c
66179@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
66180 3) suppressed due to audit_rate_limit
66181 4) suppressed due to audit_backlog_limit
66182 */
66183-static atomic_t audit_lost = ATOMIC_INIT(0);
66184+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66185
66186 /* The netlink socket. */
66187 static struct sock *audit_sock;
66188@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
66189 unsigned long now;
66190 int print;
66191
66192- atomic_inc(&audit_lost);
66193+ atomic_inc_unchecked(&audit_lost);
66194
66195 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66196
66197@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
66198 printk(KERN_WARNING
66199 "audit: audit_lost=%d audit_rate_limit=%d "
66200 "audit_backlog_limit=%d\n",
66201- atomic_read(&audit_lost),
66202+ atomic_read_unchecked(&audit_lost),
66203 audit_rate_limit,
66204 audit_backlog_limit);
66205 audit_panic(message);
66206@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
66207 status_set.pid = audit_pid;
66208 status_set.rate_limit = audit_rate_limit;
66209 status_set.backlog_limit = audit_backlog_limit;
66210- status_set.lost = atomic_read(&audit_lost);
66211+ status_set.lost = atomic_read_unchecked(&audit_lost);
66212 status_set.backlog = skb_queue_len(&audit_skb_queue);
66213 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66214 &status_set, sizeof(status_set));
66215diff --git a/kernel/auditsc.c b/kernel/auditsc.c
66216index af1de0f..06dfe57 100644
66217--- a/kernel/auditsc.c
66218+++ b/kernel/auditsc.c
66219@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
66220 }
66221
66222 /* global counter which is incremented every time something logs in */
66223-static atomic_t session_id = ATOMIC_INIT(0);
66224+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66225
66226 /**
66227 * audit_set_loginuid - set current task's audit_context loginuid
66228@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
66229 return -EPERM;
66230 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
66231
66232- sessionid = atomic_inc_return(&session_id);
66233+ sessionid = atomic_inc_return_unchecked(&session_id);
66234 if (context && context->in_syscall) {
66235 struct audit_buffer *ab;
66236
66237diff --git a/kernel/capability.c b/kernel/capability.c
66238index 3f1adb6..c564db0 100644
66239--- a/kernel/capability.c
66240+++ b/kernel/capability.c
66241@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
66242 * before modification is attempted and the application
66243 * fails.
66244 */
66245+ if (tocopy > ARRAY_SIZE(kdata))
66246+ return -EFAULT;
66247+
66248 if (copy_to_user(dataptr, kdata, tocopy
66249 * sizeof(struct __user_cap_data_struct))) {
66250 return -EFAULT;
66251@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
66252 int ret;
66253
66254 rcu_read_lock();
66255- ret = security_capable(__task_cred(t), ns, cap);
66256+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
66257+ gr_task_is_capable(t, __task_cred(t), cap);
66258 rcu_read_unlock();
66259
66260- return (ret == 0);
66261+ return ret;
66262 }
66263
66264 /**
66265@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
66266 int ret;
66267
66268 rcu_read_lock();
66269- ret = security_capable_noaudit(__task_cred(t), ns, cap);
66270+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
66271 rcu_read_unlock();
66272
66273- return (ret == 0);
66274+ return ret;
66275 }
66276
66277 /**
66278@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
66279 BUG();
66280 }
66281
66282- if (security_capable(current_cred(), ns, cap) == 0) {
66283+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
66284 current->flags |= PF_SUPERPRIV;
66285 return true;
66286 }
66287@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
66288 }
66289 EXPORT_SYMBOL(ns_capable);
66290
66291+bool ns_capable_nolog(struct user_namespace *ns, int cap)
66292+{
66293+ if (unlikely(!cap_valid(cap))) {
66294+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66295+ BUG();
66296+ }
66297+
66298+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
66299+ current->flags |= PF_SUPERPRIV;
66300+ return true;
66301+ }
66302+ return false;
66303+}
66304+EXPORT_SYMBOL(ns_capable_nolog);
66305+
66306 /**
66307 * capable - Determine if the current task has a superior capability in effect
66308 * @cap: The capability to be tested for
66309@@ -408,6 +427,12 @@ bool capable(int cap)
66310 }
66311 EXPORT_SYMBOL(capable);
66312
66313+bool capable_nolog(int cap)
66314+{
66315+ return ns_capable_nolog(&init_user_ns, cap);
66316+}
66317+EXPORT_SYMBOL(capable_nolog);
66318+
66319 /**
66320 * nsown_capable - Check superior capability to one's own user_ns
66321 * @cap: The capability in question
66322diff --git a/kernel/compat.c b/kernel/compat.c
66323index f346ced..aa2b1f4 100644
66324--- a/kernel/compat.c
66325+++ b/kernel/compat.c
66326@@ -13,6 +13,7 @@
66327
66328 #include <linux/linkage.h>
66329 #include <linux/compat.h>
66330+#include <linux/module.h>
66331 #include <linux/errno.h>
66332 #include <linux/time.h>
66333 #include <linux/signal.h>
66334@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
66335 mm_segment_t oldfs;
66336 long ret;
66337
66338- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66339+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66340 oldfs = get_fs();
66341 set_fs(KERNEL_DS);
66342 ret = hrtimer_nanosleep_restart(restart);
66343@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
66344 oldfs = get_fs();
66345 set_fs(KERNEL_DS);
66346 ret = hrtimer_nanosleep(&tu,
66347- rmtp ? (struct timespec __user *)&rmt : NULL,
66348+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
66349 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66350 set_fs(oldfs);
66351
66352@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
66353 mm_segment_t old_fs = get_fs();
66354
66355 set_fs(KERNEL_DS);
66356- ret = sys_sigpending((old_sigset_t __user *) &s);
66357+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
66358 set_fs(old_fs);
66359 if (ret == 0)
66360 ret = put_user(s, set);
66361@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
66362 old_fs = get_fs();
66363 set_fs(KERNEL_DS);
66364 ret = sys_sigprocmask(how,
66365- set ? (old_sigset_t __user *) &s : NULL,
66366- oset ? (old_sigset_t __user *) &s : NULL);
66367+ set ? (old_sigset_t __force_user *) &s : NULL,
66368+ oset ? (old_sigset_t __force_user *) &s : NULL);
66369 set_fs(old_fs);
66370 if (ret == 0)
66371 if (oset)
66372@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
66373 mm_segment_t old_fs = get_fs();
66374
66375 set_fs(KERNEL_DS);
66376- ret = sys_old_getrlimit(resource, &r);
66377+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66378 set_fs(old_fs);
66379
66380 if (!ret) {
66381@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
66382 mm_segment_t old_fs = get_fs();
66383
66384 set_fs(KERNEL_DS);
66385- ret = sys_getrusage(who, (struct rusage __user *) &r);
66386+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66387 set_fs(old_fs);
66388
66389 if (ret)
66390@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
66391 set_fs (KERNEL_DS);
66392 ret = sys_wait4(pid,
66393 (stat_addr ?
66394- (unsigned int __user *) &status : NULL),
66395- options, (struct rusage __user *) &r);
66396+ (unsigned int __force_user *) &status : NULL),
66397+ options, (struct rusage __force_user *) &r);
66398 set_fs (old_fs);
66399
66400 if (ret > 0) {
66401@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
66402 memset(&info, 0, sizeof(info));
66403
66404 set_fs(KERNEL_DS);
66405- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66406- uru ? (struct rusage __user *)&ru : NULL);
66407+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66408+ uru ? (struct rusage __force_user *)&ru : NULL);
66409 set_fs(old_fs);
66410
66411 if ((ret < 0) || (info.si_signo == 0))
66412@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
66413 oldfs = get_fs();
66414 set_fs(KERNEL_DS);
66415 err = sys_timer_settime(timer_id, flags,
66416- (struct itimerspec __user *) &newts,
66417- (struct itimerspec __user *) &oldts);
66418+ (struct itimerspec __force_user *) &newts,
66419+ (struct itimerspec __force_user *) &oldts);
66420 set_fs(oldfs);
66421 if (!err && old && put_compat_itimerspec(old, &oldts))
66422 return -EFAULT;
66423@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
66424 oldfs = get_fs();
66425 set_fs(KERNEL_DS);
66426 err = sys_timer_gettime(timer_id,
66427- (struct itimerspec __user *) &ts);
66428+ (struct itimerspec __force_user *) &ts);
66429 set_fs(oldfs);
66430 if (!err && put_compat_itimerspec(setting, &ts))
66431 return -EFAULT;
66432@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
66433 oldfs = get_fs();
66434 set_fs(KERNEL_DS);
66435 err = sys_clock_settime(which_clock,
66436- (struct timespec __user *) &ts);
66437+ (struct timespec __force_user *) &ts);
66438 set_fs(oldfs);
66439 return err;
66440 }
66441@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
66442 oldfs = get_fs();
66443 set_fs(KERNEL_DS);
66444 err = sys_clock_gettime(which_clock,
66445- (struct timespec __user *) &ts);
66446+ (struct timespec __force_user *) &ts);
66447 set_fs(oldfs);
66448 if (!err && put_compat_timespec(&ts, tp))
66449 return -EFAULT;
66450@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
66451
66452 oldfs = get_fs();
66453 set_fs(KERNEL_DS);
66454- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66455+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66456 set_fs(oldfs);
66457
66458 err = compat_put_timex(utp, &txc);
66459@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66460 oldfs = get_fs();
66461 set_fs(KERNEL_DS);
66462 err = sys_clock_getres(which_clock,
66463- (struct timespec __user *) &ts);
66464+ (struct timespec __force_user *) &ts);
66465 set_fs(oldfs);
66466 if (!err && tp && put_compat_timespec(&ts, tp))
66467 return -EFAULT;
66468@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66469 long err;
66470 mm_segment_t oldfs;
66471 struct timespec tu;
66472- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66473+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66474
66475- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66476+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66477 oldfs = get_fs();
66478 set_fs(KERNEL_DS);
66479 err = clock_nanosleep_restart(restart);
66480@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66481 oldfs = get_fs();
66482 set_fs(KERNEL_DS);
66483 err = sys_clock_nanosleep(which_clock, flags,
66484- (struct timespec __user *) &in,
66485- (struct timespec __user *) &out);
66486+ (struct timespec __force_user *) &in,
66487+ (struct timespec __force_user *) &out);
66488 set_fs(oldfs);
66489
66490 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66491diff --git a/kernel/configs.c b/kernel/configs.c
66492index 42e8fa0..9e7406b 100644
66493--- a/kernel/configs.c
66494+++ b/kernel/configs.c
66495@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66496 struct proc_dir_entry *entry;
66497
66498 /* create the current config file */
66499+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66500+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66501+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66502+ &ikconfig_file_ops);
66503+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66504+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66505+ &ikconfig_file_ops);
66506+#endif
66507+#else
66508 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66509 &ikconfig_file_ops);
66510+#endif
66511+
66512 if (!entry)
66513 return -ENOMEM;
66514
66515diff --git a/kernel/cred.c b/kernel/cred.c
66516index 48c6fd3..8398912 100644
66517--- a/kernel/cred.c
66518+++ b/kernel/cred.c
66519@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
66520 validate_creds(cred);
66521 put_cred(cred);
66522 }
66523+
66524+#ifdef CONFIG_GRKERNSEC_SETXID
66525+ cred = (struct cred *) tsk->delayed_cred;
66526+ if (cred) {
66527+ tsk->delayed_cred = NULL;
66528+ validate_creds(cred);
66529+ put_cred(cred);
66530+ }
66531+#endif
66532 }
66533
66534 /**
66535@@ -472,7 +481,7 @@ error_put:
66536 * Always returns 0 thus allowing this function to be tail-called at the end
66537 * of, say, sys_setgid().
66538 */
66539-int commit_creds(struct cred *new)
66540+static int __commit_creds(struct cred *new)
66541 {
66542 struct task_struct *task = current;
66543 const struct cred *old = task->real_cred;
66544@@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
66545
66546 get_cred(new); /* we will require a ref for the subj creds too */
66547
66548+ gr_set_role_label(task, new->uid, new->gid);
66549+
66550 /* dumpability changes */
66551 if (old->euid != new->euid ||
66552 old->egid != new->egid ||
66553@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
66554 put_cred(old);
66555 return 0;
66556 }
66557+#ifdef CONFIG_GRKERNSEC_SETXID
66558+extern int set_user(struct cred *new);
66559+
66560+void gr_delayed_cred_worker(void)
66561+{
66562+ const struct cred *new = current->delayed_cred;
66563+ struct cred *ncred;
66564+
66565+ current->delayed_cred = NULL;
66566+
66567+ if (current_uid() && new != NULL) {
66568+ // from doing get_cred on it when queueing this
66569+ put_cred(new);
66570+ return;
66571+ } else if (new == NULL)
66572+ return;
66573+
66574+ ncred = prepare_creds();
66575+ if (!ncred)
66576+ goto die;
66577+ // uids
66578+ ncred->uid = new->uid;
66579+ ncred->euid = new->euid;
66580+ ncred->suid = new->suid;
66581+ ncred->fsuid = new->fsuid;
66582+ // gids
66583+ ncred->gid = new->gid;
66584+ ncred->egid = new->egid;
66585+ ncred->sgid = new->sgid;
66586+ ncred->fsgid = new->fsgid;
66587+ // groups
66588+ if (set_groups(ncred, new->group_info) < 0) {
66589+ abort_creds(ncred);
66590+ goto die;
66591+ }
66592+ // caps
66593+ ncred->securebits = new->securebits;
66594+ ncred->cap_inheritable = new->cap_inheritable;
66595+ ncred->cap_permitted = new->cap_permitted;
66596+ ncred->cap_effective = new->cap_effective;
66597+ ncred->cap_bset = new->cap_bset;
66598+
66599+ if (set_user(ncred)) {
66600+ abort_creds(ncred);
66601+ goto die;
66602+ }
66603+
66604+ // from doing get_cred on it when queueing this
66605+ put_cred(new);
66606+
66607+ __commit_creds(ncred);
66608+ return;
66609+die:
66610+ // from doing get_cred on it when queueing this
66611+ put_cred(new);
66612+ do_group_exit(SIGKILL);
66613+}
66614+#endif
66615+
66616+int commit_creds(struct cred *new)
66617+{
66618+#ifdef CONFIG_GRKERNSEC_SETXID
66619+ int ret;
66620+ int schedule_it = 0;
66621+ struct task_struct *t;
66622+
66623+ /* we won't get called with tasklist_lock held for writing
66624+ and interrupts disabled as the cred struct in that case is
66625+ init_cred
66626+ */
66627+ if (grsec_enable_setxid && !current_is_single_threaded() &&
66628+ !current_uid() && new->uid) {
66629+ schedule_it = 1;
66630+ }
66631+ ret = __commit_creds(new);
66632+ if (schedule_it) {
66633+ rcu_read_lock();
66634+ read_lock(&tasklist_lock);
66635+ for (t = next_thread(current); t != current;
66636+ t = next_thread(t)) {
66637+ if (t->delayed_cred == NULL) {
66638+ t->delayed_cred = get_cred(new);
66639+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
66640+ set_tsk_need_resched(t);
66641+ }
66642+ }
66643+ read_unlock(&tasklist_lock);
66644+ rcu_read_unlock();
66645+ }
66646+ return ret;
66647+#else
66648+ return __commit_creds(new);
66649+#endif
66650+}
66651+
66652 EXPORT_SYMBOL(commit_creds);
66653
66654 /**
66655diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66656index 7fda904..59f620c 100644
66657--- a/kernel/debug/debug_core.c
66658+++ b/kernel/debug/debug_core.c
66659@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66660 */
66661 static atomic_t masters_in_kgdb;
66662 static atomic_t slaves_in_kgdb;
66663-static atomic_t kgdb_break_tasklet_var;
66664+static atomic_unchecked_t kgdb_break_tasklet_var;
66665 atomic_t kgdb_setting_breakpoint;
66666
66667 struct task_struct *kgdb_usethread;
66668@@ -129,7 +129,7 @@ int kgdb_single_step;
66669 static pid_t kgdb_sstep_pid;
66670
66671 /* to keep track of the CPU which is doing the single stepping*/
66672-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66673+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66674
66675 /*
66676 * If you are debugging a problem where roundup (the collection of
66677@@ -537,7 +537,7 @@ return_normal:
66678 * kernel will only try for the value of sstep_tries before
66679 * giving up and continuing on.
66680 */
66681- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66682+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66683 (kgdb_info[cpu].task &&
66684 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66685 atomic_set(&kgdb_active, -1);
66686@@ -631,8 +631,8 @@ cpu_master_loop:
66687 }
66688
66689 kgdb_restore:
66690- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66691- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66692+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66693+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66694 if (kgdb_info[sstep_cpu].task)
66695 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66696 else
66697@@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
66698 static void kgdb_tasklet_bpt(unsigned long ing)
66699 {
66700 kgdb_breakpoint();
66701- atomic_set(&kgdb_break_tasklet_var, 0);
66702+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66703 }
66704
66705 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66706
66707 void kgdb_schedule_breakpoint(void)
66708 {
66709- if (atomic_read(&kgdb_break_tasklet_var) ||
66710+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66711 atomic_read(&kgdb_active) != -1 ||
66712 atomic_read(&kgdb_setting_breakpoint))
66713 return;
66714- atomic_inc(&kgdb_break_tasklet_var);
66715+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
66716 tasklet_schedule(&kgdb_tasklet_breakpoint);
66717 }
66718 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
66719diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
66720index e2ae734..08a4c5c 100644
66721--- a/kernel/debug/kdb/kdb_main.c
66722+++ b/kernel/debug/kdb/kdb_main.c
66723@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
66724 list_for_each_entry(mod, kdb_modules, list) {
66725
66726 kdb_printf("%-20s%8u 0x%p ", mod->name,
66727- mod->core_size, (void *)mod);
66728+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
66729 #ifdef CONFIG_MODULE_UNLOAD
66730 kdb_printf("%4ld ", module_refcount(mod));
66731 #endif
66732@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
66733 kdb_printf(" (Loading)");
66734 else
66735 kdb_printf(" (Live)");
66736- kdb_printf(" 0x%p", mod->module_core);
66737+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
66738
66739 #ifdef CONFIG_MODULE_UNLOAD
66740 {
66741diff --git a/kernel/events/core.c b/kernel/events/core.c
66742index 1b5c081..c375f83 100644
66743--- a/kernel/events/core.c
66744+++ b/kernel/events/core.c
66745@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
66746 return 0;
66747 }
66748
66749-static atomic64_t perf_event_id;
66750+static atomic64_unchecked_t perf_event_id;
66751
66752 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
66753 enum event_type_t event_type);
66754@@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
66755
66756 static inline u64 perf_event_count(struct perf_event *event)
66757 {
66758- return local64_read(&event->count) + atomic64_read(&event->child_count);
66759+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
66760 }
66761
66762 static u64 perf_event_read(struct perf_event *event)
66763@@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
66764 mutex_lock(&event->child_mutex);
66765 total += perf_event_read(event);
66766 *enabled += event->total_time_enabled +
66767- atomic64_read(&event->child_total_time_enabled);
66768+ atomic64_read_unchecked(&event->child_total_time_enabled);
66769 *running += event->total_time_running +
66770- atomic64_read(&event->child_total_time_running);
66771+ atomic64_read_unchecked(&event->child_total_time_running);
66772
66773 list_for_each_entry(child, &event->child_list, child_list) {
66774 total += perf_event_read(child);
66775@@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
66776 userpg->offset -= local64_read(&event->hw.prev_count);
66777
66778 userpg->time_enabled = enabled +
66779- atomic64_read(&event->child_total_time_enabled);
66780+ atomic64_read_unchecked(&event->child_total_time_enabled);
66781
66782 userpg->time_running = running +
66783- atomic64_read(&event->child_total_time_running);
66784+ atomic64_read_unchecked(&event->child_total_time_running);
66785
66786 barrier();
66787 ++userpg->lock;
66788@@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
66789 values[n++] = perf_event_count(event);
66790 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
66791 values[n++] = enabled +
66792- atomic64_read(&event->child_total_time_enabled);
66793+ atomic64_read_unchecked(&event->child_total_time_enabled);
66794 }
66795 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
66796 values[n++] = running +
66797- atomic64_read(&event->child_total_time_running);
66798+ atomic64_read_unchecked(&event->child_total_time_running);
66799 }
66800 if (read_format & PERF_FORMAT_ID)
66801 values[n++] = primary_event_id(event);
66802@@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
66803 * need to add enough zero bytes after the string to handle
66804 * the 64bit alignment we do later.
66805 */
66806- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
66807+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
66808 if (!buf) {
66809 name = strncpy(tmp, "//enomem", sizeof(tmp));
66810 goto got_name;
66811 }
66812- name = d_path(&file->f_path, buf, PATH_MAX);
66813+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
66814 if (IS_ERR(name)) {
66815 name = strncpy(tmp, "//toolong", sizeof(tmp));
66816 goto got_name;
66817@@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
66818 event->parent = parent_event;
66819
66820 event->ns = get_pid_ns(current->nsproxy->pid_ns);
66821- event->id = atomic64_inc_return(&perf_event_id);
66822+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
66823
66824 event->state = PERF_EVENT_STATE_INACTIVE;
66825
66826@@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
66827 /*
66828 * Add back the child's count to the parent's count:
66829 */
66830- atomic64_add(child_val, &parent_event->child_count);
66831- atomic64_add(child_event->total_time_enabled,
66832+ atomic64_add_unchecked(child_val, &parent_event->child_count);
66833+ atomic64_add_unchecked(child_event->total_time_enabled,
66834 &parent_event->child_total_time_enabled);
66835- atomic64_add(child_event->total_time_running,
66836+ atomic64_add_unchecked(child_event->total_time_running,
66837 &parent_event->child_total_time_running);
66838
66839 /*
66840diff --git a/kernel/exit.c b/kernel/exit.c
66841index 46c8b14..d868958 100644
66842--- a/kernel/exit.c
66843+++ b/kernel/exit.c
66844@@ -58,6 +58,10 @@
66845 #include <asm/pgtable.h>
66846 #include <asm/mmu_context.h>
66847
66848+#ifdef CONFIG_GRKERNSEC
66849+extern rwlock_t grsec_exec_file_lock;
66850+#endif
66851+
66852 static void exit_mm(struct task_struct * tsk);
66853
66854 static void __unhash_process(struct task_struct *p, bool group_dead)
66855@@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
66856 struct task_struct *leader;
66857 int zap_leader;
66858 repeat:
66859+#ifdef CONFIG_NET
66860+ gr_del_task_from_ip_table(p);
66861+#endif
66862+
66863 /* don't need to get the RCU readlock here - the process is dead and
66864 * can't be modifying its own credentials. But shut RCU-lockdep up */
66865 rcu_read_lock();
66866@@ -381,7 +389,7 @@ int allow_signal(int sig)
66867 * know it'll be handled, so that they don't get converted to
66868 * SIGKILL or just silently dropped.
66869 */
66870- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66871+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66872 recalc_sigpending();
66873 spin_unlock_irq(&current->sighand->siglock);
66874 return 0;
66875@@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
66876 vsnprintf(current->comm, sizeof(current->comm), name, args);
66877 va_end(args);
66878
66879+#ifdef CONFIG_GRKERNSEC
66880+ write_lock(&grsec_exec_file_lock);
66881+ if (current->exec_file) {
66882+ fput(current->exec_file);
66883+ current->exec_file = NULL;
66884+ }
66885+ write_unlock(&grsec_exec_file_lock);
66886+#endif
66887+
66888+ gr_set_kernel_label(current);
66889+
66890 /*
66891 * If we were started as result of loading a module, close all of the
66892 * user space pages. We don't need them, and if we didn't close them
66893@@ -873,6 +892,8 @@ void do_exit(long code)
66894 struct task_struct *tsk = current;
66895 int group_dead;
66896
66897+ set_fs(USER_DS);
66898+
66899 profile_task_exit(tsk);
66900
66901 WARN_ON(blk_needs_flush_plug(tsk));
66902@@ -889,7 +910,6 @@ void do_exit(long code)
66903 * mm_release()->clear_child_tid() from writing to a user-controlled
66904 * kernel address.
66905 */
66906- set_fs(USER_DS);
66907
66908 ptrace_event(PTRACE_EVENT_EXIT, code);
66909
66910@@ -950,6 +970,9 @@ void do_exit(long code)
66911 tsk->exit_code = code;
66912 taskstats_exit(tsk, group_dead);
66913
66914+ gr_acl_handle_psacct(tsk, code);
66915+ gr_acl_handle_exit();
66916+
66917 exit_mm(tsk);
66918
66919 if (group_dead)
66920@@ -1066,7 +1089,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
66921 * Take down every thread in the group. This is called by fatal signals
66922 * as well as by sys_exit_group (below).
66923 */
66924-void
66925+__noreturn void
66926 do_group_exit(int exit_code)
66927 {
66928 struct signal_struct *sig = current->signal;
66929diff --git a/kernel/fork.c b/kernel/fork.c
66930index 26a7a67..a1053f9 100644
66931--- a/kernel/fork.c
66932+++ b/kernel/fork.c
66933@@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66934 *stackend = STACK_END_MAGIC; /* for overflow detection */
66935
66936 #ifdef CONFIG_CC_STACKPROTECTOR
66937- tsk->stack_canary = get_random_int();
66938+ tsk->stack_canary = pax_get_random_long();
66939 #endif
66940
66941 /*
66942@@ -308,13 +308,77 @@ out:
66943 }
66944
66945 #ifdef CONFIG_MMU
66946+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66947+{
66948+ struct vm_area_struct *tmp;
66949+ unsigned long charge;
66950+ struct mempolicy *pol;
66951+ struct file *file;
66952+
66953+ charge = 0;
66954+ if (mpnt->vm_flags & VM_ACCOUNT) {
66955+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66956+ if (security_vm_enough_memory(len))
66957+ goto fail_nomem;
66958+ charge = len;
66959+ }
66960+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66961+ if (!tmp)
66962+ goto fail_nomem;
66963+ *tmp = *mpnt;
66964+ tmp->vm_mm = mm;
66965+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66966+ pol = mpol_dup(vma_policy(mpnt));
66967+ if (IS_ERR(pol))
66968+ goto fail_nomem_policy;
66969+ vma_set_policy(tmp, pol);
66970+ if (anon_vma_fork(tmp, mpnt))
66971+ goto fail_nomem_anon_vma_fork;
66972+ tmp->vm_flags &= ~VM_LOCKED;
66973+ tmp->vm_next = tmp->vm_prev = NULL;
66974+ tmp->vm_mirror = NULL;
66975+ file = tmp->vm_file;
66976+ if (file) {
66977+ struct inode *inode = file->f_path.dentry->d_inode;
66978+ struct address_space *mapping = file->f_mapping;
66979+
66980+ get_file(file);
66981+ if (tmp->vm_flags & VM_DENYWRITE)
66982+ atomic_dec(&inode->i_writecount);
66983+ mutex_lock(&mapping->i_mmap_mutex);
66984+ if (tmp->vm_flags & VM_SHARED)
66985+ mapping->i_mmap_writable++;
66986+ flush_dcache_mmap_lock(mapping);
66987+ /* insert tmp into the share list, just after mpnt */
66988+ vma_prio_tree_add(tmp, mpnt);
66989+ flush_dcache_mmap_unlock(mapping);
66990+ mutex_unlock(&mapping->i_mmap_mutex);
66991+ }
66992+
66993+ /*
66994+ * Clear hugetlb-related page reserves for children. This only
66995+ * affects MAP_PRIVATE mappings. Faults generated by the child
66996+ * are not guaranteed to succeed, even if read-only
66997+ */
66998+ if (is_vm_hugetlb_page(tmp))
66999+ reset_vma_resv_huge_pages(tmp);
67000+
67001+ return tmp;
67002+
67003+fail_nomem_anon_vma_fork:
67004+ mpol_put(pol);
67005+fail_nomem_policy:
67006+ kmem_cache_free(vm_area_cachep, tmp);
67007+fail_nomem:
67008+ vm_unacct_memory(charge);
67009+ return NULL;
67010+}
67011+
67012 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67013 {
67014 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
67015 struct rb_node **rb_link, *rb_parent;
67016 int retval;
67017- unsigned long charge;
67018- struct mempolicy *pol;
67019
67020 down_write(&oldmm->mmap_sem);
67021 flush_cache_dup_mm(oldmm);
67022@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67023 mm->locked_vm = 0;
67024 mm->mmap = NULL;
67025 mm->mmap_cache = NULL;
67026- mm->free_area_cache = oldmm->mmap_base;
67027- mm->cached_hole_size = ~0UL;
67028+ mm->free_area_cache = oldmm->free_area_cache;
67029+ mm->cached_hole_size = oldmm->cached_hole_size;
67030 mm->map_count = 0;
67031 cpumask_clear(mm_cpumask(mm));
67032 mm->mm_rb = RB_ROOT;
67033@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67034
67035 prev = NULL;
67036 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
67037- struct file *file;
67038-
67039 if (mpnt->vm_flags & VM_DONTCOPY) {
67040 long pages = vma_pages(mpnt);
67041 mm->total_vm -= pages;
67042@@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67043 -pages);
67044 continue;
67045 }
67046- charge = 0;
67047- if (mpnt->vm_flags & VM_ACCOUNT) {
67048- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67049- if (security_vm_enough_memory(len))
67050- goto fail_nomem;
67051- charge = len;
67052+ tmp = dup_vma(mm, mpnt);
67053+ if (!tmp) {
67054+ retval = -ENOMEM;
67055+ goto out;
67056 }
67057- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67058- if (!tmp)
67059- goto fail_nomem;
67060- *tmp = *mpnt;
67061- INIT_LIST_HEAD(&tmp->anon_vma_chain);
67062- pol = mpol_dup(vma_policy(mpnt));
67063- retval = PTR_ERR(pol);
67064- if (IS_ERR(pol))
67065- goto fail_nomem_policy;
67066- vma_set_policy(tmp, pol);
67067- tmp->vm_mm = mm;
67068- if (anon_vma_fork(tmp, mpnt))
67069- goto fail_nomem_anon_vma_fork;
67070- tmp->vm_flags &= ~VM_LOCKED;
67071- tmp->vm_next = tmp->vm_prev = NULL;
67072- file = tmp->vm_file;
67073- if (file) {
67074- struct inode *inode = file->f_path.dentry->d_inode;
67075- struct address_space *mapping = file->f_mapping;
67076-
67077- get_file(file);
67078- if (tmp->vm_flags & VM_DENYWRITE)
67079- atomic_dec(&inode->i_writecount);
67080- mutex_lock(&mapping->i_mmap_mutex);
67081- if (tmp->vm_flags & VM_SHARED)
67082- mapping->i_mmap_writable++;
67083- flush_dcache_mmap_lock(mapping);
67084- /* insert tmp into the share list, just after mpnt */
67085- vma_prio_tree_add(tmp, mpnt);
67086- flush_dcache_mmap_unlock(mapping);
67087- mutex_unlock(&mapping->i_mmap_mutex);
67088- }
67089-
67090- /*
67091- * Clear hugetlb-related page reserves for children. This only
67092- * affects MAP_PRIVATE mappings. Faults generated by the child
67093- * are not guaranteed to succeed, even if read-only
67094- */
67095- if (is_vm_hugetlb_page(tmp))
67096- reset_vma_resv_huge_pages(tmp);
67097
67098 /*
67099 * Link in the new vma and copy the page table entries.
67100@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67101 if (retval)
67102 goto out;
67103 }
67104+
67105+#ifdef CONFIG_PAX_SEGMEXEC
67106+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
67107+ struct vm_area_struct *mpnt_m;
67108+
67109+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
67110+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
67111+
67112+ if (!mpnt->vm_mirror)
67113+ continue;
67114+
67115+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
67116+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
67117+ mpnt->vm_mirror = mpnt_m;
67118+ } else {
67119+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
67120+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
67121+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
67122+ mpnt->vm_mirror->vm_mirror = mpnt;
67123+ }
67124+ }
67125+ BUG_ON(mpnt_m);
67126+ }
67127+#endif
67128+
67129 /* a new mm has just been created */
67130 arch_dup_mmap(oldmm, mm);
67131 retval = 0;
67132@@ -429,14 +474,6 @@ out:
67133 flush_tlb_mm(oldmm);
67134 up_write(&oldmm->mmap_sem);
67135 return retval;
67136-fail_nomem_anon_vma_fork:
67137- mpol_put(pol);
67138-fail_nomem_policy:
67139- kmem_cache_free(vm_area_cachep, tmp);
67140-fail_nomem:
67141- retval = -ENOMEM;
67142- vm_unacct_memory(charge);
67143- goto out;
67144 }
67145
67146 static inline int mm_alloc_pgd(struct mm_struct *mm)
67147@@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
67148 return ERR_PTR(err);
67149
67150 mm = get_task_mm(task);
67151- if (mm && mm != current->mm &&
67152- !ptrace_may_access(task, mode)) {
67153+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
67154+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
67155 mmput(mm);
67156 mm = ERR_PTR(-EACCES);
67157 }
67158@@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
67159 spin_unlock(&fs->lock);
67160 return -EAGAIN;
67161 }
67162- fs->users++;
67163+ atomic_inc(&fs->users);
67164 spin_unlock(&fs->lock);
67165 return 0;
67166 }
67167 tsk->fs = copy_fs_struct(fs);
67168 if (!tsk->fs)
67169 return -ENOMEM;
67170+ gr_set_chroot_entries(tsk, &tsk->fs->root);
67171 return 0;
67172 }
67173
67174@@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67175 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
67176 #endif
67177 retval = -EAGAIN;
67178+
67179+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
67180+
67181 if (atomic_read(&p->real_cred->user->processes) >=
67182 task_rlimit(p, RLIMIT_NPROC)) {
67183 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
67184@@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67185 if (clone_flags & CLONE_THREAD)
67186 p->tgid = current->tgid;
67187
67188+ gr_copy_label(p);
67189+
67190 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
67191 /*
67192 * Clear TID on mm_release()?
67193@@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
67194 bad_fork_free:
67195 free_task(p);
67196 fork_out:
67197+ gr_log_forkfail(retval);
67198+
67199 return ERR_PTR(retval);
67200 }
67201
67202@@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
67203 if (clone_flags & CLONE_PARENT_SETTID)
67204 put_user(nr, parent_tidptr);
67205
67206+ gr_handle_brute_check();
67207+
67208 if (clone_flags & CLONE_VFORK) {
67209 p->vfork_done = &vfork;
67210 init_completion(&vfork);
67211@@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
67212 return 0;
67213
67214 /* don't need lock here; in the worst case we'll do useless copy */
67215- if (fs->users == 1)
67216+ if (atomic_read(&fs->users) == 1)
67217 return 0;
67218
67219 *new_fsp = copy_fs_struct(fs);
67220@@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
67221 fs = current->fs;
67222 spin_lock(&fs->lock);
67223 current->fs = new_fs;
67224- if (--fs->users)
67225+ gr_set_chroot_entries(current, &current->fs->root);
67226+ if (atomic_dec_return(&fs->users))
67227 new_fs = NULL;
67228 else
67229 new_fs = fs;
67230diff --git a/kernel/futex.c b/kernel/futex.c
67231index 866c9d5..5c5f828 100644
67232--- a/kernel/futex.c
67233+++ b/kernel/futex.c
67234@@ -54,6 +54,7 @@
67235 #include <linux/mount.h>
67236 #include <linux/pagemap.h>
67237 #include <linux/syscalls.h>
67238+#include <linux/ptrace.h>
67239 #include <linux/signal.h>
67240 #include <linux/export.h>
67241 #include <linux/magic.h>
67242@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
67243 struct page *page, *page_head;
67244 int err, ro = 0;
67245
67246+#ifdef CONFIG_PAX_SEGMEXEC
67247+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67248+ return -EFAULT;
67249+#endif
67250+
67251 /*
67252 * The futex address must be "naturally" aligned.
67253 */
67254@@ -2721,6 +2727,7 @@ static int __init futex_init(void)
67255 {
67256 u32 curval;
67257 int i;
67258+ mm_segment_t oldfs;
67259
67260 /*
67261 * This will fail and we want it. Some arch implementations do
67262@@ -2732,8 +2739,11 @@ static int __init futex_init(void)
67263 * implementation, the non-functional ones will return
67264 * -ENOSYS.
67265 */
67266+ oldfs = get_fs();
67267+ set_fs(USER_DS);
67268 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
67269 futex_cmpxchg_enabled = 1;
67270+ set_fs(oldfs);
67271
67272 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
67273 plist_head_init(&futex_queues[i].chain);
67274diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
67275index 9b22d03..6295b62 100644
67276--- a/kernel/gcov/base.c
67277+++ b/kernel/gcov/base.c
67278@@ -102,11 +102,6 @@ void gcov_enable_events(void)
67279 }
67280
67281 #ifdef CONFIG_MODULES
67282-static inline int within(void *addr, void *start, unsigned long size)
67283-{
67284- return ((addr >= start) && (addr < start + size));
67285-}
67286-
67287 /* Update list and generate events when modules are unloaded. */
67288 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67289 void *data)
67290@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67291 prev = NULL;
67292 /* Remove entries located in module from linked list. */
67293 for (info = gcov_info_head; info; info = info->next) {
67294- if (within(info, mod->module_core, mod->core_size)) {
67295+ if (within_module_core_rw((unsigned long)info, mod)) {
67296 if (prev)
67297 prev->next = info->next;
67298 else
67299diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
67300index ae34bf5..4e2f3d0 100644
67301--- a/kernel/hrtimer.c
67302+++ b/kernel/hrtimer.c
67303@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
67304 local_irq_restore(flags);
67305 }
67306
67307-static void run_hrtimer_softirq(struct softirq_action *h)
67308+static void run_hrtimer_softirq(void)
67309 {
67310 hrtimer_peek_ahead_timers();
67311 }
67312diff --git a/kernel/jump_label.c b/kernel/jump_label.c
67313index 01d3b70..9e4d098 100644
67314--- a/kernel/jump_label.c
67315+++ b/kernel/jump_label.c
67316@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
67317
67318 size = (((unsigned long)stop - (unsigned long)start)
67319 / sizeof(struct jump_entry));
67320+ pax_open_kernel();
67321 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67322+ pax_close_kernel();
67323 }
67324
67325 static void jump_label_update(struct jump_label_key *key, int enable);
67326@@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
67327 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67328 struct jump_entry *iter;
67329
67330+ pax_open_kernel();
67331 for (iter = iter_start; iter < iter_stop; iter++) {
67332 if (within_module_init(iter->code, mod))
67333 iter->code = 0;
67334 }
67335+ pax_close_kernel();
67336 }
67337
67338 static int
67339diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67340index 079f1d3..a407562 100644
67341--- a/kernel/kallsyms.c
67342+++ b/kernel/kallsyms.c
67343@@ -11,6 +11,9 @@
67344 * Changed the compression method from stem compression to "table lookup"
67345 * compression (see scripts/kallsyms.c for a more complete description)
67346 */
67347+#ifdef CONFIG_GRKERNSEC_HIDESYM
67348+#define __INCLUDED_BY_HIDESYM 1
67349+#endif
67350 #include <linux/kallsyms.h>
67351 #include <linux/module.h>
67352 #include <linux/init.h>
67353@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
67354
67355 static inline int is_kernel_inittext(unsigned long addr)
67356 {
67357+ if (system_state != SYSTEM_BOOTING)
67358+ return 0;
67359+
67360 if (addr >= (unsigned long)_sinittext
67361 && addr <= (unsigned long)_einittext)
67362 return 1;
67363 return 0;
67364 }
67365
67366+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67367+#ifdef CONFIG_MODULES
67368+static inline int is_module_text(unsigned long addr)
67369+{
67370+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67371+ return 1;
67372+
67373+ addr = ktla_ktva(addr);
67374+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67375+}
67376+#else
67377+static inline int is_module_text(unsigned long addr)
67378+{
67379+ return 0;
67380+}
67381+#endif
67382+#endif
67383+
67384 static inline int is_kernel_text(unsigned long addr)
67385 {
67386 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67387@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67388
67389 static inline int is_kernel(unsigned long addr)
67390 {
67391+
67392+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67393+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
67394+ return 1;
67395+
67396+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67397+#else
67398 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67399+#endif
67400+
67401 return 1;
67402 return in_gate_area_no_mm(addr);
67403 }
67404
67405 static int is_ksym_addr(unsigned long addr)
67406 {
67407+
67408+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67409+ if (is_module_text(addr))
67410+ return 0;
67411+#endif
67412+
67413 if (all_var)
67414 return is_kernel(addr);
67415
67416@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67417
67418 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67419 {
67420- iter->name[0] = '\0';
67421 iter->nameoff = get_symbol_offset(new_pos);
67422 iter->pos = new_pos;
67423 }
67424@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
67425 {
67426 struct kallsym_iter *iter = m->private;
67427
67428+#ifdef CONFIG_GRKERNSEC_HIDESYM
67429+ if (current_uid())
67430+ return 0;
67431+#endif
67432+
67433 /* Some debugging symbols have no name. Ignore them. */
67434 if (!iter->name[0])
67435 return 0;
67436@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67437 struct kallsym_iter *iter;
67438 int ret;
67439
67440- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67441+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67442 if (!iter)
67443 return -ENOMEM;
67444 reset_iter(iter, 0);
67445diff --git a/kernel/kexec.c b/kernel/kexec.c
67446index 7b08867..3bac516 100644
67447--- a/kernel/kexec.c
67448+++ b/kernel/kexec.c
67449@@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67450 unsigned long flags)
67451 {
67452 struct compat_kexec_segment in;
67453- struct kexec_segment out, __user *ksegments;
67454+ struct kexec_segment out;
67455+ struct kexec_segment __user *ksegments;
67456 unsigned long i, result;
67457
67458 /* Don't allow clients that don't understand the native
67459diff --git a/kernel/kmod.c b/kernel/kmod.c
67460index a3a46cb..f2e42f8 100644
67461--- a/kernel/kmod.c
67462+++ b/kernel/kmod.c
67463@@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
67464 * If module auto-loading support is disabled then this function
67465 * becomes a no-operation.
67466 */
67467-int __request_module(bool wait, const char *fmt, ...)
67468+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67469 {
67470- va_list args;
67471 char module_name[MODULE_NAME_LEN];
67472 unsigned int max_modprobes;
67473 int ret;
67474- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
67475+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
67476 static char *envp[] = { "HOME=/",
67477 "TERM=linux",
67478 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
67479@@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
67480 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67481 static int kmod_loop_msg;
67482
67483- va_start(args, fmt);
67484- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67485- va_end(args);
67486+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67487 if (ret >= MODULE_NAME_LEN)
67488 return -ENAMETOOLONG;
67489
67490@@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
67491 if (ret)
67492 return ret;
67493
67494+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67495+ if (!current_uid()) {
67496+ /* hack to workaround consolekit/udisks stupidity */
67497+ read_lock(&tasklist_lock);
67498+ if (!strcmp(current->comm, "mount") &&
67499+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67500+ read_unlock(&tasklist_lock);
67501+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67502+ return -EPERM;
67503+ }
67504+ read_unlock(&tasklist_lock);
67505+ }
67506+#endif
67507+
67508 /* If modprobe needs a service that is in a module, we get a recursive
67509 * loop. Limit the number of running kmod threads to max_threads/2 or
67510 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67511@@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
67512 atomic_dec(&kmod_concurrent);
67513 return ret;
67514 }
67515+
67516+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67517+{
67518+ va_list args;
67519+ int ret;
67520+
67521+ va_start(args, fmt);
67522+ ret = ____request_module(wait, module_param, fmt, args);
67523+ va_end(args);
67524+
67525+ return ret;
67526+}
67527+
67528+int __request_module(bool wait, const char *fmt, ...)
67529+{
67530+ va_list args;
67531+ int ret;
67532+
67533+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67534+ if (current_uid()) {
67535+ char module_param[MODULE_NAME_LEN];
67536+
67537+ memset(module_param, 0, sizeof(module_param));
67538+
67539+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67540+
67541+ va_start(args, fmt);
67542+ ret = ____request_module(wait, module_param, fmt, args);
67543+ va_end(args);
67544+
67545+ return ret;
67546+ }
67547+#endif
67548+
67549+ va_start(args, fmt);
67550+ ret = ____request_module(wait, NULL, fmt, args);
67551+ va_end(args);
67552+
67553+ return ret;
67554+}
67555+
67556 EXPORT_SYMBOL(__request_module);
67557 #endif /* CONFIG_MODULES */
67558
67559@@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
67560 *
67561 * Thus the __user pointer cast is valid here.
67562 */
67563- sys_wait4(pid, (int __user *)&ret, 0, NULL);
67564+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67565
67566 /*
67567 * If ret is 0, either ____call_usermodehelper failed and the
67568diff --git a/kernel/kprobes.c b/kernel/kprobes.c
67569index c62b854..cb67968 100644
67570--- a/kernel/kprobes.c
67571+++ b/kernel/kprobes.c
67572@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
67573 * kernel image and loaded module images reside. This is required
67574 * so x86_64 can correctly handle the %rip-relative fixups.
67575 */
67576- kip->insns = module_alloc(PAGE_SIZE);
67577+ kip->insns = module_alloc_exec(PAGE_SIZE);
67578 if (!kip->insns) {
67579 kfree(kip);
67580 return NULL;
67581@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
67582 */
67583 if (!list_is_singular(&kip->list)) {
67584 list_del(&kip->list);
67585- module_free(NULL, kip->insns);
67586+ module_free_exec(NULL, kip->insns);
67587 kfree(kip);
67588 }
67589 return 1;
67590@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
67591 {
67592 int i, err = 0;
67593 unsigned long offset = 0, size = 0;
67594- char *modname, namebuf[128];
67595+ char *modname, namebuf[KSYM_NAME_LEN];
67596 const char *symbol_name;
67597 void *addr;
67598 struct kprobe_blackpoint *kb;
67599@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
67600 const char *sym = NULL;
67601 unsigned int i = *(loff_t *) v;
67602 unsigned long offset = 0;
67603- char *modname, namebuf[128];
67604+ char *modname, namebuf[KSYM_NAME_LEN];
67605
67606 head = &kprobe_table[i];
67607 preempt_disable();
67608diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
67609index 4e316e1..5501eef 100644
67610--- a/kernel/ksysfs.c
67611+++ b/kernel/ksysfs.c
67612@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
67613 {
67614 if (count+1 > UEVENT_HELPER_PATH_LEN)
67615 return -ENOENT;
67616+ if (!capable(CAP_SYS_ADMIN))
67617+ return -EPERM;
67618 memcpy(uevent_helper, buf, count);
67619 uevent_helper[count] = '\0';
67620 if (count && uevent_helper[count-1] == '\n')
67621diff --git a/kernel/lockdep.c b/kernel/lockdep.c
67622index 8889f7d..95319b7 100644
67623--- a/kernel/lockdep.c
67624+++ b/kernel/lockdep.c
67625@@ -590,6 +590,10 @@ static int static_obj(void *obj)
67626 end = (unsigned long) &_end,
67627 addr = (unsigned long) obj;
67628
67629+#ifdef CONFIG_PAX_KERNEXEC
67630+ start = ktla_ktva(start);
67631+#endif
67632+
67633 /*
67634 * static variable?
67635 */
67636@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
67637 if (!static_obj(lock->key)) {
67638 debug_locks_off();
67639 printk("INFO: trying to register non-static key.\n");
67640+ printk("lock:%pS key:%pS.\n", lock, lock->key);
67641 printk("the code is fine but needs lockdep annotation.\n");
67642 printk("turning off the locking correctness validator.\n");
67643 dump_stack();
67644@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
67645 if (!class)
67646 return 0;
67647 }
67648- atomic_inc((atomic_t *)&class->ops);
67649+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
67650 if (very_verbose(class)) {
67651 printk("\nacquire class [%p] %s", class->key, class->name);
67652 if (class->name_version > 1)
67653diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
67654index 91c32a0..b2c71c5 100644
67655--- a/kernel/lockdep_proc.c
67656+++ b/kernel/lockdep_proc.c
67657@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
67658
67659 static void print_name(struct seq_file *m, struct lock_class *class)
67660 {
67661- char str[128];
67662+ char str[KSYM_NAME_LEN];
67663 const char *name = class->name;
67664
67665 if (!name) {
67666diff --git a/kernel/module.c b/kernel/module.c
67667index 3d56b6f..2a22bd0 100644
67668--- a/kernel/module.c
67669+++ b/kernel/module.c
67670@@ -58,6 +58,7 @@
67671 #include <linux/jump_label.h>
67672 #include <linux/pfn.h>
67673 #include <linux/bsearch.h>
67674+#include <linux/grsecurity.h>
67675
67676 #define CREATE_TRACE_POINTS
67677 #include <trace/events/module.h>
67678@@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
67679
67680 /* Bounds of module allocation, for speeding __module_address.
67681 * Protected by module_mutex. */
67682-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67683+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67684+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67685
67686 int register_module_notifier(struct notifier_block * nb)
67687 {
67688@@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67689 return true;
67690
67691 list_for_each_entry_rcu(mod, &modules, list) {
67692- struct symsearch arr[] = {
67693+ struct symsearch modarr[] = {
67694 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67695 NOT_GPL_ONLY, false },
67696 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
67697@@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67698 #endif
67699 };
67700
67701- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67702+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67703 return true;
67704 }
67705 return false;
67706@@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
67707 static int percpu_modalloc(struct module *mod,
67708 unsigned long size, unsigned long align)
67709 {
67710- if (align > PAGE_SIZE) {
67711+ if (align-1 >= PAGE_SIZE) {
67712 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
67713 mod->name, align, PAGE_SIZE);
67714 align = PAGE_SIZE;
67715@@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
67716 static ssize_t show_coresize(struct module_attribute *mattr,
67717 struct module_kobject *mk, char *buffer)
67718 {
67719- return sprintf(buffer, "%u\n", mk->mod->core_size);
67720+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
67721 }
67722
67723 static struct module_attribute modinfo_coresize =
67724@@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
67725 static ssize_t show_initsize(struct module_attribute *mattr,
67726 struct module_kobject *mk, char *buffer)
67727 {
67728- return sprintf(buffer, "%u\n", mk->mod->init_size);
67729+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
67730 }
67731
67732 static struct module_attribute modinfo_initsize =
67733@@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
67734 */
67735 #ifdef CONFIG_SYSFS
67736
67737-#ifdef CONFIG_KALLSYMS
67738+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67739 static inline bool sect_empty(const Elf_Shdr *sect)
67740 {
67741 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
67742@@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
67743
67744 static void unset_module_core_ro_nx(struct module *mod)
67745 {
67746- set_page_attributes(mod->module_core + mod->core_text_size,
67747- mod->module_core + mod->core_size,
67748+ set_page_attributes(mod->module_core_rw,
67749+ mod->module_core_rw + mod->core_size_rw,
67750 set_memory_x);
67751- set_page_attributes(mod->module_core,
67752- mod->module_core + mod->core_ro_size,
67753+ set_page_attributes(mod->module_core_rx,
67754+ mod->module_core_rx + mod->core_size_rx,
67755 set_memory_rw);
67756 }
67757
67758 static void unset_module_init_ro_nx(struct module *mod)
67759 {
67760- set_page_attributes(mod->module_init + mod->init_text_size,
67761- mod->module_init + mod->init_size,
67762+ set_page_attributes(mod->module_init_rw,
67763+ mod->module_init_rw + mod->init_size_rw,
67764 set_memory_x);
67765- set_page_attributes(mod->module_init,
67766- mod->module_init + mod->init_ro_size,
67767+ set_page_attributes(mod->module_init_rx,
67768+ mod->module_init_rx + mod->init_size_rx,
67769 set_memory_rw);
67770 }
67771
67772@@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
67773
67774 mutex_lock(&module_mutex);
67775 list_for_each_entry_rcu(mod, &modules, list) {
67776- if ((mod->module_core) && (mod->core_text_size)) {
67777- set_page_attributes(mod->module_core,
67778- mod->module_core + mod->core_text_size,
67779+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67780+ set_page_attributes(mod->module_core_rx,
67781+ mod->module_core_rx + mod->core_size_rx,
67782 set_memory_rw);
67783 }
67784- if ((mod->module_init) && (mod->init_text_size)) {
67785- set_page_attributes(mod->module_init,
67786- mod->module_init + mod->init_text_size,
67787+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67788+ set_page_attributes(mod->module_init_rx,
67789+ mod->module_init_rx + mod->init_size_rx,
67790 set_memory_rw);
67791 }
67792 }
67793@@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
67794
67795 mutex_lock(&module_mutex);
67796 list_for_each_entry_rcu(mod, &modules, list) {
67797- if ((mod->module_core) && (mod->core_text_size)) {
67798- set_page_attributes(mod->module_core,
67799- mod->module_core + mod->core_text_size,
67800+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67801+ set_page_attributes(mod->module_core_rx,
67802+ mod->module_core_rx + mod->core_size_rx,
67803 set_memory_ro);
67804 }
67805- if ((mod->module_init) && (mod->init_text_size)) {
67806- set_page_attributes(mod->module_init,
67807- mod->module_init + mod->init_text_size,
67808+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67809+ set_page_attributes(mod->module_init_rx,
67810+ mod->module_init_rx + mod->init_size_rx,
67811 set_memory_ro);
67812 }
67813 }
67814@@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
67815
67816 /* This may be NULL, but that's OK */
67817 unset_module_init_ro_nx(mod);
67818- module_free(mod, mod->module_init);
67819+ module_free(mod, mod->module_init_rw);
67820+ module_free_exec(mod, mod->module_init_rx);
67821 kfree(mod->args);
67822 percpu_modfree(mod);
67823
67824 /* Free lock-classes: */
67825- lockdep_free_key_range(mod->module_core, mod->core_size);
67826+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67827+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67828
67829 /* Finally, free the core (containing the module structure) */
67830 unset_module_core_ro_nx(mod);
67831- module_free(mod, mod->module_core);
67832+ module_free_exec(mod, mod->module_core_rx);
67833+ module_free(mod, mod->module_core_rw);
67834
67835 #ifdef CONFIG_MPU
67836 update_protections(current->mm);
67837@@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67838 unsigned int i;
67839 int ret = 0;
67840 const struct kernel_symbol *ksym;
67841+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67842+ int is_fs_load = 0;
67843+ int register_filesystem_found = 0;
67844+ char *p;
67845+
67846+ p = strstr(mod->args, "grsec_modharden_fs");
67847+ if (p) {
67848+ char *endptr = p + strlen("grsec_modharden_fs");
67849+ /* copy \0 as well */
67850+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67851+ is_fs_load = 1;
67852+ }
67853+#endif
67854
67855 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67856 const char *name = info->strtab + sym[i].st_name;
67857
67858+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67859+ /* it's a real shame this will never get ripped and copied
67860+ upstream! ;(
67861+ */
67862+ if (is_fs_load && !strcmp(name, "register_filesystem"))
67863+ register_filesystem_found = 1;
67864+#endif
67865+
67866 switch (sym[i].st_shndx) {
67867 case SHN_COMMON:
67868 /* We compiled with -fno-common. These are not
67869@@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67870 ksym = resolve_symbol_wait(mod, info, name);
67871 /* Ok if resolved. */
67872 if (ksym && !IS_ERR(ksym)) {
67873+ pax_open_kernel();
67874 sym[i].st_value = ksym->value;
67875+ pax_close_kernel();
67876 break;
67877 }
67878
67879@@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67880 secbase = (unsigned long)mod_percpu(mod);
67881 else
67882 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67883+ pax_open_kernel();
67884 sym[i].st_value += secbase;
67885+ pax_close_kernel();
67886 break;
67887 }
67888 }
67889
67890+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67891+ if (is_fs_load && !register_filesystem_found) {
67892+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67893+ ret = -EPERM;
67894+ }
67895+#endif
67896+
67897 return ret;
67898 }
67899
67900@@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67901 || s->sh_entsize != ~0UL
67902 || strstarts(sname, ".init"))
67903 continue;
67904- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67905+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67906+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67907+ else
67908+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67909 pr_debug("\t%s\n", sname);
67910 }
67911- switch (m) {
67912- case 0: /* executable */
67913- mod->core_size = debug_align(mod->core_size);
67914- mod->core_text_size = mod->core_size;
67915- break;
67916- case 1: /* RO: text and ro-data */
67917- mod->core_size = debug_align(mod->core_size);
67918- mod->core_ro_size = mod->core_size;
67919- break;
67920- case 3: /* whole core */
67921- mod->core_size = debug_align(mod->core_size);
67922- break;
67923- }
67924 }
67925
67926 pr_debug("Init section allocation order:\n");
67927@@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67928 || s->sh_entsize != ~0UL
67929 || !strstarts(sname, ".init"))
67930 continue;
67931- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67932- | INIT_OFFSET_MASK);
67933+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67934+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67935+ else
67936+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67937+ s->sh_entsize |= INIT_OFFSET_MASK;
67938 pr_debug("\t%s\n", sname);
67939 }
67940- switch (m) {
67941- case 0: /* executable */
67942- mod->init_size = debug_align(mod->init_size);
67943- mod->init_text_size = mod->init_size;
67944- break;
67945- case 1: /* RO: text and ro-data */
67946- mod->init_size = debug_align(mod->init_size);
67947- mod->init_ro_size = mod->init_size;
67948- break;
67949- case 3: /* whole init */
67950- mod->init_size = debug_align(mod->init_size);
67951- break;
67952- }
67953 }
67954 }
67955
67956@@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67957
67958 /* Put symbol section at end of init part of module. */
67959 symsect->sh_flags |= SHF_ALLOC;
67960- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67961+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67962 info->index.sym) | INIT_OFFSET_MASK;
67963 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67964
67965@@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67966 }
67967
67968 /* Append room for core symbols at end of core part. */
67969- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67970- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67971- mod->core_size += strtab_size;
67972+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67973+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67974+ mod->core_size_rx += strtab_size;
67975
67976 /* Put string table section at end of init part of module. */
67977 strsect->sh_flags |= SHF_ALLOC;
67978- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67979+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67980 info->index.str) | INIT_OFFSET_MASK;
67981 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67982 }
67983@@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67984 /* Make sure we get permanent strtab: don't use info->strtab. */
67985 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67986
67987+ pax_open_kernel();
67988+
67989 /* Set types up while we still have access to sections. */
67990 for (i = 0; i < mod->num_symtab; i++)
67991 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67992
67993- mod->core_symtab = dst = mod->module_core + info->symoffs;
67994- mod->core_strtab = s = mod->module_core + info->stroffs;
67995+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67996+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67997 src = mod->symtab;
67998 *dst = *src;
67999 *s++ = 0;
68000@@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68001 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
68002 }
68003 mod->core_num_syms = ndst;
68004+
68005+ pax_close_kernel();
68006 }
68007 #else
68008 static inline void layout_symtab(struct module *mod, struct load_info *info)
68009@@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
68010 return size == 0 ? NULL : vmalloc_exec(size);
68011 }
68012
68013-static void *module_alloc_update_bounds(unsigned long size)
68014+static void *module_alloc_update_bounds_rw(unsigned long size)
68015 {
68016 void *ret = module_alloc(size);
68017
68018 if (ret) {
68019 mutex_lock(&module_mutex);
68020 /* Update module bounds. */
68021- if ((unsigned long)ret < module_addr_min)
68022- module_addr_min = (unsigned long)ret;
68023- if ((unsigned long)ret + size > module_addr_max)
68024- module_addr_max = (unsigned long)ret + size;
68025+ if ((unsigned long)ret < module_addr_min_rw)
68026+ module_addr_min_rw = (unsigned long)ret;
68027+ if ((unsigned long)ret + size > module_addr_max_rw)
68028+ module_addr_max_rw = (unsigned long)ret + size;
68029+ mutex_unlock(&module_mutex);
68030+ }
68031+ return ret;
68032+}
68033+
68034+static void *module_alloc_update_bounds_rx(unsigned long size)
68035+{
68036+ void *ret = module_alloc_exec(size);
68037+
68038+ if (ret) {
68039+ mutex_lock(&module_mutex);
68040+ /* Update module bounds. */
68041+ if ((unsigned long)ret < module_addr_min_rx)
68042+ module_addr_min_rx = (unsigned long)ret;
68043+ if ((unsigned long)ret + size > module_addr_max_rx)
68044+ module_addr_max_rx = (unsigned long)ret + size;
68045 mutex_unlock(&module_mutex);
68046 }
68047 return ret;
68048@@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
68049 static int check_modinfo(struct module *mod, struct load_info *info)
68050 {
68051 const char *modmagic = get_modinfo(info, "vermagic");
68052+ const char *license = get_modinfo(info, "license");
68053 int err;
68054
68055+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
68056+ if (!license || !license_is_gpl_compatible(license))
68057+ return -ENOEXEC;
68058+#endif
68059+
68060 /* This is allowed: modprobe --force will invalidate it. */
68061 if (!modmagic) {
68062 err = try_to_force_load(mod, "bad vermagic");
68063@@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
68064 }
68065
68066 /* Set up license info based on the info section */
68067- set_license(mod, get_modinfo(info, "license"));
68068+ set_license(mod, license);
68069
68070 return 0;
68071 }
68072@@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
68073 void *ptr;
68074
68075 /* Do the allocs. */
68076- ptr = module_alloc_update_bounds(mod->core_size);
68077+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
68078 /*
68079 * The pointer to this block is stored in the module structure
68080 * which is inside the block. Just mark it as not being a
68081@@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
68082 if (!ptr)
68083 return -ENOMEM;
68084
68085- memset(ptr, 0, mod->core_size);
68086- mod->module_core = ptr;
68087+ memset(ptr, 0, mod->core_size_rw);
68088+ mod->module_core_rw = ptr;
68089
68090- ptr = module_alloc_update_bounds(mod->init_size);
68091+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68092 /*
68093 * The pointer to this block is stored in the module structure
68094 * which is inside the block. This block doesn't need to be
68095 * scanned as it contains data and code that will be freed
68096 * after the module is initialized.
68097 */
68098- kmemleak_ignore(ptr);
68099- if (!ptr && mod->init_size) {
68100- module_free(mod, mod->module_core);
68101+ kmemleak_not_leak(ptr);
68102+ if (!ptr && mod->init_size_rw) {
68103+ module_free(mod, mod->module_core_rw);
68104 return -ENOMEM;
68105 }
68106- memset(ptr, 0, mod->init_size);
68107- mod->module_init = ptr;
68108+ memset(ptr, 0, mod->init_size_rw);
68109+ mod->module_init_rw = ptr;
68110+
68111+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68112+ kmemleak_not_leak(ptr);
68113+ if (!ptr) {
68114+ module_free(mod, mod->module_init_rw);
68115+ module_free(mod, mod->module_core_rw);
68116+ return -ENOMEM;
68117+ }
68118+
68119+ pax_open_kernel();
68120+ memset(ptr, 0, mod->core_size_rx);
68121+ pax_close_kernel();
68122+ mod->module_core_rx = ptr;
68123+
68124+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68125+ kmemleak_not_leak(ptr);
68126+ if (!ptr && mod->init_size_rx) {
68127+ module_free_exec(mod, mod->module_core_rx);
68128+ module_free(mod, mod->module_init_rw);
68129+ module_free(mod, mod->module_core_rw);
68130+ return -ENOMEM;
68131+ }
68132+
68133+ pax_open_kernel();
68134+ memset(ptr, 0, mod->init_size_rx);
68135+ pax_close_kernel();
68136+ mod->module_init_rx = ptr;
68137
68138 /* Transfer each section which specifies SHF_ALLOC */
68139 pr_debug("final section addresses:\n");
68140@@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
68141 if (!(shdr->sh_flags & SHF_ALLOC))
68142 continue;
68143
68144- if (shdr->sh_entsize & INIT_OFFSET_MASK)
68145- dest = mod->module_init
68146- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68147- else
68148- dest = mod->module_core + shdr->sh_entsize;
68149+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
68150+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68151+ dest = mod->module_init_rw
68152+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68153+ else
68154+ dest = mod->module_init_rx
68155+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68156+ } else {
68157+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68158+ dest = mod->module_core_rw + shdr->sh_entsize;
68159+ else
68160+ dest = mod->module_core_rx + shdr->sh_entsize;
68161+ }
68162+
68163+ if (shdr->sh_type != SHT_NOBITS) {
68164+
68165+#ifdef CONFIG_PAX_KERNEXEC
68166+#ifdef CONFIG_X86_64
68167+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
68168+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68169+#endif
68170+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
68171+ pax_open_kernel();
68172+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68173+ pax_close_kernel();
68174+ } else
68175+#endif
68176
68177- if (shdr->sh_type != SHT_NOBITS)
68178 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68179+ }
68180 /* Update sh_addr to point to copy in image. */
68181- shdr->sh_addr = (unsigned long)dest;
68182+
68183+#ifdef CONFIG_PAX_KERNEXEC
68184+ if (shdr->sh_flags & SHF_EXECINSTR)
68185+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
68186+ else
68187+#endif
68188+
68189+ shdr->sh_addr = (unsigned long)dest;
68190 pr_debug("\t0x%lx %s\n",
68191 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
68192 }
68193@@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
68194 * Do it before processing of module parameters, so the module
68195 * can provide parameter accessor functions of its own.
68196 */
68197- if (mod->module_init)
68198- flush_icache_range((unsigned long)mod->module_init,
68199- (unsigned long)mod->module_init
68200- + mod->init_size);
68201- flush_icache_range((unsigned long)mod->module_core,
68202- (unsigned long)mod->module_core + mod->core_size);
68203+ if (mod->module_init_rx)
68204+ flush_icache_range((unsigned long)mod->module_init_rx,
68205+ (unsigned long)mod->module_init_rx
68206+ + mod->init_size_rx);
68207+ flush_icache_range((unsigned long)mod->module_core_rx,
68208+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
68209
68210 set_fs(old_fs);
68211 }
68212@@ -2802,8 +2901,10 @@ out:
68213 static void module_deallocate(struct module *mod, struct load_info *info)
68214 {
68215 percpu_modfree(mod);
68216- module_free(mod, mod->module_init);
68217- module_free(mod, mod->module_core);
68218+ module_free_exec(mod, mod->module_init_rx);
68219+ module_free_exec(mod, mod->module_core_rx);
68220+ module_free(mod, mod->module_init_rw);
68221+ module_free(mod, mod->module_core_rw);
68222 }
68223
68224 int __weak module_finalize(const Elf_Ehdr *hdr,
68225@@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
68226 if (err)
68227 goto free_unload;
68228
68229+ /* Now copy in args */
68230+ mod->args = strndup_user(uargs, ~0UL >> 1);
68231+ if (IS_ERR(mod->args)) {
68232+ err = PTR_ERR(mod->args);
68233+ goto free_unload;
68234+ }
68235+
68236 /* Set up MODINFO_ATTR fields */
68237 setup_modinfo(mod, &info);
68238
68239+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68240+ {
68241+ char *p, *p2;
68242+
68243+ if (strstr(mod->args, "grsec_modharden_netdev")) {
68244+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68245+ err = -EPERM;
68246+ goto free_modinfo;
68247+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68248+ p += strlen("grsec_modharden_normal");
68249+ p2 = strstr(p, "_");
68250+ if (p2) {
68251+ *p2 = '\0';
68252+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68253+ *p2 = '_';
68254+ }
68255+ err = -EPERM;
68256+ goto free_modinfo;
68257+ }
68258+ }
68259+#endif
68260+
68261 /* Fix up syms, so that st_value is a pointer to location. */
68262 err = simplify_symbols(mod, &info);
68263 if (err < 0)
68264@@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
68265
68266 flush_module_icache(mod);
68267
68268- /* Now copy in args */
68269- mod->args = strndup_user(uargs, ~0UL >> 1);
68270- if (IS_ERR(mod->args)) {
68271- err = PTR_ERR(mod->args);
68272- goto free_arch_cleanup;
68273- }
68274-
68275 /* Mark state as coming so strong_try_module_get() ignores us. */
68276 mod->state = MODULE_STATE_COMING;
68277
68278@@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
68279 unlock:
68280 mutex_unlock(&module_mutex);
68281 synchronize_sched();
68282- kfree(mod->args);
68283- free_arch_cleanup:
68284 module_arch_cleanup(mod);
68285 free_modinfo:
68286 free_modinfo(mod);
68287+ kfree(mod->args);
68288 free_unload:
68289 module_unload_free(mod);
68290 free_module:
68291@@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68292 MODULE_STATE_COMING, mod);
68293
68294 /* Set RO and NX regions for core */
68295- set_section_ro_nx(mod->module_core,
68296- mod->core_text_size,
68297- mod->core_ro_size,
68298- mod->core_size);
68299+ set_section_ro_nx(mod->module_core_rx,
68300+ mod->core_size_rx,
68301+ mod->core_size_rx,
68302+ mod->core_size_rx);
68303
68304 /* Set RO and NX regions for init */
68305- set_section_ro_nx(mod->module_init,
68306- mod->init_text_size,
68307- mod->init_ro_size,
68308- mod->init_size);
68309+ set_section_ro_nx(mod->module_init_rx,
68310+ mod->init_size_rx,
68311+ mod->init_size_rx,
68312+ mod->init_size_rx);
68313
68314 do_mod_ctors(mod);
68315 /* Start the module */
68316@@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68317 mod->strtab = mod->core_strtab;
68318 #endif
68319 unset_module_init_ro_nx(mod);
68320- module_free(mod, mod->module_init);
68321- mod->module_init = NULL;
68322- mod->init_size = 0;
68323- mod->init_ro_size = 0;
68324- mod->init_text_size = 0;
68325+ module_free(mod, mod->module_init_rw);
68326+ module_free_exec(mod, mod->module_init_rx);
68327+ mod->module_init_rw = NULL;
68328+ mod->module_init_rx = NULL;
68329+ mod->init_size_rw = 0;
68330+ mod->init_size_rx = 0;
68331 mutex_unlock(&module_mutex);
68332
68333 return 0;
68334@@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
68335 unsigned long nextval;
68336
68337 /* At worse, next value is at end of module */
68338- if (within_module_init(addr, mod))
68339- nextval = (unsigned long)mod->module_init+mod->init_text_size;
68340+ if (within_module_init_rx(addr, mod))
68341+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68342+ else if (within_module_init_rw(addr, mod))
68343+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68344+ else if (within_module_core_rx(addr, mod))
68345+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68346+ else if (within_module_core_rw(addr, mod))
68347+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68348 else
68349- nextval = (unsigned long)mod->module_core+mod->core_text_size;
68350+ return NULL;
68351
68352 /* Scan for closest preceding symbol, and next symbol. (ELF
68353 starts real symbols at 1). */
68354@@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
68355 char buf[8];
68356
68357 seq_printf(m, "%s %u",
68358- mod->name, mod->init_size + mod->core_size);
68359+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68360 print_unload_info(m, mod);
68361
68362 /* Informative for users. */
68363@@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
68364 mod->state == MODULE_STATE_COMING ? "Loading":
68365 "Live");
68366 /* Used by oprofile and other similar tools. */
68367- seq_printf(m, " 0x%pK", mod->module_core);
68368+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68369
68370 /* Taints info */
68371 if (mod->taints)
68372@@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
68373
68374 static int __init proc_modules_init(void)
68375 {
68376+#ifndef CONFIG_GRKERNSEC_HIDESYM
68377+#ifdef CONFIG_GRKERNSEC_PROC_USER
68378+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68379+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68380+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68381+#else
68382 proc_create("modules", 0, NULL, &proc_modules_operations);
68383+#endif
68384+#else
68385+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68386+#endif
68387 return 0;
68388 }
68389 module_init(proc_modules_init);
68390@@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
68391 {
68392 struct module *mod;
68393
68394- if (addr < module_addr_min || addr > module_addr_max)
68395+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68396+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
68397 return NULL;
68398
68399 list_for_each_entry_rcu(mod, &modules, list)
68400- if (within_module_core(addr, mod)
68401- || within_module_init(addr, mod))
68402+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
68403 return mod;
68404 return NULL;
68405 }
68406@@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
68407 */
68408 struct module *__module_text_address(unsigned long addr)
68409 {
68410- struct module *mod = __module_address(addr);
68411+ struct module *mod;
68412+
68413+#ifdef CONFIG_X86_32
68414+ addr = ktla_ktva(addr);
68415+#endif
68416+
68417+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68418+ return NULL;
68419+
68420+ mod = __module_address(addr);
68421+
68422 if (mod) {
68423 /* Make sure it's within the text section. */
68424- if (!within(addr, mod->module_init, mod->init_text_size)
68425- && !within(addr, mod->module_core, mod->core_text_size))
68426+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68427 mod = NULL;
68428 }
68429 return mod;
68430diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68431index 7e3443f..b2a1e6b 100644
68432--- a/kernel/mutex-debug.c
68433+++ b/kernel/mutex-debug.c
68434@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68435 }
68436
68437 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68438- struct thread_info *ti)
68439+ struct task_struct *task)
68440 {
68441 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68442
68443 /* Mark the current thread as blocked on the lock: */
68444- ti->task->blocked_on = waiter;
68445+ task->blocked_on = waiter;
68446 }
68447
68448 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68449- struct thread_info *ti)
68450+ struct task_struct *task)
68451 {
68452 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68453- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68454- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68455- ti->task->blocked_on = NULL;
68456+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
68457+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68458+ task->blocked_on = NULL;
68459
68460 list_del_init(&waiter->list);
68461 waiter->task = NULL;
68462diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68463index 0799fd3..d06ae3b 100644
68464--- a/kernel/mutex-debug.h
68465+++ b/kernel/mutex-debug.h
68466@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68467 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68468 extern void debug_mutex_add_waiter(struct mutex *lock,
68469 struct mutex_waiter *waiter,
68470- struct thread_info *ti);
68471+ struct task_struct *task);
68472 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68473- struct thread_info *ti);
68474+ struct task_struct *task);
68475 extern void debug_mutex_unlock(struct mutex *lock);
68476 extern void debug_mutex_init(struct mutex *lock, const char *name,
68477 struct lock_class_key *key);
68478diff --git a/kernel/mutex.c b/kernel/mutex.c
68479index 89096dd..f91ebc5 100644
68480--- a/kernel/mutex.c
68481+++ b/kernel/mutex.c
68482@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68483 spin_lock_mutex(&lock->wait_lock, flags);
68484
68485 debug_mutex_lock_common(lock, &waiter);
68486- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68487+ debug_mutex_add_waiter(lock, &waiter, task);
68488
68489 /* add waiting tasks to the end of the waitqueue (FIFO): */
68490 list_add_tail(&waiter.list, &lock->wait_list);
68491@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68492 * TASK_UNINTERRUPTIBLE case.)
68493 */
68494 if (unlikely(signal_pending_state(state, task))) {
68495- mutex_remove_waiter(lock, &waiter,
68496- task_thread_info(task));
68497+ mutex_remove_waiter(lock, &waiter, task);
68498 mutex_release(&lock->dep_map, 1, ip);
68499 spin_unlock_mutex(&lock->wait_lock, flags);
68500
68501@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68502 done:
68503 lock_acquired(&lock->dep_map, ip);
68504 /* got the lock - rejoice! */
68505- mutex_remove_waiter(lock, &waiter, current_thread_info());
68506+ mutex_remove_waiter(lock, &waiter, task);
68507 mutex_set_owner(lock);
68508
68509 /* set it to 0 if there are no waiters left: */
68510diff --git a/kernel/padata.c b/kernel/padata.c
68511index b452599..5d68f4e 100644
68512--- a/kernel/padata.c
68513+++ b/kernel/padata.c
68514@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
68515 padata->pd = pd;
68516 padata->cb_cpu = cb_cpu;
68517
68518- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
68519- atomic_set(&pd->seq_nr, -1);
68520+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
68521+ atomic_set_unchecked(&pd->seq_nr, -1);
68522
68523- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
68524+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
68525
68526 target_cpu = padata_cpu_hash(padata);
68527 queue = per_cpu_ptr(pd->pqueue, target_cpu);
68528@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
68529 padata_init_pqueues(pd);
68530 padata_init_squeues(pd);
68531 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
68532- atomic_set(&pd->seq_nr, -1);
68533+ atomic_set_unchecked(&pd->seq_nr, -1);
68534 atomic_set(&pd->reorder_objects, 0);
68535 atomic_set(&pd->refcnt, 0);
68536 pd->pinst = pinst;
68537diff --git a/kernel/panic.c b/kernel/panic.c
68538index 8ed89a1..e83856a 100644
68539--- a/kernel/panic.c
68540+++ b/kernel/panic.c
68541@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
68542 const char *board;
68543
68544 printk(KERN_WARNING "------------[ cut here ]------------\n");
68545- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68546+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68547 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68548 if (board)
68549 printk(KERN_WARNING "Hardware name: %s\n", board);
68550@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68551 */
68552 void __stack_chk_fail(void)
68553 {
68554- panic("stack-protector: Kernel stack is corrupted in: %p\n",
68555+ dump_stack();
68556+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68557 __builtin_return_address(0));
68558 }
68559 EXPORT_SYMBOL(__stack_chk_fail);
68560diff --git a/kernel/pid.c b/kernel/pid.c
68561index 9f08dfa..6765c40 100644
68562--- a/kernel/pid.c
68563+++ b/kernel/pid.c
68564@@ -33,6 +33,7 @@
68565 #include <linux/rculist.h>
68566 #include <linux/bootmem.h>
68567 #include <linux/hash.h>
68568+#include <linux/security.h>
68569 #include <linux/pid_namespace.h>
68570 #include <linux/init_task.h>
68571 #include <linux/syscalls.h>
68572@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
68573
68574 int pid_max = PID_MAX_DEFAULT;
68575
68576-#define RESERVED_PIDS 300
68577+#define RESERVED_PIDS 500
68578
68579 int pid_max_min = RESERVED_PIDS + 1;
68580 int pid_max_max = PID_MAX_LIMIT;
68581@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
68582 */
68583 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68584 {
68585+ struct task_struct *task;
68586+
68587 rcu_lockdep_assert(rcu_read_lock_held(),
68588 "find_task_by_pid_ns() needs rcu_read_lock()"
68589 " protection");
68590- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68591+
68592+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68593+
68594+ if (gr_pid_is_chrooted(task))
68595+ return NULL;
68596+
68597+ return task;
68598 }
68599
68600 struct task_struct *find_task_by_vpid(pid_t vnr)
68601@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
68602 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68603 }
68604
68605+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68606+{
68607+ rcu_lockdep_assert(rcu_read_lock_held(),
68608+ "find_task_by_pid_ns() needs rcu_read_lock()"
68609+ " protection");
68610+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68611+}
68612+
68613 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68614 {
68615 struct pid *pid;
68616diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
68617index 125cb67..a4d1c30 100644
68618--- a/kernel/posix-cpu-timers.c
68619+++ b/kernel/posix-cpu-timers.c
68620@@ -6,6 +6,7 @@
68621 #include <linux/posix-timers.h>
68622 #include <linux/errno.h>
68623 #include <linux/math64.h>
68624+#include <linux/security.h>
68625 #include <asm/uaccess.h>
68626 #include <linux/kernel_stat.h>
68627 #include <trace/events/timer.h>
68628@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
68629
68630 static __init int init_posix_cpu_timers(void)
68631 {
68632- struct k_clock process = {
68633+ static struct k_clock process = {
68634 .clock_getres = process_cpu_clock_getres,
68635 .clock_get = process_cpu_clock_get,
68636 .timer_create = process_cpu_timer_create,
68637 .nsleep = process_cpu_nsleep,
68638 .nsleep_restart = process_cpu_nsleep_restart,
68639 };
68640- struct k_clock thread = {
68641+ static struct k_clock thread = {
68642 .clock_getres = thread_cpu_clock_getres,
68643 .clock_get = thread_cpu_clock_get,
68644 .timer_create = thread_cpu_timer_create,
68645diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
68646index 69185ae..cc2847a 100644
68647--- a/kernel/posix-timers.c
68648+++ b/kernel/posix-timers.c
68649@@ -43,6 +43,7 @@
68650 #include <linux/idr.h>
68651 #include <linux/posix-clock.h>
68652 #include <linux/posix-timers.h>
68653+#include <linux/grsecurity.h>
68654 #include <linux/syscalls.h>
68655 #include <linux/wait.h>
68656 #include <linux/workqueue.h>
68657@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
68658 * which we beg off on and pass to do_sys_settimeofday().
68659 */
68660
68661-static struct k_clock posix_clocks[MAX_CLOCKS];
68662+static struct k_clock *posix_clocks[MAX_CLOCKS];
68663
68664 /*
68665 * These ones are defined below.
68666@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
68667 */
68668 static __init int init_posix_timers(void)
68669 {
68670- struct k_clock clock_realtime = {
68671+ static struct k_clock clock_realtime = {
68672 .clock_getres = hrtimer_get_res,
68673 .clock_get = posix_clock_realtime_get,
68674 .clock_set = posix_clock_realtime_set,
68675@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
68676 .timer_get = common_timer_get,
68677 .timer_del = common_timer_del,
68678 };
68679- struct k_clock clock_monotonic = {
68680+ static struct k_clock clock_monotonic = {
68681 .clock_getres = hrtimer_get_res,
68682 .clock_get = posix_ktime_get_ts,
68683 .nsleep = common_nsleep,
68684@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
68685 .timer_get = common_timer_get,
68686 .timer_del = common_timer_del,
68687 };
68688- struct k_clock clock_monotonic_raw = {
68689+ static struct k_clock clock_monotonic_raw = {
68690 .clock_getres = hrtimer_get_res,
68691 .clock_get = posix_get_monotonic_raw,
68692 };
68693- struct k_clock clock_realtime_coarse = {
68694+ static struct k_clock clock_realtime_coarse = {
68695 .clock_getres = posix_get_coarse_res,
68696 .clock_get = posix_get_realtime_coarse,
68697 };
68698- struct k_clock clock_monotonic_coarse = {
68699+ static struct k_clock clock_monotonic_coarse = {
68700 .clock_getres = posix_get_coarse_res,
68701 .clock_get = posix_get_monotonic_coarse,
68702 };
68703- struct k_clock clock_boottime = {
68704+ static struct k_clock clock_boottime = {
68705 .clock_getres = hrtimer_get_res,
68706 .clock_get = posix_get_boottime,
68707 .nsleep = common_nsleep,
68708@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
68709 return;
68710 }
68711
68712- posix_clocks[clock_id] = *new_clock;
68713+ posix_clocks[clock_id] = new_clock;
68714 }
68715 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
68716
68717@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
68718 return (id & CLOCKFD_MASK) == CLOCKFD ?
68719 &clock_posix_dynamic : &clock_posix_cpu;
68720
68721- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
68722+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
68723 return NULL;
68724- return &posix_clocks[id];
68725+ return posix_clocks[id];
68726 }
68727
68728 static int common_timer_create(struct k_itimer *new_timer)
68729@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
68730 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68731 return -EFAULT;
68732
68733+ /* only the CLOCK_REALTIME clock can be set, all other clocks
68734+ have their clock_set fptr set to a nosettime dummy function
68735+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68736+ call common_clock_set, which calls do_sys_settimeofday, which
68737+ we hook
68738+ */
68739+
68740 return kc->clock_set(which_clock, &new_tp);
68741 }
68742
68743diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
68744index d523593..68197a4 100644
68745--- a/kernel/power/poweroff.c
68746+++ b/kernel/power/poweroff.c
68747@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
68748 .enable_mask = SYSRQ_ENABLE_BOOT,
68749 };
68750
68751-static int pm_sysrq_init(void)
68752+static int __init pm_sysrq_init(void)
68753 {
68754 register_sysrq_key('o', &sysrq_poweroff_op);
68755 return 0;
68756diff --git a/kernel/power/process.c b/kernel/power/process.c
68757index 7aac07a..2d3c6dc 100644
68758--- a/kernel/power/process.c
68759+++ b/kernel/power/process.c
68760@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
68761 u64 elapsed_csecs64;
68762 unsigned int elapsed_csecs;
68763 bool wakeup = false;
68764+ bool timedout = false;
68765
68766 do_gettimeofday(&start);
68767
68768@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
68769
68770 while (true) {
68771 todo = 0;
68772+ if (time_after(jiffies, end_time))
68773+ timedout = true;
68774 read_lock(&tasklist_lock);
68775 do_each_thread(g, p) {
68776 if (p == current || !freeze_task(p))
68777@@ -60,9 +63,13 @@ static int try_to_freeze_tasks(bool user_only)
68778 * try_to_stop() after schedule() in ptrace/signal
68779 * stop sees TIF_FREEZE.
68780 */
68781- if (!task_is_stopped_or_traced(p) &&
68782- !freezer_should_skip(p))
68783+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
68784 todo++;
68785+ if (timedout) {
68786+ printk(KERN_ERR "Task refusing to freeze:\n");
68787+ sched_show_task(p);
68788+ }
68789+ }
68790 } while_each_thread(g, p);
68791 read_unlock(&tasklist_lock);
68792
68793@@ -71,7 +78,7 @@ static int try_to_freeze_tasks(bool user_only)
68794 todo += wq_busy;
68795 }
68796
68797- if (!todo || time_after(jiffies, end_time))
68798+ if (!todo || timedout)
68799 break;
68800
68801 if (pm_wakeup_pending()) {
68802diff --git a/kernel/printk.c b/kernel/printk.c
68803index 32690a0..cd7c798 100644
68804--- a/kernel/printk.c
68805+++ b/kernel/printk.c
68806@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
68807 if (from_file && type != SYSLOG_ACTION_OPEN)
68808 return 0;
68809
68810+#ifdef CONFIG_GRKERNSEC_DMESG
68811+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
68812+ return -EPERM;
68813+#endif
68814+
68815 if (syslog_action_restricted(type)) {
68816 if (capable(CAP_SYSLOG))
68817 return 0;
68818diff --git a/kernel/profile.c b/kernel/profile.c
68819index 76b8e77..a2930e8 100644
68820--- a/kernel/profile.c
68821+++ b/kernel/profile.c
68822@@ -39,7 +39,7 @@ struct profile_hit {
68823 /* Oprofile timer tick hook */
68824 static int (*timer_hook)(struct pt_regs *) __read_mostly;
68825
68826-static atomic_t *prof_buffer;
68827+static atomic_unchecked_t *prof_buffer;
68828 static unsigned long prof_len, prof_shift;
68829
68830 int prof_on __read_mostly;
68831@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
68832 hits[i].pc = 0;
68833 continue;
68834 }
68835- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68836+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68837 hits[i].hits = hits[i].pc = 0;
68838 }
68839 }
68840@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68841 * Add the current hit(s) and flush the write-queue out
68842 * to the global buffer:
68843 */
68844- atomic_add(nr_hits, &prof_buffer[pc]);
68845+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
68846 for (i = 0; i < NR_PROFILE_HIT; ++i) {
68847- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68848+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68849 hits[i].pc = hits[i].hits = 0;
68850 }
68851 out:
68852@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68853 {
68854 unsigned long pc;
68855 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68856- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68857+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68858 }
68859 #endif /* !CONFIG_SMP */
68860
68861@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
68862 return -EFAULT;
68863 buf++; p++; count--; read++;
68864 }
68865- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68866+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68867 if (copy_to_user(buf, (void *)pnt, count))
68868 return -EFAULT;
68869 read += count;
68870@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
68871 }
68872 #endif
68873 profile_discard_flip_buffers();
68874- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68875+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68876 return count;
68877 }
68878
68879diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68880index 00ab2ca..d237f61 100644
68881--- a/kernel/ptrace.c
68882+++ b/kernel/ptrace.c
68883@@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68884 task->ptrace = PT_PTRACED;
68885 if (seize)
68886 task->ptrace |= PT_SEIZED;
68887- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68888+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
68889 task->ptrace |= PT_PTRACE_CAP;
68890
68891 __ptrace_link(task, current);
68892@@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68893 break;
68894 return -EIO;
68895 }
68896- if (copy_to_user(dst, buf, retval))
68897+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68898 return -EFAULT;
68899 copied += retval;
68900 src += retval;
68901@@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
68902 bool seized = child->ptrace & PT_SEIZED;
68903 int ret = -EIO;
68904 siginfo_t siginfo, *si;
68905- void __user *datavp = (void __user *) data;
68906+ void __user *datavp = (__force void __user *) data;
68907 unsigned long __user *datalp = datavp;
68908 unsigned long flags;
68909
68910@@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68911 goto out;
68912 }
68913
68914+ if (gr_handle_ptrace(child, request)) {
68915+ ret = -EPERM;
68916+ goto out_put_task_struct;
68917+ }
68918+
68919 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68920 ret = ptrace_attach(child, request, data);
68921 /*
68922 * Some architectures need to do book-keeping after
68923 * a ptrace attach.
68924 */
68925- if (!ret)
68926+ if (!ret) {
68927 arch_ptrace_attach(child);
68928+ gr_audit_ptrace(child);
68929+ }
68930 goto out_put_task_struct;
68931 }
68932
68933@@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68934 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68935 if (copied != sizeof(tmp))
68936 return -EIO;
68937- return put_user(tmp, (unsigned long __user *)data);
68938+ return put_user(tmp, (__force unsigned long __user *)data);
68939 }
68940
68941 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68942@@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68943 goto out;
68944 }
68945
68946+ if (gr_handle_ptrace(child, request)) {
68947+ ret = -EPERM;
68948+ goto out_put_task_struct;
68949+ }
68950+
68951 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68952 ret = ptrace_attach(child, request, data);
68953 /*
68954 * Some architectures need to do book-keeping after
68955 * a ptrace attach.
68956 */
68957- if (!ret)
68958+ if (!ret) {
68959 arch_ptrace_attach(child);
68960+ gr_audit_ptrace(child);
68961+ }
68962 goto out_put_task_struct;
68963 }
68964
68965diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68966index 977296d..c4744dc 100644
68967--- a/kernel/rcutiny.c
68968+++ b/kernel/rcutiny.c
68969@@ -46,7 +46,7 @@
68970 struct rcu_ctrlblk;
68971 static void invoke_rcu_callbacks(void);
68972 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68973-static void rcu_process_callbacks(struct softirq_action *unused);
68974+static void rcu_process_callbacks(void);
68975 static void __call_rcu(struct rcu_head *head,
68976 void (*func)(struct rcu_head *rcu),
68977 struct rcu_ctrlblk *rcp);
68978@@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68979 rcu_is_callbacks_kthread()));
68980 }
68981
68982-static void rcu_process_callbacks(struct softirq_action *unused)
68983+static void rcu_process_callbacks(void)
68984 {
68985 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68986 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68987diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68988index 9cb1ae4..aac7d3e 100644
68989--- a/kernel/rcutiny_plugin.h
68990+++ b/kernel/rcutiny_plugin.h
68991@@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
68992 have_rcu_kthread_work = morework;
68993 local_irq_restore(flags);
68994 if (work)
68995- rcu_process_callbacks(NULL);
68996+ rcu_process_callbacks();
68997 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68998 }
68999
69000diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
69001index a58ac28..196a3d8 100644
69002--- a/kernel/rcutorture.c
69003+++ b/kernel/rcutorture.c
69004@@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
69005 { 0 };
69006 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69007 { 0 };
69008-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69009-static atomic_t n_rcu_torture_alloc;
69010-static atomic_t n_rcu_torture_alloc_fail;
69011-static atomic_t n_rcu_torture_free;
69012-static atomic_t n_rcu_torture_mberror;
69013-static atomic_t n_rcu_torture_error;
69014+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69015+static atomic_unchecked_t n_rcu_torture_alloc;
69016+static atomic_unchecked_t n_rcu_torture_alloc_fail;
69017+static atomic_unchecked_t n_rcu_torture_free;
69018+static atomic_unchecked_t n_rcu_torture_mberror;
69019+static atomic_unchecked_t n_rcu_torture_error;
69020 static long n_rcu_torture_boost_ktrerror;
69021 static long n_rcu_torture_boost_rterror;
69022 static long n_rcu_torture_boost_failure;
69023@@ -243,11 +243,11 @@ rcu_torture_alloc(void)
69024
69025 spin_lock_bh(&rcu_torture_lock);
69026 if (list_empty(&rcu_torture_freelist)) {
69027- atomic_inc(&n_rcu_torture_alloc_fail);
69028+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69029 spin_unlock_bh(&rcu_torture_lock);
69030 return NULL;
69031 }
69032- atomic_inc(&n_rcu_torture_alloc);
69033+ atomic_inc_unchecked(&n_rcu_torture_alloc);
69034 p = rcu_torture_freelist.next;
69035 list_del_init(p);
69036 spin_unlock_bh(&rcu_torture_lock);
69037@@ -260,7 +260,7 @@ rcu_torture_alloc(void)
69038 static void
69039 rcu_torture_free(struct rcu_torture *p)
69040 {
69041- atomic_inc(&n_rcu_torture_free);
69042+ atomic_inc_unchecked(&n_rcu_torture_free);
69043 spin_lock_bh(&rcu_torture_lock);
69044 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69045 spin_unlock_bh(&rcu_torture_lock);
69046@@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
69047 i = rp->rtort_pipe_count;
69048 if (i > RCU_TORTURE_PIPE_LEN)
69049 i = RCU_TORTURE_PIPE_LEN;
69050- atomic_inc(&rcu_torture_wcount[i]);
69051+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69052 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69053 rp->rtort_mbtest = 0;
69054 rcu_torture_free(rp);
69055@@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
69056 i = rp->rtort_pipe_count;
69057 if (i > RCU_TORTURE_PIPE_LEN)
69058 i = RCU_TORTURE_PIPE_LEN;
69059- atomic_inc(&rcu_torture_wcount[i]);
69060+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69061 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69062 rp->rtort_mbtest = 0;
69063 list_del(&rp->rtort_free);
69064@@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
69065 i = old_rp->rtort_pipe_count;
69066 if (i > RCU_TORTURE_PIPE_LEN)
69067 i = RCU_TORTURE_PIPE_LEN;
69068- atomic_inc(&rcu_torture_wcount[i]);
69069+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69070 old_rp->rtort_pipe_count++;
69071 cur_ops->deferred_free(old_rp);
69072 }
69073@@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
69074 return;
69075 }
69076 if (p->rtort_mbtest == 0)
69077- atomic_inc(&n_rcu_torture_mberror);
69078+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69079 spin_lock(&rand_lock);
69080 cur_ops->read_delay(&rand);
69081 n_rcu_torture_timers++;
69082@@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
69083 continue;
69084 }
69085 if (p->rtort_mbtest == 0)
69086- atomic_inc(&n_rcu_torture_mberror);
69087+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69088 cur_ops->read_delay(&rand);
69089 preempt_disable();
69090 pipe_count = p->rtort_pipe_count;
69091@@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
69092 rcu_torture_current,
69093 rcu_torture_current_version,
69094 list_empty(&rcu_torture_freelist),
69095- atomic_read(&n_rcu_torture_alloc),
69096- atomic_read(&n_rcu_torture_alloc_fail),
69097- atomic_read(&n_rcu_torture_free),
69098- atomic_read(&n_rcu_torture_mberror),
69099+ atomic_read_unchecked(&n_rcu_torture_alloc),
69100+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69101+ atomic_read_unchecked(&n_rcu_torture_free),
69102+ atomic_read_unchecked(&n_rcu_torture_mberror),
69103 n_rcu_torture_boost_ktrerror,
69104 n_rcu_torture_boost_rterror,
69105 n_rcu_torture_boost_failure,
69106@@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
69107 n_online_attempts,
69108 n_offline_successes,
69109 n_offline_attempts);
69110- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
69111+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
69112 n_rcu_torture_boost_ktrerror != 0 ||
69113 n_rcu_torture_boost_rterror != 0 ||
69114 n_rcu_torture_boost_failure != 0)
69115@@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
69116 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69117 if (i > 1) {
69118 cnt += sprintf(&page[cnt], "!!! ");
69119- atomic_inc(&n_rcu_torture_error);
69120+ atomic_inc_unchecked(&n_rcu_torture_error);
69121 WARN_ON_ONCE(1);
69122 }
69123 cnt += sprintf(&page[cnt], "Reader Pipe: ");
69124@@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
69125 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69126 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69127 cnt += sprintf(&page[cnt], " %d",
69128- atomic_read(&rcu_torture_wcount[i]));
69129+ atomic_read_unchecked(&rcu_torture_wcount[i]));
69130 }
69131 cnt += sprintf(&page[cnt], "\n");
69132 if (cur_ops->stats)
69133@@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
69134
69135 if (cur_ops->cleanup)
69136 cur_ops->cleanup();
69137- if (atomic_read(&n_rcu_torture_error))
69138+ if (atomic_read_unchecked(&n_rcu_torture_error))
69139 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
69140 else
69141 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
69142@@ -1664,17 +1664,17 @@ rcu_torture_init(void)
69143
69144 rcu_torture_current = NULL;
69145 rcu_torture_current_version = 0;
69146- atomic_set(&n_rcu_torture_alloc, 0);
69147- atomic_set(&n_rcu_torture_alloc_fail, 0);
69148- atomic_set(&n_rcu_torture_free, 0);
69149- atomic_set(&n_rcu_torture_mberror, 0);
69150- atomic_set(&n_rcu_torture_error, 0);
69151+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69152+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69153+ atomic_set_unchecked(&n_rcu_torture_free, 0);
69154+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69155+ atomic_set_unchecked(&n_rcu_torture_error, 0);
69156 n_rcu_torture_boost_ktrerror = 0;
69157 n_rcu_torture_boost_rterror = 0;
69158 n_rcu_torture_boost_failure = 0;
69159 n_rcu_torture_boosts = 0;
69160 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69161- atomic_set(&rcu_torture_wcount[i], 0);
69162+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69163 for_each_possible_cpu(cpu) {
69164 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69165 per_cpu(rcu_torture_count, cpu)[i] = 0;
69166diff --git a/kernel/rcutree.c b/kernel/rcutree.c
69167index 6c4a672..70f3202 100644
69168--- a/kernel/rcutree.c
69169+++ b/kernel/rcutree.c
69170@@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
69171 rcu_prepare_for_idle(smp_processor_id());
69172 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69173 smp_mb__before_atomic_inc(); /* See above. */
69174- atomic_inc(&rdtp->dynticks);
69175+ atomic_inc_unchecked(&rdtp->dynticks);
69176 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
69177- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69178+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69179 }
69180
69181 /**
69182@@ -438,10 +438,10 @@ void rcu_irq_exit(void)
69183 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
69184 {
69185 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
69186- atomic_inc(&rdtp->dynticks);
69187+ atomic_inc_unchecked(&rdtp->dynticks);
69188 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69189 smp_mb__after_atomic_inc(); /* See above. */
69190- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69191+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69192 rcu_cleanup_after_idle(smp_processor_id());
69193 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
69194 if (!is_idle_task(current)) {
69195@@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
69196 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
69197
69198 if (rdtp->dynticks_nmi_nesting == 0 &&
69199- (atomic_read(&rdtp->dynticks) & 0x1))
69200+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
69201 return;
69202 rdtp->dynticks_nmi_nesting++;
69203 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
69204- atomic_inc(&rdtp->dynticks);
69205+ atomic_inc_unchecked(&rdtp->dynticks);
69206 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69207 smp_mb__after_atomic_inc(); /* See above. */
69208- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69209+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69210 }
69211
69212 /**
69213@@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
69214 return;
69215 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69216 smp_mb__before_atomic_inc(); /* See above. */
69217- atomic_inc(&rdtp->dynticks);
69218+ atomic_inc_unchecked(&rdtp->dynticks);
69219 smp_mb__after_atomic_inc(); /* Force delay to next write. */
69220- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69221+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69222 }
69223
69224 #ifdef CONFIG_PROVE_RCU
69225@@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
69226 int ret;
69227
69228 preempt_disable();
69229- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69230+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69231 preempt_enable();
69232 return ret;
69233 }
69234@@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
69235 */
69236 static int dyntick_save_progress_counter(struct rcu_data *rdp)
69237 {
69238- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
69239+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69240 return (rdp->dynticks_snap & 0x1) == 0;
69241 }
69242
69243@@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
69244 unsigned int curr;
69245 unsigned int snap;
69246
69247- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
69248+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69249 snap = (unsigned int)rdp->dynticks_snap;
69250
69251 /*
69252@@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
69253 /*
69254 * Do RCU core processing for the current CPU.
69255 */
69256-static void rcu_process_callbacks(struct softirq_action *unused)
69257+static void rcu_process_callbacks(void)
69258 {
69259 trace_rcu_utilization("Start RCU core");
69260 __rcu_process_callbacks(&rcu_sched_state,
69261@@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
69262 rdp->qlen = 0;
69263 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
69264 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
69265- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
69266+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
69267 rdp->cpu = cpu;
69268 rdp->rsp = rsp;
69269 raw_spin_unlock_irqrestore(&rnp->lock, flags);
69270@@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
69271 rdp->n_force_qs_snap = rsp->n_force_qs;
69272 rdp->blimit = blimit;
69273 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
69274- atomic_set(&rdp->dynticks->dynticks,
69275- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
69276+ atomic_set_unchecked(&rdp->dynticks->dynticks,
69277+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
69278 rcu_prepare_for_idle_init(cpu);
69279 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
69280
69281diff --git a/kernel/rcutree.h b/kernel/rcutree.h
69282index fddff92..2c08359 100644
69283--- a/kernel/rcutree.h
69284+++ b/kernel/rcutree.h
69285@@ -87,7 +87,7 @@ struct rcu_dynticks {
69286 long long dynticks_nesting; /* Track irq/process nesting level. */
69287 /* Process level is worth LLONG_MAX/2. */
69288 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69289- atomic_t dynticks; /* Even value for idle, else odd. */
69290+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
69291 };
69292
69293 /* RCU's kthread states for tracing. */
69294diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
69295index 8bb35d7..6ea0a463 100644
69296--- a/kernel/rcutree_plugin.h
69297+++ b/kernel/rcutree_plugin.h
69298@@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
69299
69300 /* Clean up and exit. */
69301 smp_mb(); /* ensure expedited GP seen before counter increment. */
69302- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69303+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69304 unlock_mb_ret:
69305 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69306 mb_ret:
69307@@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
69308
69309 #else /* #ifndef CONFIG_SMP */
69310
69311-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69312-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69313+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69314+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69315
69316 static int synchronize_sched_expedited_cpu_stop(void *data)
69317 {
69318@@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
69319 int firstsnap, s, snap, trycount = 0;
69320
69321 /* Note that atomic_inc_return() implies full memory barrier. */
69322- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69323+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69324 get_online_cpus();
69325
69326 /*
69327@@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
69328 }
69329
69330 /* Check to see if someone else did our work for us. */
69331- s = atomic_read(&sync_sched_expedited_done);
69332+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69333 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69334 smp_mb(); /* ensure test happens before caller kfree */
69335 return;
69336@@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
69337 * grace period works for us.
69338 */
69339 get_online_cpus();
69340- snap = atomic_read(&sync_sched_expedited_started);
69341+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
69342 smp_mb(); /* ensure read is before try_stop_cpus(). */
69343 }
69344
69345@@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
69346 * than we did beat us to the punch.
69347 */
69348 do {
69349- s = atomic_read(&sync_sched_expedited_done);
69350+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69351 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69352 smp_mb(); /* ensure test happens before caller kfree */
69353 break;
69354 }
69355- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69356+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69357
69358 put_online_cpus();
69359 }
69360diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69361index 654cfe6..c0b28e2 100644
69362--- a/kernel/rcutree_trace.c
69363+++ b/kernel/rcutree_trace.c
69364@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69365 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69366 rdp->qs_pending);
69367 seq_printf(m, " dt=%d/%llx/%d df=%lu",
69368- atomic_read(&rdp->dynticks->dynticks),
69369+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69370 rdp->dynticks->dynticks_nesting,
69371 rdp->dynticks->dynticks_nmi_nesting,
69372 rdp->dynticks_fqs);
69373@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69374 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69375 rdp->qs_pending);
69376 seq_printf(m, ",%d,%llx,%d,%lu",
69377- atomic_read(&rdp->dynticks->dynticks),
69378+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69379 rdp->dynticks->dynticks_nesting,
69380 rdp->dynticks->dynticks_nmi_nesting,
69381 rdp->dynticks_fqs);
69382diff --git a/kernel/resource.c b/kernel/resource.c
69383index 7640b3a..5879283 100644
69384--- a/kernel/resource.c
69385+++ b/kernel/resource.c
69386@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69387
69388 static int __init ioresources_init(void)
69389 {
69390+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69391+#ifdef CONFIG_GRKERNSEC_PROC_USER
69392+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69393+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69394+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69395+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69396+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69397+#endif
69398+#else
69399 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69400 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69401+#endif
69402 return 0;
69403 }
69404 __initcall(ioresources_init);
69405diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69406index 98ec494..4241d6d 100644
69407--- a/kernel/rtmutex-tester.c
69408+++ b/kernel/rtmutex-tester.c
69409@@ -20,7 +20,7 @@
69410 #define MAX_RT_TEST_MUTEXES 8
69411
69412 static spinlock_t rttest_lock;
69413-static atomic_t rttest_event;
69414+static atomic_unchecked_t rttest_event;
69415
69416 struct test_thread_data {
69417 int opcode;
69418@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69419
69420 case RTTEST_LOCKCONT:
69421 td->mutexes[td->opdata] = 1;
69422- td->event = atomic_add_return(1, &rttest_event);
69423+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69424 return 0;
69425
69426 case RTTEST_RESET:
69427@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69428 return 0;
69429
69430 case RTTEST_RESETEVENT:
69431- atomic_set(&rttest_event, 0);
69432+ atomic_set_unchecked(&rttest_event, 0);
69433 return 0;
69434
69435 default:
69436@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69437 return ret;
69438
69439 td->mutexes[id] = 1;
69440- td->event = atomic_add_return(1, &rttest_event);
69441+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69442 rt_mutex_lock(&mutexes[id]);
69443- td->event = atomic_add_return(1, &rttest_event);
69444+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69445 td->mutexes[id] = 4;
69446 return 0;
69447
69448@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69449 return ret;
69450
69451 td->mutexes[id] = 1;
69452- td->event = atomic_add_return(1, &rttest_event);
69453+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69454 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69455- td->event = atomic_add_return(1, &rttest_event);
69456+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69457 td->mutexes[id] = ret ? 0 : 4;
69458 return ret ? -EINTR : 0;
69459
69460@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69461 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69462 return ret;
69463
69464- td->event = atomic_add_return(1, &rttest_event);
69465+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69466 rt_mutex_unlock(&mutexes[id]);
69467- td->event = atomic_add_return(1, &rttest_event);
69468+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69469 td->mutexes[id] = 0;
69470 return 0;
69471
69472@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69473 break;
69474
69475 td->mutexes[dat] = 2;
69476- td->event = atomic_add_return(1, &rttest_event);
69477+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69478 break;
69479
69480 default:
69481@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69482 return;
69483
69484 td->mutexes[dat] = 3;
69485- td->event = atomic_add_return(1, &rttest_event);
69486+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69487 break;
69488
69489 case RTTEST_LOCKNOWAIT:
69490@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69491 return;
69492
69493 td->mutexes[dat] = 1;
69494- td->event = atomic_add_return(1, &rttest_event);
69495+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69496 return;
69497
69498 default:
69499diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
69500index e8a1f83..363d17d 100644
69501--- a/kernel/sched/auto_group.c
69502+++ b/kernel/sched/auto_group.c
69503@@ -11,7 +11,7 @@
69504
69505 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69506 static struct autogroup autogroup_default;
69507-static atomic_t autogroup_seq_nr;
69508+static atomic_unchecked_t autogroup_seq_nr;
69509
69510 void __init autogroup_init(struct task_struct *init_task)
69511 {
69512@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
69513
69514 kref_init(&ag->kref);
69515 init_rwsem(&ag->lock);
69516- ag->id = atomic_inc_return(&autogroup_seq_nr);
69517+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69518 ag->tg = tg;
69519 #ifdef CONFIG_RT_GROUP_SCHED
69520 /*
69521diff --git a/kernel/sched/core.c b/kernel/sched/core.c
69522index 478a04c..e16339a 100644
69523--- a/kernel/sched/core.c
69524+++ b/kernel/sched/core.c
69525@@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice)
69526 /* convert nice value [19,-20] to rlimit style value [1,40] */
69527 int nice_rlim = 20 - nice;
69528
69529+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69530+
69531 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
69532 capable(CAP_SYS_NICE));
69533 }
69534@@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69535 if (nice > 19)
69536 nice = 19;
69537
69538- if (increment < 0 && !can_nice(current, nice))
69539+ if (increment < 0 && (!can_nice(current, nice) ||
69540+ gr_handle_chroot_nice()))
69541 return -EPERM;
69542
69543 retval = security_task_setnice(current, nice);
69544@@ -4041,6 +4044,7 @@ recheck:
69545 unsigned long rlim_rtprio =
69546 task_rlimit(p, RLIMIT_RTPRIO);
69547
69548+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
69549 /* can't set/change the rt policy */
69550 if (policy != p->policy && !rlim_rtprio)
69551 return -EPERM;
69552diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
69553index aca16b8..8e3acc4 100644
69554--- a/kernel/sched/fair.c
69555+++ b/kernel/sched/fair.c
69556@@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
69557 * run_rebalance_domains is triggered when needed from the scheduler tick.
69558 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
69559 */
69560-static void run_rebalance_domains(struct softirq_action *h)
69561+static void run_rebalance_domains(void)
69562 {
69563 int this_cpu = smp_processor_id();
69564 struct rq *this_rq = cpu_rq(this_cpu);
69565diff --git a/kernel/signal.c b/kernel/signal.c
69566index b09cf3b..b291c66 100644
69567--- a/kernel/signal.c
69568+++ b/kernel/signal.c
69569@@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
69570
69571 int print_fatal_signals __read_mostly;
69572
69573-static void __user *sig_handler(struct task_struct *t, int sig)
69574+static __sighandler_t sig_handler(struct task_struct *t, int sig)
69575 {
69576 return t->sighand->action[sig - 1].sa.sa_handler;
69577 }
69578
69579-static int sig_handler_ignored(void __user *handler, int sig)
69580+static int sig_handler_ignored(__sighandler_t handler, int sig)
69581 {
69582 /* Is it explicitly or implicitly ignored? */
69583 return handler == SIG_IGN ||
69584@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
69585 static int sig_task_ignored(struct task_struct *t, int sig,
69586 int from_ancestor_ns)
69587 {
69588- void __user *handler;
69589+ __sighandler_t handler;
69590
69591 handler = sig_handler(t, sig);
69592
69593@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
69594 atomic_inc(&user->sigpending);
69595 rcu_read_unlock();
69596
69597+ if (!override_rlimit)
69598+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
69599+
69600 if (override_rlimit ||
69601 atomic_read(&user->sigpending) <=
69602 task_rlimit(t, RLIMIT_SIGPENDING)) {
69603@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
69604
69605 int unhandled_signal(struct task_struct *tsk, int sig)
69606 {
69607- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69608+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69609 if (is_global_init(tsk))
69610 return 1;
69611 if (handler != SIG_IGN && handler != SIG_DFL)
69612@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
69613 }
69614 }
69615
69616+ /* allow glibc communication via tgkill to other threads in our
69617+ thread group */
69618+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69619+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69620+ && gr_handle_signal(t, sig))
69621+ return -EPERM;
69622+
69623 return security_task_kill(t, info, sig, 0);
69624 }
69625
69626@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69627 return send_signal(sig, info, p, 1);
69628 }
69629
69630-static int
69631+int
69632 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69633 {
69634 return send_signal(sig, info, t, 0);
69635@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69636 unsigned long int flags;
69637 int ret, blocked, ignored;
69638 struct k_sigaction *action;
69639+ int is_unhandled = 0;
69640
69641 spin_lock_irqsave(&t->sighand->siglock, flags);
69642 action = &t->sighand->action[sig-1];
69643@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69644 }
69645 if (action->sa.sa_handler == SIG_DFL)
69646 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69647+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69648+ is_unhandled = 1;
69649 ret = specific_send_sig_info(sig, info, t);
69650 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69651
69652+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
69653+ normal operation */
69654+ if (is_unhandled) {
69655+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69656+ gr_handle_crash(t, sig);
69657+ }
69658+
69659 return ret;
69660 }
69661
69662@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69663 ret = check_kill_permission(sig, info, p);
69664 rcu_read_unlock();
69665
69666- if (!ret && sig)
69667+ if (!ret && sig) {
69668 ret = do_send_sig_info(sig, info, p, true);
69669+ if (!ret)
69670+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
69671+ }
69672
69673 return ret;
69674 }
69675@@ -2829,7 +2852,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
69676 int error = -ESRCH;
69677
69678 rcu_read_lock();
69679- p = find_task_by_vpid(pid);
69680+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69681+ /* allow glibc communication via tgkill to other threads in our
69682+ thread group */
69683+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69684+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
69685+ p = find_task_by_vpid_unrestricted(pid);
69686+ else
69687+#endif
69688+ p = find_task_by_vpid(pid);
69689 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69690 error = check_kill_permission(sig, info, p);
69691 /*
69692diff --git a/kernel/smp.c b/kernel/smp.c
69693index db197d6..17aef0b 100644
69694--- a/kernel/smp.c
69695+++ b/kernel/smp.c
69696@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
69697 }
69698 EXPORT_SYMBOL(smp_call_function);
69699
69700-void ipi_call_lock(void)
69701+void ipi_call_lock(void) __acquires(call_function.lock)
69702 {
69703 raw_spin_lock(&call_function.lock);
69704 }
69705
69706-void ipi_call_unlock(void)
69707+void ipi_call_unlock(void) __releases(call_function.lock)
69708 {
69709 raw_spin_unlock(&call_function.lock);
69710 }
69711
69712-void ipi_call_lock_irq(void)
69713+void ipi_call_lock_irq(void) __acquires(call_function.lock)
69714 {
69715 raw_spin_lock_irq(&call_function.lock);
69716 }
69717
69718-void ipi_call_unlock_irq(void)
69719+void ipi_call_unlock_irq(void) __releases(call_function.lock)
69720 {
69721 raw_spin_unlock_irq(&call_function.lock);
69722 }
69723diff --git a/kernel/softirq.c b/kernel/softirq.c
69724index 4eb3a0f..6f1fa81 100644
69725--- a/kernel/softirq.c
69726+++ b/kernel/softirq.c
69727@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
69728
69729 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
69730
69731-char *softirq_to_name[NR_SOFTIRQS] = {
69732+const char * const softirq_to_name[NR_SOFTIRQS] = {
69733 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
69734 "TASKLET", "SCHED", "HRTIMER", "RCU"
69735 };
69736@@ -235,7 +235,7 @@ restart:
69737 kstat_incr_softirqs_this_cpu(vec_nr);
69738
69739 trace_softirq_entry(vec_nr);
69740- h->action(h);
69741+ h->action();
69742 trace_softirq_exit(vec_nr);
69743 if (unlikely(prev_count != preempt_count())) {
69744 printk(KERN_ERR "huh, entered softirq %u %s %p"
69745@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
69746 local_irq_restore(flags);
69747 }
69748
69749-void open_softirq(int nr, void (*action)(struct softirq_action *))
69750+void open_softirq(int nr, void (*action)(void))
69751 {
69752- softirq_vec[nr].action = action;
69753+ pax_open_kernel();
69754+ *(void **)&softirq_vec[nr].action = action;
69755+ pax_close_kernel();
69756 }
69757
69758 /*
69759@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
69760
69761 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
69762
69763-static void tasklet_action(struct softirq_action *a)
69764+static void tasklet_action(void)
69765 {
69766 struct tasklet_struct *list;
69767
69768@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
69769 }
69770 }
69771
69772-static void tasklet_hi_action(struct softirq_action *a)
69773+static void tasklet_hi_action(void)
69774 {
69775 struct tasklet_struct *list;
69776
69777diff --git a/kernel/sys.c b/kernel/sys.c
69778index 888d227..f04b318 100644
69779--- a/kernel/sys.c
69780+++ b/kernel/sys.c
69781@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
69782 error = -EACCES;
69783 goto out;
69784 }
69785+
69786+ if (gr_handle_chroot_setpriority(p, niceval)) {
69787+ error = -EACCES;
69788+ goto out;
69789+ }
69790+
69791 no_nice = security_task_setnice(p, niceval);
69792 if (no_nice) {
69793 error = no_nice;
69794@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
69795 goto error;
69796 }
69797
69798+ if (gr_check_group_change(new->gid, new->egid, -1))
69799+ goto error;
69800+
69801 if (rgid != (gid_t) -1 ||
69802 (egid != (gid_t) -1 && egid != old->gid))
69803 new->sgid = new->egid;
69804@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
69805 old = current_cred();
69806
69807 retval = -EPERM;
69808+
69809+ if (gr_check_group_change(gid, gid, gid))
69810+ goto error;
69811+
69812 if (nsown_capable(CAP_SETGID))
69813 new->gid = new->egid = new->sgid = new->fsgid = gid;
69814 else if (gid == old->gid || gid == old->sgid)
69815@@ -618,7 +631,7 @@ error:
69816 /*
69817 * change the user struct in a credentials set to match the new UID
69818 */
69819-static int set_user(struct cred *new)
69820+int set_user(struct cred *new)
69821 {
69822 struct user_struct *new_user;
69823
69824@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
69825 goto error;
69826 }
69827
69828+ if (gr_check_user_change(new->uid, new->euid, -1))
69829+ goto error;
69830+
69831 if (new->uid != old->uid) {
69832 retval = set_user(new);
69833 if (retval < 0)
69834@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
69835 old = current_cred();
69836
69837 retval = -EPERM;
69838+
69839+ if (gr_check_crash_uid(uid))
69840+ goto error;
69841+ if (gr_check_user_change(uid, uid, uid))
69842+ goto error;
69843+
69844 if (nsown_capable(CAP_SETUID)) {
69845 new->suid = new->uid = uid;
69846 if (uid != old->uid) {
69847@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
69848 goto error;
69849 }
69850
69851+ if (gr_check_user_change(ruid, euid, -1))
69852+ goto error;
69853+
69854 if (ruid != (uid_t) -1) {
69855 new->uid = ruid;
69856 if (ruid != old->uid) {
69857@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69858 goto error;
69859 }
69860
69861+ if (gr_check_group_change(rgid, egid, -1))
69862+ goto error;
69863+
69864 if (rgid != (gid_t) -1)
69865 new->gid = rgid;
69866 if (egid != (gid_t) -1)
69867@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69868 old = current_cred();
69869 old_fsuid = old->fsuid;
69870
69871+ if (gr_check_user_change(-1, -1, uid))
69872+ goto error;
69873+
69874 if (uid == old->uid || uid == old->euid ||
69875 uid == old->suid || uid == old->fsuid ||
69876 nsown_capable(CAP_SETUID)) {
69877@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69878 }
69879 }
69880
69881+error:
69882 abort_creds(new);
69883 return old_fsuid;
69884
69885@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69886 if (gid == old->gid || gid == old->egid ||
69887 gid == old->sgid || gid == old->fsgid ||
69888 nsown_capable(CAP_SETGID)) {
69889+ if (gr_check_group_change(-1, -1, gid))
69890+ goto error;
69891+
69892 if (gid != old_fsgid) {
69893 new->fsgid = gid;
69894 goto change_okay;
69895 }
69896 }
69897
69898+error:
69899 abort_creds(new);
69900 return old_fsgid;
69901
69902@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
69903 }
69904 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69905 snprintf(buf, len, "2.6.%u%s", v, rest);
69906- ret = copy_to_user(release, buf, len);
69907+ if (len > sizeof(buf))
69908+ ret = -EFAULT;
69909+ else
69910+ ret = copy_to_user(release, buf, len);
69911 }
69912 return ret;
69913 }
69914@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69915 return -EFAULT;
69916
69917 down_read(&uts_sem);
69918- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69919+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69920 __OLD_UTS_LEN);
69921 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69922- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69923+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69924 __OLD_UTS_LEN);
69925 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69926- error |= __copy_to_user(&name->release, &utsname()->release,
69927+ error |= __copy_to_user(name->release, &utsname()->release,
69928 __OLD_UTS_LEN);
69929 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69930- error |= __copy_to_user(&name->version, &utsname()->version,
69931+ error |= __copy_to_user(name->version, &utsname()->version,
69932 __OLD_UTS_LEN);
69933 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69934- error |= __copy_to_user(&name->machine, &utsname()->machine,
69935+ error |= __copy_to_user(name->machine, &utsname()->machine,
69936 __OLD_UTS_LEN);
69937 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69938 up_read(&uts_sem);
69939@@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69940 error = get_dumpable(me->mm);
69941 break;
69942 case PR_SET_DUMPABLE:
69943- if (arg2 < 0 || arg2 > 1) {
69944+ if (arg2 > 1) {
69945 error = -EINVAL;
69946 break;
69947 }
69948diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69949index f03a6ef..5fcc8af 100644
69950--- a/kernel/sysctl.c
69951+++ b/kernel/sysctl.c
69952@@ -86,6 +86,13 @@
69953
69954
69955 #if defined(CONFIG_SYSCTL)
69956+#include <linux/grsecurity.h>
69957+#include <linux/grinternal.h>
69958+
69959+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69960+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69961+ const int op);
69962+extern int gr_handle_chroot_sysctl(const int op);
69963
69964 /* External variables not in a header file. */
69965 extern int sysctl_overcommit_memory;
69966@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69967 }
69968
69969 #endif
69970+extern struct ctl_table grsecurity_table[];
69971
69972 static struct ctl_table root_table[];
69973 static struct ctl_table_root sysctl_table_root;
69974@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
69975 int sysctl_legacy_va_layout;
69976 #endif
69977
69978+#ifdef CONFIG_PAX_SOFTMODE
69979+static ctl_table pax_table[] = {
69980+ {
69981+ .procname = "softmode",
69982+ .data = &pax_softmode,
69983+ .maxlen = sizeof(unsigned int),
69984+ .mode = 0600,
69985+ .proc_handler = &proc_dointvec,
69986+ },
69987+
69988+ { }
69989+};
69990+#endif
69991+
69992 /* The default sysctl tables: */
69993
69994 static struct ctl_table root_table[] = {
69995@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
69996 #endif
69997
69998 static struct ctl_table kern_table[] = {
69999+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70000+ {
70001+ .procname = "grsecurity",
70002+ .mode = 0500,
70003+ .child = grsecurity_table,
70004+ },
70005+#endif
70006+
70007+#ifdef CONFIG_PAX_SOFTMODE
70008+ {
70009+ .procname = "pax",
70010+ .mode = 0500,
70011+ .child = pax_table,
70012+ },
70013+#endif
70014+
70015 {
70016 .procname = "sched_child_runs_first",
70017 .data = &sysctl_sched_child_runs_first,
70018@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
70019 .data = &modprobe_path,
70020 .maxlen = KMOD_PATH_LEN,
70021 .mode = 0644,
70022- .proc_handler = proc_dostring,
70023+ .proc_handler = proc_dostring_modpriv,
70024 },
70025 {
70026 .procname = "modules_disabled",
70027@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
70028 .extra1 = &zero,
70029 .extra2 = &one,
70030 },
70031+#endif
70032 {
70033 .procname = "kptr_restrict",
70034 .data = &kptr_restrict,
70035 .maxlen = sizeof(int),
70036 .mode = 0644,
70037 .proc_handler = proc_dointvec_minmax_sysadmin,
70038+#ifdef CONFIG_GRKERNSEC_HIDESYM
70039+ .extra1 = &two,
70040+#else
70041 .extra1 = &zero,
70042+#endif
70043 .extra2 = &two,
70044 },
70045-#endif
70046 {
70047 .procname = "ngroups_max",
70048 .data = &ngroups_max,
70049@@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
70050 .proc_handler = proc_dointvec_minmax,
70051 .extra1 = &zero,
70052 },
70053+ {
70054+ .procname = "heap_stack_gap",
70055+ .data = &sysctl_heap_stack_gap,
70056+ .maxlen = sizeof(sysctl_heap_stack_gap),
70057+ .mode = 0644,
70058+ .proc_handler = proc_doulongvec_minmax,
70059+ },
70060 #else
70061 {
70062 .procname = "nr_trim_pages",
70063@@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
70064 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
70065 {
70066 int mode;
70067+ int error;
70068+
70069+ if (table->parent != NULL && table->parent->procname != NULL &&
70070+ table->procname != NULL &&
70071+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
70072+ return -EACCES;
70073+ if (gr_handle_chroot_sysctl(op))
70074+ return -EACCES;
70075+ error = gr_handle_sysctl(table, op);
70076+ if (error)
70077+ return error;
70078
70079 if (root->permissions)
70080 mode = root->permissions(root, current->nsproxy, table);
70081@@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
70082 buffer, lenp, ppos);
70083 }
70084
70085+int proc_dostring_modpriv(struct ctl_table *table, int write,
70086+ void __user *buffer, size_t *lenp, loff_t *ppos)
70087+{
70088+ if (write && !capable(CAP_SYS_MODULE))
70089+ return -EPERM;
70090+
70091+ return _proc_do_string(table->data, table->maxlen, write,
70092+ buffer, lenp, ppos);
70093+}
70094+
70095 static size_t proc_skip_spaces(char **buf)
70096 {
70097 size_t ret;
70098@@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
70099 len = strlen(tmp);
70100 if (len > *size)
70101 len = *size;
70102+ if (len > sizeof(tmp))
70103+ len = sizeof(tmp);
70104 if (copy_to_user(*buf, tmp, len))
70105 return -EFAULT;
70106 *size -= len;
70107@@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
70108 *i = val;
70109 } else {
70110 val = convdiv * (*i) / convmul;
70111- if (!first)
70112+ if (!first) {
70113 err = proc_put_char(&buffer, &left, '\t');
70114+ if (err)
70115+ break;
70116+ }
70117 err = proc_put_long(&buffer, &left, val, false);
70118 if (err)
70119 break;
70120@@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
70121 return -ENOSYS;
70122 }
70123
70124+int proc_dostring_modpriv(struct ctl_table *table, int write,
70125+ void __user *buffer, size_t *lenp, loff_t *ppos)
70126+{
70127+ return -ENOSYS;
70128+}
70129+
70130 int proc_dointvec(struct ctl_table *table, int write,
70131 void __user *buffer, size_t *lenp, loff_t *ppos)
70132 {
70133@@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
70134 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70135 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70136 EXPORT_SYMBOL(proc_dostring);
70137+EXPORT_SYMBOL(proc_dostring_modpriv);
70138 EXPORT_SYMBOL(proc_doulongvec_minmax);
70139 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70140 EXPORT_SYMBOL(register_sysctl_table);
70141diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
70142index a650694..aaeeb20 100644
70143--- a/kernel/sysctl_binary.c
70144+++ b/kernel/sysctl_binary.c
70145@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
70146 int i;
70147
70148 set_fs(KERNEL_DS);
70149- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70150+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70151 set_fs(old_fs);
70152 if (result < 0)
70153 goto out_kfree;
70154@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
70155 }
70156
70157 set_fs(KERNEL_DS);
70158- result = vfs_write(file, buffer, str - buffer, &pos);
70159+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70160 set_fs(old_fs);
70161 if (result < 0)
70162 goto out_kfree;
70163@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
70164 int i;
70165
70166 set_fs(KERNEL_DS);
70167- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70168+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70169 set_fs(old_fs);
70170 if (result < 0)
70171 goto out_kfree;
70172@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
70173 }
70174
70175 set_fs(KERNEL_DS);
70176- result = vfs_write(file, buffer, str - buffer, &pos);
70177+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70178 set_fs(old_fs);
70179 if (result < 0)
70180 goto out_kfree;
70181@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
70182 int i;
70183
70184 set_fs(KERNEL_DS);
70185- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70186+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70187 set_fs(old_fs);
70188 if (result < 0)
70189 goto out;
70190@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70191 __le16 dnaddr;
70192
70193 set_fs(KERNEL_DS);
70194- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70195+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70196 set_fs(old_fs);
70197 if (result < 0)
70198 goto out;
70199@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70200 le16_to_cpu(dnaddr) & 0x3ff);
70201
70202 set_fs(KERNEL_DS);
70203- result = vfs_write(file, buf, len, &pos);
70204+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70205 set_fs(old_fs);
70206 if (result < 0)
70207 goto out;
70208diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
70209index 362da65..ab8ef8c 100644
70210--- a/kernel/sysctl_check.c
70211+++ b/kernel/sysctl_check.c
70212@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
70213 set_fail(&fail, table, "Directory with extra2");
70214 } else {
70215 if ((table->proc_handler == proc_dostring) ||
70216+ (table->proc_handler == proc_dostring_modpriv) ||
70217 (table->proc_handler == proc_dointvec) ||
70218 (table->proc_handler == proc_dointvec_minmax) ||
70219 (table->proc_handler == proc_dointvec_jiffies) ||
70220diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70221index e660464..c8b9e67 100644
70222--- a/kernel/taskstats.c
70223+++ b/kernel/taskstats.c
70224@@ -27,9 +27,12 @@
70225 #include <linux/cgroup.h>
70226 #include <linux/fs.h>
70227 #include <linux/file.h>
70228+#include <linux/grsecurity.h>
70229 #include <net/genetlink.h>
70230 #include <linux/atomic.h>
70231
70232+extern int gr_is_taskstats_denied(int pid);
70233+
70234 /*
70235 * Maximum length of a cpumask that can be specified in
70236 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70237@@ -556,6 +559,9 @@ err:
70238
70239 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70240 {
70241+ if (gr_is_taskstats_denied(current->pid))
70242+ return -EACCES;
70243+
70244 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70245 return cmd_attr_register_cpumask(info);
70246 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
70247diff --git a/kernel/time.c b/kernel/time.c
70248index 73e416d..cfc6f69 100644
70249--- a/kernel/time.c
70250+++ b/kernel/time.c
70251@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70252 return error;
70253
70254 if (tz) {
70255+ /* we log in do_settimeofday called below, so don't log twice
70256+ */
70257+ if (!tv)
70258+ gr_log_timechange();
70259+
70260 /* SMP safe, global irq locking makes it work. */
70261 sys_tz = *tz;
70262 update_vsyscall_tz();
70263diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
70264index 8a46f5d..bbe6f9c 100644
70265--- a/kernel/time/alarmtimer.c
70266+++ b/kernel/time/alarmtimer.c
70267@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
70268 struct platform_device *pdev;
70269 int error = 0;
70270 int i;
70271- struct k_clock alarm_clock = {
70272+ static struct k_clock alarm_clock = {
70273 .clock_getres = alarm_clock_getres,
70274 .clock_get = alarm_clock_get,
70275 .timer_create = alarm_timer_create,
70276diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
70277index fd4a7b1..fae5c2a 100644
70278--- a/kernel/time/tick-broadcast.c
70279+++ b/kernel/time/tick-broadcast.c
70280@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
70281 * then clear the broadcast bit.
70282 */
70283 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70284- int cpu = smp_processor_id();
70285+ cpu = smp_processor_id();
70286
70287 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70288 tick_broadcast_clear_oneshot(cpu);
70289diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
70290index 0c63581..e25dcb6 100644
70291--- a/kernel/time/timekeeping.c
70292+++ b/kernel/time/timekeeping.c
70293@@ -14,6 +14,7 @@
70294 #include <linux/init.h>
70295 #include <linux/mm.h>
70296 #include <linux/sched.h>
70297+#include <linux/grsecurity.h>
70298 #include <linux/syscore_ops.h>
70299 #include <linux/clocksource.h>
70300 #include <linux/jiffies.h>
70301@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
70302 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70303 return -EINVAL;
70304
70305+ gr_log_timechange();
70306+
70307 write_seqlock_irqsave(&xtime_lock, flags);
70308
70309 timekeeping_forward_now();
70310diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70311index 3258455..f35227d 100644
70312--- a/kernel/time/timer_list.c
70313+++ b/kernel/time/timer_list.c
70314@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
70315
70316 static void print_name_offset(struct seq_file *m, void *sym)
70317 {
70318+#ifdef CONFIG_GRKERNSEC_HIDESYM
70319+ SEQ_printf(m, "<%p>", NULL);
70320+#else
70321 char symname[KSYM_NAME_LEN];
70322
70323 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70324 SEQ_printf(m, "<%pK>", sym);
70325 else
70326 SEQ_printf(m, "%s", symname);
70327+#endif
70328 }
70329
70330 static void
70331@@ -112,7 +116,11 @@ next_one:
70332 static void
70333 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70334 {
70335+#ifdef CONFIG_GRKERNSEC_HIDESYM
70336+ SEQ_printf(m, " .base: %p\n", NULL);
70337+#else
70338 SEQ_printf(m, " .base: %pK\n", base);
70339+#endif
70340 SEQ_printf(m, " .index: %d\n",
70341 base->index);
70342 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70343@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
70344 {
70345 struct proc_dir_entry *pe;
70346
70347+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70348+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70349+#else
70350 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70351+#endif
70352 if (!pe)
70353 return -ENOMEM;
70354 return 0;
70355diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
70356index 0b537f2..9e71eca 100644
70357--- a/kernel/time/timer_stats.c
70358+++ b/kernel/time/timer_stats.c
70359@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70360 static unsigned long nr_entries;
70361 static struct entry entries[MAX_ENTRIES];
70362
70363-static atomic_t overflow_count;
70364+static atomic_unchecked_t overflow_count;
70365
70366 /*
70367 * The entries are in a hash-table, for fast lookup:
70368@@ -140,7 +140,7 @@ static void reset_entries(void)
70369 nr_entries = 0;
70370 memset(entries, 0, sizeof(entries));
70371 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70372- atomic_set(&overflow_count, 0);
70373+ atomic_set_unchecked(&overflow_count, 0);
70374 }
70375
70376 static struct entry *alloc_entry(void)
70377@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70378 if (likely(entry))
70379 entry->count++;
70380 else
70381- atomic_inc(&overflow_count);
70382+ atomic_inc_unchecked(&overflow_count);
70383
70384 out_unlock:
70385 raw_spin_unlock_irqrestore(lock, flags);
70386@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70387
70388 static void print_name_offset(struct seq_file *m, unsigned long addr)
70389 {
70390+#ifdef CONFIG_GRKERNSEC_HIDESYM
70391+ seq_printf(m, "<%p>", NULL);
70392+#else
70393 char symname[KSYM_NAME_LEN];
70394
70395 if (lookup_symbol_name(addr, symname) < 0)
70396 seq_printf(m, "<%p>", (void *)addr);
70397 else
70398 seq_printf(m, "%s", symname);
70399+#endif
70400 }
70401
70402 static int tstats_show(struct seq_file *m, void *v)
70403@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
70404
70405 seq_puts(m, "Timer Stats Version: v0.2\n");
70406 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70407- if (atomic_read(&overflow_count))
70408+ if (atomic_read_unchecked(&overflow_count))
70409 seq_printf(m, "Overflow: %d entries\n",
70410- atomic_read(&overflow_count));
70411+ atomic_read_unchecked(&overflow_count));
70412
70413 for (i = 0; i < nr_entries; i++) {
70414 entry = entries + i;
70415@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70416 {
70417 struct proc_dir_entry *pe;
70418
70419+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70420+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70421+#else
70422 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70423+#endif
70424 if (!pe)
70425 return -ENOMEM;
70426 return 0;
70427diff --git a/kernel/timer.c b/kernel/timer.c
70428index a297ffc..5e16b0b 100644
70429--- a/kernel/timer.c
70430+++ b/kernel/timer.c
70431@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
70432 /*
70433 * This function runs timers and the timer-tq in bottom half context.
70434 */
70435-static void run_timer_softirq(struct softirq_action *h)
70436+static void run_timer_softirq(void)
70437 {
70438 struct tvec_base *base = __this_cpu_read(tvec_bases);
70439
70440diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
70441index cdea7b5..9b820d4 100644
70442--- a/kernel/trace/blktrace.c
70443+++ b/kernel/trace/blktrace.c
70444@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
70445 struct blk_trace *bt = filp->private_data;
70446 char buf[16];
70447
70448- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70449+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70450
70451 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70452 }
70453@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
70454 return 1;
70455
70456 bt = buf->chan->private_data;
70457- atomic_inc(&bt->dropped);
70458+ atomic_inc_unchecked(&bt->dropped);
70459 return 0;
70460 }
70461
70462@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
70463
70464 bt->dir = dir;
70465 bt->dev = dev;
70466- atomic_set(&bt->dropped, 0);
70467+ atomic_set_unchecked(&bt->dropped, 0);
70468
70469 ret = -EIO;
70470 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70471diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
70472index 683d559..d70d914 100644
70473--- a/kernel/trace/ftrace.c
70474+++ b/kernel/trace/ftrace.c
70475@@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
70476 if (unlikely(ftrace_disabled))
70477 return 0;
70478
70479+ ret = ftrace_arch_code_modify_prepare();
70480+ FTRACE_WARN_ON(ret);
70481+ if (ret)
70482+ return 0;
70483+
70484 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70485+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70486 if (ret) {
70487 ftrace_bug(ret, ip);
70488- return 0;
70489 }
70490- return 1;
70491+ return ret ? 0 : 1;
70492 }
70493
70494 /*
70495@@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
70496
70497 int
70498 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70499- void *data)
70500+ void *data)
70501 {
70502 struct ftrace_func_probe *entry;
70503 struct ftrace_page *pg;
70504diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
70505index c4579f1..6a439da 100644
70506--- a/kernel/trace/trace.c
70507+++ b/kernel/trace/trace.c
70508@@ -4258,10 +4258,9 @@ static const struct file_operations tracing_dyn_info_fops = {
70509 };
70510 #endif
70511
70512-static struct dentry *d_tracer;
70513-
70514 struct dentry *tracing_init_dentry(void)
70515 {
70516+ static struct dentry *d_tracer;
70517 static int once;
70518
70519 if (d_tracer)
70520@@ -4281,10 +4280,9 @@ struct dentry *tracing_init_dentry(void)
70521 return d_tracer;
70522 }
70523
70524-static struct dentry *d_percpu;
70525-
70526 struct dentry *tracing_dentry_percpu(void)
70527 {
70528+ static struct dentry *d_percpu;
70529 static int once;
70530 struct dentry *d_tracer;
70531
70532diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
70533index c212a7f..7b02394 100644
70534--- a/kernel/trace/trace_events.c
70535+++ b/kernel/trace/trace_events.c
70536@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
70537 struct ftrace_module_file_ops {
70538 struct list_head list;
70539 struct module *mod;
70540- struct file_operations id;
70541- struct file_operations enable;
70542- struct file_operations format;
70543- struct file_operations filter;
70544 };
70545
70546 static struct ftrace_module_file_ops *
70547@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
70548
70549 file_ops->mod = mod;
70550
70551- file_ops->id = ftrace_event_id_fops;
70552- file_ops->id.owner = mod;
70553-
70554- file_ops->enable = ftrace_enable_fops;
70555- file_ops->enable.owner = mod;
70556-
70557- file_ops->filter = ftrace_event_filter_fops;
70558- file_ops->filter.owner = mod;
70559-
70560- file_ops->format = ftrace_event_format_fops;
70561- file_ops->format.owner = mod;
70562+ pax_open_kernel();
70563+ *(void **)&mod->trace_id.owner = mod;
70564+ *(void **)&mod->trace_enable.owner = mod;
70565+ *(void **)&mod->trace_filter.owner = mod;
70566+ *(void **)&mod->trace_format.owner = mod;
70567+ pax_close_kernel();
70568
70569 list_add(&file_ops->list, &ftrace_module_file_list);
70570
70571@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
70572
70573 for_each_event(call, start, end) {
70574 __trace_add_event_call(*call, mod,
70575- &file_ops->id, &file_ops->enable,
70576- &file_ops->filter, &file_ops->format);
70577+ &mod->trace_id, &mod->trace_enable,
70578+ &mod->trace_filter, &mod->trace_format);
70579 }
70580 }
70581
70582diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
70583index 00d527c..7c5b1a3 100644
70584--- a/kernel/trace/trace_kprobe.c
70585+++ b/kernel/trace/trace_kprobe.c
70586@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70587 long ret;
70588 int maxlen = get_rloc_len(*(u32 *)dest);
70589 u8 *dst = get_rloc_data(dest);
70590- u8 *src = addr;
70591+ const u8 __user *src = (const u8 __force_user *)addr;
70592 mm_segment_t old_fs = get_fs();
70593 if (!maxlen)
70594 return;
70595@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70596 pagefault_disable();
70597 do
70598 ret = __copy_from_user_inatomic(dst++, src++, 1);
70599- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
70600+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
70601 dst[-1] = '\0';
70602 pagefault_enable();
70603 set_fs(old_fs);
70604@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70605 ((u8 *)get_rloc_data(dest))[0] = '\0';
70606 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
70607 } else
70608- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
70609+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
70610 get_rloc_offs(*(u32 *)dest));
70611 }
70612 /* Return the length of string -- including null terminal byte */
70613@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
70614 set_fs(KERNEL_DS);
70615 pagefault_disable();
70616 do {
70617- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
70618+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
70619 len++;
70620 } while (c && ret == 0 && len < MAX_STRING_SIZE);
70621 pagefault_enable();
70622diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
70623index fd3c8aa..5f324a6 100644
70624--- a/kernel/trace/trace_mmiotrace.c
70625+++ b/kernel/trace/trace_mmiotrace.c
70626@@ -24,7 +24,7 @@ struct header_iter {
70627 static struct trace_array *mmio_trace_array;
70628 static bool overrun_detected;
70629 static unsigned long prev_overruns;
70630-static atomic_t dropped_count;
70631+static atomic_unchecked_t dropped_count;
70632
70633 static void mmio_reset_data(struct trace_array *tr)
70634 {
70635@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
70636
70637 static unsigned long count_overruns(struct trace_iterator *iter)
70638 {
70639- unsigned long cnt = atomic_xchg(&dropped_count, 0);
70640+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70641 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70642
70643 if (over > prev_overruns)
70644@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
70645 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70646 sizeof(*entry), 0, pc);
70647 if (!event) {
70648- atomic_inc(&dropped_count);
70649+ atomic_inc_unchecked(&dropped_count);
70650 return;
70651 }
70652 entry = ring_buffer_event_data(event);
70653@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
70654 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70655 sizeof(*entry), 0, pc);
70656 if (!event) {
70657- atomic_inc(&dropped_count);
70658+ atomic_inc_unchecked(&dropped_count);
70659 return;
70660 }
70661 entry = ring_buffer_event_data(event);
70662diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
70663index d9c07f0..c1eeceb 100644
70664--- a/kernel/trace/trace_output.c
70665+++ b/kernel/trace/trace_output.c
70666@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
70667
70668 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70669 if (!IS_ERR(p)) {
70670- p = mangle_path(s->buffer + s->len, p, "\n");
70671+ p = mangle_path(s->buffer + s->len, p, "\n\\");
70672 if (p) {
70673 s->len = p - s->buffer;
70674 return 1;
70675diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
70676index d4545f4..a9010a1 100644
70677--- a/kernel/trace/trace_stack.c
70678+++ b/kernel/trace/trace_stack.c
70679@@ -53,7 +53,7 @@ static inline void check_stack(void)
70680 return;
70681
70682 /* we do not handle interrupt stacks yet */
70683- if (!object_is_on_stack(&this_size))
70684+ if (!object_starts_on_stack(&this_size))
70685 return;
70686
70687 local_irq_save(flags);
70688diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
70689index 209b379..7f76423 100644
70690--- a/kernel/trace/trace_workqueue.c
70691+++ b/kernel/trace/trace_workqueue.c
70692@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
70693 int cpu;
70694 pid_t pid;
70695 /* Can be inserted from interrupt or user context, need to be atomic */
70696- atomic_t inserted;
70697+ atomic_unchecked_t inserted;
70698 /*
70699 * Don't need to be atomic, works are serialized in a single workqueue thread
70700 * on a single CPU.
70701@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
70702 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
70703 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
70704 if (node->pid == wq_thread->pid) {
70705- atomic_inc(&node->inserted);
70706+ atomic_inc_unchecked(&node->inserted);
70707 goto found;
70708 }
70709 }
70710@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
70711 tsk = get_pid_task(pid, PIDTYPE_PID);
70712 if (tsk) {
70713 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
70714- atomic_read(&cws->inserted), cws->executed,
70715+ atomic_read_unchecked(&cws->inserted), cws->executed,
70716 tsk->comm);
70717 put_task_struct(tsk);
70718 }
70719diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
70720index 8745ac7..d144e37 100644
70721--- a/lib/Kconfig.debug
70722+++ b/lib/Kconfig.debug
70723@@ -1103,6 +1103,7 @@ config LATENCYTOP
70724 depends on DEBUG_KERNEL
70725 depends on STACKTRACE_SUPPORT
70726 depends on PROC_FS
70727+ depends on !GRKERNSEC_HIDESYM
70728 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
70729 select KALLSYMS
70730 select KALLSYMS_ALL
70731diff --git a/lib/bitmap.c b/lib/bitmap.c
70732index 0d4a127..33a06c7 100644
70733--- a/lib/bitmap.c
70734+++ b/lib/bitmap.c
70735@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
70736 {
70737 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70738 u32 chunk;
70739- const char __user __force *ubuf = (const char __user __force *)buf;
70740+ const char __user *ubuf = (const char __force_user *)buf;
70741
70742 bitmap_zero(maskp, nmaskbits);
70743
70744@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
70745 {
70746 if (!access_ok(VERIFY_READ, ubuf, ulen))
70747 return -EFAULT;
70748- return __bitmap_parse((const char __force *)ubuf,
70749+ return __bitmap_parse((const char __force_kernel *)ubuf,
70750 ulen, 1, maskp, nmaskbits);
70751
70752 }
70753@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
70754 {
70755 unsigned a, b;
70756 int c, old_c, totaldigits;
70757- const char __user __force *ubuf = (const char __user __force *)buf;
70758+ const char __user *ubuf = (const char __force_user *)buf;
70759 int exp_digit, in_range;
70760
70761 totaldigits = c = 0;
70762@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
70763 {
70764 if (!access_ok(VERIFY_READ, ubuf, ulen))
70765 return -EFAULT;
70766- return __bitmap_parselist((const char __force *)ubuf,
70767+ return __bitmap_parselist((const char __force_kernel *)ubuf,
70768 ulen, 1, maskp, nmaskbits);
70769 }
70770 EXPORT_SYMBOL(bitmap_parselist_user);
70771diff --git a/lib/bug.c b/lib/bug.c
70772index a28c141..2bd3d95 100644
70773--- a/lib/bug.c
70774+++ b/lib/bug.c
70775@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
70776 return BUG_TRAP_TYPE_NONE;
70777
70778 bug = find_bug(bugaddr);
70779+ if (!bug)
70780+ return BUG_TRAP_TYPE_NONE;
70781
70782 file = NULL;
70783 line = 0;
70784diff --git a/lib/debugobjects.c b/lib/debugobjects.c
70785index 0ab9ae8..f01ceca 100644
70786--- a/lib/debugobjects.c
70787+++ b/lib/debugobjects.c
70788@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
70789 if (limit > 4)
70790 return;
70791
70792- is_on_stack = object_is_on_stack(addr);
70793+ is_on_stack = object_starts_on_stack(addr);
70794 if (is_on_stack == onstack)
70795 return;
70796
70797diff --git a/lib/devres.c b/lib/devres.c
70798index 9676617..5149e15 100644
70799--- a/lib/devres.c
70800+++ b/lib/devres.c
70801@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
70802 void devm_iounmap(struct device *dev, void __iomem *addr)
70803 {
70804 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70805- (void *)addr));
70806+ (void __force *)addr));
70807 iounmap(addr);
70808 }
70809 EXPORT_SYMBOL(devm_iounmap);
70810@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
70811 {
70812 ioport_unmap(addr);
70813 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70814- devm_ioport_map_match, (void *)addr));
70815+ devm_ioport_map_match, (void __force *)addr));
70816 }
70817 EXPORT_SYMBOL(devm_ioport_unmap);
70818
70819diff --git a/lib/dma-debug.c b/lib/dma-debug.c
70820index fea790a..ebb0e82 100644
70821--- a/lib/dma-debug.c
70822+++ b/lib/dma-debug.c
70823@@ -925,7 +925,7 @@ out:
70824
70825 static void check_for_stack(struct device *dev, void *addr)
70826 {
70827- if (object_is_on_stack(addr))
70828+ if (object_starts_on_stack(addr))
70829 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70830 "stack [addr=%p]\n", addr);
70831 }
70832diff --git a/lib/extable.c b/lib/extable.c
70833index 4cac81e..63e9b8f 100644
70834--- a/lib/extable.c
70835+++ b/lib/extable.c
70836@@ -13,6 +13,7 @@
70837 #include <linux/init.h>
70838 #include <linux/sort.h>
70839 #include <asm/uaccess.h>
70840+#include <asm/pgtable.h>
70841
70842 #ifndef ARCH_HAS_SORT_EXTABLE
70843 /*
70844@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
70845 void sort_extable(struct exception_table_entry *start,
70846 struct exception_table_entry *finish)
70847 {
70848+ pax_open_kernel();
70849 sort(start, finish - start, sizeof(struct exception_table_entry),
70850 cmp_ex, NULL);
70851+ pax_close_kernel();
70852 }
70853
70854 #ifdef CONFIG_MODULES
70855diff --git a/lib/inflate.c b/lib/inflate.c
70856index 013a761..c28f3fc 100644
70857--- a/lib/inflate.c
70858+++ b/lib/inflate.c
70859@@ -269,7 +269,7 @@ static void free(void *where)
70860 malloc_ptr = free_mem_ptr;
70861 }
70862 #else
70863-#define malloc(a) kmalloc(a, GFP_KERNEL)
70864+#define malloc(a) kmalloc((a), GFP_KERNEL)
70865 #define free(a) kfree(a)
70866 #endif
70867
70868diff --git a/lib/ioremap.c b/lib/ioremap.c
70869index da4e2ad..6373b5f 100644
70870--- a/lib/ioremap.c
70871+++ b/lib/ioremap.c
70872@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
70873 unsigned long next;
70874
70875 phys_addr -= addr;
70876- pmd = pmd_alloc(&init_mm, pud, addr);
70877+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
70878 if (!pmd)
70879 return -ENOMEM;
70880 do {
70881@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
70882 unsigned long next;
70883
70884 phys_addr -= addr;
70885- pud = pud_alloc(&init_mm, pgd, addr);
70886+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
70887 if (!pud)
70888 return -ENOMEM;
70889 do {
70890diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70891index bd2bea9..6b3c95e 100644
70892--- a/lib/is_single_threaded.c
70893+++ b/lib/is_single_threaded.c
70894@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70895 struct task_struct *p, *t;
70896 bool ret;
70897
70898+ if (!mm)
70899+ return true;
70900+
70901 if (atomic_read(&task->signal->live) != 1)
70902 return false;
70903
70904diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70905index dc63d08..95ae14a 100644
70906--- a/lib/radix-tree.c
70907+++ b/lib/radix-tree.c
70908@@ -78,7 +78,7 @@ struct radix_tree_preload {
70909 int nr;
70910 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70911 };
70912-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70913+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70914
70915 static inline void *ptr_to_indirect(void *ptr)
70916 {
70917diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70918index 38e612e..4fb99a8 100644
70919--- a/lib/vsprintf.c
70920+++ b/lib/vsprintf.c
70921@@ -16,6 +16,9 @@
70922 * - scnprintf and vscnprintf
70923 */
70924
70925+#ifdef CONFIG_GRKERNSEC_HIDESYM
70926+#define __INCLUDED_BY_HIDESYM 1
70927+#endif
70928 #include <stdarg.h>
70929 #include <linux/module.h>
70930 #include <linux/types.h>
70931@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70932 char sym[KSYM_SYMBOL_LEN];
70933 if (ext == 'B')
70934 sprint_backtrace(sym, value);
70935- else if (ext != 'f' && ext != 's')
70936+ else if (ext != 'f' && ext != 's' && ext != 'a')
70937 sprint_symbol(sym, value);
70938 else
70939 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70940@@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70941 return number(buf, end, *(const netdev_features_t *)addr, spec);
70942 }
70943
70944+#ifdef CONFIG_GRKERNSEC_HIDESYM
70945+int kptr_restrict __read_mostly = 2;
70946+#else
70947 int kptr_restrict __read_mostly;
70948+#endif
70949
70950 /*
70951 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70952@@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
70953 * - 'S' For symbolic direct pointers with offset
70954 * - 's' For symbolic direct pointers without offset
70955 * - 'B' For backtraced symbolic direct pointers with offset
70956+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70957+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70958 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70959 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70960 * - 'M' For a 6-byte MAC address, it prints the address in the
70961@@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70962 {
70963 if (!ptr && *fmt != 'K') {
70964 /*
70965- * Print (null) with the same width as a pointer so it makes
70966+ * Print (nil) with the same width as a pointer so it makes
70967 * tabular output look nice.
70968 */
70969 if (spec.field_width == -1)
70970 spec.field_width = 2 * sizeof(void *);
70971- return string(buf, end, "(null)", spec);
70972+ return string(buf, end, "(nil)", spec);
70973 }
70974
70975 switch (*fmt) {
70976@@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70977 /* Fallthrough */
70978 case 'S':
70979 case 's':
70980+#ifdef CONFIG_GRKERNSEC_HIDESYM
70981+ break;
70982+#else
70983+ return symbol_string(buf, end, ptr, spec, *fmt);
70984+#endif
70985+ case 'A':
70986+ case 'a':
70987 case 'B':
70988 return symbol_string(buf, end, ptr, spec, *fmt);
70989 case 'R':
70990@@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70991 typeof(type) value; \
70992 if (sizeof(type) == 8) { \
70993 args = PTR_ALIGN(args, sizeof(u32)); \
70994- *(u32 *)&value = *(u32 *)args; \
70995- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70996+ *(u32 *)&value = *(const u32 *)args; \
70997+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70998 } else { \
70999 args = PTR_ALIGN(args, sizeof(type)); \
71000- value = *(typeof(type) *)args; \
71001+ value = *(const typeof(type) *)args; \
71002 } \
71003 args += sizeof(type); \
71004 value; \
71005@@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71006 case FORMAT_TYPE_STR: {
71007 const char *str_arg = args;
71008 args += strlen(str_arg) + 1;
71009- str = string(str, end, (char *)str_arg, spec);
71010+ str = string(str, end, str_arg, spec);
71011 break;
71012 }
71013
71014diff --git a/localversion-grsec b/localversion-grsec
71015new file mode 100644
71016index 0000000..7cd6065
71017--- /dev/null
71018+++ b/localversion-grsec
71019@@ -0,0 +1 @@
71020+-grsec
71021diff --git a/mm/Kconfig b/mm/Kconfig
71022index e338407..49b5b7a 100644
71023--- a/mm/Kconfig
71024+++ b/mm/Kconfig
71025@@ -247,10 +247,10 @@ config KSM
71026 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
71027
71028 config DEFAULT_MMAP_MIN_ADDR
71029- int "Low address space to protect from user allocation"
71030+ int "Low address space to protect from user allocation"
71031 depends on MMU
71032- default 4096
71033- help
71034+ default 65536
71035+ help
71036 This is the portion of low virtual memory which should be protected
71037 from userspace allocation. Keeping a user from writing to low pages
71038 can help reduce the impact of kernel NULL pointer bugs.
71039diff --git a/mm/filemap.c b/mm/filemap.c
71040index b662757..3081ddd 100644
71041--- a/mm/filemap.c
71042+++ b/mm/filemap.c
71043@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
71044 struct address_space *mapping = file->f_mapping;
71045
71046 if (!mapping->a_ops->readpage)
71047- return -ENOEXEC;
71048+ return -ENODEV;
71049 file_accessed(file);
71050 vma->vm_ops = &generic_file_vm_ops;
71051 vma->vm_flags |= VM_CAN_NONLINEAR;
71052@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
71053 *pos = i_size_read(inode);
71054
71055 if (limit != RLIM_INFINITY) {
71056+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71057 if (*pos >= limit) {
71058 send_sig(SIGXFSZ, current, 0);
71059 return -EFBIG;
71060diff --git a/mm/fremap.c b/mm/fremap.c
71061index 9ed4fd4..c42648d 100644
71062--- a/mm/fremap.c
71063+++ b/mm/fremap.c
71064@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
71065 retry:
71066 vma = find_vma(mm, start);
71067
71068+#ifdef CONFIG_PAX_SEGMEXEC
71069+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71070+ goto out;
71071+#endif
71072+
71073 /*
71074 * Make sure the vma is shared, that it supports prefaulting,
71075 * and that the remapped range is valid and fully within
71076diff --git a/mm/highmem.c b/mm/highmem.c
71077index 57d82c6..e9e0552 100644
71078--- a/mm/highmem.c
71079+++ b/mm/highmem.c
71080@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
71081 * So no dangers, even with speculative execution.
71082 */
71083 page = pte_page(pkmap_page_table[i]);
71084+ pax_open_kernel();
71085 pte_clear(&init_mm, (unsigned long)page_address(page),
71086 &pkmap_page_table[i]);
71087-
71088+ pax_close_kernel();
71089 set_page_address(page, NULL);
71090 need_flush = 1;
71091 }
71092@@ -186,9 +187,11 @@ start:
71093 }
71094 }
71095 vaddr = PKMAP_ADDR(last_pkmap_nr);
71096+
71097+ pax_open_kernel();
71098 set_pte_at(&init_mm, vaddr,
71099 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
71100-
71101+ pax_close_kernel();
71102 pkmap_count[last_pkmap_nr] = 1;
71103 set_page_address(page, (void *)vaddr);
71104
71105diff --git a/mm/huge_memory.c b/mm/huge_memory.c
71106index 8f7fc39..69bf1e9 100644
71107--- a/mm/huge_memory.c
71108+++ b/mm/huge_memory.c
71109@@ -733,7 +733,7 @@ out:
71110 * run pte_offset_map on the pmd, if an huge pmd could
71111 * materialize from under us from a different thread.
71112 */
71113- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
71114+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71115 return VM_FAULT_OOM;
71116 /* if an huge pmd materialized from under us just retry later */
71117 if (unlikely(pmd_trans_huge(*pmd)))
71118diff --git a/mm/hugetlb.c b/mm/hugetlb.c
71119index 24b1787..e0fbc01 100644
71120--- a/mm/hugetlb.c
71121+++ b/mm/hugetlb.c
71122@@ -2425,6 +2425,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
71123 return 1;
71124 }
71125
71126+#ifdef CONFIG_PAX_SEGMEXEC
71127+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71128+{
71129+ struct mm_struct *mm = vma->vm_mm;
71130+ struct vm_area_struct *vma_m;
71131+ unsigned long address_m;
71132+ pte_t *ptep_m;
71133+
71134+ vma_m = pax_find_mirror_vma(vma);
71135+ if (!vma_m)
71136+ return;
71137+
71138+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71139+ address_m = address + SEGMEXEC_TASK_SIZE;
71140+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71141+ get_page(page_m);
71142+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
71143+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71144+}
71145+#endif
71146+
71147 /*
71148 * Hugetlb_cow() should be called with page lock of the original hugepage held.
71149 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
71150@@ -2538,6 +2559,11 @@ retry_avoidcopy:
71151 make_huge_pte(vma, new_page, 1));
71152 page_remove_rmap(old_page);
71153 hugepage_add_new_anon_rmap(new_page, vma, address);
71154+
71155+#ifdef CONFIG_PAX_SEGMEXEC
71156+ pax_mirror_huge_pte(vma, address, new_page);
71157+#endif
71158+
71159 /* Make the old page be freed below */
71160 new_page = old_page;
71161 mmu_notifier_invalidate_range_end(mm,
71162@@ -2692,6 +2718,10 @@ retry:
71163 && (vma->vm_flags & VM_SHARED)));
71164 set_huge_pte_at(mm, address, ptep, new_pte);
71165
71166+#ifdef CONFIG_PAX_SEGMEXEC
71167+ pax_mirror_huge_pte(vma, address, page);
71168+#endif
71169+
71170 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71171 /* Optimization, do the COW without a second fault */
71172 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
71173@@ -2721,6 +2751,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71174 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71175 struct hstate *h = hstate_vma(vma);
71176
71177+#ifdef CONFIG_PAX_SEGMEXEC
71178+ struct vm_area_struct *vma_m;
71179+#endif
71180+
71181 address &= huge_page_mask(h);
71182
71183 ptep = huge_pte_offset(mm, address);
71184@@ -2734,6 +2768,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71185 VM_FAULT_SET_HINDEX(h - hstates);
71186 }
71187
71188+#ifdef CONFIG_PAX_SEGMEXEC
71189+ vma_m = pax_find_mirror_vma(vma);
71190+ if (vma_m) {
71191+ unsigned long address_m;
71192+
71193+ if (vma->vm_start > vma_m->vm_start) {
71194+ address_m = address;
71195+ address -= SEGMEXEC_TASK_SIZE;
71196+ vma = vma_m;
71197+ h = hstate_vma(vma);
71198+ } else
71199+ address_m = address + SEGMEXEC_TASK_SIZE;
71200+
71201+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71202+ return VM_FAULT_OOM;
71203+ address_m &= HPAGE_MASK;
71204+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71205+ }
71206+#endif
71207+
71208 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71209 if (!ptep)
71210 return VM_FAULT_OOM;
71211diff --git a/mm/internal.h b/mm/internal.h
71212index 2189af4..f2ca332 100644
71213--- a/mm/internal.h
71214+++ b/mm/internal.h
71215@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
71216 * in mm/page_alloc.c
71217 */
71218 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71219+extern void free_compound_page(struct page *page);
71220 extern void prep_compound_page(struct page *page, unsigned long order);
71221 #ifdef CONFIG_MEMORY_FAILURE
71222 extern bool is_free_buddy_page(struct page *page);
71223diff --git a/mm/kmemleak.c b/mm/kmemleak.c
71224index 45eb621..6ccd8ea 100644
71225--- a/mm/kmemleak.c
71226+++ b/mm/kmemleak.c
71227@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
71228
71229 for (i = 0; i < object->trace_len; i++) {
71230 void *ptr = (void *)object->trace[i];
71231- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71232+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71233 }
71234 }
71235
71236diff --git a/mm/maccess.c b/mm/maccess.c
71237index d53adf9..03a24bf 100644
71238--- a/mm/maccess.c
71239+++ b/mm/maccess.c
71240@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
71241 set_fs(KERNEL_DS);
71242 pagefault_disable();
71243 ret = __copy_from_user_inatomic(dst,
71244- (__force const void __user *)src, size);
71245+ (const void __force_user *)src, size);
71246 pagefault_enable();
71247 set_fs(old_fs);
71248
71249@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
71250
71251 set_fs(KERNEL_DS);
71252 pagefault_disable();
71253- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71254+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71255 pagefault_enable();
71256 set_fs(old_fs);
71257
71258diff --git a/mm/madvise.c b/mm/madvise.c
71259index 74bf193..feb6fd3 100644
71260--- a/mm/madvise.c
71261+++ b/mm/madvise.c
71262@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
71263 pgoff_t pgoff;
71264 unsigned long new_flags = vma->vm_flags;
71265
71266+#ifdef CONFIG_PAX_SEGMEXEC
71267+ struct vm_area_struct *vma_m;
71268+#endif
71269+
71270 switch (behavior) {
71271 case MADV_NORMAL:
71272 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71273@@ -110,6 +114,13 @@ success:
71274 /*
71275 * vm_flags is protected by the mmap_sem held in write mode.
71276 */
71277+
71278+#ifdef CONFIG_PAX_SEGMEXEC
71279+ vma_m = pax_find_mirror_vma(vma);
71280+ if (vma_m)
71281+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71282+#endif
71283+
71284 vma->vm_flags = new_flags;
71285
71286 out:
71287@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71288 struct vm_area_struct ** prev,
71289 unsigned long start, unsigned long end)
71290 {
71291+
71292+#ifdef CONFIG_PAX_SEGMEXEC
71293+ struct vm_area_struct *vma_m;
71294+#endif
71295+
71296 *prev = vma;
71297 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71298 return -EINVAL;
71299@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71300 zap_page_range(vma, start, end - start, &details);
71301 } else
71302 zap_page_range(vma, start, end - start, NULL);
71303+
71304+#ifdef CONFIG_PAX_SEGMEXEC
71305+ vma_m = pax_find_mirror_vma(vma);
71306+ if (vma_m) {
71307+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71308+ struct zap_details details = {
71309+ .nonlinear_vma = vma_m,
71310+ .last_index = ULONG_MAX,
71311+ };
71312+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71313+ } else
71314+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71315+ }
71316+#endif
71317+
71318 return 0;
71319 }
71320
71321@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
71322 if (end < start)
71323 goto out;
71324
71325+#ifdef CONFIG_PAX_SEGMEXEC
71326+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71327+ if (end > SEGMEXEC_TASK_SIZE)
71328+ goto out;
71329+ } else
71330+#endif
71331+
71332+ if (end > TASK_SIZE)
71333+ goto out;
71334+
71335 error = 0;
71336 if (end == start)
71337 goto out;
71338diff --git a/mm/memory-failure.c b/mm/memory-failure.c
71339index 56080ea..115071e 100644
71340--- a/mm/memory-failure.c
71341+++ b/mm/memory-failure.c
71342@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
71343
71344 int sysctl_memory_failure_recovery __read_mostly = 1;
71345
71346-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71347+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71348
71349 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71350
71351@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
71352 si.si_signo = SIGBUS;
71353 si.si_errno = 0;
71354 si.si_code = BUS_MCEERR_AO;
71355- si.si_addr = (void *)addr;
71356+ si.si_addr = (void __user *)addr;
71357 #ifdef __ARCH_SI_TRAPNO
71358 si.si_trapno = trapno;
71359 #endif
71360@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71361 }
71362
71363 nr_pages = 1 << compound_trans_order(hpage);
71364- atomic_long_add(nr_pages, &mce_bad_pages);
71365+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71366
71367 /*
71368 * We need/can do nothing about count=0 pages.
71369@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71370 if (!PageHWPoison(hpage)
71371 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71372 || (p != hpage && TestSetPageHWPoison(hpage))) {
71373- atomic_long_sub(nr_pages, &mce_bad_pages);
71374+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71375 return 0;
71376 }
71377 set_page_hwpoison_huge_page(hpage);
71378@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71379 }
71380 if (hwpoison_filter(p)) {
71381 if (TestClearPageHWPoison(p))
71382- atomic_long_sub(nr_pages, &mce_bad_pages);
71383+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71384 unlock_page(hpage);
71385 put_page(hpage);
71386 return 0;
71387@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
71388 return 0;
71389 }
71390 if (TestClearPageHWPoison(p))
71391- atomic_long_sub(nr_pages, &mce_bad_pages);
71392+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71393 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71394 return 0;
71395 }
71396@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
71397 */
71398 if (TestClearPageHWPoison(page)) {
71399 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71400- atomic_long_sub(nr_pages, &mce_bad_pages);
71401+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71402 freeit = 1;
71403 if (PageHuge(page))
71404 clear_page_hwpoison_huge_page(page);
71405@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71406 }
71407 done:
71408 if (!PageHWPoison(hpage))
71409- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71410+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71411 set_page_hwpoison_huge_page(hpage);
71412 dequeue_hwpoisoned_huge_page(hpage);
71413 /* keep elevated page count for bad page */
71414@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
71415 return ret;
71416
71417 done:
71418- atomic_long_add(1, &mce_bad_pages);
71419+ atomic_long_add_unchecked(1, &mce_bad_pages);
71420 SetPageHWPoison(page);
71421 /* keep elevated page count for bad page */
71422 return ret;
71423diff --git a/mm/memory.c b/mm/memory.c
71424index 10b4dda..06857f3 100644
71425--- a/mm/memory.c
71426+++ b/mm/memory.c
71427@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71428 return;
71429
71430 pmd = pmd_offset(pud, start);
71431+
71432+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71433 pud_clear(pud);
71434 pmd_free_tlb(tlb, pmd, start);
71435+#endif
71436+
71437 }
71438
71439 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71440@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71441 if (end - 1 > ceiling - 1)
71442 return;
71443
71444+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71445 pud = pud_offset(pgd, start);
71446 pgd_clear(pgd);
71447 pud_free_tlb(tlb, pud, start);
71448+#endif
71449+
71450 }
71451
71452 /*
71453@@ -1593,12 +1600,6 @@ no_page_table:
71454 return page;
71455 }
71456
71457-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71458-{
71459- return stack_guard_page_start(vma, addr) ||
71460- stack_guard_page_end(vma, addr+PAGE_SIZE);
71461-}
71462-
71463 /**
71464 * __get_user_pages() - pin user pages in memory
71465 * @tsk: task_struct of target task
71466@@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71467 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71468 i = 0;
71469
71470- do {
71471+ while (nr_pages) {
71472 struct vm_area_struct *vma;
71473
71474- vma = find_extend_vma(mm, start);
71475+ vma = find_vma(mm, start);
71476 if (!vma && in_gate_area(mm, start)) {
71477 unsigned long pg = start & PAGE_MASK;
71478 pgd_t *pgd;
71479@@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71480 goto next_page;
71481 }
71482
71483- if (!vma ||
71484+ if (!vma || start < vma->vm_start ||
71485 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71486 !(vm_flags & vma->vm_flags))
71487 return i ? : -EFAULT;
71488@@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71489 int ret;
71490 unsigned int fault_flags = 0;
71491
71492- /* For mlock, just skip the stack guard page. */
71493- if (foll_flags & FOLL_MLOCK) {
71494- if (stack_guard_page(vma, start))
71495- goto next_page;
71496- }
71497 if (foll_flags & FOLL_WRITE)
71498 fault_flags |= FAULT_FLAG_WRITE;
71499 if (nonblocking)
71500@@ -1827,7 +1823,7 @@ next_page:
71501 start += PAGE_SIZE;
71502 nr_pages--;
71503 } while (nr_pages && start < vma->vm_end);
71504- } while (nr_pages);
71505+ }
71506 return i;
71507 }
71508 EXPORT_SYMBOL(__get_user_pages);
71509@@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
71510 page_add_file_rmap(page);
71511 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71512
71513+#ifdef CONFIG_PAX_SEGMEXEC
71514+ pax_mirror_file_pte(vma, addr, page, ptl);
71515+#endif
71516+
71517 retval = 0;
71518 pte_unmap_unlock(pte, ptl);
71519 return retval;
71520@@ -2068,10 +2068,22 @@ out:
71521 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71522 struct page *page)
71523 {
71524+
71525+#ifdef CONFIG_PAX_SEGMEXEC
71526+ struct vm_area_struct *vma_m;
71527+#endif
71528+
71529 if (addr < vma->vm_start || addr >= vma->vm_end)
71530 return -EFAULT;
71531 if (!page_count(page))
71532 return -EINVAL;
71533+
71534+#ifdef CONFIG_PAX_SEGMEXEC
71535+ vma_m = pax_find_mirror_vma(vma);
71536+ if (vma_m)
71537+ vma_m->vm_flags |= VM_INSERTPAGE;
71538+#endif
71539+
71540 vma->vm_flags |= VM_INSERTPAGE;
71541 return insert_page(vma, addr, page, vma->vm_page_prot);
71542 }
71543@@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
71544 unsigned long pfn)
71545 {
71546 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71547+ BUG_ON(vma->vm_mirror);
71548
71549 if (addr < vma->vm_start || addr >= vma->vm_end)
71550 return -EFAULT;
71551@@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
71552
71553 BUG_ON(pud_huge(*pud));
71554
71555- pmd = pmd_alloc(mm, pud, addr);
71556+ pmd = (mm == &init_mm) ?
71557+ pmd_alloc_kernel(mm, pud, addr) :
71558+ pmd_alloc(mm, pud, addr);
71559 if (!pmd)
71560 return -ENOMEM;
71561 do {
71562@@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
71563 unsigned long next;
71564 int err;
71565
71566- pud = pud_alloc(mm, pgd, addr);
71567+ pud = (mm == &init_mm) ?
71568+ pud_alloc_kernel(mm, pgd, addr) :
71569+ pud_alloc(mm, pgd, addr);
71570 if (!pud)
71571 return -ENOMEM;
71572 do {
71573@@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
71574 copy_user_highpage(dst, src, va, vma);
71575 }
71576
71577+#ifdef CONFIG_PAX_SEGMEXEC
71578+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71579+{
71580+ struct mm_struct *mm = vma->vm_mm;
71581+ spinlock_t *ptl;
71582+ pte_t *pte, entry;
71583+
71584+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71585+ entry = *pte;
71586+ if (!pte_present(entry)) {
71587+ if (!pte_none(entry)) {
71588+ BUG_ON(pte_file(entry));
71589+ free_swap_and_cache(pte_to_swp_entry(entry));
71590+ pte_clear_not_present_full(mm, address, pte, 0);
71591+ }
71592+ } else {
71593+ struct page *page;
71594+
71595+ flush_cache_page(vma, address, pte_pfn(entry));
71596+ entry = ptep_clear_flush(vma, address, pte);
71597+ BUG_ON(pte_dirty(entry));
71598+ page = vm_normal_page(vma, address, entry);
71599+ if (page) {
71600+ update_hiwater_rss(mm);
71601+ if (PageAnon(page))
71602+ dec_mm_counter_fast(mm, MM_ANONPAGES);
71603+ else
71604+ dec_mm_counter_fast(mm, MM_FILEPAGES);
71605+ page_remove_rmap(page);
71606+ page_cache_release(page);
71607+ }
71608+ }
71609+ pte_unmap_unlock(pte, ptl);
71610+}
71611+
71612+/* PaX: if vma is mirrored, synchronize the mirror's PTE
71613+ *
71614+ * the ptl of the lower mapped page is held on entry and is not released on exit
71615+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71616+ */
71617+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71618+{
71619+ struct mm_struct *mm = vma->vm_mm;
71620+ unsigned long address_m;
71621+ spinlock_t *ptl_m;
71622+ struct vm_area_struct *vma_m;
71623+ pmd_t *pmd_m;
71624+ pte_t *pte_m, entry_m;
71625+
71626+ BUG_ON(!page_m || !PageAnon(page_m));
71627+
71628+ vma_m = pax_find_mirror_vma(vma);
71629+ if (!vma_m)
71630+ return;
71631+
71632+ BUG_ON(!PageLocked(page_m));
71633+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71634+ address_m = address + SEGMEXEC_TASK_SIZE;
71635+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71636+ pte_m = pte_offset_map(pmd_m, address_m);
71637+ ptl_m = pte_lockptr(mm, pmd_m);
71638+ if (ptl != ptl_m) {
71639+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71640+ if (!pte_none(*pte_m))
71641+ goto out;
71642+ }
71643+
71644+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71645+ page_cache_get(page_m);
71646+ page_add_anon_rmap(page_m, vma_m, address_m);
71647+ inc_mm_counter_fast(mm, MM_ANONPAGES);
71648+ set_pte_at(mm, address_m, pte_m, entry_m);
71649+ update_mmu_cache(vma_m, address_m, entry_m);
71650+out:
71651+ if (ptl != ptl_m)
71652+ spin_unlock(ptl_m);
71653+ pte_unmap(pte_m);
71654+ unlock_page(page_m);
71655+}
71656+
71657+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71658+{
71659+ struct mm_struct *mm = vma->vm_mm;
71660+ unsigned long address_m;
71661+ spinlock_t *ptl_m;
71662+ struct vm_area_struct *vma_m;
71663+ pmd_t *pmd_m;
71664+ pte_t *pte_m, entry_m;
71665+
71666+ BUG_ON(!page_m || PageAnon(page_m));
71667+
71668+ vma_m = pax_find_mirror_vma(vma);
71669+ if (!vma_m)
71670+ return;
71671+
71672+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71673+ address_m = address + SEGMEXEC_TASK_SIZE;
71674+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71675+ pte_m = pte_offset_map(pmd_m, address_m);
71676+ ptl_m = pte_lockptr(mm, pmd_m);
71677+ if (ptl != ptl_m) {
71678+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71679+ if (!pte_none(*pte_m))
71680+ goto out;
71681+ }
71682+
71683+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71684+ page_cache_get(page_m);
71685+ page_add_file_rmap(page_m);
71686+ inc_mm_counter_fast(mm, MM_FILEPAGES);
71687+ set_pte_at(mm, address_m, pte_m, entry_m);
71688+ update_mmu_cache(vma_m, address_m, entry_m);
71689+out:
71690+ if (ptl != ptl_m)
71691+ spin_unlock(ptl_m);
71692+ pte_unmap(pte_m);
71693+}
71694+
71695+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71696+{
71697+ struct mm_struct *mm = vma->vm_mm;
71698+ unsigned long address_m;
71699+ spinlock_t *ptl_m;
71700+ struct vm_area_struct *vma_m;
71701+ pmd_t *pmd_m;
71702+ pte_t *pte_m, entry_m;
71703+
71704+ vma_m = pax_find_mirror_vma(vma);
71705+ if (!vma_m)
71706+ return;
71707+
71708+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71709+ address_m = address + SEGMEXEC_TASK_SIZE;
71710+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71711+ pte_m = pte_offset_map(pmd_m, address_m);
71712+ ptl_m = pte_lockptr(mm, pmd_m);
71713+ if (ptl != ptl_m) {
71714+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71715+ if (!pte_none(*pte_m))
71716+ goto out;
71717+ }
71718+
71719+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
71720+ set_pte_at(mm, address_m, pte_m, entry_m);
71721+out:
71722+ if (ptl != ptl_m)
71723+ spin_unlock(ptl_m);
71724+ pte_unmap(pte_m);
71725+}
71726+
71727+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
71728+{
71729+ struct page *page_m;
71730+ pte_t entry;
71731+
71732+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
71733+ goto out;
71734+
71735+ entry = *pte;
71736+ page_m = vm_normal_page(vma, address, entry);
71737+ if (!page_m)
71738+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
71739+ else if (PageAnon(page_m)) {
71740+ if (pax_find_mirror_vma(vma)) {
71741+ pte_unmap_unlock(pte, ptl);
71742+ lock_page(page_m);
71743+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
71744+ if (pte_same(entry, *pte))
71745+ pax_mirror_anon_pte(vma, address, page_m, ptl);
71746+ else
71747+ unlock_page(page_m);
71748+ }
71749+ } else
71750+ pax_mirror_file_pte(vma, address, page_m, ptl);
71751+
71752+out:
71753+ pte_unmap_unlock(pte, ptl);
71754+}
71755+#endif
71756+
71757 /*
71758 * This routine handles present pages, when users try to write
71759 * to a shared page. It is done by copying the page to a new address
71760@@ -2683,6 +2880,12 @@ gotten:
71761 */
71762 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71763 if (likely(pte_same(*page_table, orig_pte))) {
71764+
71765+#ifdef CONFIG_PAX_SEGMEXEC
71766+ if (pax_find_mirror_vma(vma))
71767+ BUG_ON(!trylock_page(new_page));
71768+#endif
71769+
71770 if (old_page) {
71771 if (!PageAnon(old_page)) {
71772 dec_mm_counter_fast(mm, MM_FILEPAGES);
71773@@ -2734,6 +2937,10 @@ gotten:
71774 page_remove_rmap(old_page);
71775 }
71776
71777+#ifdef CONFIG_PAX_SEGMEXEC
71778+ pax_mirror_anon_pte(vma, address, new_page, ptl);
71779+#endif
71780+
71781 /* Free the old page.. */
71782 new_page = old_page;
71783 ret |= VM_FAULT_WRITE;
71784@@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71785 swap_free(entry);
71786 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
71787 try_to_free_swap(page);
71788+
71789+#ifdef CONFIG_PAX_SEGMEXEC
71790+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
71791+#endif
71792+
71793 unlock_page(page);
71794 if (swapcache) {
71795 /*
71796@@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71797
71798 /* No need to invalidate - it was non-present before */
71799 update_mmu_cache(vma, address, page_table);
71800+
71801+#ifdef CONFIG_PAX_SEGMEXEC
71802+ pax_mirror_anon_pte(vma, address, page, ptl);
71803+#endif
71804+
71805 unlock:
71806 pte_unmap_unlock(page_table, ptl);
71807 out:
71808@@ -3055,40 +3272,6 @@ out_release:
71809 }
71810
71811 /*
71812- * This is like a special single-page "expand_{down|up}wards()",
71813- * except we must first make sure that 'address{-|+}PAGE_SIZE'
71814- * doesn't hit another vma.
71815- */
71816-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
71817-{
71818- address &= PAGE_MASK;
71819- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
71820- struct vm_area_struct *prev = vma->vm_prev;
71821-
71822- /*
71823- * Is there a mapping abutting this one below?
71824- *
71825- * That's only ok if it's the same stack mapping
71826- * that has gotten split..
71827- */
71828- if (prev && prev->vm_end == address)
71829- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71830-
71831- expand_downwards(vma, address - PAGE_SIZE);
71832- }
71833- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71834- struct vm_area_struct *next = vma->vm_next;
71835-
71836- /* As VM_GROWSDOWN but s/below/above/ */
71837- if (next && next->vm_start == address + PAGE_SIZE)
71838- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71839-
71840- expand_upwards(vma, address + PAGE_SIZE);
71841- }
71842- return 0;
71843-}
71844-
71845-/*
71846 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71847 * but allow concurrent faults), and pte mapped but not yet locked.
71848 * We return with mmap_sem still held, but pte unmapped and unlocked.
71849@@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71850 unsigned long address, pte_t *page_table, pmd_t *pmd,
71851 unsigned int flags)
71852 {
71853- struct page *page;
71854+ struct page *page = NULL;
71855 spinlock_t *ptl;
71856 pte_t entry;
71857
71858- pte_unmap(page_table);
71859-
71860- /* Check if we need to add a guard page to the stack */
71861- if (check_stack_guard_page(vma, address) < 0)
71862- return VM_FAULT_SIGBUS;
71863-
71864- /* Use the zero-page for reads */
71865 if (!(flags & FAULT_FLAG_WRITE)) {
71866 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71867 vma->vm_page_prot));
71868- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71869+ ptl = pte_lockptr(mm, pmd);
71870+ spin_lock(ptl);
71871 if (!pte_none(*page_table))
71872 goto unlock;
71873 goto setpte;
71874 }
71875
71876 /* Allocate our own private page. */
71877+ pte_unmap(page_table);
71878+
71879 if (unlikely(anon_vma_prepare(vma)))
71880 goto oom;
71881 page = alloc_zeroed_user_highpage_movable(vma, address);
71882@@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71883 if (!pte_none(*page_table))
71884 goto release;
71885
71886+#ifdef CONFIG_PAX_SEGMEXEC
71887+ if (pax_find_mirror_vma(vma))
71888+ BUG_ON(!trylock_page(page));
71889+#endif
71890+
71891 inc_mm_counter_fast(mm, MM_ANONPAGES);
71892 page_add_new_anon_rmap(page, vma, address);
71893 setpte:
71894@@ -3143,6 +3327,12 @@ setpte:
71895
71896 /* No need to invalidate - it was non-present before */
71897 update_mmu_cache(vma, address, page_table);
71898+
71899+#ifdef CONFIG_PAX_SEGMEXEC
71900+ if (page)
71901+ pax_mirror_anon_pte(vma, address, page, ptl);
71902+#endif
71903+
71904 unlock:
71905 pte_unmap_unlock(page_table, ptl);
71906 return 0;
71907@@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71908 */
71909 /* Only go through if we didn't race with anybody else... */
71910 if (likely(pte_same(*page_table, orig_pte))) {
71911+
71912+#ifdef CONFIG_PAX_SEGMEXEC
71913+ if (anon && pax_find_mirror_vma(vma))
71914+ BUG_ON(!trylock_page(page));
71915+#endif
71916+
71917 flush_icache_page(vma, page);
71918 entry = mk_pte(page, vma->vm_page_prot);
71919 if (flags & FAULT_FLAG_WRITE)
71920@@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71921
71922 /* no need to invalidate: a not-present page won't be cached */
71923 update_mmu_cache(vma, address, page_table);
71924+
71925+#ifdef CONFIG_PAX_SEGMEXEC
71926+ if (anon)
71927+ pax_mirror_anon_pte(vma, address, page, ptl);
71928+ else
71929+ pax_mirror_file_pte(vma, address, page, ptl);
71930+#endif
71931+
71932 } else {
71933 if (cow_page)
71934 mem_cgroup_uncharge_page(cow_page);
71935@@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm,
71936 if (flags & FAULT_FLAG_WRITE)
71937 flush_tlb_fix_spurious_fault(vma, address);
71938 }
71939+
71940+#ifdef CONFIG_PAX_SEGMEXEC
71941+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71942+ return 0;
71943+#endif
71944+
71945 unlock:
71946 pte_unmap_unlock(pte, ptl);
71947 return 0;
71948@@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71949 pmd_t *pmd;
71950 pte_t *pte;
71951
71952+#ifdef CONFIG_PAX_SEGMEXEC
71953+ struct vm_area_struct *vma_m;
71954+#endif
71955+
71956 __set_current_state(TASK_RUNNING);
71957
71958 count_vm_event(PGFAULT);
71959@@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71960 if (unlikely(is_vm_hugetlb_page(vma)))
71961 return hugetlb_fault(mm, vma, address, flags);
71962
71963+#ifdef CONFIG_PAX_SEGMEXEC
71964+ vma_m = pax_find_mirror_vma(vma);
71965+ if (vma_m) {
71966+ unsigned long address_m;
71967+ pgd_t *pgd_m;
71968+ pud_t *pud_m;
71969+ pmd_t *pmd_m;
71970+
71971+ if (vma->vm_start > vma_m->vm_start) {
71972+ address_m = address;
71973+ address -= SEGMEXEC_TASK_SIZE;
71974+ vma = vma_m;
71975+ } else
71976+ address_m = address + SEGMEXEC_TASK_SIZE;
71977+
71978+ pgd_m = pgd_offset(mm, address_m);
71979+ pud_m = pud_alloc(mm, pgd_m, address_m);
71980+ if (!pud_m)
71981+ return VM_FAULT_OOM;
71982+ pmd_m = pmd_alloc(mm, pud_m, address_m);
71983+ if (!pmd_m)
71984+ return VM_FAULT_OOM;
71985+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71986+ return VM_FAULT_OOM;
71987+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71988+ }
71989+#endif
71990+
71991 pgd = pgd_offset(mm, address);
71992 pud = pud_alloc(mm, pgd, address);
71993 if (!pud)
71994@@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71995 * run pte_offset_map on the pmd, if an huge pmd could
71996 * materialize from under us from a different thread.
71997 */
71998- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71999+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
72000 return VM_FAULT_OOM;
72001 /* if an huge pmd materialized from under us just retry later */
72002 if (unlikely(pmd_trans_huge(*pmd)))
72003@@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72004 spin_unlock(&mm->page_table_lock);
72005 return 0;
72006 }
72007+
72008+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72009+{
72010+ pud_t *new = pud_alloc_one(mm, address);
72011+ if (!new)
72012+ return -ENOMEM;
72013+
72014+ smp_wmb(); /* See comment in __pte_alloc */
72015+
72016+ spin_lock(&mm->page_table_lock);
72017+ if (pgd_present(*pgd)) /* Another has populated it */
72018+ pud_free(mm, new);
72019+ else
72020+ pgd_populate_kernel(mm, pgd, new);
72021+ spin_unlock(&mm->page_table_lock);
72022+ return 0;
72023+}
72024 #endif /* __PAGETABLE_PUD_FOLDED */
72025
72026 #ifndef __PAGETABLE_PMD_FOLDED
72027@@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
72028 spin_unlock(&mm->page_table_lock);
72029 return 0;
72030 }
72031+
72032+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
72033+{
72034+ pmd_t *new = pmd_alloc_one(mm, address);
72035+ if (!new)
72036+ return -ENOMEM;
72037+
72038+ smp_wmb(); /* See comment in __pte_alloc */
72039+
72040+ spin_lock(&mm->page_table_lock);
72041+#ifndef __ARCH_HAS_4LEVEL_HACK
72042+ if (pud_present(*pud)) /* Another has populated it */
72043+ pmd_free(mm, new);
72044+ else
72045+ pud_populate_kernel(mm, pud, new);
72046+#else
72047+ if (pgd_present(*pud)) /* Another has populated it */
72048+ pmd_free(mm, new);
72049+ else
72050+ pgd_populate_kernel(mm, pud, new);
72051+#endif /* __ARCH_HAS_4LEVEL_HACK */
72052+ spin_unlock(&mm->page_table_lock);
72053+ return 0;
72054+}
72055 #endif /* __PAGETABLE_PMD_FOLDED */
72056
72057 int make_pages_present(unsigned long addr, unsigned long end)
72058@@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void)
72059 gate_vma.vm_start = FIXADDR_USER_START;
72060 gate_vma.vm_end = FIXADDR_USER_END;
72061 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72062- gate_vma.vm_page_prot = __P101;
72063+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72064 /*
72065 * Make sure the vDSO gets into every core dump.
72066 * Dumping its contents makes post-mortem fully interpretable later
72067diff --git a/mm/mempolicy.c b/mm/mempolicy.c
72068index 0a37570..2048346 100644
72069--- a/mm/mempolicy.c
72070+++ b/mm/mempolicy.c
72071@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72072 unsigned long vmstart;
72073 unsigned long vmend;
72074
72075+#ifdef CONFIG_PAX_SEGMEXEC
72076+ struct vm_area_struct *vma_m;
72077+#endif
72078+
72079 vma = find_vma(mm, start);
72080 if (!vma || vma->vm_start > start)
72081 return -EFAULT;
72082@@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72083 err = policy_vma(vma, new_pol);
72084 if (err)
72085 goto out;
72086+
72087+#ifdef CONFIG_PAX_SEGMEXEC
72088+ vma_m = pax_find_mirror_vma(vma);
72089+ if (vma_m) {
72090+ err = policy_vma(vma_m, new_pol);
72091+ if (err)
72092+ goto out;
72093+ }
72094+#endif
72095+
72096 }
72097
72098 out:
72099@@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
72100
72101 if (end < start)
72102 return -EINVAL;
72103+
72104+#ifdef CONFIG_PAX_SEGMEXEC
72105+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72106+ if (end > SEGMEXEC_TASK_SIZE)
72107+ return -EINVAL;
72108+ } else
72109+#endif
72110+
72111+ if (end > TASK_SIZE)
72112+ return -EINVAL;
72113+
72114 if (end == start)
72115 return 0;
72116
72117@@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72118 if (!mm)
72119 goto out;
72120
72121+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72122+ if (mm != current->mm &&
72123+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72124+ err = -EPERM;
72125+ goto out;
72126+ }
72127+#endif
72128+
72129 /*
72130 * Check if this process has the right to modify the specified
72131 * process. The right exists if the process has administrative
72132@@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72133 rcu_read_lock();
72134 tcred = __task_cred(task);
72135 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72136- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72137- !capable(CAP_SYS_NICE)) {
72138+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72139 rcu_read_unlock();
72140 err = -EPERM;
72141 goto out;
72142diff --git a/mm/migrate.c b/mm/migrate.c
72143index 1503b6b..156c672 100644
72144--- a/mm/migrate.c
72145+++ b/mm/migrate.c
72146@@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
72147 if (!mm)
72148 return -EINVAL;
72149
72150+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72151+ if (mm != current->mm &&
72152+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72153+ err = -EPERM;
72154+ goto out;
72155+ }
72156+#endif
72157+
72158 /*
72159 * Check if this process has the right to modify the specified
72160 * process. The right exists if the process has administrative
72161@@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
72162 rcu_read_lock();
72163 tcred = __task_cred(task);
72164 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72165- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72166- !capable(CAP_SYS_NICE)) {
72167+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72168 rcu_read_unlock();
72169 err = -EPERM;
72170 goto out;
72171diff --git a/mm/mlock.c b/mm/mlock.c
72172index ef726e8..13e0901 100644
72173--- a/mm/mlock.c
72174+++ b/mm/mlock.c
72175@@ -13,6 +13,7 @@
72176 #include <linux/pagemap.h>
72177 #include <linux/mempolicy.h>
72178 #include <linux/syscalls.h>
72179+#include <linux/security.h>
72180 #include <linux/sched.h>
72181 #include <linux/export.h>
72182 #include <linux/rmap.h>
72183@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
72184 return -EINVAL;
72185 if (end == start)
72186 return 0;
72187+ if (end > TASK_SIZE)
72188+ return -EINVAL;
72189+
72190 vma = find_vma(current->mm, start);
72191 if (!vma || vma->vm_start > start)
72192 return -ENOMEM;
72193@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
72194 for (nstart = start ; ; ) {
72195 vm_flags_t newflags;
72196
72197+#ifdef CONFIG_PAX_SEGMEXEC
72198+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72199+ break;
72200+#endif
72201+
72202 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72203
72204 newflags = vma->vm_flags | VM_LOCKED;
72205@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
72206 lock_limit >>= PAGE_SHIFT;
72207
72208 /* check against resource limits */
72209+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72210 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72211 error = do_mlock(start, len, 1);
72212 up_write(&current->mm->mmap_sem);
72213@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
72214 static int do_mlockall(int flags)
72215 {
72216 struct vm_area_struct * vma, * prev = NULL;
72217- unsigned int def_flags = 0;
72218
72219 if (flags & MCL_FUTURE)
72220- def_flags = VM_LOCKED;
72221- current->mm->def_flags = def_flags;
72222+ current->mm->def_flags |= VM_LOCKED;
72223+ else
72224+ current->mm->def_flags &= ~VM_LOCKED;
72225 if (flags == MCL_FUTURE)
72226 goto out;
72227
72228 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72229 vm_flags_t newflags;
72230
72231+#ifdef CONFIG_PAX_SEGMEXEC
72232+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72233+ break;
72234+#endif
72235+
72236+ BUG_ON(vma->vm_end > TASK_SIZE);
72237 newflags = vma->vm_flags | VM_LOCKED;
72238 if (!(flags & MCL_CURRENT))
72239 newflags &= ~VM_LOCKED;
72240@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72241 lock_limit >>= PAGE_SHIFT;
72242
72243 ret = -ENOMEM;
72244+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72245 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72246 capable(CAP_IPC_LOCK))
72247 ret = do_mlockall(flags);
72248diff --git a/mm/mmap.c b/mm/mmap.c
72249index da15a79..314aef3 100644
72250--- a/mm/mmap.c
72251+++ b/mm/mmap.c
72252@@ -46,6 +46,16 @@
72253 #define arch_rebalance_pgtables(addr, len) (addr)
72254 #endif
72255
72256+static inline void verify_mm_writelocked(struct mm_struct *mm)
72257+{
72258+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72259+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72260+ up_read(&mm->mmap_sem);
72261+ BUG();
72262+ }
72263+#endif
72264+}
72265+
72266 static void unmap_region(struct mm_struct *mm,
72267 struct vm_area_struct *vma, struct vm_area_struct *prev,
72268 unsigned long start, unsigned long end);
72269@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
72270 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72271 *
72272 */
72273-pgprot_t protection_map[16] = {
72274+pgprot_t protection_map[16] __read_only = {
72275 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72276 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72277 };
72278
72279-pgprot_t vm_get_page_prot(unsigned long vm_flags)
72280+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72281 {
72282- return __pgprot(pgprot_val(protection_map[vm_flags &
72283+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72284 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72285 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72286+
72287+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72288+ if (!(__supported_pte_mask & _PAGE_NX) &&
72289+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72290+ (vm_flags & (VM_READ | VM_WRITE)))
72291+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72292+#endif
72293+
72294+ return prot;
72295 }
72296 EXPORT_SYMBOL(vm_get_page_prot);
72297
72298 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72299 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72300 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72301+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72302 /*
72303 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72304 * other variables. It can be updated by several CPUs frequently.
72305@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72306 struct vm_area_struct *next = vma->vm_next;
72307
72308 might_sleep();
72309+ BUG_ON(vma->vm_mirror);
72310 if (vma->vm_ops && vma->vm_ops->close)
72311 vma->vm_ops->close(vma);
72312 if (vma->vm_file) {
72313@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72314 * not page aligned -Ram Gupta
72315 */
72316 rlim = rlimit(RLIMIT_DATA);
72317+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72318 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72319 (mm->end_data - mm->start_data) > rlim)
72320 goto out;
72321@@ -689,6 +711,12 @@ static int
72322 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72323 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72324 {
72325+
72326+#ifdef CONFIG_PAX_SEGMEXEC
72327+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72328+ return 0;
72329+#endif
72330+
72331 if (is_mergeable_vma(vma, file, vm_flags) &&
72332 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72333 if (vma->vm_pgoff == vm_pgoff)
72334@@ -708,6 +736,12 @@ static int
72335 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72336 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72337 {
72338+
72339+#ifdef CONFIG_PAX_SEGMEXEC
72340+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72341+ return 0;
72342+#endif
72343+
72344 if (is_mergeable_vma(vma, file, vm_flags) &&
72345 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72346 pgoff_t vm_pglen;
72347@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72348 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72349 struct vm_area_struct *prev, unsigned long addr,
72350 unsigned long end, unsigned long vm_flags,
72351- struct anon_vma *anon_vma, struct file *file,
72352+ struct anon_vma *anon_vma, struct file *file,
72353 pgoff_t pgoff, struct mempolicy *policy)
72354 {
72355 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72356 struct vm_area_struct *area, *next;
72357 int err;
72358
72359+#ifdef CONFIG_PAX_SEGMEXEC
72360+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72361+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72362+
72363+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72364+#endif
72365+
72366 /*
72367 * We later require that vma->vm_flags == vm_flags,
72368 * so this tests vma->vm_flags & VM_SPECIAL, too.
72369@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72370 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72371 next = next->vm_next;
72372
72373+#ifdef CONFIG_PAX_SEGMEXEC
72374+ if (prev)
72375+ prev_m = pax_find_mirror_vma(prev);
72376+ if (area)
72377+ area_m = pax_find_mirror_vma(area);
72378+ if (next)
72379+ next_m = pax_find_mirror_vma(next);
72380+#endif
72381+
72382 /*
72383 * Can it merge with the predecessor?
72384 */
72385@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72386 /* cases 1, 6 */
72387 err = vma_adjust(prev, prev->vm_start,
72388 next->vm_end, prev->vm_pgoff, NULL);
72389- } else /* cases 2, 5, 7 */
72390+
72391+#ifdef CONFIG_PAX_SEGMEXEC
72392+ if (!err && prev_m)
72393+ err = vma_adjust(prev_m, prev_m->vm_start,
72394+ next_m->vm_end, prev_m->vm_pgoff, NULL);
72395+#endif
72396+
72397+ } else { /* cases 2, 5, 7 */
72398 err = vma_adjust(prev, prev->vm_start,
72399 end, prev->vm_pgoff, NULL);
72400+
72401+#ifdef CONFIG_PAX_SEGMEXEC
72402+ if (!err && prev_m)
72403+ err = vma_adjust(prev_m, prev_m->vm_start,
72404+ end_m, prev_m->vm_pgoff, NULL);
72405+#endif
72406+
72407+ }
72408 if (err)
72409 return NULL;
72410 khugepaged_enter_vma_merge(prev);
72411@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72412 mpol_equal(policy, vma_policy(next)) &&
72413 can_vma_merge_before(next, vm_flags,
72414 anon_vma, file, pgoff+pglen)) {
72415- if (prev && addr < prev->vm_end) /* case 4 */
72416+ if (prev && addr < prev->vm_end) { /* case 4 */
72417 err = vma_adjust(prev, prev->vm_start,
72418 addr, prev->vm_pgoff, NULL);
72419- else /* cases 3, 8 */
72420+
72421+#ifdef CONFIG_PAX_SEGMEXEC
72422+ if (!err && prev_m)
72423+ err = vma_adjust(prev_m, prev_m->vm_start,
72424+ addr_m, prev_m->vm_pgoff, NULL);
72425+#endif
72426+
72427+ } else { /* cases 3, 8 */
72428 err = vma_adjust(area, addr, next->vm_end,
72429 next->vm_pgoff - pglen, NULL);
72430+
72431+#ifdef CONFIG_PAX_SEGMEXEC
72432+ if (!err && area_m)
72433+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
72434+ next_m->vm_pgoff - pglen, NULL);
72435+#endif
72436+
72437+ }
72438 if (err)
72439 return NULL;
72440 khugepaged_enter_vma_merge(area);
72441@@ -921,14 +1001,11 @@ none:
72442 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72443 struct file *file, long pages)
72444 {
72445- const unsigned long stack_flags
72446- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72447-
72448 if (file) {
72449 mm->shared_vm += pages;
72450 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72451 mm->exec_vm += pages;
72452- } else if (flags & stack_flags)
72453+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72454 mm->stack_vm += pages;
72455 if (flags & (VM_RESERVED|VM_IO))
72456 mm->reserved_vm += pages;
72457@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72458 * (the exception is when the underlying filesystem is noexec
72459 * mounted, in which case we dont add PROT_EXEC.)
72460 */
72461- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72462+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72463 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72464 prot |= PROT_EXEC;
72465
72466@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72467 /* Obtain the address to map to. we verify (or select) it and ensure
72468 * that it represents a valid section of the address space.
72469 */
72470- addr = get_unmapped_area(file, addr, len, pgoff, flags);
72471+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72472 if (addr & ~PAGE_MASK)
72473 return addr;
72474
72475@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72476 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72477 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72478
72479+#ifdef CONFIG_PAX_MPROTECT
72480+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72481+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72482+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72483+ gr_log_rwxmmap(file);
72484+
72485+#ifdef CONFIG_PAX_EMUPLT
72486+ vm_flags &= ~VM_EXEC;
72487+#else
72488+ return -EPERM;
72489+#endif
72490+
72491+ }
72492+
72493+ if (!(vm_flags & VM_EXEC))
72494+ vm_flags &= ~VM_MAYEXEC;
72495+#else
72496+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72497+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72498+#endif
72499+ else
72500+ vm_flags &= ~VM_MAYWRITE;
72501+ }
72502+#endif
72503+
72504+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72505+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72506+ vm_flags &= ~VM_PAGEEXEC;
72507+#endif
72508+
72509 if (flags & MAP_LOCKED)
72510 if (!can_do_mlock())
72511 return -EPERM;
72512@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72513 locked += mm->locked_vm;
72514 lock_limit = rlimit(RLIMIT_MEMLOCK);
72515 lock_limit >>= PAGE_SHIFT;
72516+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72517 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72518 return -EAGAIN;
72519 }
72520@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72521 if (error)
72522 return error;
72523
72524+ if (!gr_acl_handle_mmap(file, prot))
72525+ return -EACCES;
72526+
72527 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72528 }
72529 EXPORT_SYMBOL(do_mmap_pgoff);
72530@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
72531 vm_flags_t vm_flags = vma->vm_flags;
72532
72533 /* If it was private or non-writable, the write bit is already clear */
72534- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72535+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72536 return 0;
72537
72538 /* The backer wishes to know when pages are first written to? */
72539@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
72540 unsigned long charged = 0;
72541 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72542
72543+#ifdef CONFIG_PAX_SEGMEXEC
72544+ struct vm_area_struct *vma_m = NULL;
72545+#endif
72546+
72547+ /*
72548+ * mm->mmap_sem is required to protect against another thread
72549+ * changing the mappings in case we sleep.
72550+ */
72551+ verify_mm_writelocked(mm);
72552+
72553 /* Clear old maps */
72554 error = -ENOMEM;
72555-munmap_back:
72556 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72557 if (vma && vma->vm_start < addr + len) {
72558 if (do_munmap(mm, addr, len))
72559 return -ENOMEM;
72560- goto munmap_back;
72561+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72562+ BUG_ON(vma && vma->vm_start < addr + len);
72563 }
72564
72565 /* Check against address space limit. */
72566@@ -1258,6 +1379,16 @@ munmap_back:
72567 goto unacct_error;
72568 }
72569
72570+#ifdef CONFIG_PAX_SEGMEXEC
72571+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72572+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72573+ if (!vma_m) {
72574+ error = -ENOMEM;
72575+ goto free_vma;
72576+ }
72577+ }
72578+#endif
72579+
72580 vma->vm_mm = mm;
72581 vma->vm_start = addr;
72582 vma->vm_end = addr + len;
72583@@ -1282,6 +1413,19 @@ munmap_back:
72584 error = file->f_op->mmap(file, vma);
72585 if (error)
72586 goto unmap_and_free_vma;
72587+
72588+#ifdef CONFIG_PAX_SEGMEXEC
72589+ if (vma_m && (vm_flags & VM_EXECUTABLE))
72590+ added_exe_file_vma(mm);
72591+#endif
72592+
72593+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72594+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72595+ vma->vm_flags |= VM_PAGEEXEC;
72596+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72597+ }
72598+#endif
72599+
72600 if (vm_flags & VM_EXECUTABLE)
72601 added_exe_file_vma(mm);
72602
72603@@ -1319,6 +1463,11 @@ munmap_back:
72604 vma_link(mm, vma, prev, rb_link, rb_parent);
72605 file = vma->vm_file;
72606
72607+#ifdef CONFIG_PAX_SEGMEXEC
72608+ if (vma_m)
72609+ BUG_ON(pax_mirror_vma(vma_m, vma));
72610+#endif
72611+
72612 /* Once vma denies write, undo our temporary denial count */
72613 if (correct_wcount)
72614 atomic_inc(&inode->i_writecount);
72615@@ -1327,6 +1476,7 @@ out:
72616
72617 mm->total_vm += len >> PAGE_SHIFT;
72618 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72619+ track_exec_limit(mm, addr, addr + len, vm_flags);
72620 if (vm_flags & VM_LOCKED) {
72621 if (!mlock_vma_pages_range(vma, addr, addr + len))
72622 mm->locked_vm += (len >> PAGE_SHIFT);
72623@@ -1344,6 +1494,12 @@ unmap_and_free_vma:
72624 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72625 charged = 0;
72626 free_vma:
72627+
72628+#ifdef CONFIG_PAX_SEGMEXEC
72629+ if (vma_m)
72630+ kmem_cache_free(vm_area_cachep, vma_m);
72631+#endif
72632+
72633 kmem_cache_free(vm_area_cachep, vma);
72634 unacct_error:
72635 if (charged)
72636@@ -1351,6 +1507,44 @@ unacct_error:
72637 return error;
72638 }
72639
72640+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72641+{
72642+ if (!vma) {
72643+#ifdef CONFIG_STACK_GROWSUP
72644+ if (addr > sysctl_heap_stack_gap)
72645+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72646+ else
72647+ vma = find_vma(current->mm, 0);
72648+ if (vma && (vma->vm_flags & VM_GROWSUP))
72649+ return false;
72650+#endif
72651+ return true;
72652+ }
72653+
72654+ if (addr + len > vma->vm_start)
72655+ return false;
72656+
72657+ if (vma->vm_flags & VM_GROWSDOWN)
72658+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72659+#ifdef CONFIG_STACK_GROWSUP
72660+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72661+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72662+#endif
72663+
72664+ return true;
72665+}
72666+
72667+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72668+{
72669+ if (vma->vm_start < len)
72670+ return -ENOMEM;
72671+ if (!(vma->vm_flags & VM_GROWSDOWN))
72672+ return vma->vm_start - len;
72673+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
72674+ return vma->vm_start - len - sysctl_heap_stack_gap;
72675+ return -ENOMEM;
72676+}
72677+
72678 /* Get an address range which is currently unmapped.
72679 * For shmat() with addr=0.
72680 *
72681@@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
72682 if (flags & MAP_FIXED)
72683 return addr;
72684
72685+#ifdef CONFIG_PAX_RANDMMAP
72686+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72687+#endif
72688+
72689 if (addr) {
72690 addr = PAGE_ALIGN(addr);
72691- vma = find_vma(mm, addr);
72692- if (TASK_SIZE - len >= addr &&
72693- (!vma || addr + len <= vma->vm_start))
72694- return addr;
72695+ if (TASK_SIZE - len >= addr) {
72696+ vma = find_vma(mm, addr);
72697+ if (check_heap_stack_gap(vma, addr, len))
72698+ return addr;
72699+ }
72700 }
72701 if (len > mm->cached_hole_size) {
72702- start_addr = addr = mm->free_area_cache;
72703+ start_addr = addr = mm->free_area_cache;
72704 } else {
72705- start_addr = addr = TASK_UNMAPPED_BASE;
72706- mm->cached_hole_size = 0;
72707+ start_addr = addr = mm->mmap_base;
72708+ mm->cached_hole_size = 0;
72709 }
72710
72711 full_search:
72712@@ -1399,34 +1598,40 @@ full_search:
72713 * Start a new search - just in case we missed
72714 * some holes.
72715 */
72716- if (start_addr != TASK_UNMAPPED_BASE) {
72717- addr = TASK_UNMAPPED_BASE;
72718- start_addr = addr;
72719+ if (start_addr != mm->mmap_base) {
72720+ start_addr = addr = mm->mmap_base;
72721 mm->cached_hole_size = 0;
72722 goto full_search;
72723 }
72724 return -ENOMEM;
72725 }
72726- if (!vma || addr + len <= vma->vm_start) {
72727- /*
72728- * Remember the place where we stopped the search:
72729- */
72730- mm->free_area_cache = addr + len;
72731- return addr;
72732- }
72733+ if (check_heap_stack_gap(vma, addr, len))
72734+ break;
72735 if (addr + mm->cached_hole_size < vma->vm_start)
72736 mm->cached_hole_size = vma->vm_start - addr;
72737 addr = vma->vm_end;
72738 }
72739+
72740+ /*
72741+ * Remember the place where we stopped the search:
72742+ */
72743+ mm->free_area_cache = addr + len;
72744+ return addr;
72745 }
72746 #endif
72747
72748 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
72749 {
72750+
72751+#ifdef CONFIG_PAX_SEGMEXEC
72752+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72753+ return;
72754+#endif
72755+
72756 /*
72757 * Is this a new hole at the lowest possible address?
72758 */
72759- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
72760+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
72761 mm->free_area_cache = addr;
72762 mm->cached_hole_size = ~0UL;
72763 }
72764@@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72765 {
72766 struct vm_area_struct *vma;
72767 struct mm_struct *mm = current->mm;
72768- unsigned long addr = addr0;
72769+ unsigned long base = mm->mmap_base, addr = addr0;
72770
72771 /* requested length too big for entire address space */
72772 if (len > TASK_SIZE)
72773@@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72774 if (flags & MAP_FIXED)
72775 return addr;
72776
72777+#ifdef CONFIG_PAX_RANDMMAP
72778+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72779+#endif
72780+
72781 /* requesting a specific address */
72782 if (addr) {
72783 addr = PAGE_ALIGN(addr);
72784- vma = find_vma(mm, addr);
72785- if (TASK_SIZE - len >= addr &&
72786- (!vma || addr + len <= vma->vm_start))
72787- return addr;
72788+ if (TASK_SIZE - len >= addr) {
72789+ vma = find_vma(mm, addr);
72790+ if (check_heap_stack_gap(vma, addr, len))
72791+ return addr;
72792+ }
72793 }
72794
72795 /* check if free_area_cache is useful for us */
72796@@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72797 /* make sure it can fit in the remaining address space */
72798 if (addr > len) {
72799 vma = find_vma(mm, addr-len);
72800- if (!vma || addr <= vma->vm_start)
72801+ if (check_heap_stack_gap(vma, addr - len, len))
72802 /* remember the address as a hint for next time */
72803 return (mm->free_area_cache = addr-len);
72804 }
72805@@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72806 * return with success:
72807 */
72808 vma = find_vma(mm, addr);
72809- if (!vma || addr+len <= vma->vm_start)
72810+ if (check_heap_stack_gap(vma, addr, len))
72811 /* remember the address as a hint for next time */
72812 return (mm->free_area_cache = addr);
72813
72814@@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72815 mm->cached_hole_size = vma->vm_start - addr;
72816
72817 /* try just below the current vma->vm_start */
72818- addr = vma->vm_start-len;
72819- } while (len < vma->vm_start);
72820+ addr = skip_heap_stack_gap(vma, len);
72821+ } while (!IS_ERR_VALUE(addr));
72822
72823 bottomup:
72824 /*
72825@@ -1510,13 +1720,21 @@ bottomup:
72826 * can happen with large stack limits and large mmap()
72827 * allocations.
72828 */
72829+ mm->mmap_base = TASK_UNMAPPED_BASE;
72830+
72831+#ifdef CONFIG_PAX_RANDMMAP
72832+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72833+ mm->mmap_base += mm->delta_mmap;
72834+#endif
72835+
72836+ mm->free_area_cache = mm->mmap_base;
72837 mm->cached_hole_size = ~0UL;
72838- mm->free_area_cache = TASK_UNMAPPED_BASE;
72839 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
72840 /*
72841 * Restore the topdown base:
72842 */
72843- mm->free_area_cache = mm->mmap_base;
72844+ mm->mmap_base = base;
72845+ mm->free_area_cache = base;
72846 mm->cached_hole_size = ~0UL;
72847
72848 return addr;
72849@@ -1525,6 +1743,12 @@ bottomup:
72850
72851 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72852 {
72853+
72854+#ifdef CONFIG_PAX_SEGMEXEC
72855+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72856+ return;
72857+#endif
72858+
72859 /*
72860 * Is this a new hole at the highest possible address?
72861 */
72862@@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72863 mm->free_area_cache = addr;
72864
72865 /* dont allow allocations above current base */
72866- if (mm->free_area_cache > mm->mmap_base)
72867+ if (mm->free_area_cache > mm->mmap_base) {
72868 mm->free_area_cache = mm->mmap_base;
72869+ mm->cached_hole_size = ~0UL;
72870+ }
72871 }
72872
72873 unsigned long
72874@@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
72875 return vma;
72876 }
72877
72878+#ifdef CONFIG_PAX_SEGMEXEC
72879+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72880+{
72881+ struct vm_area_struct *vma_m;
72882+
72883+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72884+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72885+ BUG_ON(vma->vm_mirror);
72886+ return NULL;
72887+ }
72888+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72889+ vma_m = vma->vm_mirror;
72890+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72891+ BUG_ON(vma->vm_file != vma_m->vm_file);
72892+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
72893+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72894+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72895+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72896+ return vma_m;
72897+}
72898+#endif
72899+
72900 /*
72901 * Verify that the stack growth is acceptable and
72902 * update accounting. This is shared with both the
72903@@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72904 return -ENOMEM;
72905
72906 /* Stack limit test */
72907+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
72908 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72909 return -ENOMEM;
72910
72911@@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72912 locked = mm->locked_vm + grow;
72913 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72914 limit >>= PAGE_SHIFT;
72915+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72916 if (locked > limit && !capable(CAP_IPC_LOCK))
72917 return -ENOMEM;
72918 }
72919@@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72920 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72921 * vma is the last one with address > vma->vm_end. Have to extend vma.
72922 */
72923+#ifndef CONFIG_IA64
72924+static
72925+#endif
72926 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72927 {
72928 int error;
72929+ bool locknext;
72930
72931 if (!(vma->vm_flags & VM_GROWSUP))
72932 return -EFAULT;
72933
72934+ /* Also guard against wrapping around to address 0. */
72935+ if (address < PAGE_ALIGN(address+1))
72936+ address = PAGE_ALIGN(address+1);
72937+ else
72938+ return -ENOMEM;
72939+
72940 /*
72941 * We must make sure the anon_vma is allocated
72942 * so that the anon_vma locking is not a noop.
72943 */
72944 if (unlikely(anon_vma_prepare(vma)))
72945 return -ENOMEM;
72946+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72947+ if (locknext && anon_vma_prepare(vma->vm_next))
72948+ return -ENOMEM;
72949 vma_lock_anon_vma(vma);
72950+ if (locknext)
72951+ vma_lock_anon_vma(vma->vm_next);
72952
72953 /*
72954 * vma->vm_start/vm_end cannot change under us because the caller
72955 * is required to hold the mmap_sem in read mode. We need the
72956- * anon_vma lock to serialize against concurrent expand_stacks.
72957- * Also guard against wrapping around to address 0.
72958+ * anon_vma locks to serialize against concurrent expand_stacks
72959+ * and expand_upwards.
72960 */
72961- if (address < PAGE_ALIGN(address+4))
72962- address = PAGE_ALIGN(address+4);
72963- else {
72964- vma_unlock_anon_vma(vma);
72965- return -ENOMEM;
72966- }
72967 error = 0;
72968
72969 /* Somebody else might have raced and expanded it already */
72970- if (address > vma->vm_end) {
72971+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72972+ error = -ENOMEM;
72973+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72974 unsigned long size, grow;
72975
72976 size = address - vma->vm_start;
72977@@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72978 }
72979 }
72980 }
72981+ if (locknext)
72982+ vma_unlock_anon_vma(vma->vm_next);
72983 vma_unlock_anon_vma(vma);
72984 khugepaged_enter_vma_merge(vma);
72985 return error;
72986@@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
72987 unsigned long address)
72988 {
72989 int error;
72990+ bool lockprev = false;
72991+ struct vm_area_struct *prev;
72992
72993 /*
72994 * We must make sure the anon_vma is allocated
72995@@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
72996 if (error)
72997 return error;
72998
72999+ prev = vma->vm_prev;
73000+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
73001+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73002+#endif
73003+ if (lockprev && anon_vma_prepare(prev))
73004+ return -ENOMEM;
73005+ if (lockprev)
73006+ vma_lock_anon_vma(prev);
73007+
73008 vma_lock_anon_vma(vma);
73009
73010 /*
73011@@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
73012 */
73013
73014 /* Somebody else might have raced and expanded it already */
73015- if (address < vma->vm_start) {
73016+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73017+ error = -ENOMEM;
73018+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73019 unsigned long size, grow;
73020
73021+#ifdef CONFIG_PAX_SEGMEXEC
73022+ struct vm_area_struct *vma_m;
73023+
73024+ vma_m = pax_find_mirror_vma(vma);
73025+#endif
73026+
73027 size = vma->vm_end - address;
73028 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73029
73030@@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
73031 if (!error) {
73032 vma->vm_start = address;
73033 vma->vm_pgoff -= grow;
73034+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73035+
73036+#ifdef CONFIG_PAX_SEGMEXEC
73037+ if (vma_m) {
73038+ vma_m->vm_start -= grow << PAGE_SHIFT;
73039+ vma_m->vm_pgoff -= grow;
73040+ }
73041+#endif
73042+
73043 perf_event_mmap(vma);
73044 }
73045 }
73046 }
73047 vma_unlock_anon_vma(vma);
73048+ if (lockprev)
73049+ vma_unlock_anon_vma(prev);
73050 khugepaged_enter_vma_merge(vma);
73051 return error;
73052 }
73053@@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
73054 do {
73055 long nrpages = vma_pages(vma);
73056
73057+#ifdef CONFIG_PAX_SEGMEXEC
73058+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73059+ vma = remove_vma(vma);
73060+ continue;
73061+ }
73062+#endif
73063+
73064 mm->total_vm -= nrpages;
73065 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
73066 vma = remove_vma(vma);
73067@@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
73068 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
73069 vma->vm_prev = NULL;
73070 do {
73071+
73072+#ifdef CONFIG_PAX_SEGMEXEC
73073+ if (vma->vm_mirror) {
73074+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73075+ vma->vm_mirror->vm_mirror = NULL;
73076+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
73077+ vma->vm_mirror = NULL;
73078+ }
73079+#endif
73080+
73081 rb_erase(&vma->vm_rb, &mm->mm_rb);
73082 mm->map_count--;
73083 tail_vma = vma;
73084@@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73085 struct vm_area_struct *new;
73086 int err = -ENOMEM;
73087
73088+#ifdef CONFIG_PAX_SEGMEXEC
73089+ struct vm_area_struct *vma_m, *new_m = NULL;
73090+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
73091+#endif
73092+
73093 if (is_vm_hugetlb_page(vma) && (addr &
73094 ~(huge_page_mask(hstate_vma(vma)))))
73095 return -EINVAL;
73096
73097+#ifdef CONFIG_PAX_SEGMEXEC
73098+ vma_m = pax_find_mirror_vma(vma);
73099+#endif
73100+
73101 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73102 if (!new)
73103 goto out_err;
73104
73105+#ifdef CONFIG_PAX_SEGMEXEC
73106+ if (vma_m) {
73107+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73108+ if (!new_m) {
73109+ kmem_cache_free(vm_area_cachep, new);
73110+ goto out_err;
73111+ }
73112+ }
73113+#endif
73114+
73115 /* most fields are the same, copy all, and then fixup */
73116 *new = *vma;
73117
73118@@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73119 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73120 }
73121
73122+#ifdef CONFIG_PAX_SEGMEXEC
73123+ if (vma_m) {
73124+ *new_m = *vma_m;
73125+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
73126+ new_m->vm_mirror = new;
73127+ new->vm_mirror = new_m;
73128+
73129+ if (new_below)
73130+ new_m->vm_end = addr_m;
73131+ else {
73132+ new_m->vm_start = addr_m;
73133+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73134+ }
73135+ }
73136+#endif
73137+
73138 pol = mpol_dup(vma_policy(vma));
73139 if (IS_ERR(pol)) {
73140 err = PTR_ERR(pol);
73141@@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73142 else
73143 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73144
73145+#ifdef CONFIG_PAX_SEGMEXEC
73146+ if (!err && vma_m) {
73147+ if (anon_vma_clone(new_m, vma_m))
73148+ goto out_free_mpol;
73149+
73150+ mpol_get(pol);
73151+ vma_set_policy(new_m, pol);
73152+
73153+ if (new_m->vm_file) {
73154+ get_file(new_m->vm_file);
73155+ if (vma_m->vm_flags & VM_EXECUTABLE)
73156+ added_exe_file_vma(mm);
73157+ }
73158+
73159+ if (new_m->vm_ops && new_m->vm_ops->open)
73160+ new_m->vm_ops->open(new_m);
73161+
73162+ if (new_below)
73163+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73164+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73165+ else
73166+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73167+
73168+ if (err) {
73169+ if (new_m->vm_ops && new_m->vm_ops->close)
73170+ new_m->vm_ops->close(new_m);
73171+ if (new_m->vm_file) {
73172+ if (vma_m->vm_flags & VM_EXECUTABLE)
73173+ removed_exe_file_vma(mm);
73174+ fput(new_m->vm_file);
73175+ }
73176+ mpol_put(pol);
73177+ }
73178+ }
73179+#endif
73180+
73181 /* Success. */
73182 if (!err)
73183 return 0;
73184@@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73185 removed_exe_file_vma(mm);
73186 fput(new->vm_file);
73187 }
73188- unlink_anon_vmas(new);
73189 out_free_mpol:
73190 mpol_put(pol);
73191 out_free_vma:
73192+
73193+#ifdef CONFIG_PAX_SEGMEXEC
73194+ if (new_m) {
73195+ unlink_anon_vmas(new_m);
73196+ kmem_cache_free(vm_area_cachep, new_m);
73197+ }
73198+#endif
73199+
73200+ unlink_anon_vmas(new);
73201 kmem_cache_free(vm_area_cachep, new);
73202 out_err:
73203 return err;
73204@@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73205 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73206 unsigned long addr, int new_below)
73207 {
73208+
73209+#ifdef CONFIG_PAX_SEGMEXEC
73210+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73211+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73212+ if (mm->map_count >= sysctl_max_map_count-1)
73213+ return -ENOMEM;
73214+ } else
73215+#endif
73216+
73217 if (mm->map_count >= sysctl_max_map_count)
73218 return -ENOMEM;
73219
73220@@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73221 * work. This now handles partial unmappings.
73222 * Jeremy Fitzhardinge <jeremy@goop.org>
73223 */
73224+#ifdef CONFIG_PAX_SEGMEXEC
73225 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73226 {
73227+ int ret = __do_munmap(mm, start, len);
73228+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73229+ return ret;
73230+
73231+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73232+}
73233+
73234+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73235+#else
73236+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73237+#endif
73238+{
73239 unsigned long end;
73240 struct vm_area_struct *vma, *prev, *last;
73241
73242+ /*
73243+ * mm->mmap_sem is required to protect against another thread
73244+ * changing the mappings in case we sleep.
73245+ */
73246+ verify_mm_writelocked(mm);
73247+
73248 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73249 return -EINVAL;
73250
73251@@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73252 /* Fix up all other VM information */
73253 remove_vma_list(mm, vma);
73254
73255+ track_exec_limit(mm, start, end, 0UL);
73256+
73257 return 0;
73258 }
73259
73260@@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73261
73262 profile_munmap(addr);
73263
73264+#ifdef CONFIG_PAX_SEGMEXEC
73265+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73266+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
73267+ return -EINVAL;
73268+#endif
73269+
73270 down_write(&mm->mmap_sem);
73271 ret = do_munmap(mm, addr, len);
73272 up_write(&mm->mmap_sem);
73273 return ret;
73274 }
73275
73276-static inline void verify_mm_writelocked(struct mm_struct *mm)
73277-{
73278-#ifdef CONFIG_DEBUG_VM
73279- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73280- WARN_ON(1);
73281- up_read(&mm->mmap_sem);
73282- }
73283-#endif
73284-}
73285-
73286 /*
73287 * this is really a simplified "do_mmap". it only handles
73288 * anonymous maps. eventually we may be able to do some
73289@@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73290 struct rb_node ** rb_link, * rb_parent;
73291 pgoff_t pgoff = addr >> PAGE_SHIFT;
73292 int error;
73293+ unsigned long charged;
73294
73295 len = PAGE_ALIGN(len);
73296 if (!len)
73297@@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73298
73299 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73300
73301+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73302+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73303+ flags &= ~VM_EXEC;
73304+
73305+#ifdef CONFIG_PAX_MPROTECT
73306+ if (mm->pax_flags & MF_PAX_MPROTECT)
73307+ flags &= ~VM_MAYEXEC;
73308+#endif
73309+
73310+ }
73311+#endif
73312+
73313 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73314 if (error & ~PAGE_MASK)
73315 return error;
73316
73317+ charged = len >> PAGE_SHIFT;
73318+
73319 /*
73320 * mlock MCL_FUTURE?
73321 */
73322 if (mm->def_flags & VM_LOCKED) {
73323 unsigned long locked, lock_limit;
73324- locked = len >> PAGE_SHIFT;
73325+ locked = charged;
73326 locked += mm->locked_vm;
73327 lock_limit = rlimit(RLIMIT_MEMLOCK);
73328 lock_limit >>= PAGE_SHIFT;
73329@@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73330 /*
73331 * Clear old maps. this also does some error checking for us
73332 */
73333- munmap_back:
73334 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73335 if (vma && vma->vm_start < addr + len) {
73336 if (do_munmap(mm, addr, len))
73337 return -ENOMEM;
73338- goto munmap_back;
73339+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73340+ BUG_ON(vma && vma->vm_start < addr + len);
73341 }
73342
73343 /* Check against address space limits *after* clearing old maps... */
73344- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73345+ if (!may_expand_vm(mm, charged))
73346 return -ENOMEM;
73347
73348 if (mm->map_count > sysctl_max_map_count)
73349 return -ENOMEM;
73350
73351- if (security_vm_enough_memory(len >> PAGE_SHIFT))
73352+ if (security_vm_enough_memory(charged))
73353 return -ENOMEM;
73354
73355 /* Can we just expand an old private anonymous mapping? */
73356@@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73357 */
73358 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73359 if (!vma) {
73360- vm_unacct_memory(len >> PAGE_SHIFT);
73361+ vm_unacct_memory(charged);
73362 return -ENOMEM;
73363 }
73364
73365@@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73366 vma_link(mm, vma, prev, rb_link, rb_parent);
73367 out:
73368 perf_event_mmap(vma);
73369- mm->total_vm += len >> PAGE_SHIFT;
73370+ mm->total_vm += charged;
73371 if (flags & VM_LOCKED) {
73372 if (!mlock_vma_pages_range(vma, addr, addr + len))
73373- mm->locked_vm += (len >> PAGE_SHIFT);
73374+ mm->locked_vm += charged;
73375 }
73376+ track_exec_limit(mm, addr, addr + len, flags);
73377 return addr;
73378 }
73379
73380@@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
73381 * Walk the list again, actually closing and freeing it,
73382 * with preemption enabled, without holding any MM locks.
73383 */
73384- while (vma)
73385+ while (vma) {
73386+ vma->vm_mirror = NULL;
73387 vma = remove_vma(vma);
73388+ }
73389
73390 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
73391 }
73392@@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73393 struct vm_area_struct * __vma, * prev;
73394 struct rb_node ** rb_link, * rb_parent;
73395
73396+#ifdef CONFIG_PAX_SEGMEXEC
73397+ struct vm_area_struct *vma_m = NULL;
73398+#endif
73399+
73400+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
73401+ return -EPERM;
73402+
73403 /*
73404 * The vm_pgoff of a purely anonymous vma should be irrelevant
73405 * until its first write fault, when page's anon_vma and index
73406@@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73407 if ((vma->vm_flags & VM_ACCOUNT) &&
73408 security_vm_enough_memory_mm(mm, vma_pages(vma)))
73409 return -ENOMEM;
73410+
73411+#ifdef CONFIG_PAX_SEGMEXEC
73412+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73413+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73414+ if (!vma_m)
73415+ return -ENOMEM;
73416+ }
73417+#endif
73418+
73419 vma_link(mm, vma, prev, rb_link, rb_parent);
73420+
73421+#ifdef CONFIG_PAX_SEGMEXEC
73422+ if (vma_m)
73423+ BUG_ON(pax_mirror_vma(vma_m, vma));
73424+#endif
73425+
73426 return 0;
73427 }
73428
73429@@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73430 struct mempolicy *pol;
73431 bool faulted_in_anon_vma = true;
73432
73433+ BUG_ON(vma->vm_mirror);
73434+
73435 /*
73436 * If anonymous vma has not yet been faulted, update new pgoff
73437 * to match new location, to increase its chance of merging.
73438@@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73439 return NULL;
73440 }
73441
73442+#ifdef CONFIG_PAX_SEGMEXEC
73443+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73444+{
73445+ struct vm_area_struct *prev_m;
73446+ struct rb_node **rb_link_m, *rb_parent_m;
73447+ struct mempolicy *pol_m;
73448+
73449+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73450+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73451+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73452+ *vma_m = *vma;
73453+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73454+ if (anon_vma_clone(vma_m, vma))
73455+ return -ENOMEM;
73456+ pol_m = vma_policy(vma_m);
73457+ mpol_get(pol_m);
73458+ vma_set_policy(vma_m, pol_m);
73459+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73460+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73461+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73462+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73463+ if (vma_m->vm_file)
73464+ get_file(vma_m->vm_file);
73465+ if (vma_m->vm_ops && vma_m->vm_ops->open)
73466+ vma_m->vm_ops->open(vma_m);
73467+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73468+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73469+ vma_m->vm_mirror = vma;
73470+ vma->vm_mirror = vma_m;
73471+ return 0;
73472+}
73473+#endif
73474+
73475 /*
73476 * Return true if the calling process may expand its vm space by the passed
73477 * number of pages
73478@@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
73479
73480 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
73481
73482+#ifdef CONFIG_PAX_RANDMMAP
73483+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73484+ cur -= mm->brk_gap;
73485+#endif
73486+
73487+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73488 if (cur + npages > lim)
73489 return 0;
73490 return 1;
73491@@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
73492 vma->vm_start = addr;
73493 vma->vm_end = addr + len;
73494
73495+#ifdef CONFIG_PAX_MPROTECT
73496+ if (mm->pax_flags & MF_PAX_MPROTECT) {
73497+#ifndef CONFIG_PAX_MPROTECT_COMPAT
73498+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73499+ return -EPERM;
73500+ if (!(vm_flags & VM_EXEC))
73501+ vm_flags &= ~VM_MAYEXEC;
73502+#else
73503+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73504+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73505+#endif
73506+ else
73507+ vm_flags &= ~VM_MAYWRITE;
73508+ }
73509+#endif
73510+
73511 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73512 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73513
73514diff --git a/mm/mprotect.c b/mm/mprotect.c
73515index f437d05..e3763f6 100644
73516--- a/mm/mprotect.c
73517+++ b/mm/mprotect.c
73518@@ -23,10 +23,16 @@
73519 #include <linux/mmu_notifier.h>
73520 #include <linux/migrate.h>
73521 #include <linux/perf_event.h>
73522+
73523+#ifdef CONFIG_PAX_MPROTECT
73524+#include <linux/elf.h>
73525+#endif
73526+
73527 #include <asm/uaccess.h>
73528 #include <asm/pgtable.h>
73529 #include <asm/cacheflush.h>
73530 #include <asm/tlbflush.h>
73531+#include <asm/mmu_context.h>
73532
73533 #ifndef pgprot_modify
73534 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
73535@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
73536 flush_tlb_range(vma, start, end);
73537 }
73538
73539+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73540+/* called while holding the mmap semaphor for writing except stack expansion */
73541+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73542+{
73543+ unsigned long oldlimit, newlimit = 0UL;
73544+
73545+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
73546+ return;
73547+
73548+ spin_lock(&mm->page_table_lock);
73549+ oldlimit = mm->context.user_cs_limit;
73550+ if ((prot & VM_EXEC) && oldlimit < end)
73551+ /* USER_CS limit moved up */
73552+ newlimit = end;
73553+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73554+ /* USER_CS limit moved down */
73555+ newlimit = start;
73556+
73557+ if (newlimit) {
73558+ mm->context.user_cs_limit = newlimit;
73559+
73560+#ifdef CONFIG_SMP
73561+ wmb();
73562+ cpus_clear(mm->context.cpu_user_cs_mask);
73563+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73564+#endif
73565+
73566+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73567+ }
73568+ spin_unlock(&mm->page_table_lock);
73569+ if (newlimit == end) {
73570+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
73571+
73572+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
73573+ if (is_vm_hugetlb_page(vma))
73574+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73575+ else
73576+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73577+ }
73578+}
73579+#endif
73580+
73581 int
73582 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73583 unsigned long start, unsigned long end, unsigned long newflags)
73584@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73585 int error;
73586 int dirty_accountable = 0;
73587
73588+#ifdef CONFIG_PAX_SEGMEXEC
73589+ struct vm_area_struct *vma_m = NULL;
73590+ unsigned long start_m, end_m;
73591+
73592+ start_m = start + SEGMEXEC_TASK_SIZE;
73593+ end_m = end + SEGMEXEC_TASK_SIZE;
73594+#endif
73595+
73596 if (newflags == oldflags) {
73597 *pprev = vma;
73598 return 0;
73599 }
73600
73601+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73602+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73603+
73604+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73605+ return -ENOMEM;
73606+
73607+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73608+ return -ENOMEM;
73609+ }
73610+
73611 /*
73612 * If we make a private mapping writable we increase our commit;
73613 * but (without finer accounting) cannot reduce our commit if we
73614@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73615 }
73616 }
73617
73618+#ifdef CONFIG_PAX_SEGMEXEC
73619+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73620+ if (start != vma->vm_start) {
73621+ error = split_vma(mm, vma, start, 1);
73622+ if (error)
73623+ goto fail;
73624+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73625+ *pprev = (*pprev)->vm_next;
73626+ }
73627+
73628+ if (end != vma->vm_end) {
73629+ error = split_vma(mm, vma, end, 0);
73630+ if (error)
73631+ goto fail;
73632+ }
73633+
73634+ if (pax_find_mirror_vma(vma)) {
73635+ error = __do_munmap(mm, start_m, end_m - start_m);
73636+ if (error)
73637+ goto fail;
73638+ } else {
73639+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73640+ if (!vma_m) {
73641+ error = -ENOMEM;
73642+ goto fail;
73643+ }
73644+ vma->vm_flags = newflags;
73645+ error = pax_mirror_vma(vma_m, vma);
73646+ if (error) {
73647+ vma->vm_flags = oldflags;
73648+ goto fail;
73649+ }
73650+ }
73651+ }
73652+#endif
73653+
73654 /*
73655 * First try to merge with previous and/or next vma.
73656 */
73657@@ -204,9 +306,21 @@ success:
73658 * vm_flags and vm_page_prot are protected by the mmap_sem
73659 * held in write mode.
73660 */
73661+
73662+#ifdef CONFIG_PAX_SEGMEXEC
73663+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73664+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73665+#endif
73666+
73667 vma->vm_flags = newflags;
73668+
73669+#ifdef CONFIG_PAX_MPROTECT
73670+ if (mm->binfmt && mm->binfmt->handle_mprotect)
73671+ mm->binfmt->handle_mprotect(vma, newflags);
73672+#endif
73673+
73674 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73675- vm_get_page_prot(newflags));
73676+ vm_get_page_prot(vma->vm_flags));
73677
73678 if (vma_wants_writenotify(vma)) {
73679 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
73680@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73681 end = start + len;
73682 if (end <= start)
73683 return -ENOMEM;
73684+
73685+#ifdef CONFIG_PAX_SEGMEXEC
73686+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73687+ if (end > SEGMEXEC_TASK_SIZE)
73688+ return -EINVAL;
73689+ } else
73690+#endif
73691+
73692+ if (end > TASK_SIZE)
73693+ return -EINVAL;
73694+
73695 if (!arch_validate_prot(prot))
73696 return -EINVAL;
73697
73698@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73699 /*
73700 * Does the application expect PROT_READ to imply PROT_EXEC:
73701 */
73702- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73703+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73704 prot |= PROT_EXEC;
73705
73706 vm_flags = calc_vm_prot_bits(prot);
73707@@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73708 if (start > vma->vm_start)
73709 prev = vma;
73710
73711+#ifdef CONFIG_PAX_MPROTECT
73712+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73713+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
73714+#endif
73715+
73716 for (nstart = start ; ; ) {
73717 unsigned long newflags;
73718
73719@@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73720
73721 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73722 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73723+ if (prot & (PROT_WRITE | PROT_EXEC))
73724+ gr_log_rwxmprotect(vma->vm_file);
73725+
73726+ error = -EACCES;
73727+ goto out;
73728+ }
73729+
73730+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73731 error = -EACCES;
73732 goto out;
73733 }
73734@@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73735 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
73736 if (error)
73737 goto out;
73738+
73739+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
73740+
73741 nstart = tmp;
73742
73743 if (nstart < prev->vm_end)
73744diff --git a/mm/mremap.c b/mm/mremap.c
73745index 87bb839..c3bfadb 100644
73746--- a/mm/mremap.c
73747+++ b/mm/mremap.c
73748@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
73749 continue;
73750 pte = ptep_get_and_clear(mm, old_addr, old_pte);
73751 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
73752+
73753+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73754+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
73755+ pte = pte_exprotect(pte);
73756+#endif
73757+
73758 set_pte_at(mm, new_addr, new_pte, pte);
73759 }
73760
73761@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
73762 if (is_vm_hugetlb_page(vma))
73763 goto Einval;
73764
73765+#ifdef CONFIG_PAX_SEGMEXEC
73766+ if (pax_find_mirror_vma(vma))
73767+ goto Einval;
73768+#endif
73769+
73770 /* We can't remap across vm area boundaries */
73771 if (old_len > vma->vm_end - addr)
73772 goto Efault;
73773@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
73774 unsigned long ret = -EINVAL;
73775 unsigned long charged = 0;
73776 unsigned long map_flags;
73777+ unsigned long pax_task_size = TASK_SIZE;
73778
73779 if (new_addr & ~PAGE_MASK)
73780 goto out;
73781
73782- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
73783+#ifdef CONFIG_PAX_SEGMEXEC
73784+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73785+ pax_task_size = SEGMEXEC_TASK_SIZE;
73786+#endif
73787+
73788+ pax_task_size -= PAGE_SIZE;
73789+
73790+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
73791 goto out;
73792
73793 /* Check if the location we're moving into overlaps the
73794 * old location at all, and fail if it does.
73795 */
73796- if ((new_addr <= addr) && (new_addr+new_len) > addr)
73797- goto out;
73798-
73799- if ((addr <= new_addr) && (addr+old_len) > new_addr)
73800+ if (addr + old_len > new_addr && new_addr + new_len > addr)
73801 goto out;
73802
73803 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73804@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
73805 struct vm_area_struct *vma;
73806 unsigned long ret = -EINVAL;
73807 unsigned long charged = 0;
73808+ unsigned long pax_task_size = TASK_SIZE;
73809
73810 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
73811 goto out;
73812@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
73813 if (!new_len)
73814 goto out;
73815
73816+#ifdef CONFIG_PAX_SEGMEXEC
73817+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73818+ pax_task_size = SEGMEXEC_TASK_SIZE;
73819+#endif
73820+
73821+ pax_task_size -= PAGE_SIZE;
73822+
73823+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
73824+ old_len > pax_task_size || addr > pax_task_size-old_len)
73825+ goto out;
73826+
73827 if (flags & MREMAP_FIXED) {
73828 if (flags & MREMAP_MAYMOVE)
73829 ret = mremap_to(addr, old_len, new_addr, new_len);
73830@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
73831 addr + new_len);
73832 }
73833 ret = addr;
73834+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73835 goto out;
73836 }
73837 }
73838@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
73839 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73840 if (ret)
73841 goto out;
73842+
73843+ map_flags = vma->vm_flags;
73844 ret = move_vma(vma, addr, old_len, new_len, new_addr);
73845+ if (!(ret & ~PAGE_MASK)) {
73846+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
73847+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
73848+ }
73849 }
73850 out:
73851 if (ret & ~PAGE_MASK)
73852diff --git a/mm/nommu.c b/mm/nommu.c
73853index f59e170..34e2a2b 100644
73854--- a/mm/nommu.c
73855+++ b/mm/nommu.c
73856@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
73857 int sysctl_overcommit_ratio = 50; /* default is 50% */
73858 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73859 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73860-int heap_stack_gap = 0;
73861
73862 atomic_long_t mmap_pages_allocated;
73863
73864@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
73865 EXPORT_SYMBOL(find_vma);
73866
73867 /*
73868- * find a VMA
73869- * - we don't extend stack VMAs under NOMMU conditions
73870- */
73871-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73872-{
73873- return find_vma(mm, addr);
73874-}
73875-
73876-/*
73877 * expand a stack to a given address
73878 * - not supported under NOMMU conditions
73879 */
73880@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73881
73882 /* most fields are the same, copy all, and then fixup */
73883 *new = *vma;
73884+ INIT_LIST_HEAD(&new->anon_vma_chain);
73885 *region = *vma->vm_region;
73886 new->vm_region = region;
73887
73888diff --git a/mm/page_alloc.c b/mm/page_alloc.c
73889index a13ded1..b949d15 100644
73890--- a/mm/page_alloc.c
73891+++ b/mm/page_alloc.c
73892@@ -335,7 +335,7 @@ out:
73893 * This usage means that zero-order pages may not be compound.
73894 */
73895
73896-static void free_compound_page(struct page *page)
73897+void free_compound_page(struct page *page)
73898 {
73899 __free_pages_ok(page, compound_order(page));
73900 }
73901@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73902 int i;
73903 int bad = 0;
73904
73905+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73906+ unsigned long index = 1UL << order;
73907+#endif
73908+
73909 trace_mm_page_free(page, order);
73910 kmemcheck_free_shadow(page, order);
73911
73912@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73913 debug_check_no_obj_freed(page_address(page),
73914 PAGE_SIZE << order);
73915 }
73916+
73917+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73918+ for (; index; --index)
73919+ sanitize_highpage(page + index - 1);
73920+#endif
73921+
73922 arch_free_page(page, order);
73923 kernel_map_pages(page, 1 << order, 0);
73924
73925@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73926 arch_alloc_page(page, order);
73927 kernel_map_pages(page, 1 << order, 1);
73928
73929+#ifndef CONFIG_PAX_MEMORY_SANITIZE
73930 if (gfp_flags & __GFP_ZERO)
73931 prep_zero_page(page, order, gfp_flags);
73932+#endif
73933
73934 if (order && (gfp_flags & __GFP_COMP))
73935 prep_compound_page(page, order);
73936@@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73937 unsigned long pfn;
73938
73939 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73940+#ifdef CONFIG_X86_32
73941+ /* boot failures in VMware 8 on 32bit vanilla since
73942+ this change */
73943+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73944+#else
73945 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73946+#endif
73947 return 1;
73948 }
73949 return 0;
73950diff --git a/mm/percpu.c b/mm/percpu.c
73951index f47af91..7eeef99 100644
73952--- a/mm/percpu.c
73953+++ b/mm/percpu.c
73954@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73955 static unsigned int pcpu_high_unit_cpu __read_mostly;
73956
73957 /* the address of the first chunk which starts with the kernel static area */
73958-void *pcpu_base_addr __read_mostly;
73959+void *pcpu_base_addr __read_only;
73960 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73961
73962 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73963diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73964index c20ff48..137702a 100644
73965--- a/mm/process_vm_access.c
73966+++ b/mm/process_vm_access.c
73967@@ -13,6 +13,7 @@
73968 #include <linux/uio.h>
73969 #include <linux/sched.h>
73970 #include <linux/highmem.h>
73971+#include <linux/security.h>
73972 #include <linux/ptrace.h>
73973 #include <linux/slab.h>
73974 #include <linux/syscalls.h>
73975@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73976 size_t iov_l_curr_offset = 0;
73977 ssize_t iov_len;
73978
73979+ return -ENOSYS; // PaX: until properly audited
73980+
73981 /*
73982 * Work out how many pages of struct pages we're going to need
73983 * when eventually calling get_user_pages
73984 */
73985 for (i = 0; i < riovcnt; i++) {
73986 iov_len = rvec[i].iov_len;
73987- if (iov_len > 0) {
73988- nr_pages_iov = ((unsigned long)rvec[i].iov_base
73989- + iov_len)
73990- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73991- / PAGE_SIZE + 1;
73992- nr_pages = max(nr_pages, nr_pages_iov);
73993- }
73994+ if (iov_len <= 0)
73995+ continue;
73996+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73997+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73998+ nr_pages = max(nr_pages, nr_pages_iov);
73999 }
74000
74001 if (nr_pages == 0)
74002@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74003 goto free_proc_pages;
74004 }
74005
74006+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
74007+ rc = -EPERM;
74008+ goto put_task_struct;
74009+ }
74010+
74011 mm = mm_access(task, PTRACE_MODE_ATTACH);
74012 if (!mm || IS_ERR(mm)) {
74013 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
74014diff --git a/mm/rmap.c b/mm/rmap.c
74015index c8454e0..b04f3a2 100644
74016--- a/mm/rmap.c
74017+++ b/mm/rmap.c
74018@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74019 struct anon_vma *anon_vma = vma->anon_vma;
74020 struct anon_vma_chain *avc;
74021
74022+#ifdef CONFIG_PAX_SEGMEXEC
74023+ struct anon_vma_chain *avc_m = NULL;
74024+#endif
74025+
74026 might_sleep();
74027 if (unlikely(!anon_vma)) {
74028 struct mm_struct *mm = vma->vm_mm;
74029@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74030 if (!avc)
74031 goto out_enomem;
74032
74033+#ifdef CONFIG_PAX_SEGMEXEC
74034+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
74035+ if (!avc_m)
74036+ goto out_enomem_free_avc;
74037+#endif
74038+
74039 anon_vma = find_mergeable_anon_vma(vma);
74040 allocated = NULL;
74041 if (!anon_vma) {
74042@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74043 /* page_table_lock to protect against threads */
74044 spin_lock(&mm->page_table_lock);
74045 if (likely(!vma->anon_vma)) {
74046+
74047+#ifdef CONFIG_PAX_SEGMEXEC
74048+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74049+
74050+ if (vma_m) {
74051+ BUG_ON(vma_m->anon_vma);
74052+ vma_m->anon_vma = anon_vma;
74053+ avc_m->anon_vma = anon_vma;
74054+ avc_m->vma = vma;
74055+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
74056+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
74057+ avc_m = NULL;
74058+ }
74059+#endif
74060+
74061 vma->anon_vma = anon_vma;
74062 avc->anon_vma = anon_vma;
74063 avc->vma = vma;
74064@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74065
74066 if (unlikely(allocated))
74067 put_anon_vma(allocated);
74068+
74069+#ifdef CONFIG_PAX_SEGMEXEC
74070+ if (unlikely(avc_m))
74071+ anon_vma_chain_free(avc_m);
74072+#endif
74073+
74074 if (unlikely(avc))
74075 anon_vma_chain_free(avc);
74076 }
74077 return 0;
74078
74079 out_enomem_free_avc:
74080+
74081+#ifdef CONFIG_PAX_SEGMEXEC
74082+ if (avc_m)
74083+ anon_vma_chain_free(avc_m);
74084+#endif
74085+
74086 anon_vma_chain_free(avc);
74087 out_enomem:
74088 return -ENOMEM;
74089@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
74090 * Attach the anon_vmas from src to dst.
74091 * Returns 0 on success, -ENOMEM on failure.
74092 */
74093-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
74094+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
74095 {
74096 struct anon_vma_chain *avc, *pavc;
74097 struct anon_vma *root = NULL;
74098@@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
74099 * the corresponding VMA in the parent process is attached to.
74100 * Returns 0 on success, non-zero on failure.
74101 */
74102-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
74103+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
74104 {
74105 struct anon_vma_chain *avc;
74106 struct anon_vma *anon_vma;
74107diff --git a/mm/shmem.c b/mm/shmem.c
74108index 269d049..a9d2b50 100644
74109--- a/mm/shmem.c
74110+++ b/mm/shmem.c
74111@@ -31,7 +31,7 @@
74112 #include <linux/export.h>
74113 #include <linux/swap.h>
74114
74115-static struct vfsmount *shm_mnt;
74116+struct vfsmount *shm_mnt;
74117
74118 #ifdef CONFIG_SHMEM
74119 /*
74120@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
74121 #define BOGO_DIRENT_SIZE 20
74122
74123 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
74124-#define SHORT_SYMLINK_LEN 128
74125+#define SHORT_SYMLINK_LEN 64
74126
74127 struct shmem_xattr {
74128 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
74129@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
74130 int err = -ENOMEM;
74131
74132 /* Round up to L1_CACHE_BYTES to resist false sharing */
74133- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74134- L1_CACHE_BYTES), GFP_KERNEL);
74135+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74136 if (!sbinfo)
74137 return -ENOMEM;
74138
74139diff --git a/mm/slab.c b/mm/slab.c
74140index f0bd785..348b96a 100644
74141--- a/mm/slab.c
74142+++ b/mm/slab.c
74143@@ -153,7 +153,7 @@
74144
74145 /* Legal flag mask for kmem_cache_create(). */
74146 #if DEBUG
74147-# define CREATE_MASK (SLAB_RED_ZONE | \
74148+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74149 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74150 SLAB_CACHE_DMA | \
74151 SLAB_STORE_USER | \
74152@@ -161,7 +161,7 @@
74153 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74154 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74155 #else
74156-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74157+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74158 SLAB_CACHE_DMA | \
74159 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74160 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74161@@ -290,7 +290,7 @@ struct kmem_list3 {
74162 * Need this for bootstrapping a per node allocator.
74163 */
74164 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
74165-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74166+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
74167 #define CACHE_CACHE 0
74168 #define SIZE_AC MAX_NUMNODES
74169 #define SIZE_L3 (2 * MAX_NUMNODES)
74170@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
74171 if ((x)->max_freeable < i) \
74172 (x)->max_freeable = i; \
74173 } while (0)
74174-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74175-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74176-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74177-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74178+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74179+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74180+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74181+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74182 #else
74183 #define STATS_INC_ACTIVE(x) do { } while (0)
74184 #define STATS_DEC_ACTIVE(x) do { } while (0)
74185@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
74186 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74187 */
74188 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74189- const struct slab *slab, void *obj)
74190+ const struct slab *slab, const void *obj)
74191 {
74192 u32 offset = (obj - slab->s_mem);
74193 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
74194@@ -568,7 +568,7 @@ struct cache_names {
74195 static struct cache_names __initdata cache_names[] = {
74196 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
74197 #include <linux/kmalloc_sizes.h>
74198- {NULL,}
74199+ {NULL}
74200 #undef CACHE
74201 };
74202
74203@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
74204 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74205 sizes[INDEX_AC].cs_size,
74206 ARCH_KMALLOC_MINALIGN,
74207- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74208+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74209 NULL);
74210
74211 if (INDEX_AC != INDEX_L3) {
74212@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
74213 kmem_cache_create(names[INDEX_L3].name,
74214 sizes[INDEX_L3].cs_size,
74215 ARCH_KMALLOC_MINALIGN,
74216- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74217+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74218 NULL);
74219 }
74220
74221@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
74222 sizes->cs_cachep = kmem_cache_create(names->name,
74223 sizes->cs_size,
74224 ARCH_KMALLOC_MINALIGN,
74225- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74226+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74227 NULL);
74228 }
74229 #ifdef CONFIG_ZONE_DMA
74230@@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
74231 }
74232 /* cpu stats */
74233 {
74234- unsigned long allochit = atomic_read(&cachep->allochit);
74235- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74236- unsigned long freehit = atomic_read(&cachep->freehit);
74237- unsigned long freemiss = atomic_read(&cachep->freemiss);
74238+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74239+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74240+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74241+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74242
74243 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74244 allochit, allocmiss, freehit, freemiss);
74245@@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
74246 {
74247 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
74248 #ifdef CONFIG_DEBUG_SLAB_LEAK
74249- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74250+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
74251 #endif
74252 return 0;
74253 }
74254 module_init(slab_proc_init);
74255 #endif
74256
74257+void check_object_size(const void *ptr, unsigned long n, bool to)
74258+{
74259+
74260+#ifdef CONFIG_PAX_USERCOPY
74261+ struct page *page;
74262+ struct kmem_cache *cachep = NULL;
74263+ struct slab *slabp;
74264+ unsigned int objnr;
74265+ unsigned long offset;
74266+ const char *type;
74267+
74268+ if (!n)
74269+ return;
74270+
74271+ type = "<null>";
74272+ if (ZERO_OR_NULL_PTR(ptr))
74273+ goto report;
74274+
74275+ if (!virt_addr_valid(ptr))
74276+ return;
74277+
74278+ page = virt_to_head_page(ptr);
74279+
74280+ type = "<process stack>";
74281+ if (!PageSlab(page)) {
74282+ if (object_is_on_stack(ptr, n) == -1)
74283+ goto report;
74284+ return;
74285+ }
74286+
74287+ cachep = page_get_cache(page);
74288+ type = cachep->name;
74289+ if (!(cachep->flags & SLAB_USERCOPY))
74290+ goto report;
74291+
74292+ slabp = page_get_slab(page);
74293+ objnr = obj_to_index(cachep, slabp, ptr);
74294+ BUG_ON(objnr >= cachep->num);
74295+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74296+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74297+ return;
74298+
74299+report:
74300+ pax_report_usercopy(ptr, n, to, type);
74301+#endif
74302+
74303+}
74304+EXPORT_SYMBOL(check_object_size);
74305+
74306 /**
74307 * ksize - get the actual amount of memory allocated for a given object
74308 * @objp: Pointer to the object
74309diff --git a/mm/slob.c b/mm/slob.c
74310index 8105be4..e045f96 100644
74311--- a/mm/slob.c
74312+++ b/mm/slob.c
74313@@ -29,7 +29,7 @@
74314 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74315 * alloc_pages() directly, allocating compound pages so the page order
74316 * does not have to be separately tracked, and also stores the exact
74317- * allocation size in page->private so that it can be used to accurately
74318+ * allocation size in slob_page->size so that it can be used to accurately
74319 * provide ksize(). These objects are detected in kfree() because slob_page()
74320 * is false for them.
74321 *
74322@@ -58,6 +58,7 @@
74323 */
74324
74325 #include <linux/kernel.h>
74326+#include <linux/sched.h>
74327 #include <linux/slab.h>
74328 #include <linux/mm.h>
74329 #include <linux/swap.h> /* struct reclaim_state */
74330@@ -102,7 +103,8 @@ struct slob_page {
74331 unsigned long flags; /* mandatory */
74332 atomic_t _count; /* mandatory */
74333 slobidx_t units; /* free units left in page */
74334- unsigned long pad[2];
74335+ unsigned long pad[1];
74336+ unsigned long size; /* size when >=PAGE_SIZE */
74337 slob_t *free; /* first free slob_t in page */
74338 struct list_head list; /* linked list of free pages */
74339 };
74340@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
74341 */
74342 static inline int is_slob_page(struct slob_page *sp)
74343 {
74344- return PageSlab((struct page *)sp);
74345+ return PageSlab((struct page *)sp) && !sp->size;
74346 }
74347
74348 static inline void set_slob_page(struct slob_page *sp)
74349@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
74350
74351 static inline struct slob_page *slob_page(const void *addr)
74352 {
74353- return (struct slob_page *)virt_to_page(addr);
74354+ return (struct slob_page *)virt_to_head_page(addr);
74355 }
74356
74357 /*
74358@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
74359 /*
74360 * Return the size of a slob block.
74361 */
74362-static slobidx_t slob_units(slob_t *s)
74363+static slobidx_t slob_units(const slob_t *s)
74364 {
74365 if (s->units > 0)
74366 return s->units;
74367@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
74368 /*
74369 * Return the next free slob block pointer after this one.
74370 */
74371-static slob_t *slob_next(slob_t *s)
74372+static slob_t *slob_next(const slob_t *s)
74373 {
74374 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74375 slobidx_t next;
74376@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
74377 /*
74378 * Returns true if s is the last free block in its page.
74379 */
74380-static int slob_last(slob_t *s)
74381+static int slob_last(const slob_t *s)
74382 {
74383 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74384 }
74385@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
74386 if (!page)
74387 return NULL;
74388
74389+ set_slob_page(page);
74390 return page_address(page);
74391 }
74392
74393@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
74394 if (!b)
74395 return NULL;
74396 sp = slob_page(b);
74397- set_slob_page(sp);
74398
74399 spin_lock_irqsave(&slob_lock, flags);
74400 sp->units = SLOB_UNITS(PAGE_SIZE);
74401 sp->free = b;
74402+ sp->size = 0;
74403 INIT_LIST_HEAD(&sp->list);
74404 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74405 set_slob_page_free(sp, slob_list);
74406@@ -476,10 +479,9 @@ out:
74407 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74408 */
74409
74410-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74411+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74412 {
74413- unsigned int *m;
74414- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74415+ slob_t *m;
74416 void *ret;
74417
74418 gfp &= gfp_allowed_mask;
74419@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74420
74421 if (!m)
74422 return NULL;
74423- *m = size;
74424+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74425+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74426+ m[0].units = size;
74427+ m[1].units = align;
74428 ret = (void *)m + align;
74429
74430 trace_kmalloc_node(_RET_IP_, ret,
74431@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74432 gfp |= __GFP_COMP;
74433 ret = slob_new_pages(gfp, order, node);
74434 if (ret) {
74435- struct page *page;
74436- page = virt_to_page(ret);
74437- page->private = size;
74438+ struct slob_page *sp;
74439+ sp = slob_page(ret);
74440+ sp->size = size;
74441 }
74442
74443 trace_kmalloc_node(_RET_IP_, ret,
74444 size, PAGE_SIZE << order, gfp, node);
74445 }
74446
74447- kmemleak_alloc(ret, size, 1, gfp);
74448+ return ret;
74449+}
74450+
74451+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74452+{
74453+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74454+ void *ret = __kmalloc_node_align(size, gfp, node, align);
74455+
74456+ if (!ZERO_OR_NULL_PTR(ret))
74457+ kmemleak_alloc(ret, size, 1, gfp);
74458 return ret;
74459 }
74460 EXPORT_SYMBOL(__kmalloc_node);
74461@@ -533,13 +547,92 @@ void kfree(const void *block)
74462 sp = slob_page(block);
74463 if (is_slob_page(sp)) {
74464 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74465- unsigned int *m = (unsigned int *)(block - align);
74466- slob_free(m, *m + align);
74467- } else
74468+ slob_t *m = (slob_t *)(block - align);
74469+ slob_free(m, m[0].units + align);
74470+ } else {
74471+ clear_slob_page(sp);
74472+ free_slob_page(sp);
74473+ sp->size = 0;
74474 put_page(&sp->page);
74475+ }
74476 }
74477 EXPORT_SYMBOL(kfree);
74478
74479+void check_object_size(const void *ptr, unsigned long n, bool to)
74480+{
74481+
74482+#ifdef CONFIG_PAX_USERCOPY
74483+ struct slob_page *sp;
74484+ const slob_t *free;
74485+ const void *base;
74486+ unsigned long flags;
74487+ const char *type;
74488+
74489+ if (!n)
74490+ return;
74491+
74492+ type = "<null>";
74493+ if (ZERO_OR_NULL_PTR(ptr))
74494+ goto report;
74495+
74496+ if (!virt_addr_valid(ptr))
74497+ return;
74498+
74499+ type = "<process stack>";
74500+ sp = slob_page(ptr);
74501+ if (!PageSlab((struct page *)sp)) {
74502+ if (object_is_on_stack(ptr, n) == -1)
74503+ goto report;
74504+ return;
74505+ }
74506+
74507+ type = "<slob>";
74508+ if (sp->size) {
74509+ base = page_address(&sp->page);
74510+ if (base <= ptr && n <= sp->size - (ptr - base))
74511+ return;
74512+ goto report;
74513+ }
74514+
74515+ /* some tricky double walking to find the chunk */
74516+ spin_lock_irqsave(&slob_lock, flags);
74517+ base = (void *)((unsigned long)ptr & PAGE_MASK);
74518+ free = sp->free;
74519+
74520+ while (!slob_last(free) && (void *)free <= ptr) {
74521+ base = free + slob_units(free);
74522+ free = slob_next(free);
74523+ }
74524+
74525+ while (base < (void *)free) {
74526+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74527+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
74528+ int offset;
74529+
74530+ if (ptr < base + align)
74531+ break;
74532+
74533+ offset = ptr - base - align;
74534+ if (offset >= m) {
74535+ base += size;
74536+ continue;
74537+ }
74538+
74539+ if (n > m - offset)
74540+ break;
74541+
74542+ spin_unlock_irqrestore(&slob_lock, flags);
74543+ return;
74544+ }
74545+
74546+ spin_unlock_irqrestore(&slob_lock, flags);
74547+report:
74548+ pax_report_usercopy(ptr, n, to, type);
74549+#endif
74550+
74551+}
74552+EXPORT_SYMBOL(check_object_size);
74553+
74554 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74555 size_t ksize(const void *block)
74556 {
74557@@ -552,10 +645,10 @@ size_t ksize(const void *block)
74558 sp = slob_page(block);
74559 if (is_slob_page(sp)) {
74560 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74561- unsigned int *m = (unsigned int *)(block - align);
74562- return SLOB_UNITS(*m) * SLOB_UNIT;
74563+ slob_t *m = (slob_t *)(block - align);
74564+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74565 } else
74566- return sp->page.private;
74567+ return sp->size;
74568 }
74569 EXPORT_SYMBOL(ksize);
74570
74571@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74572 {
74573 struct kmem_cache *c;
74574
74575+#ifdef CONFIG_PAX_USERCOPY
74576+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
74577+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74578+#else
74579 c = slob_alloc(sizeof(struct kmem_cache),
74580 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74581+#endif
74582
74583 if (c) {
74584 c->name = name;
74585@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
74586
74587 lockdep_trace_alloc(flags);
74588
74589+#ifdef CONFIG_PAX_USERCOPY
74590+ b = __kmalloc_node_align(c->size, flags, node, c->align);
74591+#else
74592 if (c->size < PAGE_SIZE) {
74593 b = slob_alloc(c->size, flags, c->align, node);
74594 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74595 SLOB_UNITS(c->size) * SLOB_UNIT,
74596 flags, node);
74597 } else {
74598+ struct slob_page *sp;
74599+
74600 b = slob_new_pages(flags, get_order(c->size), node);
74601+ sp = slob_page(b);
74602+ sp->size = c->size;
74603 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74604 PAGE_SIZE << get_order(c->size),
74605 flags, node);
74606 }
74607+#endif
74608
74609 if (c->ctor)
74610 c->ctor(b);
74611@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
74612
74613 static void __kmem_cache_free(void *b, int size)
74614 {
74615- if (size < PAGE_SIZE)
74616+ struct slob_page *sp = slob_page(b);
74617+
74618+ if (is_slob_page(sp))
74619 slob_free(b, size);
74620- else
74621+ else {
74622+ clear_slob_page(sp);
74623+ free_slob_page(sp);
74624+ sp->size = 0;
74625 slob_free_pages(b, get_order(size));
74626+ }
74627 }
74628
74629 static void kmem_rcu_free(struct rcu_head *head)
74630@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
74631
74632 void kmem_cache_free(struct kmem_cache *c, void *b)
74633 {
74634+ int size = c->size;
74635+
74636+#ifdef CONFIG_PAX_USERCOPY
74637+ if (size + c->align < PAGE_SIZE) {
74638+ size += c->align;
74639+ b -= c->align;
74640+ }
74641+#endif
74642+
74643 kmemleak_free_recursive(b, c->flags);
74644 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74645 struct slob_rcu *slob_rcu;
74646- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
74647- slob_rcu->size = c->size;
74648+ slob_rcu = b + (size - sizeof(struct slob_rcu));
74649+ slob_rcu->size = size;
74650 call_rcu(&slob_rcu->head, kmem_rcu_free);
74651 } else {
74652- __kmem_cache_free(b, c->size);
74653+ __kmem_cache_free(b, size);
74654 }
74655
74656+#ifdef CONFIG_PAX_USERCOPY
74657+ trace_kfree(_RET_IP_, b);
74658+#else
74659 trace_kmem_cache_free(_RET_IP_, b);
74660+#endif
74661+
74662 }
74663 EXPORT_SYMBOL(kmem_cache_free);
74664
74665diff --git a/mm/slub.c b/mm/slub.c
74666index 0342a5d..8180ae9 100644
74667--- a/mm/slub.c
74668+++ b/mm/slub.c
74669@@ -208,7 +208,7 @@ struct track {
74670
74671 enum track_item { TRACK_ALLOC, TRACK_FREE };
74672
74673-#ifdef CONFIG_SYSFS
74674+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74675 static int sysfs_slab_add(struct kmem_cache *);
74676 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74677 static void sysfs_slab_remove(struct kmem_cache *);
74678@@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
74679 if (!t->addr)
74680 return;
74681
74682- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74683+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74684 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
74685 #ifdef CONFIG_STACKTRACE
74686 {
74687@@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
74688
74689 page = virt_to_head_page(x);
74690
74691+ BUG_ON(!PageSlab(page));
74692+
74693 slab_free(s, page, x, _RET_IP_);
74694
74695 trace_kmem_cache_free(_RET_IP_, x);
74696@@ -2604,7 +2606,7 @@ static int slub_min_objects;
74697 * Merge control. If this is set then no merging of slab caches will occur.
74698 * (Could be removed. This was introduced to pacify the merge skeptics.)
74699 */
74700-static int slub_nomerge;
74701+static int slub_nomerge = 1;
74702
74703 /*
74704 * Calculate the order of allocation given an slab object size.
74705@@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
74706 else
74707 s->cpu_partial = 30;
74708
74709- s->refcount = 1;
74710+ atomic_set(&s->refcount, 1);
74711 #ifdef CONFIG_NUMA
74712 s->remote_node_defrag_ratio = 1000;
74713 #endif
74714@@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
74715 void kmem_cache_destroy(struct kmem_cache *s)
74716 {
74717 down_write(&slub_lock);
74718- s->refcount--;
74719- if (!s->refcount) {
74720+ if (atomic_dec_and_test(&s->refcount)) {
74721 list_del(&s->list);
74722 up_write(&slub_lock);
74723 if (kmem_cache_close(s)) {
74724@@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
74725 EXPORT_SYMBOL(__kmalloc_node);
74726 #endif
74727
74728+void check_object_size(const void *ptr, unsigned long n, bool to)
74729+{
74730+
74731+#ifdef CONFIG_PAX_USERCOPY
74732+ struct page *page;
74733+ struct kmem_cache *s = NULL;
74734+ unsigned long offset;
74735+ const char *type;
74736+
74737+ if (!n)
74738+ return;
74739+
74740+ type = "<null>";
74741+ if (ZERO_OR_NULL_PTR(ptr))
74742+ goto report;
74743+
74744+ if (!virt_addr_valid(ptr))
74745+ return;
74746+
74747+ page = virt_to_head_page(ptr);
74748+
74749+ type = "<process stack>";
74750+ if (!PageSlab(page)) {
74751+ if (object_is_on_stack(ptr, n) == -1)
74752+ goto report;
74753+ return;
74754+ }
74755+
74756+ s = page->slab;
74757+ type = s->name;
74758+ if (!(s->flags & SLAB_USERCOPY))
74759+ goto report;
74760+
74761+ offset = (ptr - page_address(page)) % s->size;
74762+ if (offset <= s->objsize && n <= s->objsize - offset)
74763+ return;
74764+
74765+report:
74766+ pax_report_usercopy(ptr, n, to, type);
74767+#endif
74768+
74769+}
74770+EXPORT_SYMBOL(check_object_size);
74771+
74772 size_t ksize(const void *object)
74773 {
74774 struct page *page;
74775@@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
74776 int node;
74777
74778 list_add(&s->list, &slab_caches);
74779- s->refcount = -1;
74780+ atomic_set(&s->refcount, -1);
74781
74782 for_each_node_state(node, N_NORMAL_MEMORY) {
74783 struct kmem_cache_node *n = get_node(s, node);
74784@@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
74785
74786 /* Caches that are not of the two-to-the-power-of size */
74787 if (KMALLOC_MIN_SIZE <= 32) {
74788- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
74789+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
74790 caches++;
74791 }
74792
74793 if (KMALLOC_MIN_SIZE <= 64) {
74794- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
74795+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74796 caches++;
74797 }
74798
74799 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74800- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74801+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74802 caches++;
74803 }
74804
74805@@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
74806 /*
74807 * We may have set a slab to be unmergeable during bootstrap.
74808 */
74809- if (s->refcount < 0)
74810+ if (atomic_read(&s->refcount) < 0)
74811 return 1;
74812
74813 return 0;
74814@@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74815 down_write(&slub_lock);
74816 s = find_mergeable(size, align, flags, name, ctor);
74817 if (s) {
74818- s->refcount++;
74819+ atomic_inc(&s->refcount);
74820 /*
74821 * Adjust the object sizes so that we clear
74822 * the complete object on kzalloc.
74823@@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74824 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
74825
74826 if (sysfs_slab_alias(s, name)) {
74827- s->refcount--;
74828+ atomic_dec(&s->refcount);
74829 goto err;
74830 }
74831 up_write(&slub_lock);
74832@@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
74833 }
74834 #endif
74835
74836-#ifdef CONFIG_SYSFS
74837+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74838 static int count_inuse(struct page *page)
74839 {
74840 return page->inuse;
74841@@ -4429,12 +4474,12 @@ static void resiliency_test(void)
74842 validate_slab_cache(kmalloc_caches[9]);
74843 }
74844 #else
74845-#ifdef CONFIG_SYSFS
74846+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74847 static void resiliency_test(void) {};
74848 #endif
74849 #endif
74850
74851-#ifdef CONFIG_SYSFS
74852+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74853 enum slab_stat_type {
74854 SL_ALL, /* All slabs */
74855 SL_PARTIAL, /* Only partially allocated slabs */
74856@@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
74857
74858 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74859 {
74860- return sprintf(buf, "%d\n", s->refcount - 1);
74861+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74862 }
74863 SLAB_ATTR_RO(aliases);
74864
74865@@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
74866 return name;
74867 }
74868
74869+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74870 static int sysfs_slab_add(struct kmem_cache *s)
74871 {
74872 int err;
74873@@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
74874 kobject_del(&s->kobj);
74875 kobject_put(&s->kobj);
74876 }
74877+#endif
74878
74879 /*
74880 * Need to buffer aliases during bootup until sysfs becomes
74881@@ -5319,6 +5366,7 @@ struct saved_alias {
74882
74883 static struct saved_alias *alias_list;
74884
74885+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74886 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74887 {
74888 struct saved_alias *al;
74889@@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74890 alias_list = al;
74891 return 0;
74892 }
74893+#endif
74894
74895 static int __init slab_sysfs_init(void)
74896 {
74897diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74898index 1b7e22a..3fcd4f3 100644
74899--- a/mm/sparse-vmemmap.c
74900+++ b/mm/sparse-vmemmap.c
74901@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74902 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74903 if (!p)
74904 return NULL;
74905- pud_populate(&init_mm, pud, p);
74906+ pud_populate_kernel(&init_mm, pud, p);
74907 }
74908 return pud;
74909 }
74910@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74911 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74912 if (!p)
74913 return NULL;
74914- pgd_populate(&init_mm, pgd, p);
74915+ pgd_populate_kernel(&init_mm, pgd, p);
74916 }
74917 return pgd;
74918 }
74919diff --git a/mm/swap.c b/mm/swap.c
74920index 14380e9..e244704 100644
74921--- a/mm/swap.c
74922+++ b/mm/swap.c
74923@@ -30,6 +30,7 @@
74924 #include <linux/backing-dev.h>
74925 #include <linux/memcontrol.h>
74926 #include <linux/gfp.h>
74927+#include <linux/hugetlb.h>
74928
74929 #include "internal.h"
74930
74931@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
74932
74933 __page_cache_release(page);
74934 dtor = get_compound_page_dtor(page);
74935+ if (!PageHuge(page))
74936+ BUG_ON(dtor != free_compound_page);
74937 (*dtor)(page);
74938 }
74939
74940diff --git a/mm/swapfile.c b/mm/swapfile.c
74941index f31b29d..8bdcae2 100644
74942--- a/mm/swapfile.c
74943+++ b/mm/swapfile.c
74944@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
74945
74946 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74947 /* Activity counter to indicate that a swapon or swapoff has occurred */
74948-static atomic_t proc_poll_event = ATOMIC_INIT(0);
74949+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74950
74951 static inline unsigned char swap_count(unsigned char ent)
74952 {
74953@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74954 }
74955 filp_close(swap_file, NULL);
74956 err = 0;
74957- atomic_inc(&proc_poll_event);
74958+ atomic_inc_unchecked(&proc_poll_event);
74959 wake_up_interruptible(&proc_poll_wait);
74960
74961 out_dput:
74962@@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74963
74964 poll_wait(file, &proc_poll_wait, wait);
74965
74966- if (seq->poll_event != atomic_read(&proc_poll_event)) {
74967- seq->poll_event = atomic_read(&proc_poll_event);
74968+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74969+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74970 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74971 }
74972
74973@@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74974 return ret;
74975
74976 seq = file->private_data;
74977- seq->poll_event = atomic_read(&proc_poll_event);
74978+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74979 return 0;
74980 }
74981
74982@@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74983 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74984
74985 mutex_unlock(&swapon_mutex);
74986- atomic_inc(&proc_poll_event);
74987+ atomic_inc_unchecked(&proc_poll_event);
74988 wake_up_interruptible(&proc_poll_wait);
74989
74990 if (S_ISREG(inode->i_mode))
74991diff --git a/mm/util.c b/mm/util.c
74992index 136ac4f..f917fa9 100644
74993--- a/mm/util.c
74994+++ b/mm/util.c
74995@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74996 void arch_pick_mmap_layout(struct mm_struct *mm)
74997 {
74998 mm->mmap_base = TASK_UNMAPPED_BASE;
74999+
75000+#ifdef CONFIG_PAX_RANDMMAP
75001+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75002+ mm->mmap_base += mm->delta_mmap;
75003+#endif
75004+
75005 mm->get_unmapped_area = arch_get_unmapped_area;
75006 mm->unmap_area = arch_unmap_area;
75007 }
75008diff --git a/mm/vmalloc.c b/mm/vmalloc.c
75009index 86ce9a5..e0bd080 100644
75010--- a/mm/vmalloc.c
75011+++ b/mm/vmalloc.c
75012@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
75013
75014 pte = pte_offset_kernel(pmd, addr);
75015 do {
75016- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75017- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75018+
75019+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75020+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75021+ BUG_ON(!pte_exec(*pte));
75022+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75023+ continue;
75024+ }
75025+#endif
75026+
75027+ {
75028+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75029+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75030+ }
75031 } while (pte++, addr += PAGE_SIZE, addr != end);
75032 }
75033
75034@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75035 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75036 {
75037 pte_t *pte;
75038+ int ret = -ENOMEM;
75039
75040 /*
75041 * nr is a running index into the array which helps higher level
75042@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75043 pte = pte_alloc_kernel(pmd, addr);
75044 if (!pte)
75045 return -ENOMEM;
75046+
75047+ pax_open_kernel();
75048 do {
75049 struct page *page = pages[*nr];
75050
75051- if (WARN_ON(!pte_none(*pte)))
75052- return -EBUSY;
75053- if (WARN_ON(!page))
75054- return -ENOMEM;
75055+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75056+ if (pgprot_val(prot) & _PAGE_NX)
75057+#endif
75058+
75059+ if (WARN_ON(!pte_none(*pte))) {
75060+ ret = -EBUSY;
75061+ goto out;
75062+ }
75063+ if (WARN_ON(!page)) {
75064+ ret = -ENOMEM;
75065+ goto out;
75066+ }
75067 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75068 (*nr)++;
75069 } while (pte++, addr += PAGE_SIZE, addr != end);
75070- return 0;
75071+ ret = 0;
75072+out:
75073+ pax_close_kernel();
75074+ return ret;
75075 }
75076
75077 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75078@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75079 pmd_t *pmd;
75080 unsigned long next;
75081
75082- pmd = pmd_alloc(&init_mm, pud, addr);
75083+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
75084 if (!pmd)
75085 return -ENOMEM;
75086 do {
75087@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
75088 pud_t *pud;
75089 unsigned long next;
75090
75091- pud = pud_alloc(&init_mm, pgd, addr);
75092+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
75093 if (!pud)
75094 return -ENOMEM;
75095 do {
75096@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
75097 * and fall back on vmalloc() if that fails. Others
75098 * just put it in the vmalloc space.
75099 */
75100-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75101+#ifdef CONFIG_MODULES
75102+#ifdef MODULES_VADDR
75103 unsigned long addr = (unsigned long)x;
75104 if (addr >= MODULES_VADDR && addr < MODULES_END)
75105 return 1;
75106 #endif
75107+
75108+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75109+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75110+ return 1;
75111+#endif
75112+
75113+#endif
75114+
75115 return is_vmalloc_addr(x);
75116 }
75117
75118@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
75119
75120 if (!pgd_none(*pgd)) {
75121 pud_t *pud = pud_offset(pgd, addr);
75122+#ifdef CONFIG_X86
75123+ if (!pud_large(*pud))
75124+#endif
75125 if (!pud_none(*pud)) {
75126 pmd_t *pmd = pmd_offset(pud, addr);
75127+#ifdef CONFIG_X86
75128+ if (!pmd_large(*pmd))
75129+#endif
75130 if (!pmd_none(*pmd)) {
75131 pte_t *ptep, pte;
75132
75133@@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
75134 struct vm_struct *area;
75135
75136 BUG_ON(in_interrupt());
75137+
75138+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75139+ if (flags & VM_KERNEXEC) {
75140+ if (start != VMALLOC_START || end != VMALLOC_END)
75141+ return NULL;
75142+ start = (unsigned long)MODULES_EXEC_VADDR;
75143+ end = (unsigned long)MODULES_EXEC_END;
75144+ }
75145+#endif
75146+
75147 if (flags & VM_IOREMAP) {
75148 int bit = fls(size);
75149
75150@@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
75151 if (count > totalram_pages)
75152 return NULL;
75153
75154+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75155+ if (!(pgprot_val(prot) & _PAGE_NX))
75156+ flags |= VM_KERNEXEC;
75157+#endif
75158+
75159 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75160 __builtin_return_address(0));
75161 if (!area)
75162@@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
75163 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
75164 goto fail;
75165
75166+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75167+ if (!(pgprot_val(prot) & _PAGE_NX))
75168+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
75169+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
75170+ else
75171+#endif
75172+
75173 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
75174 start, end, node, gfp_mask, caller);
75175 if (!area)
75176@@ -1825,10 +1887,9 @@ EXPORT_SYMBOL(vzalloc_node);
75177 * For tight control over page level allocator and protection flags
75178 * use __vmalloc() instead.
75179 */
75180-
75181 void *vmalloc_exec(unsigned long size)
75182 {
75183- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75184+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
75185 -1, __builtin_return_address(0));
75186 }
75187
75188@@ -2123,6 +2184,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
75189 unsigned long uaddr = vma->vm_start;
75190 unsigned long usize = vma->vm_end - vma->vm_start;
75191
75192+ BUG_ON(vma->vm_mirror);
75193+
75194 if ((PAGE_SIZE-1) & (unsigned long)addr)
75195 return -EINVAL;
75196
75197diff --git a/mm/vmstat.c b/mm/vmstat.c
75198index f600557..1459fc8 100644
75199--- a/mm/vmstat.c
75200+++ b/mm/vmstat.c
75201@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
75202 *
75203 * vm_stat contains the global counters
75204 */
75205-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75206+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75207 EXPORT_SYMBOL(vm_stat);
75208
75209 #ifdef CONFIG_SMP
75210@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
75211 v = p->vm_stat_diff[i];
75212 p->vm_stat_diff[i] = 0;
75213 local_irq_restore(flags);
75214- atomic_long_add(v, &zone->vm_stat[i]);
75215+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75216 global_diff[i] += v;
75217 #ifdef CONFIG_NUMA
75218 /* 3 seconds idle till flush */
75219@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
75220
75221 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75222 if (global_diff[i])
75223- atomic_long_add(global_diff[i], &vm_stat[i]);
75224+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75225 }
75226
75227 #endif
75228@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
75229 start_cpu_timer(cpu);
75230 #endif
75231 #ifdef CONFIG_PROC_FS
75232- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75233- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75234- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75235- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75236+ {
75237+ mode_t gr_mode = S_IRUGO;
75238+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75239+ gr_mode = S_IRUSR;
75240+#endif
75241+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75242+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75243+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75244+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75245+#else
75246+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75247+#endif
75248+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75249+ }
75250 #endif
75251 return 0;
75252 }
75253diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
75254index efea35b..9c8dd0b 100644
75255--- a/net/8021q/vlan.c
75256+++ b/net/8021q/vlan.c
75257@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
75258 err = -EPERM;
75259 if (!capable(CAP_NET_ADMIN))
75260 break;
75261- if ((args.u.name_type >= 0) &&
75262- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75263+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75264 struct vlan_net *vn;
75265
75266 vn = net_generic(net, vlan_net_id);
75267diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
75268index fccae26..e7ece2f 100644
75269--- a/net/9p/trans_fd.c
75270+++ b/net/9p/trans_fd.c
75271@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
75272 oldfs = get_fs();
75273 set_fs(get_ds());
75274 /* The cast to a user pointer is valid due to the set_fs() */
75275- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75276+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75277 set_fs(oldfs);
75278
75279 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75280diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
75281index 876fbe8..8bbea9f 100644
75282--- a/net/atm/atm_misc.c
75283+++ b/net/atm/atm_misc.c
75284@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
75285 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75286 return 1;
75287 atm_return(vcc, truesize);
75288- atomic_inc(&vcc->stats->rx_drop);
75289+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75290 return 0;
75291 }
75292 EXPORT_SYMBOL(atm_charge);
75293@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
75294 }
75295 }
75296 atm_return(vcc, guess);
75297- atomic_inc(&vcc->stats->rx_drop);
75298+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75299 return NULL;
75300 }
75301 EXPORT_SYMBOL(atm_alloc_charge);
75302@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
75303
75304 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75305 {
75306-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75307+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75308 __SONET_ITEMS
75309 #undef __HANDLE_ITEM
75310 }
75311@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
75312
75313 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75314 {
75315-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75316+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75317 __SONET_ITEMS
75318 #undef __HANDLE_ITEM
75319 }
75320diff --git a/net/atm/lec.h b/net/atm/lec.h
75321index dfc0719..47c5322 100644
75322--- a/net/atm/lec.h
75323+++ b/net/atm/lec.h
75324@@ -48,7 +48,7 @@ struct lane2_ops {
75325 const u8 *tlvs, u32 sizeoftlvs);
75326 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75327 const u8 *tlvs, u32 sizeoftlvs);
75328-};
75329+} __no_const;
75330
75331 /*
75332 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
75333diff --git a/net/atm/mpc.h b/net/atm/mpc.h
75334index 0919a88..a23d54e 100644
75335--- a/net/atm/mpc.h
75336+++ b/net/atm/mpc.h
75337@@ -33,7 +33,7 @@ struct mpoa_client {
75338 struct mpc_parameters parameters; /* parameters for this client */
75339
75340 const struct net_device_ops *old_ops;
75341- struct net_device_ops new_ops;
75342+ net_device_ops_no_const new_ops;
75343 };
75344
75345
75346diff --git a/net/atm/proc.c b/net/atm/proc.c
75347index 0d020de..011c7bb 100644
75348--- a/net/atm/proc.c
75349+++ b/net/atm/proc.c
75350@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
75351 const struct k_atm_aal_stats *stats)
75352 {
75353 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
75354- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
75355- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
75356- atomic_read(&stats->rx_drop));
75357+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75358+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75359+ atomic_read_unchecked(&stats->rx_drop));
75360 }
75361
75362 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75363diff --git a/net/atm/resources.c b/net/atm/resources.c
75364index 23f45ce..c748f1a 100644
75365--- a/net/atm/resources.c
75366+++ b/net/atm/resources.c
75367@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
75368 static void copy_aal_stats(struct k_atm_aal_stats *from,
75369 struct atm_aal_stats *to)
75370 {
75371-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75372+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75373 __AAL_STAT_ITEMS
75374 #undef __HANDLE_ITEM
75375 }
75376@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
75377 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75378 struct atm_aal_stats *to)
75379 {
75380-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75381+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75382 __AAL_STAT_ITEMS
75383 #undef __HANDLE_ITEM
75384 }
75385diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
75386index 3512e25..2b33401 100644
75387--- a/net/batman-adv/bat_iv_ogm.c
75388+++ b/net/batman-adv/bat_iv_ogm.c
75389@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75390
75391 /* change sequence number to network order */
75392 batman_ogm_packet->seqno =
75393- htonl((uint32_t)atomic_read(&hard_iface->seqno));
75394+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75395
75396 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
75397 batman_ogm_packet->tt_crc = htons((uint16_t)
75398@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75399 else
75400 batman_ogm_packet->gw_flags = NO_FLAGS;
75401
75402- atomic_inc(&hard_iface->seqno);
75403+ atomic_inc_unchecked(&hard_iface->seqno);
75404
75405 slide_own_bcast_window(hard_iface);
75406 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
75407@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
75408 return;
75409
75410 /* could be changed by schedule_own_packet() */
75411- if_incoming_seqno = atomic_read(&if_incoming->seqno);
75412+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
75413
75414 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
75415
75416diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
75417index 7704df4..beb4e16 100644
75418--- a/net/batman-adv/hard-interface.c
75419+++ b/net/batman-adv/hard-interface.c
75420@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
75421 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
75422 dev_add_pack(&hard_iface->batman_adv_ptype);
75423
75424- atomic_set(&hard_iface->seqno, 1);
75425- atomic_set(&hard_iface->frag_seqno, 1);
75426+ atomic_set_unchecked(&hard_iface->seqno, 1);
75427+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
75428 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
75429 hard_iface->net_dev->name);
75430
75431diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
75432index 987c75a..20d6f36 100644
75433--- a/net/batman-adv/soft-interface.c
75434+++ b/net/batman-adv/soft-interface.c
75435@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
75436
75437 /* set broadcast sequence number */
75438 bcast_packet->seqno =
75439- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
75440+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
75441
75442 add_bcast_packet_to_list(bat_priv, skb, 1);
75443
75444@@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
75445 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
75446
75447 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
75448- atomic_set(&bat_priv->bcast_seqno, 1);
75449+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
75450 atomic_set(&bat_priv->ttvn, 0);
75451 atomic_set(&bat_priv->tt_local_changes, 0);
75452 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
75453diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
75454index e9eb043..d174eeb 100644
75455--- a/net/batman-adv/types.h
75456+++ b/net/batman-adv/types.h
75457@@ -38,8 +38,8 @@ struct hard_iface {
75458 int16_t if_num;
75459 char if_status;
75460 struct net_device *net_dev;
75461- atomic_t seqno;
75462- atomic_t frag_seqno;
75463+ atomic_unchecked_t seqno;
75464+ atomic_unchecked_t frag_seqno;
75465 unsigned char *packet_buff;
75466 int packet_len;
75467 struct kobject *hardif_obj;
75468@@ -154,7 +154,7 @@ struct bat_priv {
75469 atomic_t orig_interval; /* uint */
75470 atomic_t hop_penalty; /* uint */
75471 atomic_t log_level; /* uint */
75472- atomic_t bcast_seqno;
75473+ atomic_unchecked_t bcast_seqno;
75474 atomic_t bcast_queue_left;
75475 atomic_t batman_queue_left;
75476 atomic_t ttvn; /* translation table version number */
75477diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
75478index 07d1c1d..7e9bea9 100644
75479--- a/net/batman-adv/unicast.c
75480+++ b/net/batman-adv/unicast.c
75481@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
75482 frag1->flags = UNI_FRAG_HEAD | large_tail;
75483 frag2->flags = large_tail;
75484
75485- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
75486+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
75487 frag1->seqno = htons(seqno - 1);
75488 frag2->seqno = htons(seqno);
75489
75490diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
75491index 280953b..cd219bb 100644
75492--- a/net/bluetooth/hci_conn.c
75493+++ b/net/bluetooth/hci_conn.c
75494@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
75495 memset(&cp, 0, sizeof(cp));
75496
75497 cp.handle = cpu_to_le16(conn->handle);
75498- memcpy(cp.ltk, ltk, sizeof(ltk));
75499+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
75500
75501 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
75502 }
75503diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
75504index 32d338c..d24bcdb 100644
75505--- a/net/bluetooth/l2cap_core.c
75506+++ b/net/bluetooth/l2cap_core.c
75507@@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
75508 break;
75509
75510 case L2CAP_CONF_RFC:
75511- if (olen == sizeof(rfc))
75512- memcpy(&rfc, (void *)val, olen);
75513+ if (olen != sizeof(rfc))
75514+ break;
75515+
75516+ memcpy(&rfc, (void *)val, olen);
75517
75518 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
75519 rfc.mode != chan->mode)
75520@@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
75521
75522 switch (type) {
75523 case L2CAP_CONF_RFC:
75524- if (olen == sizeof(rfc))
75525- memcpy(&rfc, (void *)val, olen);
75526+ if (olen != sizeof(rfc))
75527+ break;
75528+
75529+ memcpy(&rfc, (void *)val, olen);
75530 goto done;
75531 }
75532 }
75533diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
75534index 5449294..7da9a5f 100644
75535--- a/net/bridge/netfilter/ebt_ulog.c
75536+++ b/net/bridge/netfilter/ebt_ulog.c
75537@@ -96,6 +96,7 @@ static void ulog_timer(unsigned long data)
75538 spin_unlock_bh(&ulog_buffers[data].lock);
75539 }
75540
75541+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
75542 static struct sk_buff *ulog_alloc_skb(unsigned int size)
75543 {
75544 struct sk_buff *skb;
75545diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
75546index 5fe2ff3..10968b5 100644
75547--- a/net/bridge/netfilter/ebtables.c
75548+++ b/net/bridge/netfilter/ebtables.c
75549@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
75550 tmp.valid_hooks = t->table->valid_hooks;
75551 }
75552 mutex_unlock(&ebt_mutex);
75553- if (copy_to_user(user, &tmp, *len) != 0){
75554+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
75555 BUGPRINT("c2u Didn't work\n");
75556 ret = -EFAULT;
75557 break;
75558diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
75559index a97d97a..6f679ed 100644
75560--- a/net/caif/caif_socket.c
75561+++ b/net/caif/caif_socket.c
75562@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
75563 #ifdef CONFIG_DEBUG_FS
75564 struct debug_fs_counter {
75565 atomic_t caif_nr_socks;
75566- atomic_t caif_sock_create;
75567- atomic_t num_connect_req;
75568- atomic_t num_connect_resp;
75569- atomic_t num_connect_fail_resp;
75570- atomic_t num_disconnect;
75571- atomic_t num_remote_shutdown_ind;
75572- atomic_t num_tx_flow_off_ind;
75573- atomic_t num_tx_flow_on_ind;
75574- atomic_t num_rx_flow_off;
75575- atomic_t num_rx_flow_on;
75576+ atomic_unchecked_t caif_sock_create;
75577+ atomic_unchecked_t num_connect_req;
75578+ atomic_unchecked_t num_connect_resp;
75579+ atomic_unchecked_t num_connect_fail_resp;
75580+ atomic_unchecked_t num_disconnect;
75581+ atomic_unchecked_t num_remote_shutdown_ind;
75582+ atomic_unchecked_t num_tx_flow_off_ind;
75583+ atomic_unchecked_t num_tx_flow_on_ind;
75584+ atomic_unchecked_t num_rx_flow_off;
75585+ atomic_unchecked_t num_rx_flow_on;
75586 };
75587 static struct debug_fs_counter cnt;
75588 #define dbfs_atomic_inc(v) atomic_inc_return(v)
75589+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
75590 #define dbfs_atomic_dec(v) atomic_dec_return(v)
75591 #else
75592 #define dbfs_atomic_inc(v) 0
75593@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75594 atomic_read(&cf_sk->sk.sk_rmem_alloc),
75595 sk_rcvbuf_lowwater(cf_sk));
75596 set_rx_flow_off(cf_sk);
75597- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75598+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75599 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75600 }
75601
75602@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75603 set_rx_flow_off(cf_sk);
75604 if (net_ratelimit())
75605 pr_debug("sending flow OFF due to rmem_schedule\n");
75606- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75607+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75608 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75609 }
75610 skb->dev = NULL;
75611@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
75612 switch (flow) {
75613 case CAIF_CTRLCMD_FLOW_ON_IND:
75614 /* OK from modem to start sending again */
75615- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
75616+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
75617 set_tx_flow_on(cf_sk);
75618 cf_sk->sk.sk_state_change(&cf_sk->sk);
75619 break;
75620
75621 case CAIF_CTRLCMD_FLOW_OFF_IND:
75622 /* Modem asks us to shut up */
75623- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
75624+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
75625 set_tx_flow_off(cf_sk);
75626 cf_sk->sk.sk_state_change(&cf_sk->sk);
75627 break;
75628@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75629 /* We're now connected */
75630 caif_client_register_refcnt(&cf_sk->layer,
75631 cfsk_hold, cfsk_put);
75632- dbfs_atomic_inc(&cnt.num_connect_resp);
75633+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
75634 cf_sk->sk.sk_state = CAIF_CONNECTED;
75635 set_tx_flow_on(cf_sk);
75636 cf_sk->sk.sk_state_change(&cf_sk->sk);
75637@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75638
75639 case CAIF_CTRLCMD_INIT_FAIL_RSP:
75640 /* Connect request failed */
75641- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
75642+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
75643 cf_sk->sk.sk_err = ECONNREFUSED;
75644 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
75645 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75646@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75647
75648 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
75649 /* Modem has closed this connection, or device is down. */
75650- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
75651+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
75652 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75653 cf_sk->sk.sk_err = ECONNRESET;
75654 set_rx_flow_on(cf_sk);
75655@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
75656 return;
75657
75658 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
75659- dbfs_atomic_inc(&cnt.num_rx_flow_on);
75660+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
75661 set_rx_flow_on(cf_sk);
75662 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
75663 }
75664@@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
75665 /*ifindex = id of the interface.*/
75666 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
75667
75668- dbfs_atomic_inc(&cnt.num_connect_req);
75669+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
75670 cf_sk->layer.receive = caif_sktrecv_cb;
75671
75672 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
75673@@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
75674 spin_unlock_bh(&sk->sk_receive_queue.lock);
75675 sock->sk = NULL;
75676
75677- dbfs_atomic_inc(&cnt.num_disconnect);
75678+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
75679
75680 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
75681 if (cf_sk->debugfs_socket_dir != NULL)
75682@@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
75683 cf_sk->conn_req.protocol = protocol;
75684 /* Increase the number of sockets created. */
75685 dbfs_atomic_inc(&cnt.caif_nr_socks);
75686- num = dbfs_atomic_inc(&cnt.caif_sock_create);
75687+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
75688 #ifdef CONFIG_DEBUG_FS
75689 if (!IS_ERR(debugfsdir)) {
75690
75691diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
75692index 5cf5222..6f704ad 100644
75693--- a/net/caif/cfctrl.c
75694+++ b/net/caif/cfctrl.c
75695@@ -9,6 +9,7 @@
75696 #include <linux/stddef.h>
75697 #include <linux/spinlock.h>
75698 #include <linux/slab.h>
75699+#include <linux/sched.h>
75700 #include <net/caif/caif_layer.h>
75701 #include <net/caif/cfpkt.h>
75702 #include <net/caif/cfctrl.h>
75703@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
75704 memset(&dev_info, 0, sizeof(dev_info));
75705 dev_info.id = 0xff;
75706 cfsrvl_init(&this->serv, 0, &dev_info, false);
75707- atomic_set(&this->req_seq_no, 1);
75708- atomic_set(&this->rsp_seq_no, 1);
75709+ atomic_set_unchecked(&this->req_seq_no, 1);
75710+ atomic_set_unchecked(&this->rsp_seq_no, 1);
75711 this->serv.layer.receive = cfctrl_recv;
75712 sprintf(this->serv.layer.name, "ctrl");
75713 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
75714@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
75715 struct cfctrl_request_info *req)
75716 {
75717 spin_lock_bh(&ctrl->info_list_lock);
75718- atomic_inc(&ctrl->req_seq_no);
75719- req->sequence_no = atomic_read(&ctrl->req_seq_no);
75720+ atomic_inc_unchecked(&ctrl->req_seq_no);
75721+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
75722 list_add_tail(&req->list, &ctrl->list);
75723 spin_unlock_bh(&ctrl->info_list_lock);
75724 }
75725@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
75726 if (p != first)
75727 pr_warn("Requests are not received in order\n");
75728
75729- atomic_set(&ctrl->rsp_seq_no,
75730+ atomic_set_unchecked(&ctrl->rsp_seq_no,
75731 p->sequence_no);
75732 list_del(&p->list);
75733 goto out;
75734diff --git a/net/can/gw.c b/net/can/gw.c
75735index 3d79b12..8de85fa 100644
75736--- a/net/can/gw.c
75737+++ b/net/can/gw.c
75738@@ -96,7 +96,7 @@ struct cf_mod {
75739 struct {
75740 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
75741 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
75742- } csumfunc;
75743+ } __no_const csumfunc;
75744 };
75745
75746
75747diff --git a/net/compat.c b/net/compat.c
75748index 6def90e..c6992fa 100644
75749--- a/net/compat.c
75750+++ b/net/compat.c
75751@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
75752 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75753 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75754 return -EFAULT;
75755- kmsg->msg_name = compat_ptr(tmp1);
75756- kmsg->msg_iov = compat_ptr(tmp2);
75757- kmsg->msg_control = compat_ptr(tmp3);
75758+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75759+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75760+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75761 return 0;
75762 }
75763
75764@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75765
75766 if (kern_msg->msg_namelen) {
75767 if (mode == VERIFY_READ) {
75768- int err = move_addr_to_kernel(kern_msg->msg_name,
75769+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
75770 kern_msg->msg_namelen,
75771 kern_address);
75772 if (err < 0)
75773@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75774 kern_msg->msg_name = NULL;
75775
75776 tot_len = iov_from_user_compat_to_kern(kern_iov,
75777- (struct compat_iovec __user *)kern_msg->msg_iov,
75778+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
75779 kern_msg->msg_iovlen);
75780 if (tot_len >= 0)
75781 kern_msg->msg_iov = kern_iov;
75782@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75783
75784 #define CMSG_COMPAT_FIRSTHDR(msg) \
75785 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75786- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75787+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75788 (struct compat_cmsghdr __user *)NULL)
75789
75790 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75791 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75792 (ucmlen) <= (unsigned long) \
75793 ((mhdr)->msg_controllen - \
75794- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75795+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75796
75797 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75798 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75799 {
75800 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75801- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75802+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75803 msg->msg_controllen)
75804 return NULL;
75805 return (struct compat_cmsghdr __user *)ptr;
75806@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75807 {
75808 struct compat_timeval ctv;
75809 struct compat_timespec cts[3];
75810- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75811+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75812 struct compat_cmsghdr cmhdr;
75813 int cmlen;
75814
75815@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75816
75817 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75818 {
75819- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75820+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75821 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75822 int fdnum = scm->fp->count;
75823 struct file **fp = scm->fp->fp;
75824@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
75825 return -EFAULT;
75826 old_fs = get_fs();
75827 set_fs(KERNEL_DS);
75828- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
75829+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
75830 set_fs(old_fs);
75831
75832 return err;
75833@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
75834 len = sizeof(ktime);
75835 old_fs = get_fs();
75836 set_fs(KERNEL_DS);
75837- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75838+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75839 set_fs(old_fs);
75840
75841 if (!err) {
75842@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75843 case MCAST_JOIN_GROUP:
75844 case MCAST_LEAVE_GROUP:
75845 {
75846- struct compat_group_req __user *gr32 = (void *)optval;
75847+ struct compat_group_req __user *gr32 = (void __user *)optval;
75848 struct group_req __user *kgr =
75849 compat_alloc_user_space(sizeof(struct group_req));
75850 u32 interface;
75851@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75852 case MCAST_BLOCK_SOURCE:
75853 case MCAST_UNBLOCK_SOURCE:
75854 {
75855- struct compat_group_source_req __user *gsr32 = (void *)optval;
75856+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75857 struct group_source_req __user *kgsr = compat_alloc_user_space(
75858 sizeof(struct group_source_req));
75859 u32 interface;
75860@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75861 }
75862 case MCAST_MSFILTER:
75863 {
75864- struct compat_group_filter __user *gf32 = (void *)optval;
75865+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75866 struct group_filter __user *kgf;
75867 u32 interface, fmode, numsrc;
75868
75869@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
75870 char __user *optval, int __user *optlen,
75871 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75872 {
75873- struct compat_group_filter __user *gf32 = (void *)optval;
75874+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75875 struct group_filter __user *kgf;
75876 int __user *koptlen;
75877 u32 interface, fmode, numsrc;
75878diff --git a/net/core/datagram.c b/net/core/datagram.c
75879index 68bbf9f..5ef0d12 100644
75880--- a/net/core/datagram.c
75881+++ b/net/core/datagram.c
75882@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
75883 }
75884
75885 kfree_skb(skb);
75886- atomic_inc(&sk->sk_drops);
75887+ atomic_inc_unchecked(&sk->sk_drops);
75888 sk_mem_reclaim_partial(sk);
75889
75890 return err;
75891diff --git a/net/core/dev.c b/net/core/dev.c
75892index 0336374..659088a 100644
75893--- a/net/core/dev.c
75894+++ b/net/core/dev.c
75895@@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
75896 if (no_module && capable(CAP_NET_ADMIN))
75897 no_module = request_module("netdev-%s", name);
75898 if (no_module && capable(CAP_SYS_MODULE)) {
75899+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75900+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
75901+#else
75902 if (!request_module("%s", name))
75903 pr_err("Loading kernel module for a network device "
75904 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75905 "instead\n", name);
75906+#endif
75907 }
75908 }
75909 EXPORT_SYMBOL(dev_load);
75910@@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75911 {
75912 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
75913 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
75914- atomic_long_inc(&dev->rx_dropped);
75915+ atomic_long_inc_unchecked(&dev->rx_dropped);
75916 kfree_skb(skb);
75917 return NET_RX_DROP;
75918 }
75919@@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75920 nf_reset(skb);
75921
75922 if (unlikely(!is_skb_forwardable(dev, skb))) {
75923- atomic_long_inc(&dev->rx_dropped);
75924+ atomic_long_inc_unchecked(&dev->rx_dropped);
75925 kfree_skb(skb);
75926 return NET_RX_DROP;
75927 }
75928@@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75929
75930 struct dev_gso_cb {
75931 void (*destructor)(struct sk_buff *skb);
75932-};
75933+} __no_const;
75934
75935 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75936
75937@@ -2933,7 +2937,7 @@ enqueue:
75938
75939 local_irq_restore(flags);
75940
75941- atomic_long_inc(&skb->dev->rx_dropped);
75942+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75943 kfree_skb(skb);
75944 return NET_RX_DROP;
75945 }
75946@@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb)
75947 }
75948 EXPORT_SYMBOL(netif_rx_ni);
75949
75950-static void net_tx_action(struct softirq_action *h)
75951+static void net_tx_action(void)
75952 {
75953 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75954
75955@@ -3293,7 +3297,7 @@ ncls:
75956 if (pt_prev) {
75957 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75958 } else {
75959- atomic_long_inc(&skb->dev->rx_dropped);
75960+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75961 kfree_skb(skb);
75962 /* Jamal, now you will not able to escape explaining
75963 * me how you were going to use this. :-)
75964@@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi)
75965 }
75966 EXPORT_SYMBOL(netif_napi_del);
75967
75968-static void net_rx_action(struct softirq_action *h)
75969+static void net_rx_action(void)
75970 {
75971 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75972 unsigned long time_limit = jiffies + 2;
75973@@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
75974 } else {
75975 netdev_stats_to_stats64(storage, &dev->stats);
75976 }
75977- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75978+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75979 return storage;
75980 }
75981 EXPORT_SYMBOL(dev_get_stats);
75982diff --git a/net/core/flow.c b/net/core/flow.c
75983index e318c7e..168b1d0 100644
75984--- a/net/core/flow.c
75985+++ b/net/core/flow.c
75986@@ -61,7 +61,7 @@ struct flow_cache {
75987 struct timer_list rnd_timer;
75988 };
75989
75990-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75991+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75992 EXPORT_SYMBOL(flow_cache_genid);
75993 static struct flow_cache flow_cache_global;
75994 static struct kmem_cache *flow_cachep __read_mostly;
75995@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75996
75997 static int flow_entry_valid(struct flow_cache_entry *fle)
75998 {
75999- if (atomic_read(&flow_cache_genid) != fle->genid)
76000+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
76001 return 0;
76002 if (fle->object && !fle->object->ops->check(fle->object))
76003 return 0;
76004@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
76005 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
76006 fcp->hash_count++;
76007 }
76008- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
76009+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
76010 flo = fle->object;
76011 if (!flo)
76012 goto ret_object;
76013@@ -280,7 +280,7 @@ nocache:
76014 }
76015 flo = resolver(net, key, family, dir, flo, ctx);
76016 if (fle) {
76017- fle->genid = atomic_read(&flow_cache_genid);
76018+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
76019 if (!IS_ERR(flo))
76020 fle->object = flo;
76021 else
76022diff --git a/net/core/iovec.c b/net/core/iovec.c
76023index c40f27e..7f49254 100644
76024--- a/net/core/iovec.c
76025+++ b/net/core/iovec.c
76026@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
76027 if (m->msg_namelen) {
76028 if (mode == VERIFY_READ) {
76029 void __user *namep;
76030- namep = (void __user __force *) m->msg_name;
76031+ namep = (void __force_user *) m->msg_name;
76032 err = move_addr_to_kernel(namep, m->msg_namelen,
76033 address);
76034 if (err < 0)
76035@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
76036 }
76037
76038 size = m->msg_iovlen * sizeof(struct iovec);
76039- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
76040+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
76041 return -EFAULT;
76042
76043 m->msg_iov = iov;
76044diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
76045index 5c30296..ebe7b61 100644
76046--- a/net/core/rtnetlink.c
76047+++ b/net/core/rtnetlink.c
76048@@ -57,7 +57,7 @@ struct rtnl_link {
76049 rtnl_doit_func doit;
76050 rtnl_dumpit_func dumpit;
76051 rtnl_calcit_func calcit;
76052-};
76053+} __no_const;
76054
76055 static DEFINE_MUTEX(rtnl_mutex);
76056
76057diff --git a/net/core/scm.c b/net/core/scm.c
76058index ff52ad0..aff1c0f 100644
76059--- a/net/core/scm.c
76060+++ b/net/core/scm.c
76061@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
76062 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76063 {
76064 struct cmsghdr __user *cm
76065- = (__force struct cmsghdr __user *)msg->msg_control;
76066+ = (struct cmsghdr __force_user *)msg->msg_control;
76067 struct cmsghdr cmhdr;
76068 int cmlen = CMSG_LEN(len);
76069 int err;
76070@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76071 err = -EFAULT;
76072 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
76073 goto out;
76074- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
76075+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
76076 goto out;
76077 cmlen = CMSG_SPACE(len);
76078 if (msg->msg_controllen < cmlen)
76079@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
76080 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76081 {
76082 struct cmsghdr __user *cm
76083- = (__force struct cmsghdr __user*)msg->msg_control;
76084+ = (struct cmsghdr __force_user *)msg->msg_control;
76085
76086 int fdmax = 0;
76087 int fdnum = scm->fp->count;
76088@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76089 if (fdnum < fdmax)
76090 fdmax = fdnum;
76091
76092- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
76093+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
76094 i++, cmfptr++)
76095 {
76096 int new_fd;
76097diff --git a/net/core/sock.c b/net/core/sock.c
76098index 02f8dfe..86dfd4a 100644
76099--- a/net/core/sock.c
76100+++ b/net/core/sock.c
76101@@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76102 struct sk_buff_head *list = &sk->sk_receive_queue;
76103
76104 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
76105- atomic_inc(&sk->sk_drops);
76106+ atomic_inc_unchecked(&sk->sk_drops);
76107 trace_sock_rcvqueue_full(sk, skb);
76108 return -ENOMEM;
76109 }
76110@@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76111 return err;
76112
76113 if (!sk_rmem_schedule(sk, skb->truesize)) {
76114- atomic_inc(&sk->sk_drops);
76115+ atomic_inc_unchecked(&sk->sk_drops);
76116 return -ENOBUFS;
76117 }
76118
76119@@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76120 skb_dst_force(skb);
76121
76122 spin_lock_irqsave(&list->lock, flags);
76123- skb->dropcount = atomic_read(&sk->sk_drops);
76124+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76125 __skb_queue_tail(list, skb);
76126 spin_unlock_irqrestore(&list->lock, flags);
76127
76128@@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76129 skb->dev = NULL;
76130
76131 if (sk_rcvqueues_full(sk, skb)) {
76132- atomic_inc(&sk->sk_drops);
76133+ atomic_inc_unchecked(&sk->sk_drops);
76134 goto discard_and_relse;
76135 }
76136 if (nested)
76137@@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76138 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76139 } else if (sk_add_backlog(sk, skb)) {
76140 bh_unlock_sock(sk);
76141- atomic_inc(&sk->sk_drops);
76142+ atomic_inc_unchecked(&sk->sk_drops);
76143 goto discard_and_relse;
76144 }
76145
76146@@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76147 if (len > sizeof(peercred))
76148 len = sizeof(peercred);
76149 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76150- if (copy_to_user(optval, &peercred, len))
76151+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76152 return -EFAULT;
76153 goto lenout;
76154 }
76155@@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76156 return -ENOTCONN;
76157 if (lv < len)
76158 return -EINVAL;
76159- if (copy_to_user(optval, address, len))
76160+ if (len > sizeof(address) || copy_to_user(optval, address, len))
76161 return -EFAULT;
76162 goto lenout;
76163 }
76164@@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76165
76166 if (len > lv)
76167 len = lv;
76168- if (copy_to_user(optval, &v, len))
76169+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
76170 return -EFAULT;
76171 lenout:
76172 if (put_user(len, optlen))
76173@@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
76174 */
76175 smp_wmb();
76176 atomic_set(&sk->sk_refcnt, 1);
76177- atomic_set(&sk->sk_drops, 0);
76178+ atomic_set_unchecked(&sk->sk_drops, 0);
76179 }
76180 EXPORT_SYMBOL(sock_init_data);
76181
76182diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
76183index b9868e1..849f809 100644
76184--- a/net/core/sock_diag.c
76185+++ b/net/core/sock_diag.c
76186@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
76187
76188 int sock_diag_check_cookie(void *sk, __u32 *cookie)
76189 {
76190+#ifndef CONFIG_GRKERNSEC_HIDESYM
76191 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
76192 cookie[1] != INET_DIAG_NOCOOKIE) &&
76193 ((u32)(unsigned long)sk != cookie[0] ||
76194 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
76195 return -ESTALE;
76196 else
76197+#endif
76198 return 0;
76199 }
76200 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
76201
76202 void sock_diag_save_cookie(void *sk, __u32 *cookie)
76203 {
76204+#ifdef CONFIG_GRKERNSEC_HIDESYM
76205+ cookie[0] = 0;
76206+ cookie[1] = 0;
76207+#else
76208 cookie[0] = (u32)(unsigned long)sk;
76209 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
76210+#endif
76211 }
76212 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
76213
76214diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
76215index 02e75d1..9a57a7c 100644
76216--- a/net/decnet/sysctl_net_decnet.c
76217+++ b/net/decnet/sysctl_net_decnet.c
76218@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
76219
76220 if (len > *lenp) len = *lenp;
76221
76222- if (copy_to_user(buffer, addr, len))
76223+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
76224 return -EFAULT;
76225
76226 *lenp = len;
76227@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
76228
76229 if (len > *lenp) len = *lenp;
76230
76231- if (copy_to_user(buffer, devname, len))
76232+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
76233 return -EFAULT;
76234
76235 *lenp = len;
76236diff --git a/net/econet/Kconfig b/net/econet/Kconfig
76237index 39a2d29..f39c0fe 100644
76238--- a/net/econet/Kconfig
76239+++ b/net/econet/Kconfig
76240@@ -4,7 +4,7 @@
76241
76242 config ECONET
76243 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
76244- depends on EXPERIMENTAL && INET
76245+ depends on EXPERIMENTAL && INET && BROKEN
76246 ---help---
76247 Econet is a fairly old and slow networking protocol mainly used by
76248 Acorn computers to access file and print servers. It uses native
76249diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
76250index 36d1440..44ff28b 100644
76251--- a/net/ipv4/ah4.c
76252+++ b/net/ipv4/ah4.c
76253@@ -19,6 +19,8 @@ struct ah_skb_cb {
76254 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76255
76256 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76257+ unsigned int size) __size_overflow(3);
76258+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76259 unsigned int size)
76260 {
76261 unsigned int len;
76262diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
76263index 92fc5f6..b790d91 100644
76264--- a/net/ipv4/fib_frontend.c
76265+++ b/net/ipv4/fib_frontend.c
76266@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
76267 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76268 fib_sync_up(dev);
76269 #endif
76270- atomic_inc(&net->ipv4.dev_addr_genid);
76271+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76272 rt_cache_flush(dev_net(dev), -1);
76273 break;
76274 case NETDEV_DOWN:
76275 fib_del_ifaddr(ifa, NULL);
76276- atomic_inc(&net->ipv4.dev_addr_genid);
76277+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76278 if (ifa->ifa_dev->ifa_list == NULL) {
76279 /* Last address was deleted from this interface.
76280 * Disable IP.
76281@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
76282 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76283 fib_sync_up(dev);
76284 #endif
76285- atomic_inc(&net->ipv4.dev_addr_genid);
76286+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76287 rt_cache_flush(dev_net(dev), -1);
76288 break;
76289 case NETDEV_DOWN:
76290diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
76291index 80106d8..232e898 100644
76292--- a/net/ipv4/fib_semantics.c
76293+++ b/net/ipv4/fib_semantics.c
76294@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
76295 nh->nh_saddr = inet_select_addr(nh->nh_dev,
76296 nh->nh_gw,
76297 nh->nh_parent->fib_scope);
76298- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
76299+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
76300
76301 return nh->nh_saddr;
76302 }
76303diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
76304index 984ec65..97ac518 100644
76305--- a/net/ipv4/inet_hashtables.c
76306+++ b/net/ipv4/inet_hashtables.c
76307@@ -18,12 +18,15 @@
76308 #include <linux/sched.h>
76309 #include <linux/slab.h>
76310 #include <linux/wait.h>
76311+#include <linux/security.h>
76312
76313 #include <net/inet_connection_sock.h>
76314 #include <net/inet_hashtables.h>
76315 #include <net/secure_seq.h>
76316 #include <net/ip.h>
76317
76318+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
76319+
76320 /*
76321 * Allocate and initialize a new local port bind bucket.
76322 * The bindhash mutex for snum's hash chain must be held here.
76323@@ -530,6 +533,8 @@ ok:
76324 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
76325 spin_unlock(&head->lock);
76326
76327+ gr_update_task_in_ip_table(current, inet_sk(sk));
76328+
76329 if (tw) {
76330 inet_twsk_deschedule(tw, death_row);
76331 while (twrefcnt) {
76332diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
76333index d4d61b6..b81aec8 100644
76334--- a/net/ipv4/inetpeer.c
76335+++ b/net/ipv4/inetpeer.c
76336@@ -487,8 +487,8 @@ relookup:
76337 if (p) {
76338 p->daddr = *daddr;
76339 atomic_set(&p->refcnt, 1);
76340- atomic_set(&p->rid, 0);
76341- atomic_set(&p->ip_id_count,
76342+ atomic_set_unchecked(&p->rid, 0);
76343+ atomic_set_unchecked(&p->ip_id_count,
76344 (daddr->family == AF_INET) ?
76345 secure_ip_id(daddr->addr.a4) :
76346 secure_ipv6_id(daddr->addr.a6));
76347diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
76348index 1f23a57..7180dfe 100644
76349--- a/net/ipv4/ip_fragment.c
76350+++ b/net/ipv4/ip_fragment.c
76351@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
76352 return 0;
76353
76354 start = qp->rid;
76355- end = atomic_inc_return(&peer->rid);
76356+ end = atomic_inc_return_unchecked(&peer->rid);
76357 qp->rid = end;
76358
76359 rc = qp->q.fragments && (end - start) > max;
76360diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
76361index 8aa87c1..35c3248 100644
76362--- a/net/ipv4/ip_sockglue.c
76363+++ b/net/ipv4/ip_sockglue.c
76364@@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76365 len = min_t(unsigned int, len, opt->optlen);
76366 if (put_user(len, optlen))
76367 return -EFAULT;
76368- if (copy_to_user(optval, opt->__data, len))
76369+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76370+ copy_to_user(optval, opt->__data, len))
76371 return -EFAULT;
76372 return 0;
76373 }
76374@@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76375 if (sk->sk_type != SOCK_STREAM)
76376 return -ENOPROTOOPT;
76377
76378- msg.msg_control = optval;
76379+ msg.msg_control = (void __force_kernel *)optval;
76380 msg.msg_controllen = len;
76381 msg.msg_flags = flags;
76382
76383diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
76384index 6e412a6..6640538 100644
76385--- a/net/ipv4/ipconfig.c
76386+++ b/net/ipv4/ipconfig.c
76387@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
76388
76389 mm_segment_t oldfs = get_fs();
76390 set_fs(get_ds());
76391- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76392+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76393 set_fs(oldfs);
76394 return res;
76395 }
76396@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
76397
76398 mm_segment_t oldfs = get_fs();
76399 set_fs(get_ds());
76400- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76401+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76402 set_fs(oldfs);
76403 return res;
76404 }
76405@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
76406
76407 mm_segment_t oldfs = get_fs();
76408 set_fs(get_ds());
76409- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76410+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76411 set_fs(oldfs);
76412 return res;
76413 }
76414diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
76415index fd7a3f6..a1b1013 100644
76416--- a/net/ipv4/netfilter/arp_tables.c
76417+++ b/net/ipv4/netfilter/arp_tables.c
76418@@ -757,6 +757,9 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76419
76420 static int copy_entries_to_user(unsigned int total_size,
76421 const struct xt_table *table,
76422+ void __user *userptr) __size_overflow(1);
76423+static int copy_entries_to_user(unsigned int total_size,
76424+ const struct xt_table *table,
76425 void __user *userptr)
76426 {
76427 unsigned int off, num;
76428@@ -984,6 +987,11 @@ static int __do_replace(struct net *net, const char *name,
76429 unsigned int valid_hooks,
76430 struct xt_table_info *newinfo,
76431 unsigned int num_counters,
76432+ void __user *counters_ptr) __size_overflow(5);
76433+static int __do_replace(struct net *net, const char *name,
76434+ unsigned int valid_hooks,
76435+ struct xt_table_info *newinfo,
76436+ unsigned int num_counters,
76437 void __user *counters_ptr)
76438 {
76439 int ret;
76440@@ -1104,6 +1112,8 @@ static int do_replace(struct net *net, const void __user *user,
76441 }
76442
76443 static int do_add_counters(struct net *net, const void __user *user,
76444+ unsigned int len, int compat) __size_overflow(3);
76445+static int do_add_counters(struct net *net, const void __user *user,
76446 unsigned int len, int compat)
76447 {
76448 unsigned int i, curcpu;
76449diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
76450index 24e556e..b073356 100644
76451--- a/net/ipv4/netfilter/ip_tables.c
76452+++ b/net/ipv4/netfilter/ip_tables.c
76453@@ -923,6 +923,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76454 static int
76455 copy_entries_to_user(unsigned int total_size,
76456 const struct xt_table *table,
76457+ void __user *userptr) __size_overflow(1);
76458+static int
76459+copy_entries_to_user(unsigned int total_size,
76460+ const struct xt_table *table,
76461 void __user *userptr)
76462 {
76463 unsigned int off, num;
76464@@ -1172,6 +1176,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
76465 static int
76466 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76467 struct xt_table_info *newinfo, unsigned int num_counters,
76468+ void __user *counters_ptr) __size_overflow(5);
76469+static int
76470+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76471+ struct xt_table_info *newinfo, unsigned int num_counters,
76472 void __user *counters_ptr)
76473 {
76474 int ret;
76475@@ -1293,6 +1301,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76476
76477 static int
76478 do_add_counters(struct net *net, const void __user *user,
76479+ unsigned int len, int compat) __size_overflow(3);
76480+static int
76481+do_add_counters(struct net *net, const void __user *user,
76482 unsigned int len, int compat)
76483 {
76484 unsigned int i, curcpu;
76485diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
76486index ba5756d..8d34d74 100644
76487--- a/net/ipv4/netfilter/ipt_ULOG.c
76488+++ b/net/ipv4/netfilter/ipt_ULOG.c
76489@@ -125,6 +125,7 @@ static void ulog_timer(unsigned long data)
76490 spin_unlock_bh(&ulog_lock);
76491 }
76492
76493+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
76494 static struct sk_buff *ulog_alloc_skb(unsigned int size)
76495 {
76496 struct sk_buff *skb;
76497diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76498index 2133c30..0e8047e 100644
76499--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
76500+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76501@@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
76502 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76503 unsigned char *eoc,
76504 unsigned long **oid,
76505+ unsigned int *len) __size_overflow(2);
76506+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76507+ unsigned char *eoc,
76508+ unsigned long **oid,
76509 unsigned int *len)
76510 {
76511 unsigned long subid;
76512diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
76513index b072386..abdebcf 100644
76514--- a/net/ipv4/ping.c
76515+++ b/net/ipv4/ping.c
76516@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
76517 sk_rmem_alloc_get(sp),
76518 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76519 atomic_read(&sp->sk_refcnt), sp,
76520- atomic_read(&sp->sk_drops), len);
76521+ atomic_read_unchecked(&sp->sk_drops), len);
76522 }
76523
76524 static int ping_seq_show(struct seq_file *seq, void *v)
76525diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
76526index 3ccda5a..3c1e61d 100644
76527--- a/net/ipv4/raw.c
76528+++ b/net/ipv4/raw.c
76529@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
76530 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76531 {
76532 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76533- atomic_inc(&sk->sk_drops);
76534+ atomic_inc_unchecked(&sk->sk_drops);
76535 kfree_skb(skb);
76536 return NET_RX_DROP;
76537 }
76538@@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
76539
76540 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76541 {
76542+ struct icmp_filter filter;
76543+
76544 if (optlen > sizeof(struct icmp_filter))
76545 optlen = sizeof(struct icmp_filter);
76546- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76547+ if (copy_from_user(&filter, optval, optlen))
76548 return -EFAULT;
76549+ raw_sk(sk)->filter = filter;
76550 return 0;
76551 }
76552
76553 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76554 {
76555 int len, ret = -EFAULT;
76556+ struct icmp_filter filter;
76557
76558 if (get_user(len, optlen))
76559 goto out;
76560@@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
76561 if (len > sizeof(struct icmp_filter))
76562 len = sizeof(struct icmp_filter);
76563 ret = -EFAULT;
76564- if (put_user(len, optlen) ||
76565- copy_to_user(optval, &raw_sk(sk)->filter, len))
76566+ filter = raw_sk(sk)->filter;
76567+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
76568 goto out;
76569 ret = 0;
76570 out: return ret;
76571@@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76572 sk_wmem_alloc_get(sp),
76573 sk_rmem_alloc_get(sp),
76574 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76575- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76576+ atomic_read(&sp->sk_refcnt),
76577+#ifdef CONFIG_GRKERNSEC_HIDESYM
76578+ NULL,
76579+#else
76580+ sp,
76581+#endif
76582+ atomic_read_unchecked(&sp->sk_drops));
76583 }
76584
76585 static int raw_seq_show(struct seq_file *seq, void *v)
76586diff --git a/net/ipv4/route.c b/net/ipv4/route.c
76587index 0197747..7adb0dc 100644
76588--- a/net/ipv4/route.c
76589+++ b/net/ipv4/route.c
76590@@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
76591
76592 static inline int rt_genid(struct net *net)
76593 {
76594- return atomic_read(&net->ipv4.rt_genid);
76595+ return atomic_read_unchecked(&net->ipv4.rt_genid);
76596 }
76597
76598 #ifdef CONFIG_PROC_FS
76599@@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
76600 unsigned char shuffle;
76601
76602 get_random_bytes(&shuffle, sizeof(shuffle));
76603- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76604+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
76605 inetpeer_invalidate_tree(AF_INET);
76606 }
76607
76608@@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
76609 error = rt->dst.error;
76610 if (peer) {
76611 inet_peer_refcheck(rt->peer);
76612- id = atomic_read(&peer->ip_id_count) & 0xffff;
76613+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
76614 if (peer->tcp_ts_stamp) {
76615 ts = peer->tcp_ts;
76616 tsage = get_seconds() - peer->tcp_ts_stamp;
76617diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
76618index fd54c5f..96d6407 100644
76619--- a/net/ipv4/tcp_ipv4.c
76620+++ b/net/ipv4/tcp_ipv4.c
76621@@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
76622 int sysctl_tcp_low_latency __read_mostly;
76623 EXPORT_SYMBOL(sysctl_tcp_low_latency);
76624
76625+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76626+extern int grsec_enable_blackhole;
76627+#endif
76628
76629 #ifdef CONFIG_TCP_MD5SIG
76630 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
76631@@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
76632 return 0;
76633
76634 reset:
76635+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76636+ if (!grsec_enable_blackhole)
76637+#endif
76638 tcp_v4_send_reset(rsk, skb);
76639 discard:
76640 kfree_skb(skb);
76641@@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
76642 TCP_SKB_CB(skb)->sacked = 0;
76643
76644 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76645- if (!sk)
76646+ if (!sk) {
76647+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76648+ ret = 1;
76649+#endif
76650 goto no_tcp_socket;
76651-
76652+ }
76653 process:
76654- if (sk->sk_state == TCP_TIME_WAIT)
76655+ if (sk->sk_state == TCP_TIME_WAIT) {
76656+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76657+ ret = 2;
76658+#endif
76659 goto do_time_wait;
76660+ }
76661
76662 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
76663 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76664@@ -1755,6 +1768,10 @@ no_tcp_socket:
76665 bad_packet:
76666 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76667 } else {
76668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76669+ if (!grsec_enable_blackhole || (ret == 1 &&
76670+ (skb->dev->flags & IFF_LOOPBACK)))
76671+#endif
76672 tcp_v4_send_reset(NULL, skb);
76673 }
76674
76675@@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
76676 0, /* non standard timer */
76677 0, /* open_requests have no inode */
76678 atomic_read(&sk->sk_refcnt),
76679+#ifdef CONFIG_GRKERNSEC_HIDESYM
76680+ NULL,
76681+#else
76682 req,
76683+#endif
76684 len);
76685 }
76686
76687@@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
76688 sock_i_uid(sk),
76689 icsk->icsk_probes_out,
76690 sock_i_ino(sk),
76691- atomic_read(&sk->sk_refcnt), sk,
76692+ atomic_read(&sk->sk_refcnt),
76693+#ifdef CONFIG_GRKERNSEC_HIDESYM
76694+ NULL,
76695+#else
76696+ sk,
76697+#endif
76698 jiffies_to_clock_t(icsk->icsk_rto),
76699 jiffies_to_clock_t(icsk->icsk_ack.ato),
76700 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
76701@@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
76702 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
76703 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76704 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76705- atomic_read(&tw->tw_refcnt), tw, len);
76706+ atomic_read(&tw->tw_refcnt),
76707+#ifdef CONFIG_GRKERNSEC_HIDESYM
76708+ NULL,
76709+#else
76710+ tw,
76711+#endif
76712+ len);
76713 }
76714
76715 #define TMPSZ 150
76716diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
76717index 550e755..25721b3 100644
76718--- a/net/ipv4/tcp_minisocks.c
76719+++ b/net/ipv4/tcp_minisocks.c
76720@@ -27,6 +27,10 @@
76721 #include <net/inet_common.h>
76722 #include <net/xfrm.h>
76723
76724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76725+extern int grsec_enable_blackhole;
76726+#endif
76727+
76728 int sysctl_tcp_syncookies __read_mostly = 1;
76729 EXPORT_SYMBOL(sysctl_tcp_syncookies);
76730
76731@@ -753,6 +757,10 @@ listen_overflow:
76732
76733 embryonic_reset:
76734 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
76735+
76736+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76737+ if (!grsec_enable_blackhole)
76738+#endif
76739 if (!(flg & TCP_FLAG_RST))
76740 req->rsk_ops->send_reset(sk, skb);
76741
76742diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
76743index 85ee7eb..53277ab 100644
76744--- a/net/ipv4/tcp_probe.c
76745+++ b/net/ipv4/tcp_probe.c
76746@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
76747 if (cnt + width >= len)
76748 break;
76749
76750- if (copy_to_user(buf + cnt, tbuf, width))
76751+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
76752 return -EFAULT;
76753 cnt += width;
76754 }
76755diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
76756index cd2e072..1fffee2 100644
76757--- a/net/ipv4/tcp_timer.c
76758+++ b/net/ipv4/tcp_timer.c
76759@@ -22,6 +22,10 @@
76760 #include <linux/gfp.h>
76761 #include <net/tcp.h>
76762
76763+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76764+extern int grsec_lastack_retries;
76765+#endif
76766+
76767 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76768 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76769 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
76770@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
76771 }
76772 }
76773
76774+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76775+ if ((sk->sk_state == TCP_LAST_ACK) &&
76776+ (grsec_lastack_retries > 0) &&
76777+ (grsec_lastack_retries < retry_until))
76778+ retry_until = grsec_lastack_retries;
76779+#endif
76780+
76781 if (retransmits_timed_out(sk, retry_until,
76782 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
76783 /* Has it gone just too far? */
76784diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
76785index 5d075b5..d907d5f 100644
76786--- a/net/ipv4/udp.c
76787+++ b/net/ipv4/udp.c
76788@@ -86,6 +86,7 @@
76789 #include <linux/types.h>
76790 #include <linux/fcntl.h>
76791 #include <linux/module.h>
76792+#include <linux/security.h>
76793 #include <linux/socket.h>
76794 #include <linux/sockios.h>
76795 #include <linux/igmp.h>
76796@@ -108,6 +109,10 @@
76797 #include <trace/events/udp.h>
76798 #include "udp_impl.h"
76799
76800+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76801+extern int grsec_enable_blackhole;
76802+#endif
76803+
76804 struct udp_table udp_table __read_mostly;
76805 EXPORT_SYMBOL(udp_table);
76806
76807@@ -566,6 +571,9 @@ found:
76808 return s;
76809 }
76810
76811+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76812+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76813+
76814 /*
76815 * This routine is called by the ICMP module when it gets some
76816 * sort of error condition. If err < 0 then the socket should
76817@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
76818 dport = usin->sin_port;
76819 if (dport == 0)
76820 return -EINVAL;
76821+
76822+ err = gr_search_udp_sendmsg(sk, usin);
76823+ if (err)
76824+ return err;
76825 } else {
76826 if (sk->sk_state != TCP_ESTABLISHED)
76827 return -EDESTADDRREQ;
76828+
76829+ err = gr_search_udp_sendmsg(sk, NULL);
76830+ if (err)
76831+ return err;
76832+
76833 daddr = inet->inet_daddr;
76834 dport = inet->inet_dport;
76835 /* Open fast path for connected socket.
76836@@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
76837 udp_lib_checksum_complete(skb)) {
76838 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76839 IS_UDPLITE(sk));
76840- atomic_inc(&sk->sk_drops);
76841+ atomic_inc_unchecked(&sk->sk_drops);
76842 __skb_unlink(skb, rcvq);
76843 __skb_queue_tail(&list_kill, skb);
76844 }
76845@@ -1186,6 +1203,10 @@ try_again:
76846 if (!skb)
76847 goto out;
76848
76849+ err = gr_search_udp_recvmsg(sk, skb);
76850+ if (err)
76851+ goto out_free;
76852+
76853 ulen = skb->len - sizeof(struct udphdr);
76854 copied = len;
76855 if (copied > ulen)
76856@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76857
76858 drop:
76859 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76860- atomic_inc(&sk->sk_drops);
76861+ atomic_inc_unchecked(&sk->sk_drops);
76862 kfree_skb(skb);
76863 return -1;
76864 }
76865@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76866 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
76867
76868 if (!skb1) {
76869- atomic_inc(&sk->sk_drops);
76870+ atomic_inc_unchecked(&sk->sk_drops);
76871 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76872 IS_UDPLITE(sk));
76873 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76874@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76875 goto csum_error;
76876
76877 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76878+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76879+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76880+#endif
76881 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76882
76883 /*
76884@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
76885 sk_wmem_alloc_get(sp),
76886 sk_rmem_alloc_get(sp),
76887 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76888- atomic_read(&sp->sk_refcnt), sp,
76889- atomic_read(&sp->sk_drops), len);
76890+ atomic_read(&sp->sk_refcnt),
76891+#ifdef CONFIG_GRKERNSEC_HIDESYM
76892+ NULL,
76893+#else
76894+ sp,
76895+#endif
76896+ atomic_read_unchecked(&sp->sk_drops), len);
76897 }
76898
76899 int udp4_seq_show(struct seq_file *seq, void *v)
76900diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
76901index 6b8ebc5..1d624f4 100644
76902--- a/net/ipv6/addrconf.c
76903+++ b/net/ipv6/addrconf.c
76904@@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
76905 p.iph.ihl = 5;
76906 p.iph.protocol = IPPROTO_IPV6;
76907 p.iph.ttl = 64;
76908- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76909+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76910
76911 if (ops->ndo_do_ioctl) {
76912 mm_segment_t oldfs = get_fs();
76913diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
76914index 2ae79db..8f101bf 100644
76915--- a/net/ipv6/ah6.c
76916+++ b/net/ipv6/ah6.c
76917@@ -56,6 +56,8 @@ struct ah_skb_cb {
76918 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76919
76920 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76921+ unsigned int size) __size_overflow(3);
76922+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76923 unsigned int size)
76924 {
76925 unsigned int len;
76926diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
76927index 02dd203..e03fcc9 100644
76928--- a/net/ipv6/inet6_connection_sock.c
76929+++ b/net/ipv6/inet6_connection_sock.c
76930@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76931 #ifdef CONFIG_XFRM
76932 {
76933 struct rt6_info *rt = (struct rt6_info *)dst;
76934- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76935+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76936 }
76937 #endif
76938 }
76939@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76940 #ifdef CONFIG_XFRM
76941 if (dst) {
76942 struct rt6_info *rt = (struct rt6_info *)dst;
76943- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76944+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76945 __sk_dst_reset(sk);
76946 dst = NULL;
76947 }
76948diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
76949index 18a2719..779f36a 100644
76950--- a/net/ipv6/ipv6_sockglue.c
76951+++ b/net/ipv6/ipv6_sockglue.c
76952@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76953 if (sk->sk_type != SOCK_STREAM)
76954 return -ENOPROTOOPT;
76955
76956- msg.msg_control = optval;
76957+ msg.msg_control = (void __force_kernel *)optval;
76958 msg.msg_controllen = len;
76959 msg.msg_flags = flags;
76960
76961diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
76962index 94874b0..108a94d 100644
76963--- a/net/ipv6/netfilter/ip6_tables.c
76964+++ b/net/ipv6/netfilter/ip6_tables.c
76965@@ -945,6 +945,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76966 static int
76967 copy_entries_to_user(unsigned int total_size,
76968 const struct xt_table *table,
76969+ void __user *userptr) __size_overflow(1);
76970+static int
76971+copy_entries_to_user(unsigned int total_size,
76972+ const struct xt_table *table,
76973 void __user *userptr)
76974 {
76975 unsigned int off, num;
76976@@ -1194,6 +1198,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
76977 static int
76978 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76979 struct xt_table_info *newinfo, unsigned int num_counters,
76980+ void __user *counters_ptr) __size_overflow(5);
76981+static int
76982+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76983+ struct xt_table_info *newinfo, unsigned int num_counters,
76984 void __user *counters_ptr)
76985 {
76986 int ret;
76987@@ -1315,6 +1323,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76988
76989 static int
76990 do_add_counters(struct net *net, const void __user *user, unsigned int len,
76991+ int compat) __size_overflow(3);
76992+static int
76993+do_add_counters(struct net *net, const void __user *user, unsigned int len,
76994 int compat)
76995 {
76996 unsigned int i, curcpu;
76997diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76998index d02f7e4..2d2a0f1 100644
76999--- a/net/ipv6/raw.c
77000+++ b/net/ipv6/raw.c
77001@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
77002 {
77003 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
77004 skb_checksum_complete(skb)) {
77005- atomic_inc(&sk->sk_drops);
77006+ atomic_inc_unchecked(&sk->sk_drops);
77007 kfree_skb(skb);
77008 return NET_RX_DROP;
77009 }
77010@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77011 struct raw6_sock *rp = raw6_sk(sk);
77012
77013 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
77014- atomic_inc(&sk->sk_drops);
77015+ atomic_inc_unchecked(&sk->sk_drops);
77016 kfree_skb(skb);
77017 return NET_RX_DROP;
77018 }
77019@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77020
77021 if (inet->hdrincl) {
77022 if (skb_checksum_complete(skb)) {
77023- atomic_inc(&sk->sk_drops);
77024+ atomic_inc_unchecked(&sk->sk_drops);
77025 kfree_skb(skb);
77026 return NET_RX_DROP;
77027 }
77028@@ -602,7 +602,7 @@ out:
77029 return err;
77030 }
77031
77032-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
77033+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
77034 struct flowi6 *fl6, struct dst_entry **dstp,
77035 unsigned int flags)
77036 {
77037@@ -912,12 +912,15 @@ do_confirm:
77038 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
77039 char __user *optval, int optlen)
77040 {
77041+ struct icmp6_filter filter;
77042+
77043 switch (optname) {
77044 case ICMPV6_FILTER:
77045 if (optlen > sizeof(struct icmp6_filter))
77046 optlen = sizeof(struct icmp6_filter);
77047- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
77048+ if (copy_from_user(&filter, optval, optlen))
77049 return -EFAULT;
77050+ raw6_sk(sk)->filter = filter;
77051 return 0;
77052 default:
77053 return -ENOPROTOOPT;
77054@@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77055 char __user *optval, int __user *optlen)
77056 {
77057 int len;
77058+ struct icmp6_filter filter;
77059
77060 switch (optname) {
77061 case ICMPV6_FILTER:
77062@@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77063 len = sizeof(struct icmp6_filter);
77064 if (put_user(len, optlen))
77065 return -EFAULT;
77066- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
77067+ filter = raw6_sk(sk)->filter;
77068+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
77069 return -EFAULT;
77070 return 0;
77071 default:
77072@@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77073 0, 0L, 0,
77074 sock_i_uid(sp), 0,
77075 sock_i_ino(sp),
77076- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77077+ atomic_read(&sp->sk_refcnt),
77078+#ifdef CONFIG_GRKERNSEC_HIDESYM
77079+ NULL,
77080+#else
77081+ sp,
77082+#endif
77083+ atomic_read_unchecked(&sp->sk_drops));
77084 }
77085
77086 static int raw6_seq_show(struct seq_file *seq, void *v)
77087diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
77088index a89ca8d..12e66b0 100644
77089--- a/net/ipv6/tcp_ipv6.c
77090+++ b/net/ipv6/tcp_ipv6.c
77091@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
77092 }
77093 #endif
77094
77095+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77096+extern int grsec_enable_blackhole;
77097+#endif
77098+
77099 static void tcp_v6_hash(struct sock *sk)
77100 {
77101 if (sk->sk_state != TCP_CLOSE) {
77102@@ -1654,6 +1658,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
77103 return 0;
77104
77105 reset:
77106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77107+ if (!grsec_enable_blackhole)
77108+#endif
77109 tcp_v6_send_reset(sk, skb);
77110 discard:
77111 if (opt_skb)
77112@@ -1733,12 +1740,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
77113 TCP_SKB_CB(skb)->sacked = 0;
77114
77115 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77116- if (!sk)
77117+ if (!sk) {
77118+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77119+ ret = 1;
77120+#endif
77121 goto no_tcp_socket;
77122+ }
77123
77124 process:
77125- if (sk->sk_state == TCP_TIME_WAIT)
77126+ if (sk->sk_state == TCP_TIME_WAIT) {
77127+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77128+ ret = 2;
77129+#endif
77130 goto do_time_wait;
77131+ }
77132
77133 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
77134 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77135@@ -1786,6 +1801,10 @@ no_tcp_socket:
77136 bad_packet:
77137 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77138 } else {
77139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77140+ if (!grsec_enable_blackhole || (ret == 1 &&
77141+ (skb->dev->flags & IFF_LOOPBACK)))
77142+#endif
77143 tcp_v6_send_reset(NULL, skb);
77144 }
77145
77146@@ -2047,7 +2066,13 @@ static void get_openreq6(struct seq_file *seq,
77147 uid,
77148 0, /* non standard timer */
77149 0, /* open_requests have no inode */
77150- 0, req);
77151+ 0,
77152+#ifdef CONFIG_GRKERNSEC_HIDESYM
77153+ NULL
77154+#else
77155+ req
77156+#endif
77157+ );
77158 }
77159
77160 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77161@@ -2097,7 +2122,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77162 sock_i_uid(sp),
77163 icsk->icsk_probes_out,
77164 sock_i_ino(sp),
77165- atomic_read(&sp->sk_refcnt), sp,
77166+ atomic_read(&sp->sk_refcnt),
77167+#ifdef CONFIG_GRKERNSEC_HIDESYM
77168+ NULL,
77169+#else
77170+ sp,
77171+#endif
77172 jiffies_to_clock_t(icsk->icsk_rto),
77173 jiffies_to_clock_t(icsk->icsk_ack.ato),
77174 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
77175@@ -2132,7 +2162,13 @@ static void get_timewait6_sock(struct seq_file *seq,
77176 dest->s6_addr32[2], dest->s6_addr32[3], destp,
77177 tw->tw_substate, 0, 0,
77178 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77179- atomic_read(&tw->tw_refcnt), tw);
77180+ atomic_read(&tw->tw_refcnt),
77181+#ifdef CONFIG_GRKERNSEC_HIDESYM
77182+ NULL
77183+#else
77184+ tw
77185+#endif
77186+ );
77187 }
77188
77189 static int tcp6_seq_show(struct seq_file *seq, void *v)
77190diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
77191index 4f96b5c..75543d7 100644
77192--- a/net/ipv6/udp.c
77193+++ b/net/ipv6/udp.c
77194@@ -50,6 +50,10 @@
77195 #include <linux/seq_file.h>
77196 #include "udp_impl.h"
77197
77198+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77199+extern int grsec_enable_blackhole;
77200+#endif
77201+
77202 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
77203 {
77204 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
77205@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
77206
77207 return 0;
77208 drop:
77209- atomic_inc(&sk->sk_drops);
77210+ atomic_inc_unchecked(&sk->sk_drops);
77211 drop_no_sk_drops_inc:
77212 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77213 kfree_skb(skb);
77214@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77215 continue;
77216 }
77217 drop:
77218- atomic_inc(&sk->sk_drops);
77219+ atomic_inc_unchecked(&sk->sk_drops);
77220 UDP6_INC_STATS_BH(sock_net(sk),
77221 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
77222 UDP6_INC_STATS_BH(sock_net(sk),
77223@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77224 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
77225 proto == IPPROTO_UDPLITE);
77226
77227+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77228+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77229+#endif
77230 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
77231
77232 kfree_skb(skb);
77233@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77234 if (!sock_owned_by_user(sk))
77235 udpv6_queue_rcv_skb(sk, skb);
77236 else if (sk_add_backlog(sk, skb)) {
77237- atomic_inc(&sk->sk_drops);
77238+ atomic_inc_unchecked(&sk->sk_drops);
77239 bh_unlock_sock(sk);
77240 sock_put(sk);
77241 goto discard;
77242@@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
77243 0, 0L, 0,
77244 sock_i_uid(sp), 0,
77245 sock_i_ino(sp),
77246- atomic_read(&sp->sk_refcnt), sp,
77247- atomic_read(&sp->sk_drops));
77248+ atomic_read(&sp->sk_refcnt),
77249+#ifdef CONFIG_GRKERNSEC_HIDESYM
77250+ NULL,
77251+#else
77252+ sp,
77253+#endif
77254+ atomic_read_unchecked(&sp->sk_drops));
77255 }
77256
77257 int udp6_seq_show(struct seq_file *seq, void *v)
77258diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
77259index 253695d..9481ce8 100644
77260--- a/net/irda/ircomm/ircomm_tty.c
77261+++ b/net/irda/ircomm/ircomm_tty.c
77262@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77263 add_wait_queue(&self->open_wait, &wait);
77264
77265 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
77266- __FILE__,__LINE__, tty->driver->name, self->open_count );
77267+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77268
77269 /* As far as I can see, we protect open_count - Jean II */
77270 spin_lock_irqsave(&self->spinlock, flags);
77271 if (!tty_hung_up_p(filp)) {
77272 extra_count = 1;
77273- self->open_count--;
77274+ local_dec(&self->open_count);
77275 }
77276 spin_unlock_irqrestore(&self->spinlock, flags);
77277- self->blocked_open++;
77278+ local_inc(&self->blocked_open);
77279
77280 while (1) {
77281 if (tty->termios->c_cflag & CBAUD) {
77282@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77283 }
77284
77285 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
77286- __FILE__,__LINE__, tty->driver->name, self->open_count );
77287+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77288
77289 schedule();
77290 }
77291@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77292 if (extra_count) {
77293 /* ++ is not atomic, so this should be protected - Jean II */
77294 spin_lock_irqsave(&self->spinlock, flags);
77295- self->open_count++;
77296+ local_inc(&self->open_count);
77297 spin_unlock_irqrestore(&self->spinlock, flags);
77298 }
77299- self->blocked_open--;
77300+ local_dec(&self->blocked_open);
77301
77302 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
77303- __FILE__,__LINE__, tty->driver->name, self->open_count);
77304+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
77305
77306 if (!retval)
77307 self->flags |= ASYNC_NORMAL_ACTIVE;
77308@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
77309 }
77310 /* ++ is not atomic, so this should be protected - Jean II */
77311 spin_lock_irqsave(&self->spinlock, flags);
77312- self->open_count++;
77313+ local_inc(&self->open_count);
77314
77315 tty->driver_data = self;
77316 self->tty = tty;
77317 spin_unlock_irqrestore(&self->spinlock, flags);
77318
77319 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
77320- self->line, self->open_count);
77321+ self->line, local_read(&self->open_count));
77322
77323 /* Not really used by us, but lets do it anyway */
77324 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
77325@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77326 return;
77327 }
77328
77329- if ((tty->count == 1) && (self->open_count != 1)) {
77330+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
77331 /*
77332 * Uh, oh. tty->count is 1, which means that the tty
77333 * structure will be freed. state->count should always
77334@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77335 */
77336 IRDA_DEBUG(0, "%s(), bad serial port count; "
77337 "tty->count is 1, state->count is %d\n", __func__ ,
77338- self->open_count);
77339- self->open_count = 1;
77340+ local_read(&self->open_count));
77341+ local_set(&self->open_count, 1);
77342 }
77343
77344- if (--self->open_count < 0) {
77345+ if (local_dec_return(&self->open_count) < 0) {
77346 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77347- __func__, self->line, self->open_count);
77348- self->open_count = 0;
77349+ __func__, self->line, local_read(&self->open_count));
77350+ local_set(&self->open_count, 0);
77351 }
77352- if (self->open_count) {
77353+ if (local_read(&self->open_count)) {
77354 spin_unlock_irqrestore(&self->spinlock, flags);
77355
77356 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
77357@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77358 tty->closing = 0;
77359 self->tty = NULL;
77360
77361- if (self->blocked_open) {
77362+ if (local_read(&self->blocked_open)) {
77363 if (self->close_delay)
77364 schedule_timeout_interruptible(self->close_delay);
77365 wake_up_interruptible(&self->open_wait);
77366@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
77367 spin_lock_irqsave(&self->spinlock, flags);
77368 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77369 self->tty = NULL;
77370- self->open_count = 0;
77371+ local_set(&self->open_count, 0);
77372 spin_unlock_irqrestore(&self->spinlock, flags);
77373
77374 wake_up_interruptible(&self->open_wait);
77375@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
77376 seq_putc(m, '\n');
77377
77378 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77379- seq_printf(m, "Open count: %d\n", self->open_count);
77380+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
77381 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77382 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77383
77384diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
77385index d5c5b8f..33beff0 100644
77386--- a/net/iucv/af_iucv.c
77387+++ b/net/iucv/af_iucv.c
77388@@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
77389
77390 write_lock_bh(&iucv_sk_list.lock);
77391
77392- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77393+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77394 while (__iucv_get_sock_by_name(name)) {
77395 sprintf(name, "%08x",
77396- atomic_inc_return(&iucv_sk_list.autobind_name));
77397+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77398 }
77399
77400 write_unlock_bh(&iucv_sk_list.lock);
77401diff --git a/net/key/af_key.c b/net/key/af_key.c
77402index 11dbb22..c20f667 100644
77403--- a/net/key/af_key.c
77404+++ b/net/key/af_key.c
77405@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
77406 static u32 get_acqseq(void)
77407 {
77408 u32 res;
77409- static atomic_t acqseq;
77410+ static atomic_unchecked_t acqseq;
77411
77412 do {
77413- res = atomic_inc_return(&acqseq);
77414+ res = atomic_inc_return_unchecked(&acqseq);
77415 } while (!res);
77416 return res;
77417 }
77418diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
77419index 2f0642d..e5c6fba 100644
77420--- a/net/mac80211/ieee80211_i.h
77421+++ b/net/mac80211/ieee80211_i.h
77422@@ -28,6 +28,7 @@
77423 #include <net/ieee80211_radiotap.h>
77424 #include <net/cfg80211.h>
77425 #include <net/mac80211.h>
77426+#include <asm/local.h>
77427 #include "key.h"
77428 #include "sta_info.h"
77429
77430@@ -781,7 +782,7 @@ struct ieee80211_local {
77431 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
77432 spinlock_t queue_stop_reason_lock;
77433
77434- int open_count;
77435+ local_t open_count;
77436 int monitors, cooked_mntrs;
77437 /* number of interfaces with corresponding FIF_ flags */
77438 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
77439diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
77440index 8e2137b..2974283 100644
77441--- a/net/mac80211/iface.c
77442+++ b/net/mac80211/iface.c
77443@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77444 break;
77445 }
77446
77447- if (local->open_count == 0) {
77448+ if (local_read(&local->open_count) == 0) {
77449 res = drv_start(local);
77450 if (res)
77451 goto err_del_bss;
77452@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77453 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
77454
77455 if (!is_valid_ether_addr(dev->dev_addr)) {
77456- if (!local->open_count)
77457+ if (!local_read(&local->open_count))
77458 drv_stop(local);
77459 return -EADDRNOTAVAIL;
77460 }
77461@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77462 mutex_unlock(&local->mtx);
77463
77464 if (coming_up)
77465- local->open_count++;
77466+ local_inc(&local->open_count);
77467
77468 if (hw_reconf_flags)
77469 ieee80211_hw_config(local, hw_reconf_flags);
77470@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77471 err_del_interface:
77472 drv_remove_interface(local, sdata);
77473 err_stop:
77474- if (!local->open_count)
77475+ if (!local_read(&local->open_count))
77476 drv_stop(local);
77477 err_del_bss:
77478 sdata->bss = NULL;
77479@@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77480 }
77481
77482 if (going_down)
77483- local->open_count--;
77484+ local_dec(&local->open_count);
77485
77486 switch (sdata->vif.type) {
77487 case NL80211_IFTYPE_AP_VLAN:
77488@@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77489
77490 ieee80211_recalc_ps(local, -1);
77491
77492- if (local->open_count == 0) {
77493+ if (local_read(&local->open_count) == 0) {
77494 if (local->ops->napi_poll)
77495 napi_disable(&local->napi);
77496 ieee80211_clear_tx_pending(local);
77497diff --git a/net/mac80211/main.c b/net/mac80211/main.c
77498index b142bd4..a651749 100644
77499--- a/net/mac80211/main.c
77500+++ b/net/mac80211/main.c
77501@@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
77502 local->hw.conf.power_level = power;
77503 }
77504
77505- if (changed && local->open_count) {
77506+ if (changed && local_read(&local->open_count)) {
77507 ret = drv_config(local, changed);
77508 /*
77509 * Goal:
77510diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
77511index 596efaf..8f1911f 100644
77512--- a/net/mac80211/pm.c
77513+++ b/net/mac80211/pm.c
77514@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77515 struct ieee80211_sub_if_data *sdata;
77516 struct sta_info *sta;
77517
77518- if (!local->open_count)
77519+ if (!local_read(&local->open_count))
77520 goto suspend;
77521
77522 ieee80211_scan_cancel(local);
77523@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77524 cancel_work_sync(&local->dynamic_ps_enable_work);
77525 del_timer_sync(&local->dynamic_ps_timer);
77526
77527- local->wowlan = wowlan && local->open_count;
77528+ local->wowlan = wowlan && local_read(&local->open_count);
77529 if (local->wowlan) {
77530 int err = drv_suspend(local, wowlan);
77531 if (err < 0) {
77532@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77533 }
77534
77535 /* stop hardware - this must stop RX */
77536- if (local->open_count)
77537+ if (local_read(&local->open_count))
77538 ieee80211_stop_device(local);
77539
77540 suspend:
77541diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
77542index f9b8e81..bb89b46 100644
77543--- a/net/mac80211/rate.c
77544+++ b/net/mac80211/rate.c
77545@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
77546
77547 ASSERT_RTNL();
77548
77549- if (local->open_count)
77550+ if (local_read(&local->open_count))
77551 return -EBUSY;
77552
77553 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
77554diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
77555index c97a065..ff61928 100644
77556--- a/net/mac80211/rc80211_pid_debugfs.c
77557+++ b/net/mac80211/rc80211_pid_debugfs.c
77558@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
77559
77560 spin_unlock_irqrestore(&events->lock, status);
77561
77562- if (copy_to_user(buf, pb, p))
77563+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
77564 return -EFAULT;
77565
77566 return p;
77567diff --git a/net/mac80211/util.c b/net/mac80211/util.c
77568index 9919892..8c49803 100644
77569--- a/net/mac80211/util.c
77570+++ b/net/mac80211/util.c
77571@@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
77572 }
77573 #endif
77574 /* everything else happens only if HW was up & running */
77575- if (!local->open_count)
77576+ if (!local_read(&local->open_count))
77577 goto wake_up;
77578
77579 /*
77580diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
77581index f8ac4ef..b02560b 100644
77582--- a/net/netfilter/Kconfig
77583+++ b/net/netfilter/Kconfig
77584@@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
77585
77586 To compile it as a module, choose M here. If unsure, say N.
77587
77588+config NETFILTER_XT_MATCH_GRADM
77589+ tristate '"gradm" match support'
77590+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77591+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77592+ ---help---
77593+ The gradm match allows to match on grsecurity RBAC being enabled.
77594+ It is useful when iptables rules are applied early on bootup to
77595+ prevent connections to the machine (except from a trusted host)
77596+ while the RBAC system is disabled.
77597+
77598 config NETFILTER_XT_MATCH_HASHLIMIT
77599 tristate '"hashlimit" match support'
77600 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
77601diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
77602index 40f4c3d..0d5dd6b 100644
77603--- a/net/netfilter/Makefile
77604+++ b/net/netfilter/Makefile
77605@@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
77606 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
77607 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
77608 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77609+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77610 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77611 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77612 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77613diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
77614index 29fa5ba..8debc79 100644
77615--- a/net/netfilter/ipvs/ip_vs_conn.c
77616+++ b/net/netfilter/ipvs/ip_vs_conn.c
77617@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
77618 /* Increase the refcnt counter of the dest */
77619 atomic_inc(&dest->refcnt);
77620
77621- conn_flags = atomic_read(&dest->conn_flags);
77622+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
77623 if (cp->protocol != IPPROTO_UDP)
77624 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
77625 /* Bind with the destination and its corresponding transmitter */
77626@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
77627 atomic_set(&cp->refcnt, 1);
77628
77629 atomic_set(&cp->n_control, 0);
77630- atomic_set(&cp->in_pkts, 0);
77631+ atomic_set_unchecked(&cp->in_pkts, 0);
77632
77633 atomic_inc(&ipvs->conn_count);
77634 if (flags & IP_VS_CONN_F_NO_CPORT)
77635@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
77636
77637 /* Don't drop the entry if its number of incoming packets is not
77638 located in [0, 8] */
77639- i = atomic_read(&cp->in_pkts);
77640+ i = atomic_read_unchecked(&cp->in_pkts);
77641 if (i > 8 || i < 0) return 0;
77642
77643 if (!todrop_rate[i]) return 0;
77644diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
77645index 2555816..31492d9 100644
77646--- a/net/netfilter/ipvs/ip_vs_core.c
77647+++ b/net/netfilter/ipvs/ip_vs_core.c
77648@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
77649 ret = cp->packet_xmit(skb, cp, pd->pp);
77650 /* do not touch skb anymore */
77651
77652- atomic_inc(&cp->in_pkts);
77653+ atomic_inc_unchecked(&cp->in_pkts);
77654 ip_vs_conn_put(cp);
77655 return ret;
77656 }
77657@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
77658 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
77659 pkts = sysctl_sync_threshold(ipvs);
77660 else
77661- pkts = atomic_add_return(1, &cp->in_pkts);
77662+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77663
77664 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
77665 cp->protocol == IPPROTO_SCTP) {
77666diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
77667index b3afe18..08ec940 100644
77668--- a/net/netfilter/ipvs/ip_vs_ctl.c
77669+++ b/net/netfilter/ipvs/ip_vs_ctl.c
77670@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
77671 ip_vs_rs_hash(ipvs, dest);
77672 write_unlock_bh(&ipvs->rs_lock);
77673 }
77674- atomic_set(&dest->conn_flags, conn_flags);
77675+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
77676
77677 /* bind the service */
77678 if (!dest->svc) {
77679@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77680 " %-7s %-6d %-10d %-10d\n",
77681 &dest->addr.in6,
77682 ntohs(dest->port),
77683- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77684+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77685 atomic_read(&dest->weight),
77686 atomic_read(&dest->activeconns),
77687 atomic_read(&dest->inactconns));
77688@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77689 "%-7s %-6d %-10d %-10d\n",
77690 ntohl(dest->addr.ip),
77691 ntohs(dest->port),
77692- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77693+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77694 atomic_read(&dest->weight),
77695 atomic_read(&dest->activeconns),
77696 atomic_read(&dest->inactconns));
77697@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
77698
77699 entry.addr = dest->addr.ip;
77700 entry.port = dest->port;
77701- entry.conn_flags = atomic_read(&dest->conn_flags);
77702+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77703 entry.weight = atomic_read(&dest->weight);
77704 entry.u_threshold = dest->u_threshold;
77705 entry.l_threshold = dest->l_threshold;
77706@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
77707 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77708
77709 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77710- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77711+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77712 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77713 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77714 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
77715diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
77716index 8a0d6d6..90ec197 100644
77717--- a/net/netfilter/ipvs/ip_vs_sync.c
77718+++ b/net/netfilter/ipvs/ip_vs_sync.c
77719@@ -649,7 +649,7 @@ control:
77720 * i.e only increment in_pkts for Templates.
77721 */
77722 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
77723- int pkts = atomic_add_return(1, &cp->in_pkts);
77724+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77725
77726 if (pkts % sysctl_sync_period(ipvs) != 1)
77727 return;
77728@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
77729
77730 if (opt)
77731 memcpy(&cp->in_seq, opt, sizeof(*opt));
77732- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77733+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77734 cp->state = state;
77735 cp->old_state = cp->state;
77736 /*
77737diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
77738index 7fd66de..e6fb361 100644
77739--- a/net/netfilter/ipvs/ip_vs_xmit.c
77740+++ b/net/netfilter/ipvs/ip_vs_xmit.c
77741@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
77742 else
77743 rc = NF_ACCEPT;
77744 /* do not touch skb anymore */
77745- atomic_inc(&cp->in_pkts);
77746+ atomic_inc_unchecked(&cp->in_pkts);
77747 goto out;
77748 }
77749
77750@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
77751 else
77752 rc = NF_ACCEPT;
77753 /* do not touch skb anymore */
77754- atomic_inc(&cp->in_pkts);
77755+ atomic_inc_unchecked(&cp->in_pkts);
77756 goto out;
77757 }
77758
77759diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
77760index 66b2c54..c7884e3 100644
77761--- a/net/netfilter/nfnetlink_log.c
77762+++ b/net/netfilter/nfnetlink_log.c
77763@@ -70,7 +70,7 @@ struct nfulnl_instance {
77764 };
77765
77766 static DEFINE_SPINLOCK(instances_lock);
77767-static atomic_t global_seq;
77768+static atomic_unchecked_t global_seq;
77769
77770 #define INSTANCE_BUCKETS 16
77771 static struct hlist_head instance_table[INSTANCE_BUCKETS];
77772@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
77773 /* global sequence number */
77774 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77775 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77776- htonl(atomic_inc_return(&global_seq)));
77777+ htonl(atomic_inc_return_unchecked(&global_seq)));
77778
77779 if (data_len) {
77780 struct nlattr *nla;
77781diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
77782new file mode 100644
77783index 0000000..6905327
77784--- /dev/null
77785+++ b/net/netfilter/xt_gradm.c
77786@@ -0,0 +1,51 @@
77787+/*
77788+ * gradm match for netfilter
77789