]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-3.3.0-201203242026.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.0-201203242026.patch
CommitLineData
23fa62ed
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 0c083c5..9c2512a 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -69,6 +73,7 @@ Image
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -92,19 +97,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -115,9 +125,11 @@ devlist.h*
67 dnotify_test
68 docproc
69 dslm
70+dtc-lexer.lex.c
71 elf2ecoff
72 elfconfig.h*
73 evergreen_reg_safe.h
74+exception_policy.conf
75 fixdep
76 flask.h
77 fore200e_mkfirm
78@@ -125,12 +137,15 @@ fore200e_pca_fw.c*
79 gconf
80 gconf.glade.h
81 gen-devlist
82+gen-kdb_cmds.c
83 gen_crc32table
84 gen_init_cpio
85 generated
86 genheaders
87 genksyms
88 *_gray256.c
89+hash
90+hid-example
91 hpet_example
92 hugepage-mmap
93 hugepage-shm
94@@ -145,7 +160,7 @@ int32.c
95 int4.c
96 int8.c
97 kallsyms
98-kconfig
99+kern_constants.h
100 keywords.c
101 ksym.c*
102 ksym.h*
103@@ -153,7 +168,7 @@ kxgettext
104 lkc_defs.h
105 lex.c
106 lex.*.c
107-linux
108+lib1funcs.S
109 logo_*.c
110 logo_*_clut224.c
111 logo_*_mono.c
112@@ -165,14 +180,15 @@ machtypes.h
113 map
114 map_hugetlb
115 maui_boot.h
116-media
117 mconf
118+mdp
119 miboot*
120 mk_elfconfig
121 mkboot
122 mkbugboot
123 mkcpustr
124 mkdep
125+mkpiggy
126 mkprep
127 mkregtable
128 mktables
129@@ -208,6 +224,7 @@ r300_reg_safe.h
130 r420_reg_safe.h
131 r600_reg_safe.h
132 recordmcount
133+regdb.c
134 relocs
135 rlim_names.h
136 rn50_reg_safe.h
137@@ -218,6 +235,7 @@ setup
138 setup.bin
139 setup.elf
140 sImage
141+slabinfo
142 sm_tbl*
143 split-include
144 syscalltab.h
145@@ -228,6 +246,7 @@ tftpboot.img
146 timeconst.h
147 times.h*
148 trix_boot.h
149+user_constants.h
150 utsrelease.h*
151 vdso-syms.lds
152 vdso.lds
153@@ -245,7 +264,9 @@ vmlinux
154 vmlinux-*
155 vmlinux.aout
156 vmlinux.bin.all
157+vmlinux.bin.bz2
158 vmlinux.lds
159+vmlinux.relocs
160 vmlinuz
161 voffset.h
162 vsyscall.lds
163@@ -253,9 +274,11 @@ vsyscall_32.lds
164 wanxlfw.inc
165 uImage
166 unifdef
167+utsrelease.h
168 wakeup.bin
169 wakeup.elf
170 wakeup.lds
171 zImage*
172 zconf.hash.c
173+zconf.lex.c
174 zoffset.h
175diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
176index d99fd9c..8689fef 100644
177--- a/Documentation/kernel-parameters.txt
178+++ b/Documentation/kernel-parameters.txt
179@@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
180 the specified number of seconds. This is to be used if
181 your oopses keep scrolling off the screen.
182
183+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
184+ virtualization environments that don't cope well with the
185+ expand down segment used by UDEREF on X86-32 or the frequent
186+ page table updates on X86-64.
187+
188+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
189+
190 pcbit= [HW,ISDN]
191
192 pcd. [PARIDE]
193diff --git a/Makefile b/Makefile
194index 1932984..0204e68 100644
195--- a/Makefile
196+++ b/Makefile
197@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
198
199 HOSTCC = gcc
200 HOSTCXX = g++
201-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
202-HOSTCXXFLAGS = -O2
203+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
204+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
205+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
206
207 # Decide whether to build built-in, modular, or both.
208 # Normally, just do built-in.
209@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
210 # Rules shared between *config targets and build targets
211
212 # Basic helpers built in scripts/
213-PHONY += scripts_basic
214-scripts_basic:
215+PHONY += scripts_basic gcc-plugins
216+scripts_basic: gcc-plugins
217 $(Q)$(MAKE) $(build)=scripts/basic
218 $(Q)rm -f .tmp_quiet_recordmcount
219
220@@ -564,6 +565,50 @@ else
221 KBUILD_CFLAGS += -O2
222 endif
223
224+ifndef DISABLE_PAX_PLUGINS
225+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
226+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
227+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
228+endif
229+ifdef CONFIG_PAX_MEMORY_STACKLEAK
230+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
231+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
232+endif
233+ifdef CONFIG_KALLOCSTAT_PLUGIN
234+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
235+endif
236+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
237+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
238+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
239+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
240+endif
241+ifdef CONFIG_CHECKER_PLUGIN
242+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
243+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
244+endif
245+endif
246+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
247+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
248+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
249+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
250+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
251+ifeq ($(KBUILD_EXTMOD),)
252+gcc-plugins:
253+ $(Q)$(MAKE) $(build)=tools/gcc
254+else
255+gcc-plugins: ;
256+endif
257+else
258+gcc-plugins:
259+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
260+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
261+else
262+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
263+endif
264+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
265+endif
266+endif
267+
268 include $(srctree)/arch/$(SRCARCH)/Makefile
269
270 ifneq ($(CONFIG_FRAME_WARN),0)
271@@ -708,7 +753,7 @@ export mod_strip_cmd
272
273
274 ifeq ($(KBUILD_EXTMOD),)
275-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
276+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
277
278 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
279 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
280@@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
281
282 # The actual objects are generated when descending,
283 # make sure no implicit rule kicks in
284+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
285+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
286 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
287
288 # Handle descending into subdirectories listed in $(vmlinux-dirs)
289@@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
290 # Error messages still appears in the original language
291
292 PHONY += $(vmlinux-dirs)
293-$(vmlinux-dirs): prepare scripts
294+$(vmlinux-dirs): gcc-plugins prepare scripts
295 $(Q)$(MAKE) $(build)=$@
296
297 # Store (new) KERNELRELASE string in include/config/kernel.release
298@@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
299 $(Q)$(MAKE) $(build)=.
300
301 # All the preparing..
302+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
303 prepare: prepare0
304
305 # Generate some files
306@@ -1089,6 +1137,8 @@ all: modules
307 # using awk while concatenating to the final file.
308
309 PHONY += modules
310+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
311+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
312 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
313 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
314 @$(kecho) ' Building modules, stage 2.';
315@@ -1104,7 +1154,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
316
317 # Target to prepare building external modules
318 PHONY += modules_prepare
319-modules_prepare: prepare scripts
320+modules_prepare: gcc-plugins prepare scripts
321
322 # Target to install modules
323 PHONY += modules_install
324@@ -1201,6 +1251,7 @@ distclean: mrproper
325 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
326 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
327 -o -name '.*.rej' \
328+ -o -name '.*.rej' -o -name '*.so' \
329 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
330 -type f -print | xargs rm -f
331
332@@ -1361,6 +1412,8 @@ PHONY += $(module-dirs) modules
333 $(module-dirs): crmodverdir $(objtree)/Module.symvers
334 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
335
336+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
337+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
338 modules: $(module-dirs)
339 @$(kecho) ' Building modules, stage 2.';
340 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
341@@ -1487,17 +1540,21 @@ else
342 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
343 endif
344
345-%.s: %.c prepare scripts FORCE
346+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348+%.s: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.i: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352-%.o: %.c prepare scripts FORCE
353+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
354+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
355+%.o: %.c gcc-plugins prepare scripts FORCE
356 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
357 %.lst: %.c prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359-%.s: %.S prepare scripts FORCE
360+%.s: %.S gcc-plugins prepare scripts FORCE
361 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
362-%.o: %.S prepare scripts FORCE
363+%.o: %.S gcc-plugins prepare scripts FORCE
364 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
365 %.symtypes: %.c prepare scripts FORCE
366 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
367@@ -1507,11 +1564,15 @@ endif
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371-%/: prepare scripts FORCE
372+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374+%/: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir)
378-%.ko: prepare scripts FORCE
379+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381+%.ko: gcc-plugins prepare scripts FORCE
382 $(cmd_crmodverdir)
383 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
384 $(build)=$(build-dir) $(@:.ko=.o)
385diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
386index 640f909..48b6597 100644
387--- a/arch/alpha/include/asm/atomic.h
388+++ b/arch/alpha/include/asm/atomic.h
389@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
390 #define atomic_dec(v) atomic_sub(1,(v))
391 #define atomic64_dec(v) atomic64_sub(1,(v))
392
393+#define atomic64_read_unchecked(v) atomic64_read(v)
394+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
395+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
396+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
397+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
398+#define atomic64_inc_unchecked(v) atomic64_inc(v)
399+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
400+#define atomic64_dec_unchecked(v) atomic64_dec(v)
401+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
402+
403 #define smp_mb__before_atomic_dec() smp_mb()
404 #define smp_mb__after_atomic_dec() smp_mb()
405 #define smp_mb__before_atomic_inc() smp_mb()
406diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
407index ad368a9..fbe0f25 100644
408--- a/arch/alpha/include/asm/cache.h
409+++ b/arch/alpha/include/asm/cache.h
410@@ -4,19 +4,19 @@
411 #ifndef __ARCH_ALPHA_CACHE_H
412 #define __ARCH_ALPHA_CACHE_H
413
414+#include <linux/const.h>
415
416 /* Bytes per L1 (data) cache line. */
417 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
418-# define L1_CACHE_BYTES 64
419 # define L1_CACHE_SHIFT 6
420 #else
421 /* Both EV4 and EV5 are write-through, read-allocate,
422 direct-mapped, physical.
423 */
424-# define L1_CACHE_BYTES 32
425 # define L1_CACHE_SHIFT 5
426 #endif
427
428+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
429 #define SMP_CACHE_BYTES L1_CACHE_BYTES
430
431 #endif
432diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
433index da5449e..7418343 100644
434--- a/arch/alpha/include/asm/elf.h
435+++ b/arch/alpha/include/asm/elf.h
436@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
437
438 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
439
440+#ifdef CONFIG_PAX_ASLR
441+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
442+
443+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
444+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
445+#endif
446+
447 /* $0 is set by ld.so to a pointer to a function which might be
448 registered using atexit. This provides a mean for the dynamic
449 linker to call DT_FINI functions for shared libraries that have
450diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
451index de98a73..bd4f1f8 100644
452--- a/arch/alpha/include/asm/pgtable.h
453+++ b/arch/alpha/include/asm/pgtable.h
454@@ -101,6 +101,17 @@ struct vm_area_struct;
455 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
456 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
457 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
458+
459+#ifdef CONFIG_PAX_PAGEEXEC
460+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
461+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
462+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
463+#else
464+# define PAGE_SHARED_NOEXEC PAGE_SHARED
465+# define PAGE_COPY_NOEXEC PAGE_COPY
466+# define PAGE_READONLY_NOEXEC PAGE_READONLY
467+#endif
468+
469 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
470
471 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
472diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
473index 2fd00b7..cfd5069 100644
474--- a/arch/alpha/kernel/module.c
475+++ b/arch/alpha/kernel/module.c
476@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
477
478 /* The small sections were sorted to the end of the segment.
479 The following should definitely cover them. */
480- gp = (u64)me->module_core + me->core_size - 0x8000;
481+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
482 got = sechdrs[me->arch.gotsecindex].sh_addr;
483
484 for (i = 0; i < n; i++) {
485diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
486index 01e8715..be0e80f 100644
487--- a/arch/alpha/kernel/osf_sys.c
488+++ b/arch/alpha/kernel/osf_sys.c
489@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
490 /* At this point: (!vma || addr < vma->vm_end). */
491 if (limit - len < addr)
492 return -ENOMEM;
493- if (!vma || addr + len <= vma->vm_start)
494+ if (check_heap_stack_gap(vma, addr, len))
495 return addr;
496 addr = vma->vm_end;
497 vma = vma->vm_next;
498@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
499 merely specific addresses, but regions of memory -- perhaps
500 this feature should be incorporated into all ports? */
501
502+#ifdef CONFIG_PAX_RANDMMAP
503+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
504+#endif
505+
506 if (addr) {
507 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
508 if (addr != (unsigned long) -ENOMEM)
509@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
510 }
511
512 /* Next, try allocating at TASK_UNMAPPED_BASE. */
513- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
514- len, limit);
515+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
516+
517 if (addr != (unsigned long) -ENOMEM)
518 return addr;
519
520diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
521index fadd5f8..904e73a 100644
522--- a/arch/alpha/mm/fault.c
523+++ b/arch/alpha/mm/fault.c
524@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
525 __reload_thread(pcb);
526 }
527
528+#ifdef CONFIG_PAX_PAGEEXEC
529+/*
530+ * PaX: decide what to do with offenders (regs->pc = fault address)
531+ *
532+ * returns 1 when task should be killed
533+ * 2 when patched PLT trampoline was detected
534+ * 3 when unpatched PLT trampoline was detected
535+ */
536+static int pax_handle_fetch_fault(struct pt_regs *regs)
537+{
538+
539+#ifdef CONFIG_PAX_EMUPLT
540+ int err;
541+
542+ do { /* PaX: patched PLT emulation #1 */
543+ unsigned int ldah, ldq, jmp;
544+
545+ err = get_user(ldah, (unsigned int *)regs->pc);
546+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
547+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
548+
549+ if (err)
550+ break;
551+
552+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
553+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
554+ jmp == 0x6BFB0000U)
555+ {
556+ unsigned long r27, addr;
557+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
558+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
559+
560+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
561+ err = get_user(r27, (unsigned long *)addr);
562+ if (err)
563+ break;
564+
565+ regs->r27 = r27;
566+ regs->pc = r27;
567+ return 2;
568+ }
569+ } while (0);
570+
571+ do { /* PaX: patched PLT emulation #2 */
572+ unsigned int ldah, lda, br;
573+
574+ err = get_user(ldah, (unsigned int *)regs->pc);
575+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
576+ err |= get_user(br, (unsigned int *)(regs->pc+8));
577+
578+ if (err)
579+ break;
580+
581+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
582+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
583+ (br & 0xFFE00000U) == 0xC3E00000U)
584+ {
585+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
586+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
587+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
588+
589+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
590+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
591+ return 2;
592+ }
593+ } while (0);
594+
595+ do { /* PaX: unpatched PLT emulation */
596+ unsigned int br;
597+
598+ err = get_user(br, (unsigned int *)regs->pc);
599+
600+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
601+ unsigned int br2, ldq, nop, jmp;
602+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
603+
604+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
605+ err = get_user(br2, (unsigned int *)addr);
606+ err |= get_user(ldq, (unsigned int *)(addr+4));
607+ err |= get_user(nop, (unsigned int *)(addr+8));
608+ err |= get_user(jmp, (unsigned int *)(addr+12));
609+ err |= get_user(resolver, (unsigned long *)(addr+16));
610+
611+ if (err)
612+ break;
613+
614+ if (br2 == 0xC3600000U &&
615+ ldq == 0xA77B000CU &&
616+ nop == 0x47FF041FU &&
617+ jmp == 0x6B7B0000U)
618+ {
619+ regs->r28 = regs->pc+4;
620+ regs->r27 = addr+16;
621+ regs->pc = resolver;
622+ return 3;
623+ }
624+ }
625+ } while (0);
626+#endif
627+
628+ return 1;
629+}
630+
631+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
632+{
633+ unsigned long i;
634+
635+ printk(KERN_ERR "PAX: bytes at PC: ");
636+ for (i = 0; i < 5; i++) {
637+ unsigned int c;
638+ if (get_user(c, (unsigned int *)pc+i))
639+ printk(KERN_CONT "???????? ");
640+ else
641+ printk(KERN_CONT "%08x ", c);
642+ }
643+ printk("\n");
644+}
645+#endif
646
647 /*
648 * This routine handles page faults. It determines the address,
649@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
650 good_area:
651 si_code = SEGV_ACCERR;
652 if (cause < 0) {
653- if (!(vma->vm_flags & VM_EXEC))
654+ if (!(vma->vm_flags & VM_EXEC)) {
655+
656+#ifdef CONFIG_PAX_PAGEEXEC
657+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
658+ goto bad_area;
659+
660+ up_read(&mm->mmap_sem);
661+ switch (pax_handle_fetch_fault(regs)) {
662+
663+#ifdef CONFIG_PAX_EMUPLT
664+ case 2:
665+ case 3:
666+ return;
667+#endif
668+
669+ }
670+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
671+ do_group_exit(SIGKILL);
672+#else
673 goto bad_area;
674+#endif
675+
676+ }
677 } else if (!cause) {
678 /* Allow reads even for write-only mappings */
679 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
680diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
681index 86976d0..8e07f84 100644
682--- a/arch/arm/include/asm/atomic.h
683+++ b/arch/arm/include/asm/atomic.h
684@@ -15,6 +15,10 @@
685 #include <linux/types.h>
686 #include <asm/system.h>
687
688+#ifdef CONFIG_GENERIC_ATOMIC64
689+#include <asm-generic/atomic64.h>
690+#endif
691+
692 #define ATOMIC_INIT(i) { (i) }
693
694 #ifdef __KERNEL__
695@@ -25,7 +29,15 @@
696 * atomic_set() is the clrex or dummy strex done on every exception return.
697 */
698 #define atomic_read(v) (*(volatile int *)&(v)->counter)
699+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
700+{
701+ return v->counter;
702+}
703 #define atomic_set(v,i) (((v)->counter) = (i))
704+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
705+{
706+ v->counter = i;
707+}
708
709 #if __LINUX_ARM_ARCH__ >= 6
710
711@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
712 int result;
713
714 __asm__ __volatile__("@ atomic_add\n"
715+"1: ldrex %1, [%3]\n"
716+" adds %0, %1, %4\n"
717+
718+#ifdef CONFIG_PAX_REFCOUNT
719+" bvc 3f\n"
720+"2: bkpt 0xf103\n"
721+"3:\n"
722+#endif
723+
724+" strex %1, %0, [%3]\n"
725+" teq %1, #0\n"
726+" bne 1b"
727+
728+#ifdef CONFIG_PAX_REFCOUNT
729+"\n4:\n"
730+ _ASM_EXTABLE(2b, 4b)
731+#endif
732+
733+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
734+ : "r" (&v->counter), "Ir" (i)
735+ : "cc");
736+}
737+
738+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
739+{
740+ unsigned long tmp;
741+ int result;
742+
743+ __asm__ __volatile__("@ atomic_add_unchecked\n"
744 "1: ldrex %0, [%3]\n"
745 " add %0, %0, %4\n"
746 " strex %1, %0, [%3]\n"
747@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
748 smp_mb();
749
750 __asm__ __volatile__("@ atomic_add_return\n"
751+"1: ldrex %1, [%3]\n"
752+" adds %0, %1, %4\n"
753+
754+#ifdef CONFIG_PAX_REFCOUNT
755+" bvc 3f\n"
756+" mov %0, %1\n"
757+"2: bkpt 0xf103\n"
758+"3:\n"
759+#endif
760+
761+" strex %1, %0, [%3]\n"
762+" teq %1, #0\n"
763+" bne 1b"
764+
765+#ifdef CONFIG_PAX_REFCOUNT
766+"\n4:\n"
767+ _ASM_EXTABLE(2b, 4b)
768+#endif
769+
770+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
771+ : "r" (&v->counter), "Ir" (i)
772+ : "cc");
773+
774+ smp_mb();
775+
776+ return result;
777+}
778+
779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
780+{
781+ unsigned long tmp;
782+ int result;
783+
784+ smp_mb();
785+
786+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
787 "1: ldrex %0, [%3]\n"
788 " add %0, %0, %4\n"
789 " strex %1, %0, [%3]\n"
790@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
791 int result;
792
793 __asm__ __volatile__("@ atomic_sub\n"
794+"1: ldrex %1, [%3]\n"
795+" subs %0, %1, %4\n"
796+
797+#ifdef CONFIG_PAX_REFCOUNT
798+" bvc 3f\n"
799+"2: bkpt 0xf103\n"
800+"3:\n"
801+#endif
802+
803+" strex %1, %0, [%3]\n"
804+" teq %1, #0\n"
805+" bne 1b"
806+
807+#ifdef CONFIG_PAX_REFCOUNT
808+"\n4:\n"
809+ _ASM_EXTABLE(2b, 4b)
810+#endif
811+
812+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
813+ : "r" (&v->counter), "Ir" (i)
814+ : "cc");
815+}
816+
817+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
818+{
819+ unsigned long tmp;
820+ int result;
821+
822+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
823 "1: ldrex %0, [%3]\n"
824 " sub %0, %0, %4\n"
825 " strex %1, %0, [%3]\n"
826@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
827 smp_mb();
828
829 __asm__ __volatile__("@ atomic_sub_return\n"
830-"1: ldrex %0, [%3]\n"
831-" sub %0, %0, %4\n"
832+"1: ldrex %1, [%3]\n"
833+" sub %0, %1, %4\n"
834+
835+#ifdef CONFIG_PAX_REFCOUNT
836+" bvc 3f\n"
837+" mov %0, %1\n"
838+"2: bkpt 0xf103\n"
839+"3:\n"
840+#endif
841+
842 " strex %1, %0, [%3]\n"
843 " teq %1, #0\n"
844 " bne 1b"
845+
846+#ifdef CONFIG_PAX_REFCOUNT
847+"\n4:\n"
848+ _ASM_EXTABLE(2b, 4b)
849+#endif
850+
851 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
852 : "r" (&v->counter), "Ir" (i)
853 : "cc");
854@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
855 return oldval;
856 }
857
858+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
859+{
860+ unsigned long oldval, res;
861+
862+ smp_mb();
863+
864+ do {
865+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
866+ "ldrex %1, [%3]\n"
867+ "mov %0, #0\n"
868+ "teq %1, %4\n"
869+ "strexeq %0, %5, [%3]\n"
870+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
871+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
872+ : "cc");
873+ } while (res);
874+
875+ smp_mb();
876+
877+ return oldval;
878+}
879+
880 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
881 {
882 unsigned long tmp, tmp2;
883@@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
884
885 return val;
886 }
887+#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
888 #define atomic_add(i, v) (void) atomic_add_return(i, v)
889+#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
890
891 static inline int atomic_sub_return(int i, atomic_t *v)
892 {
893@@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
894
895 return val;
896 }
897+#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
898 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
899+#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
900
901 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
902 {
903@@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
904
905 return ret;
906 }
907+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
908
909 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
910 {
911@@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
912 #endif /* __LINUX_ARM_ARCH__ */
913
914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
915+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
916+{
917+ return xchg(&v->counter, new);
918+}
919
920 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
921 {
922@@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
923 }
924
925 #define atomic_inc(v) atomic_add(1, v)
926+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
927+{
928+ atomic_add_unchecked(1, v);
929+}
930 #define atomic_dec(v) atomic_sub(1, v)
931+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
932+{
933+ atomic_sub_unchecked(1, v);
934+}
935
936 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
937+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
938+{
939+ return atomic_add_return_unchecked(1, v) == 0;
940+}
941 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
942 #define atomic_inc_return(v) (atomic_add_return(1, v))
943+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
944+{
945+ return atomic_add_return_unchecked(1, v);
946+}
947 #define atomic_dec_return(v) (atomic_sub_return(1, v))
948 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
949
950@@ -239,6 +406,14 @@ typedef struct {
951 u64 __aligned(8) counter;
952 } atomic64_t;
953
954+#ifdef CONFIG_PAX_REFCOUNT
955+typedef struct {
956+ u64 __aligned(8) counter;
957+} atomic64_unchecked_t;
958+#else
959+typedef atomic64_t atomic64_unchecked_t;
960+#endif
961+
962 #define ATOMIC64_INIT(i) { (i) }
963
964 static inline u64 atomic64_read(atomic64_t *v)
965@@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
966 return result;
967 }
968
969+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
970+{
971+ u64 result;
972+
973+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
974+" ldrexd %0, %H0, [%1]"
975+ : "=&r" (result)
976+ : "r" (&v->counter), "Qo" (v->counter)
977+ );
978+
979+ return result;
980+}
981+
982 static inline void atomic64_set(atomic64_t *v, u64 i)
983 {
984 u64 tmp;
985@@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
986 : "cc");
987 }
988
989+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
990+{
991+ u64 tmp;
992+
993+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
994+"1: ldrexd %0, %H0, [%2]\n"
995+" strexd %0, %3, %H3, [%2]\n"
996+" teq %0, #0\n"
997+" bne 1b"
998+ : "=&r" (tmp), "=Qo" (v->counter)
999+ : "r" (&v->counter), "r" (i)
1000+ : "cc");
1001+}
1002+
1003 static inline void atomic64_add(u64 i, atomic64_t *v)
1004 {
1005 u64 result;
1006@@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1007 __asm__ __volatile__("@ atomic64_add\n"
1008 "1: ldrexd %0, %H0, [%3]\n"
1009 " adds %0, %0, %4\n"
1010+" adcs %H0, %H0, %H4\n"
1011+
1012+#ifdef CONFIG_PAX_REFCOUNT
1013+" bvc 3f\n"
1014+"2: bkpt 0xf103\n"
1015+"3:\n"
1016+#endif
1017+
1018+" strexd %1, %0, %H0, [%3]\n"
1019+" teq %1, #0\n"
1020+" bne 1b"
1021+
1022+#ifdef CONFIG_PAX_REFCOUNT
1023+"\n4:\n"
1024+ _ASM_EXTABLE(2b, 4b)
1025+#endif
1026+
1027+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1028+ : "r" (&v->counter), "r" (i)
1029+ : "cc");
1030+}
1031+
1032+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1033+{
1034+ u64 result;
1035+ unsigned long tmp;
1036+
1037+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1038+"1: ldrexd %0, %H0, [%3]\n"
1039+" adds %0, %0, %4\n"
1040 " adc %H0, %H0, %H4\n"
1041 " strexd %1, %0, %H0, [%3]\n"
1042 " teq %1, #0\n"
1043@@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1044
1045 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1046 {
1047- u64 result;
1048- unsigned long tmp;
1049+ u64 result, tmp;
1050
1051 smp_mb();
1052
1053 __asm__ __volatile__("@ atomic64_add_return\n"
1054+"1: ldrexd %1, %H1, [%3]\n"
1055+" adds %0, %1, %4\n"
1056+" adcs %H0, %H1, %H4\n"
1057+
1058+#ifdef CONFIG_PAX_REFCOUNT
1059+" bvc 3f\n"
1060+" mov %0, %1\n"
1061+" mov %H0, %H1\n"
1062+"2: bkpt 0xf103\n"
1063+"3:\n"
1064+#endif
1065+
1066+" strexd %1, %0, %H0, [%3]\n"
1067+" teq %1, #0\n"
1068+" bne 1b"
1069+
1070+#ifdef CONFIG_PAX_REFCOUNT
1071+"\n4:\n"
1072+ _ASM_EXTABLE(2b, 4b)
1073+#endif
1074+
1075+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1076+ : "r" (&v->counter), "r" (i)
1077+ : "cc");
1078+
1079+ smp_mb();
1080+
1081+ return result;
1082+}
1083+
1084+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1085+{
1086+ u64 result;
1087+ unsigned long tmp;
1088+
1089+ smp_mb();
1090+
1091+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1092 "1: ldrexd %0, %H0, [%3]\n"
1093 " adds %0, %0, %4\n"
1094 " adc %H0, %H0, %H4\n"
1095@@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1096 __asm__ __volatile__("@ atomic64_sub\n"
1097 "1: ldrexd %0, %H0, [%3]\n"
1098 " subs %0, %0, %4\n"
1099+" sbcs %H0, %H0, %H4\n"
1100+
1101+#ifdef CONFIG_PAX_REFCOUNT
1102+" bvc 3f\n"
1103+"2: bkpt 0xf103\n"
1104+"3:\n"
1105+#endif
1106+
1107+" strexd %1, %0, %H0, [%3]\n"
1108+" teq %1, #0\n"
1109+" bne 1b"
1110+
1111+#ifdef CONFIG_PAX_REFCOUNT
1112+"\n4:\n"
1113+ _ASM_EXTABLE(2b, 4b)
1114+#endif
1115+
1116+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1117+ : "r" (&v->counter), "r" (i)
1118+ : "cc");
1119+}
1120+
1121+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1122+{
1123+ u64 result;
1124+ unsigned long tmp;
1125+
1126+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1127+"1: ldrexd %0, %H0, [%3]\n"
1128+" subs %0, %0, %4\n"
1129 " sbc %H0, %H0, %H4\n"
1130 " strexd %1, %0, %H0, [%3]\n"
1131 " teq %1, #0\n"
1132@@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1133
1134 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1135 {
1136- u64 result;
1137- unsigned long tmp;
1138+ u64 result, tmp;
1139
1140 smp_mb();
1141
1142 __asm__ __volatile__("@ atomic64_sub_return\n"
1143-"1: ldrexd %0, %H0, [%3]\n"
1144-" subs %0, %0, %4\n"
1145-" sbc %H0, %H0, %H4\n"
1146+"1: ldrexd %1, %H1, [%3]\n"
1147+" subs %0, %1, %4\n"
1148+" sbc %H0, %H1, %H4\n"
1149+
1150+#ifdef CONFIG_PAX_REFCOUNT
1151+" bvc 3f\n"
1152+" mov %0, %1\n"
1153+" mov %H0, %H1\n"
1154+"2: bkpt 0xf103\n"
1155+"3:\n"
1156+#endif
1157+
1158 " strexd %1, %0, %H0, [%3]\n"
1159 " teq %1, #0\n"
1160 " bne 1b"
1161+
1162+#ifdef CONFIG_PAX_REFCOUNT
1163+"\n4:\n"
1164+ _ASM_EXTABLE(2b, 4b)
1165+#endif
1166+
1167 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1168 : "r" (&v->counter), "r" (i)
1169 : "cc");
1170@@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1171 return oldval;
1172 }
1173
1174+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1175+{
1176+ u64 oldval;
1177+ unsigned long res;
1178+
1179+ smp_mb();
1180+
1181+ do {
1182+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1183+ "ldrexd %1, %H1, [%3]\n"
1184+ "mov %0, #0\n"
1185+ "teq %1, %4\n"
1186+ "teqeq %H1, %H4\n"
1187+ "strexdeq %0, %5, %H5, [%3]"
1188+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1189+ : "r" (&ptr->counter), "r" (old), "r" (new)
1190+ : "cc");
1191+ } while (res);
1192+
1193+ smp_mb();
1194+
1195+ return oldval;
1196+}
1197+
1198 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1199 {
1200 u64 result;
1201@@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1202
1203 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1204 {
1205- u64 result;
1206- unsigned long tmp;
1207+ u64 result, tmp;
1208
1209 smp_mb();
1210
1211 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1212-"1: ldrexd %0, %H0, [%3]\n"
1213-" subs %0, %0, #1\n"
1214-" sbc %H0, %H0, #0\n"
1215+"1: ldrexd %1, %H1, [%3]\n"
1216+" subs %0, %1, #1\n"
1217+" sbc %H0, %H1, #0\n"
1218+
1219+#ifdef CONFIG_PAX_REFCOUNT
1220+" bvc 3f\n"
1221+" mov %0, %1\n"
1222+" mov %H0, %H1\n"
1223+"2: bkpt 0xf103\n"
1224+"3:\n"
1225+#endif
1226+
1227 " teq %H0, #0\n"
1228-" bmi 2f\n"
1229+" bmi 4f\n"
1230 " strexd %1, %0, %H0, [%3]\n"
1231 " teq %1, #0\n"
1232 " bne 1b\n"
1233-"2:"
1234+"4:\n"
1235+
1236+#ifdef CONFIG_PAX_REFCOUNT
1237+ _ASM_EXTABLE(2b, 4b)
1238+#endif
1239+
1240 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1241 : "r" (&v->counter)
1242 : "cc");
1243@@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1244 " teq %0, %5\n"
1245 " teqeq %H0, %H5\n"
1246 " moveq %1, #0\n"
1247-" beq 2f\n"
1248+" beq 4f\n"
1249 " adds %0, %0, %6\n"
1250 " adc %H0, %H0, %H6\n"
1251+
1252+#ifdef CONFIG_PAX_REFCOUNT
1253+" bvc 3f\n"
1254+"2: bkpt 0xf103\n"
1255+"3:\n"
1256+#endif
1257+
1258 " strexd %2, %0, %H0, [%4]\n"
1259 " teq %2, #0\n"
1260 " bne 1b\n"
1261-"2:"
1262+"4:\n"
1263+
1264+#ifdef CONFIG_PAX_REFCOUNT
1265+ _ASM_EXTABLE(2b, 4b)
1266+#endif
1267+
1268 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1269 : "r" (&v->counter), "r" (u), "r" (a)
1270 : "cc");
1271@@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1272
1273 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1274 #define atomic64_inc(v) atomic64_add(1LL, (v))
1275+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1276 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1277+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1278 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1279 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1280 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1281+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1282 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1283 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1284 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1285diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1286index 75fe66b..2255c86 100644
1287--- a/arch/arm/include/asm/cache.h
1288+++ b/arch/arm/include/asm/cache.h
1289@@ -4,8 +4,10 @@
1290 #ifndef __ASMARM_CACHE_H
1291 #define __ASMARM_CACHE_H
1292
1293+#include <linux/const.h>
1294+
1295 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1296-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1297+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1298
1299 /*
1300 * Memory returned by kmalloc() may be used for DMA, so we must make
1301diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1302index d5d8d5c..ad92c96 100644
1303--- a/arch/arm/include/asm/cacheflush.h
1304+++ b/arch/arm/include/asm/cacheflush.h
1305@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1306 void (*dma_unmap_area)(const void *, size_t, int);
1307
1308 void (*dma_flush_range)(const void *, const void *);
1309-};
1310+} __no_const;
1311
1312 /*
1313 * Select the calling method
1314diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1315index 0e9ce8d..6ef1e03 100644
1316--- a/arch/arm/include/asm/elf.h
1317+++ b/arch/arm/include/asm/elf.h
1318@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1319 the loader. We need to make sure that it is out of the way of the program
1320 that it will "exec", and that there is sufficient room for the brk. */
1321
1322-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1323+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1324+
1325+#ifdef CONFIG_PAX_ASLR
1326+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1327+
1328+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1329+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1330+#endif
1331
1332 /* When the program starts, a1 contains a pointer to a function to be
1333 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1334@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1335 extern void elf_set_personality(const struct elf32_hdr *);
1336 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1337
1338-struct mm_struct;
1339-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1340-#define arch_randomize_brk arch_randomize_brk
1341-
1342 extern int vectors_user_mapping(void);
1343 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1344 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1345diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1346index e51b1e8..32a3113 100644
1347--- a/arch/arm/include/asm/kmap_types.h
1348+++ b/arch/arm/include/asm/kmap_types.h
1349@@ -21,6 +21,7 @@ enum km_type {
1350 KM_L1_CACHE,
1351 KM_L2_CACHE,
1352 KM_KDB,
1353+ KM_CLEARPAGE,
1354 KM_TYPE_NR
1355 };
1356
1357diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1358index 53426c6..c7baff3 100644
1359--- a/arch/arm/include/asm/outercache.h
1360+++ b/arch/arm/include/asm/outercache.h
1361@@ -35,7 +35,7 @@ struct outer_cache_fns {
1362 #endif
1363 void (*set_debug)(unsigned long);
1364 void (*resume)(void);
1365-};
1366+} __no_const;
1367
1368 #ifdef CONFIG_OUTER_CACHE
1369
1370diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1371index 97b440c..b7ff179 100644
1372--- a/arch/arm/include/asm/page.h
1373+++ b/arch/arm/include/asm/page.h
1374@@ -123,7 +123,7 @@ struct cpu_user_fns {
1375 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1376 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1377 unsigned long vaddr, struct vm_area_struct *vma);
1378-};
1379+} __no_const;
1380
1381 #ifdef MULTI_USER
1382 extern struct cpu_user_fns cpu_user;
1383diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1384index e4c96cc..1145653 100644
1385--- a/arch/arm/include/asm/system.h
1386+++ b/arch/arm/include/asm/system.h
1387@@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1388
1389 #define xchg(ptr,x) \
1390 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1391+#define xchg_unchecked(ptr,x) \
1392+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1393
1394 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1395
1396@@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1397
1398 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1399
1400+#define _ASM_EXTABLE(from, to) \
1401+" .pushsection __ex_table,\"a\"\n"\
1402+" .align 3\n" \
1403+" .long " #from ", " #to"\n" \
1404+" .popsection"
1405+
1406+
1407 #endif /* __ASSEMBLY__ */
1408
1409 #define arch_align_stack(x) (x)
1410diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1411index 2958976..12ccac4 100644
1412--- a/arch/arm/include/asm/uaccess.h
1413+++ b/arch/arm/include/asm/uaccess.h
1414@@ -22,6 +22,8 @@
1415 #define VERIFY_READ 0
1416 #define VERIFY_WRITE 1
1417
1418+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1419+
1420 /*
1421 * The exception table consists of pairs of addresses: the first is the
1422 * address of an instruction that is allowed to fault, and the second is
1423@@ -387,8 +389,23 @@ do { \
1424
1425
1426 #ifdef CONFIG_MMU
1427-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1428-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1429+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1430+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1431+
1432+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1433+{
1434+ if (!__builtin_constant_p(n))
1435+ check_object_size(to, n, false);
1436+ return ___copy_from_user(to, from, n);
1437+}
1438+
1439+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1440+{
1441+ if (!__builtin_constant_p(n))
1442+ check_object_size(from, n, true);
1443+ return ___copy_to_user(to, from, n);
1444+}
1445+
1446 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1447 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1448 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1449@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1450
1451 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1452 {
1453+ if ((long)n < 0)
1454+ return n;
1455+
1456 if (access_ok(VERIFY_READ, from, n))
1457 n = __copy_from_user(to, from, n);
1458 else /* security hole - plug it */
1459@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1460
1461 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1462 {
1463+ if ((long)n < 0)
1464+ return n;
1465+
1466 if (access_ok(VERIFY_WRITE, to, n))
1467 n = __copy_to_user(to, from, n);
1468 return n;
1469diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1470index 5b0bce6..becd81c 100644
1471--- a/arch/arm/kernel/armksyms.c
1472+++ b/arch/arm/kernel/armksyms.c
1473@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1474 #ifdef CONFIG_MMU
1475 EXPORT_SYMBOL(copy_page);
1476
1477-EXPORT_SYMBOL(__copy_from_user);
1478-EXPORT_SYMBOL(__copy_to_user);
1479+EXPORT_SYMBOL(___copy_from_user);
1480+EXPORT_SYMBOL(___copy_to_user);
1481 EXPORT_SYMBOL(__clear_user);
1482
1483 EXPORT_SYMBOL(__get_user_1);
1484diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1485index 971d65c..cc936fb 100644
1486--- a/arch/arm/kernel/process.c
1487+++ b/arch/arm/kernel/process.c
1488@@ -28,7 +28,6 @@
1489 #include <linux/tick.h>
1490 #include <linux/utsname.h>
1491 #include <linux/uaccess.h>
1492-#include <linux/random.h>
1493 #include <linux/hw_breakpoint.h>
1494 #include <linux/cpuidle.h>
1495
1496@@ -273,9 +272,10 @@ void machine_power_off(void)
1497 machine_shutdown();
1498 if (pm_power_off)
1499 pm_power_off();
1500+ BUG();
1501 }
1502
1503-void machine_restart(char *cmd)
1504+__noreturn void machine_restart(char *cmd)
1505 {
1506 machine_shutdown();
1507
1508@@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1509 return 0;
1510 }
1511
1512-unsigned long arch_randomize_brk(struct mm_struct *mm)
1513-{
1514- unsigned long range_end = mm->brk + 0x02000000;
1515- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1516-}
1517-
1518 #ifdef CONFIG_MMU
1519 /*
1520 * The vectors page is always readable from user space for the
1521diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1522index a255c39..4a19b25 100644
1523--- a/arch/arm/kernel/setup.c
1524+++ b/arch/arm/kernel/setup.c
1525@@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1526 struct cpu_tlb_fns cpu_tlb __read_mostly;
1527 #endif
1528 #ifdef MULTI_USER
1529-struct cpu_user_fns cpu_user __read_mostly;
1530+struct cpu_user_fns cpu_user __read_only;
1531 #endif
1532 #ifdef MULTI_CACHE
1533-struct cpu_cache_fns cpu_cache __read_mostly;
1534+struct cpu_cache_fns cpu_cache __read_only;
1535 #endif
1536 #ifdef CONFIG_OUTER_CACHE
1537-struct outer_cache_fns outer_cache __read_mostly;
1538+struct outer_cache_fns outer_cache __read_only;
1539 EXPORT_SYMBOL(outer_cache);
1540 #endif
1541
1542diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1543index f84dfe6..13e94f7 100644
1544--- a/arch/arm/kernel/traps.c
1545+++ b/arch/arm/kernel/traps.c
1546@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1547
1548 static DEFINE_RAW_SPINLOCK(die_lock);
1549
1550+extern void gr_handle_kernel_exploit(void);
1551+
1552 /*
1553 * This function is protected against re-entrancy.
1554 */
1555@@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1556 panic("Fatal exception in interrupt");
1557 if (panic_on_oops)
1558 panic("Fatal exception");
1559+
1560+ gr_handle_kernel_exploit();
1561+
1562 if (ret != NOTIFY_STOP)
1563 do_exit(SIGSEGV);
1564 }
1565diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1566index 66a477a..bee61d3 100644
1567--- a/arch/arm/lib/copy_from_user.S
1568+++ b/arch/arm/lib/copy_from_user.S
1569@@ -16,7 +16,7 @@
1570 /*
1571 * Prototype:
1572 *
1573- * size_t __copy_from_user(void *to, const void *from, size_t n)
1574+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1575 *
1576 * Purpose:
1577 *
1578@@ -84,11 +84,11 @@
1579
1580 .text
1581
1582-ENTRY(__copy_from_user)
1583+ENTRY(___copy_from_user)
1584
1585 #include "copy_template.S"
1586
1587-ENDPROC(__copy_from_user)
1588+ENDPROC(___copy_from_user)
1589
1590 .pushsection .fixup,"ax"
1591 .align 0
1592diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1593index 6ee2f67..d1cce76 100644
1594--- a/arch/arm/lib/copy_page.S
1595+++ b/arch/arm/lib/copy_page.S
1596@@ -10,6 +10,7 @@
1597 * ASM optimised string functions
1598 */
1599 #include <linux/linkage.h>
1600+#include <linux/const.h>
1601 #include <asm/assembler.h>
1602 #include <asm/asm-offsets.h>
1603 #include <asm/cache.h>
1604diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1605index d066df6..df28194 100644
1606--- a/arch/arm/lib/copy_to_user.S
1607+++ b/arch/arm/lib/copy_to_user.S
1608@@ -16,7 +16,7 @@
1609 /*
1610 * Prototype:
1611 *
1612- * size_t __copy_to_user(void *to, const void *from, size_t n)
1613+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1614 *
1615 * Purpose:
1616 *
1617@@ -88,11 +88,11 @@
1618 .text
1619
1620 ENTRY(__copy_to_user_std)
1621-WEAK(__copy_to_user)
1622+WEAK(___copy_to_user)
1623
1624 #include "copy_template.S"
1625
1626-ENDPROC(__copy_to_user)
1627+ENDPROC(___copy_to_user)
1628 ENDPROC(__copy_to_user_std)
1629
1630 .pushsection .fixup,"ax"
1631diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1632index 5c908b1..e712687 100644
1633--- a/arch/arm/lib/uaccess.S
1634+++ b/arch/arm/lib/uaccess.S
1635@@ -20,7 +20,7 @@
1636
1637 #define PAGE_SHIFT 12
1638
1639-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1640+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1641 * Purpose : copy a block to user memory from kernel memory
1642 * Params : to - user memory
1643 * : from - kernel memory
1644@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1645 sub r2, r2, ip
1646 b .Lc2u_dest_aligned
1647
1648-ENTRY(__copy_to_user)
1649+ENTRY(___copy_to_user)
1650 stmfd sp!, {r2, r4 - r7, lr}
1651 cmp r2, #4
1652 blt .Lc2u_not_enough
1653@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1654 ldrgtb r3, [r1], #0
1655 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1656 b .Lc2u_finished
1657-ENDPROC(__copy_to_user)
1658+ENDPROC(___copy_to_user)
1659
1660 .pushsection .fixup,"ax"
1661 .align 0
1662 9001: ldmfd sp!, {r0, r4 - r7, pc}
1663 .popsection
1664
1665-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1666+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1667 * Purpose : copy a block from user memory to kernel memory
1668 * Params : to - kernel memory
1669 * : from - user memory
1670@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1671 sub r2, r2, ip
1672 b .Lcfu_dest_aligned
1673
1674-ENTRY(__copy_from_user)
1675+ENTRY(___copy_from_user)
1676 stmfd sp!, {r0, r2, r4 - r7, lr}
1677 cmp r2, #4
1678 blt .Lcfu_not_enough
1679@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1680 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1681 strgtb r3, [r0], #1
1682 b .Lcfu_finished
1683-ENDPROC(__copy_from_user)
1684+ENDPROC(___copy_from_user)
1685
1686 .pushsection .fixup,"ax"
1687 .align 0
1688diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1689index 025f742..8432b08 100644
1690--- a/arch/arm/lib/uaccess_with_memcpy.c
1691+++ b/arch/arm/lib/uaccess_with_memcpy.c
1692@@ -104,7 +104,7 @@ out:
1693 }
1694
1695 unsigned long
1696-__copy_to_user(void __user *to, const void *from, unsigned long n)
1697+___copy_to_user(void __user *to, const void *from, unsigned long n)
1698 {
1699 /*
1700 * This test is stubbed out of the main function above to keep
1701diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1702index 6722627..8f97548c 100644
1703--- a/arch/arm/mach-omap2/board-n8x0.c
1704+++ b/arch/arm/mach-omap2/board-n8x0.c
1705@@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1706 }
1707 #endif
1708
1709-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1710+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1711 .late_init = n8x0_menelaus_late_init,
1712 };
1713
1714diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1715index 2b2d51c..0127490 100644
1716--- a/arch/arm/mach-ux500/mbox-db5500.c
1717+++ b/arch/arm/mach-ux500/mbox-db5500.c
1718@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1719 return sprintf(buf, "0x%X\n", mbox_value);
1720 }
1721
1722-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1723+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1724
1725 static int mbox_show(struct seq_file *s, void *data)
1726 {
1727diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1728index bb7eac3..3bade16 100644
1729--- a/arch/arm/mm/fault.c
1730+++ b/arch/arm/mm/fault.c
1731@@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1732 }
1733 #endif
1734
1735+#ifdef CONFIG_PAX_PAGEEXEC
1736+ if (fsr & FSR_LNX_PF) {
1737+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1738+ do_group_exit(SIGKILL);
1739+ }
1740+#endif
1741+
1742 tsk->thread.address = addr;
1743 tsk->thread.error_code = fsr;
1744 tsk->thread.trap_no = 14;
1745@@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1746 }
1747 #endif /* CONFIG_MMU */
1748
1749+#ifdef CONFIG_PAX_PAGEEXEC
1750+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1751+{
1752+ long i;
1753+
1754+ printk(KERN_ERR "PAX: bytes at PC: ");
1755+ for (i = 0; i < 20; i++) {
1756+ unsigned char c;
1757+ if (get_user(c, (__force unsigned char __user *)pc+i))
1758+ printk(KERN_CONT "?? ");
1759+ else
1760+ printk(KERN_CONT "%02x ", c);
1761+ }
1762+ printk("\n");
1763+
1764+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1765+ for (i = -1; i < 20; i++) {
1766+ unsigned long c;
1767+ if (get_user(c, (__force unsigned long __user *)sp+i))
1768+ printk(KERN_CONT "???????? ");
1769+ else
1770+ printk(KERN_CONT "%08lx ", c);
1771+ }
1772+ printk("\n");
1773+}
1774+#endif
1775+
1776 /*
1777 * First Level Translation Fault Handler
1778 *
1779@@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1780 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1781 struct siginfo info;
1782
1783+#ifdef CONFIG_PAX_REFCOUNT
1784+ if (fsr_fs(ifsr) == 2) {
1785+ unsigned int bkpt;
1786+
1787+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1788+ current->thread.error_code = ifsr;
1789+ current->thread.trap_no = 0;
1790+ pax_report_refcount_overflow(regs);
1791+ fixup_exception(regs);
1792+ return;
1793+ }
1794+ }
1795+#endif
1796+
1797 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1798 return;
1799
1800diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1801index ce8cb19..3ec539d 100644
1802--- a/arch/arm/mm/mmap.c
1803+++ b/arch/arm/mm/mmap.c
1804@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1805 if (len > TASK_SIZE)
1806 return -ENOMEM;
1807
1808+#ifdef CONFIG_PAX_RANDMMAP
1809+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1810+#endif
1811+
1812 if (addr) {
1813 if (do_align)
1814 addr = COLOUR_ALIGN(addr, pgoff);
1815@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1816 addr = PAGE_ALIGN(addr);
1817
1818 vma = find_vma(mm, addr);
1819- if (TASK_SIZE - len >= addr &&
1820- (!vma || addr + len <= vma->vm_start))
1821+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1822 return addr;
1823 }
1824 if (len > mm->cached_hole_size) {
1825- start_addr = addr = mm->free_area_cache;
1826+ start_addr = addr = mm->free_area_cache;
1827 } else {
1828- start_addr = addr = mm->mmap_base;
1829- mm->cached_hole_size = 0;
1830+ start_addr = addr = mm->mmap_base;
1831+ mm->cached_hole_size = 0;
1832 }
1833
1834 full_search:
1835@@ -124,14 +127,14 @@ full_search:
1836 * Start a new search - just in case we missed
1837 * some holes.
1838 */
1839- if (start_addr != TASK_UNMAPPED_BASE) {
1840- start_addr = addr = TASK_UNMAPPED_BASE;
1841+ if (start_addr != mm->mmap_base) {
1842+ start_addr = addr = mm->mmap_base;
1843 mm->cached_hole_size = 0;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848- if (!vma || addr + len <= vma->vm_start) {
1849+ if (check_heap_stack_gap(vma, addr, len)) {
1850 /*
1851 * Remember the place where we stopped the search:
1852 */
1853@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1854
1855 if (mmap_is_legacy()) {
1856 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1857+
1858+#ifdef CONFIG_PAX_RANDMMAP
1859+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1860+ mm->mmap_base += mm->delta_mmap;
1861+#endif
1862+
1863 mm->get_unmapped_area = arch_get_unmapped_area;
1864 mm->unmap_area = arch_unmap_area;
1865 } else {
1866 mm->mmap_base = mmap_base(random_factor);
1867+
1868+#ifdef CONFIG_PAX_RANDMMAP
1869+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1870+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1871+#endif
1872+
1873 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1874 mm->unmap_area = arch_unmap_area_topdown;
1875 }
1876diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1877index 71a6827..e7fbc23 100644
1878--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1879+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1880@@ -43,7 +43,7 @@ struct samsung_dma_ops {
1881 int (*started)(unsigned ch);
1882 int (*flush)(unsigned ch);
1883 int (*stop)(unsigned ch);
1884-};
1885+} __no_const;
1886
1887 extern void *samsung_dmadev_get_ops(void);
1888 extern void *s3c_dma_get_ops(void);
1889diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1890index 5f28cae..3d23723 100644
1891--- a/arch/arm/plat-samsung/include/plat/ehci.h
1892+++ b/arch/arm/plat-samsung/include/plat/ehci.h
1893@@ -14,7 +14,7 @@
1894 struct s5p_ehci_platdata {
1895 int (*phy_init)(struct platform_device *pdev, int type);
1896 int (*phy_exit)(struct platform_device *pdev, int type);
1897-};
1898+} __no_const;
1899
1900 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1901
1902diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1903index c3a58a1..78fbf54 100644
1904--- a/arch/avr32/include/asm/cache.h
1905+++ b/arch/avr32/include/asm/cache.h
1906@@ -1,8 +1,10 @@
1907 #ifndef __ASM_AVR32_CACHE_H
1908 #define __ASM_AVR32_CACHE_H
1909
1910+#include <linux/const.h>
1911+
1912 #define L1_CACHE_SHIFT 5
1913-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1914+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1915
1916 /*
1917 * Memory returned by kmalloc() may be used for DMA, so we must make
1918diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1919index 3b3159b..425ea94 100644
1920--- a/arch/avr32/include/asm/elf.h
1921+++ b/arch/avr32/include/asm/elf.h
1922@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1923 the loader. We need to make sure that it is out of the way of the program
1924 that it will "exec", and that there is sufficient room for the brk. */
1925
1926-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1927+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1928
1929+#ifdef CONFIG_PAX_ASLR
1930+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1931+
1932+#define PAX_DELTA_MMAP_LEN 15
1933+#define PAX_DELTA_STACK_LEN 15
1934+#endif
1935
1936 /* This yields a mask that user programs can use to figure out what
1937 instruction set this CPU supports. This could be done in user space,
1938diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1939index b7f5c68..556135c 100644
1940--- a/arch/avr32/include/asm/kmap_types.h
1941+++ b/arch/avr32/include/asm/kmap_types.h
1942@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1943 D(11) KM_IRQ1,
1944 D(12) KM_SOFTIRQ0,
1945 D(13) KM_SOFTIRQ1,
1946-D(14) KM_TYPE_NR
1947+D(14) KM_CLEARPAGE,
1948+D(15) KM_TYPE_NR
1949 };
1950
1951 #undef D
1952diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1953index f7040a1..db9f300 100644
1954--- a/arch/avr32/mm/fault.c
1955+++ b/arch/avr32/mm/fault.c
1956@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1957
1958 int exception_trace = 1;
1959
1960+#ifdef CONFIG_PAX_PAGEEXEC
1961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1962+{
1963+ unsigned long i;
1964+
1965+ printk(KERN_ERR "PAX: bytes at PC: ");
1966+ for (i = 0; i < 20; i++) {
1967+ unsigned char c;
1968+ if (get_user(c, (unsigned char *)pc+i))
1969+ printk(KERN_CONT "???????? ");
1970+ else
1971+ printk(KERN_CONT "%02x ", c);
1972+ }
1973+ printk("\n");
1974+}
1975+#endif
1976+
1977 /*
1978 * This routine handles page faults. It determines the address and the
1979 * problem, and then passes it off to one of the appropriate routines.
1980@@ -156,6 +173,16 @@ bad_area:
1981 up_read(&mm->mmap_sem);
1982
1983 if (user_mode(regs)) {
1984+
1985+#ifdef CONFIG_PAX_PAGEEXEC
1986+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1987+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1988+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1989+ do_group_exit(SIGKILL);
1990+ }
1991+ }
1992+#endif
1993+
1994 if (exception_trace && printk_ratelimit())
1995 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1996 "sp %08lx ecr %lu\n",
1997diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1998index 568885a..f8008df 100644
1999--- a/arch/blackfin/include/asm/cache.h
2000+++ b/arch/blackfin/include/asm/cache.h
2001@@ -7,6 +7,7 @@
2002 #ifndef __ARCH_BLACKFIN_CACHE_H
2003 #define __ARCH_BLACKFIN_CACHE_H
2004
2005+#include <linux/const.h>
2006 #include <linux/linkage.h> /* for asmlinkage */
2007
2008 /*
2009@@ -14,7 +15,7 @@
2010 * Blackfin loads 32 bytes for cache
2011 */
2012 #define L1_CACHE_SHIFT 5
2013-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2014+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2015 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2016
2017 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2018diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2019index aea2718..3639a60 100644
2020--- a/arch/cris/include/arch-v10/arch/cache.h
2021+++ b/arch/cris/include/arch-v10/arch/cache.h
2022@@ -1,8 +1,9 @@
2023 #ifndef _ASM_ARCH_CACHE_H
2024 #define _ASM_ARCH_CACHE_H
2025
2026+#include <linux/const.h>
2027 /* Etrax 100LX have 32-byte cache-lines. */
2028-#define L1_CACHE_BYTES 32
2029 #define L1_CACHE_SHIFT 5
2030+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2031
2032 #endif /* _ASM_ARCH_CACHE_H */
2033diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2034index 1de779f..336fad3 100644
2035--- a/arch/cris/include/arch-v32/arch/cache.h
2036+++ b/arch/cris/include/arch-v32/arch/cache.h
2037@@ -1,11 +1,12 @@
2038 #ifndef _ASM_CRIS_ARCH_CACHE_H
2039 #define _ASM_CRIS_ARCH_CACHE_H
2040
2041+#include <linux/const.h>
2042 #include <arch/hwregs/dma.h>
2043
2044 /* A cache-line is 32 bytes. */
2045-#define L1_CACHE_BYTES 32
2046 #define L1_CACHE_SHIFT 5
2047+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2048
2049 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2050
2051diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2052index 0d8a7d6..d0c9ff5 100644
2053--- a/arch/frv/include/asm/atomic.h
2054+++ b/arch/frv/include/asm/atomic.h
2055@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2056 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2057 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2058
2059+#define atomic64_read_unchecked(v) atomic64_read(v)
2060+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2061+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2062+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2063+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2064+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2065+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2066+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2067+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2068+
2069 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2070 {
2071 int c, old;
2072diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2073index 2797163..c2a401d 100644
2074--- a/arch/frv/include/asm/cache.h
2075+++ b/arch/frv/include/asm/cache.h
2076@@ -12,10 +12,11 @@
2077 #ifndef __ASM_CACHE_H
2078 #define __ASM_CACHE_H
2079
2080+#include <linux/const.h>
2081
2082 /* bytes per L1 cache line */
2083 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2084-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2085+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2086
2087 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2088 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2089diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2090index f8e16b2..c73ff79 100644
2091--- a/arch/frv/include/asm/kmap_types.h
2092+++ b/arch/frv/include/asm/kmap_types.h
2093@@ -23,6 +23,7 @@ enum km_type {
2094 KM_IRQ1,
2095 KM_SOFTIRQ0,
2096 KM_SOFTIRQ1,
2097+ KM_CLEARPAGE,
2098 KM_TYPE_NR
2099 };
2100
2101diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2102index 385fd30..6c3d97e 100644
2103--- a/arch/frv/mm/elf-fdpic.c
2104+++ b/arch/frv/mm/elf-fdpic.c
2105@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2106 if (addr) {
2107 addr = PAGE_ALIGN(addr);
2108 vma = find_vma(current->mm, addr);
2109- if (TASK_SIZE - len >= addr &&
2110- (!vma || addr + len <= vma->vm_start))
2111+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2112 goto success;
2113 }
2114
2115@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2116 for (; vma; vma = vma->vm_next) {
2117 if (addr > limit)
2118 break;
2119- if (addr + len <= vma->vm_start)
2120+ if (check_heap_stack_gap(vma, addr, len))
2121 goto success;
2122 addr = vma->vm_end;
2123 }
2124@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2125 for (; vma; vma = vma->vm_next) {
2126 if (addr > limit)
2127 break;
2128- if (addr + len <= vma->vm_start)
2129+ if (check_heap_stack_gap(vma, addr, len))
2130 goto success;
2131 addr = vma->vm_end;
2132 }
2133diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2134index c635028..6d9445a 100644
2135--- a/arch/h8300/include/asm/cache.h
2136+++ b/arch/h8300/include/asm/cache.h
2137@@ -1,8 +1,10 @@
2138 #ifndef __ARCH_H8300_CACHE_H
2139 #define __ARCH_H8300_CACHE_H
2140
2141+#include <linux/const.h>
2142+
2143 /* bytes per L1 cache line */
2144-#define L1_CACHE_BYTES 4
2145+#define L1_CACHE_BYTES _AC(4,UL)
2146
2147 /* m68k-elf-gcc 2.95.2 doesn't like these */
2148
2149diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2150index 0f01de2..d37d309 100644
2151--- a/arch/hexagon/include/asm/cache.h
2152+++ b/arch/hexagon/include/asm/cache.h
2153@@ -21,9 +21,11 @@
2154 #ifndef __ASM_CACHE_H
2155 #define __ASM_CACHE_H
2156
2157+#include <linux/const.h>
2158+
2159 /* Bytes per L1 cache line */
2160-#define L1_CACHE_SHIFT (5)
2161-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2162+#define L1_CACHE_SHIFT 5
2163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2164
2165 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2166 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2167diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2168index 3fad89e..3047da5 100644
2169--- a/arch/ia64/include/asm/atomic.h
2170+++ b/arch/ia64/include/asm/atomic.h
2171@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2172 #define atomic64_inc(v) atomic64_add(1, (v))
2173 #define atomic64_dec(v) atomic64_sub(1, (v))
2174
2175+#define atomic64_read_unchecked(v) atomic64_read(v)
2176+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2177+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2178+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2179+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2180+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2181+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2182+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2183+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2184+
2185 /* Atomic operations are already serializing */
2186 #define smp_mb__before_atomic_dec() barrier()
2187 #define smp_mb__after_atomic_dec() barrier()
2188diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2189index 988254a..e1ee885 100644
2190--- a/arch/ia64/include/asm/cache.h
2191+++ b/arch/ia64/include/asm/cache.h
2192@@ -1,6 +1,7 @@
2193 #ifndef _ASM_IA64_CACHE_H
2194 #define _ASM_IA64_CACHE_H
2195
2196+#include <linux/const.h>
2197
2198 /*
2199 * Copyright (C) 1998-2000 Hewlett-Packard Co
2200@@ -9,7 +10,7 @@
2201
2202 /* Bytes per L1 (data) cache line. */
2203 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2204-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2205+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2206
2207 #ifdef CONFIG_SMP
2208 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2209diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2210index b5298eb..67c6e62 100644
2211--- a/arch/ia64/include/asm/elf.h
2212+++ b/arch/ia64/include/asm/elf.h
2213@@ -42,6 +42,13 @@
2214 */
2215 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2216
2217+#ifdef CONFIG_PAX_ASLR
2218+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2219+
2220+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2221+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2222+#endif
2223+
2224 #define PT_IA_64_UNWIND 0x70000001
2225
2226 /* IA-64 relocations: */
2227diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2228index 1a97af3..7529d31 100644
2229--- a/arch/ia64/include/asm/pgtable.h
2230+++ b/arch/ia64/include/asm/pgtable.h
2231@@ -12,7 +12,7 @@
2232 * David Mosberger-Tang <davidm@hpl.hp.com>
2233 */
2234
2235-
2236+#include <linux/const.h>
2237 #include <asm/mman.h>
2238 #include <asm/page.h>
2239 #include <asm/processor.h>
2240@@ -143,6 +143,17 @@
2241 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2242 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2243 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2244+
2245+#ifdef CONFIG_PAX_PAGEEXEC
2246+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2247+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2248+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2249+#else
2250+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2251+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2252+# define PAGE_COPY_NOEXEC PAGE_COPY
2253+#endif
2254+
2255 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2256 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2257 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2258diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2259index b77768d..e0795eb 100644
2260--- a/arch/ia64/include/asm/spinlock.h
2261+++ b/arch/ia64/include/asm/spinlock.h
2262@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2263 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2264
2265 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2266- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2267+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2268 }
2269
2270 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2271diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2272index 449c8c0..432a3d2 100644
2273--- a/arch/ia64/include/asm/uaccess.h
2274+++ b/arch/ia64/include/asm/uaccess.h
2275@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2276 const void *__cu_from = (from); \
2277 long __cu_len = (n); \
2278 \
2279- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2280+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2281 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2282 __cu_len; \
2283 })
2284@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2285 long __cu_len = (n); \
2286 \
2287 __chk_user_ptr(__cu_from); \
2288- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2289+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2290 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2291 __cu_len; \
2292 })
2293diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2294index 24603be..948052d 100644
2295--- a/arch/ia64/kernel/module.c
2296+++ b/arch/ia64/kernel/module.c
2297@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2298 void
2299 module_free (struct module *mod, void *module_region)
2300 {
2301- if (mod && mod->arch.init_unw_table &&
2302- module_region == mod->module_init) {
2303+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2304 unw_remove_unwind_table(mod->arch.init_unw_table);
2305 mod->arch.init_unw_table = NULL;
2306 }
2307@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2308 }
2309
2310 static inline int
2311+in_init_rx (const struct module *mod, uint64_t addr)
2312+{
2313+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2314+}
2315+
2316+static inline int
2317+in_init_rw (const struct module *mod, uint64_t addr)
2318+{
2319+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2320+}
2321+
2322+static inline int
2323 in_init (const struct module *mod, uint64_t addr)
2324 {
2325- return addr - (uint64_t) mod->module_init < mod->init_size;
2326+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2327+}
2328+
2329+static inline int
2330+in_core_rx (const struct module *mod, uint64_t addr)
2331+{
2332+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2333+}
2334+
2335+static inline int
2336+in_core_rw (const struct module *mod, uint64_t addr)
2337+{
2338+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2339 }
2340
2341 static inline int
2342 in_core (const struct module *mod, uint64_t addr)
2343 {
2344- return addr - (uint64_t) mod->module_core < mod->core_size;
2345+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2346 }
2347
2348 static inline int
2349@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2350 break;
2351
2352 case RV_BDREL:
2353- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2354+ if (in_init_rx(mod, val))
2355+ val -= (uint64_t) mod->module_init_rx;
2356+ else if (in_init_rw(mod, val))
2357+ val -= (uint64_t) mod->module_init_rw;
2358+ else if (in_core_rx(mod, val))
2359+ val -= (uint64_t) mod->module_core_rx;
2360+ else if (in_core_rw(mod, val))
2361+ val -= (uint64_t) mod->module_core_rw;
2362 break;
2363
2364 case RV_LTV:
2365@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2366 * addresses have been selected...
2367 */
2368 uint64_t gp;
2369- if (mod->core_size > MAX_LTOFF)
2370+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2371 /*
2372 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2373 * at the end of the module.
2374 */
2375- gp = mod->core_size - MAX_LTOFF / 2;
2376+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2377 else
2378- gp = mod->core_size / 2;
2379- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2380+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2381+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2382 mod->arch.gp = gp;
2383 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2384 }
2385diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2386index 609d500..7dde2a8 100644
2387--- a/arch/ia64/kernel/sys_ia64.c
2388+++ b/arch/ia64/kernel/sys_ia64.c
2389@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2390 if (REGION_NUMBER(addr) == RGN_HPAGE)
2391 addr = 0;
2392 #endif
2393+
2394+#ifdef CONFIG_PAX_RANDMMAP
2395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2396+ addr = mm->free_area_cache;
2397+ else
2398+#endif
2399+
2400 if (!addr)
2401 addr = mm->free_area_cache;
2402
2403@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2404 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2405 /* At this point: (!vma || addr < vma->vm_end). */
2406 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2407- if (start_addr != TASK_UNMAPPED_BASE) {
2408+ if (start_addr != mm->mmap_base) {
2409 /* Start a new search --- just in case we missed some holes. */
2410- addr = TASK_UNMAPPED_BASE;
2411+ addr = mm->mmap_base;
2412 goto full_search;
2413 }
2414 return -ENOMEM;
2415 }
2416- if (!vma || addr + len <= vma->vm_start) {
2417+ if (check_heap_stack_gap(vma, addr, len)) {
2418 /* Remember the address where we stopped this search: */
2419 mm->free_area_cache = addr + len;
2420 return addr;
2421diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2422index 53c0ba0..2accdde 100644
2423--- a/arch/ia64/kernel/vmlinux.lds.S
2424+++ b/arch/ia64/kernel/vmlinux.lds.S
2425@@ -199,7 +199,7 @@ SECTIONS {
2426 /* Per-cpu data: */
2427 . = ALIGN(PERCPU_PAGE_SIZE);
2428 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2429- __phys_per_cpu_start = __per_cpu_load;
2430+ __phys_per_cpu_start = per_cpu_load;
2431 /*
2432 * ensure percpu data fits
2433 * into percpu page size
2434diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2435index 20b3593..1ce77f0 100644
2436--- a/arch/ia64/mm/fault.c
2437+++ b/arch/ia64/mm/fault.c
2438@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2439 return pte_present(pte);
2440 }
2441
2442+#ifdef CONFIG_PAX_PAGEEXEC
2443+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2444+{
2445+ unsigned long i;
2446+
2447+ printk(KERN_ERR "PAX: bytes at PC: ");
2448+ for (i = 0; i < 8; i++) {
2449+ unsigned int c;
2450+ if (get_user(c, (unsigned int *)pc+i))
2451+ printk(KERN_CONT "???????? ");
2452+ else
2453+ printk(KERN_CONT "%08x ", c);
2454+ }
2455+ printk("\n");
2456+}
2457+#endif
2458+
2459 void __kprobes
2460 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2461 {
2462@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2463 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2464 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2465
2466- if ((vma->vm_flags & mask) != mask)
2467+ if ((vma->vm_flags & mask) != mask) {
2468+
2469+#ifdef CONFIG_PAX_PAGEEXEC
2470+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2471+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2472+ goto bad_area;
2473+
2474+ up_read(&mm->mmap_sem);
2475+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2476+ do_group_exit(SIGKILL);
2477+ }
2478+#endif
2479+
2480 goto bad_area;
2481
2482+ }
2483+
2484 /*
2485 * If for any reason at all we couldn't handle the fault, make
2486 * sure we exit gracefully rather than endlessly redo the
2487diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2488index 5ca674b..e0e1b70 100644
2489--- a/arch/ia64/mm/hugetlbpage.c
2490+++ b/arch/ia64/mm/hugetlbpage.c
2491@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2492 /* At this point: (!vmm || addr < vmm->vm_end). */
2493 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2494 return -ENOMEM;
2495- if (!vmm || (addr + len) <= vmm->vm_start)
2496+ if (check_heap_stack_gap(vmm, addr, len))
2497 return addr;
2498 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2499 }
2500diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2501index 13df239d..cb52116 100644
2502--- a/arch/ia64/mm/init.c
2503+++ b/arch/ia64/mm/init.c
2504@@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2505 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2506 vma->vm_end = vma->vm_start + PAGE_SIZE;
2507 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2508+
2509+#ifdef CONFIG_PAX_PAGEEXEC
2510+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2511+ vma->vm_flags &= ~VM_EXEC;
2512+
2513+#ifdef CONFIG_PAX_MPROTECT
2514+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2515+ vma->vm_flags &= ~VM_MAYEXEC;
2516+#endif
2517+
2518+ }
2519+#endif
2520+
2521 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2522 down_write(&current->mm->mmap_sem);
2523 if (insert_vm_struct(current->mm, vma)) {
2524diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2525index 40b3ee9..8c2c112 100644
2526--- a/arch/m32r/include/asm/cache.h
2527+++ b/arch/m32r/include/asm/cache.h
2528@@ -1,8 +1,10 @@
2529 #ifndef _ASM_M32R_CACHE_H
2530 #define _ASM_M32R_CACHE_H
2531
2532+#include <linux/const.h>
2533+
2534 /* L1 cache line size */
2535 #define L1_CACHE_SHIFT 4
2536-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2537+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2538
2539 #endif /* _ASM_M32R_CACHE_H */
2540diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2541index 82abd15..d95ae5d 100644
2542--- a/arch/m32r/lib/usercopy.c
2543+++ b/arch/m32r/lib/usercopy.c
2544@@ -14,6 +14,9 @@
2545 unsigned long
2546 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2547 {
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 prefetch(from);
2552 if (access_ok(VERIFY_WRITE, to, n))
2553 __copy_user(to,from,n);
2554@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2555 unsigned long
2556 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2557 {
2558+ if ((long)n < 0)
2559+ return n;
2560+
2561 prefetchw(to);
2562 if (access_ok(VERIFY_READ, from, n))
2563 __copy_user_zeroing(to,from,n);
2564diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2565index 0395c51..5f26031 100644
2566--- a/arch/m68k/include/asm/cache.h
2567+++ b/arch/m68k/include/asm/cache.h
2568@@ -4,9 +4,11 @@
2569 #ifndef __ARCH_M68K_CACHE_H
2570 #define __ARCH_M68K_CACHE_H
2571
2572+#include <linux/const.h>
2573+
2574 /* bytes per L1 cache line */
2575 #define L1_CACHE_SHIFT 4
2576-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2577+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2578
2579 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2580
2581diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2582index 4efe96a..60e8699 100644
2583--- a/arch/microblaze/include/asm/cache.h
2584+++ b/arch/microblaze/include/asm/cache.h
2585@@ -13,11 +13,12 @@
2586 #ifndef _ASM_MICROBLAZE_CACHE_H
2587 #define _ASM_MICROBLAZE_CACHE_H
2588
2589+#include <linux/const.h>
2590 #include <asm/registers.h>
2591
2592 #define L1_CACHE_SHIFT 5
2593 /* word-granular cache in microblaze */
2594-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2595+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2596
2597 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2598
2599diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2600index 1d93f81..67794d0 100644
2601--- a/arch/mips/include/asm/atomic.h
2602+++ b/arch/mips/include/asm/atomic.h
2603@@ -21,6 +21,10 @@
2604 #include <asm/war.h>
2605 #include <asm/system.h>
2606
2607+#ifdef CONFIG_GENERIC_ATOMIC64
2608+#include <asm-generic/atomic64.h>
2609+#endif
2610+
2611 #define ATOMIC_INIT(i) { (i) }
2612
2613 /*
2614@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2615 */
2616 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2617
2618+#define atomic64_read_unchecked(v) atomic64_read(v)
2619+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2620+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2621+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2622+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2623+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2624+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2625+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2626+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2627+
2628 #endif /* CONFIG_64BIT */
2629
2630 /*
2631diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2632index b4db69f..8f3b093 100644
2633--- a/arch/mips/include/asm/cache.h
2634+++ b/arch/mips/include/asm/cache.h
2635@@ -9,10 +9,11 @@
2636 #ifndef _ASM_CACHE_H
2637 #define _ASM_CACHE_H
2638
2639+#include <linux/const.h>
2640 #include <kmalloc.h>
2641
2642 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2643-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2644+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2645
2646 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2647 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2648diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2649index 455c0ac..ad65fbe 100644
2650--- a/arch/mips/include/asm/elf.h
2651+++ b/arch/mips/include/asm/elf.h
2652@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2653 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2654 #endif
2655
2656+#ifdef CONFIG_PAX_ASLR
2657+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2658+
2659+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2660+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2661+#endif
2662+
2663 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2664 struct linux_binprm;
2665 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2666 int uses_interp);
2667
2668-struct mm_struct;
2669-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2670-#define arch_randomize_brk arch_randomize_brk
2671-
2672 #endif /* _ASM_ELF_H */
2673diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2674index da9bd7d..91aa7ab 100644
2675--- a/arch/mips/include/asm/page.h
2676+++ b/arch/mips/include/asm/page.h
2677@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2678 #ifdef CONFIG_CPU_MIPS32
2679 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2680 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2681- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2682+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2683 #else
2684 typedef struct { unsigned long long pte; } pte_t;
2685 #define pte_val(x) ((x).pte)
2686diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2687index 6018c80..7c37203 100644
2688--- a/arch/mips/include/asm/system.h
2689+++ b/arch/mips/include/asm/system.h
2690@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2691 */
2692 #define __ARCH_WANT_UNLOCKED_CTXSW
2693
2694-extern unsigned long arch_align_stack(unsigned long sp);
2695+#define arch_align_stack(x) ((x) & ~0xfUL)
2696
2697 #endif /* _ASM_SYSTEM_H */
2698diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2699index 9fdd8bc..4bd7f1a 100644
2700--- a/arch/mips/kernel/binfmt_elfn32.c
2701+++ b/arch/mips/kernel/binfmt_elfn32.c
2702@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2703 #undef ELF_ET_DYN_BASE
2704 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2705
2706+#ifdef CONFIG_PAX_ASLR
2707+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2708+
2709+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2710+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2711+#endif
2712+
2713 #include <asm/processor.h>
2714 #include <linux/module.h>
2715 #include <linux/elfcore.h>
2716diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2717index ff44823..97f8906 100644
2718--- a/arch/mips/kernel/binfmt_elfo32.c
2719+++ b/arch/mips/kernel/binfmt_elfo32.c
2720@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2721 #undef ELF_ET_DYN_BASE
2722 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2723
2724+#ifdef CONFIG_PAX_ASLR
2725+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2726+
2727+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2728+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2729+#endif
2730+
2731 #include <asm/processor.h>
2732
2733 /*
2734diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2735index 7955409..ceaea7c 100644
2736--- a/arch/mips/kernel/process.c
2737+++ b/arch/mips/kernel/process.c
2738@@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2739 out:
2740 return pc;
2741 }
2742-
2743-/*
2744- * Don't forget that the stack pointer must be aligned on a 8 bytes
2745- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2746- */
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751-
2752- return sp & ALMASK;
2753-}
2754diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2755index 69ebd58..e4bff83 100644
2756--- a/arch/mips/mm/fault.c
2757+++ b/arch/mips/mm/fault.c
2758@@ -28,6 +28,23 @@
2759 #include <asm/highmem.h> /* For VMALLOC_END */
2760 #include <linux/kdebug.h>
2761
2762+#ifdef CONFIG_PAX_PAGEEXEC
2763+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2764+{
2765+ unsigned long i;
2766+
2767+ printk(KERN_ERR "PAX: bytes at PC: ");
2768+ for (i = 0; i < 5; i++) {
2769+ unsigned int c;
2770+ if (get_user(c, (unsigned int *)pc+i))
2771+ printk(KERN_CONT "???????? ");
2772+ else
2773+ printk(KERN_CONT "%08x ", c);
2774+ }
2775+ printk("\n");
2776+}
2777+#endif
2778+
2779 /*
2780 * This routine handles page faults. It determines the address,
2781 * and the problem, and then passes it off to one of the appropriate
2782diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2783index 302d779..7d35bf8 100644
2784--- a/arch/mips/mm/mmap.c
2785+++ b/arch/mips/mm/mmap.c
2786@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2787 do_color_align = 1;
2788
2789 /* requesting a specific address */
2790+
2791+#ifdef CONFIG_PAX_RANDMMAP
2792+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2793+#endif
2794+
2795 if (addr) {
2796 if (do_color_align)
2797 addr = COLOUR_ALIGN(addr, pgoff);
2798@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2799 addr = PAGE_ALIGN(addr);
2800
2801 vma = find_vma(mm, addr);
2802- if (TASK_SIZE - len >= addr &&
2803- (!vma || addr + len <= vma->vm_start))
2804+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2805 return addr;
2806 }
2807
2808@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2809 /* At this point: (!vma || addr < vma->vm_end). */
2810 if (TASK_SIZE - len < addr)
2811 return -ENOMEM;
2812- if (!vma || addr + len <= vma->vm_start)
2813+ if (check_heap_stack_gap(vmm, addr, len))
2814 return addr;
2815 addr = vma->vm_end;
2816 if (do_color_align)
2817@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2818 /* make sure it can fit in the remaining address space */
2819 if (likely(addr > len)) {
2820 vma = find_vma(mm, addr - len);
2821- if (!vma || addr <= vma->vm_start) {
2822+ if (check_heap_stack_gap(vmm, addr - len, len))
2823 /* cache the address as a hint for next time */
2824 return mm->free_area_cache = addr - len;
2825 }
2826@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2827 * return with success:
2828 */
2829 vma = find_vma(mm, addr);
2830- if (likely(!vma || addr + len <= vma->vm_start)) {
2831+ if (check_heap_stack_gap(vmm, addr, len)) {
2832 /* cache the address as a hint for next time */
2833 return mm->free_area_cache = addr;
2834 }
2835@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2836 mm->unmap_area = arch_unmap_area_topdown;
2837 }
2838 }
2839-
2840-static inline unsigned long brk_rnd(void)
2841-{
2842- unsigned long rnd = get_random_int();
2843-
2844- rnd = rnd << PAGE_SHIFT;
2845- /* 8MB for 32bit, 256MB for 64bit */
2846- if (TASK_IS_32BIT_ADDR)
2847- rnd = rnd & 0x7ffffful;
2848- else
2849- rnd = rnd & 0xffffffful;
2850-
2851- return rnd;
2852-}
2853-
2854-unsigned long arch_randomize_brk(struct mm_struct *mm)
2855-{
2856- unsigned long base = mm->brk;
2857- unsigned long ret;
2858-
2859- ret = PAGE_ALIGN(base + brk_rnd());
2860-
2861- if (ret < mm->brk)
2862- return mm->brk;
2863-
2864- return ret;
2865-}
2866diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2867index 967d144..db12197 100644
2868--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2869+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2870@@ -11,12 +11,14 @@
2871 #ifndef _ASM_PROC_CACHE_H
2872 #define _ASM_PROC_CACHE_H
2873
2874+#include <linux/const.h>
2875+
2876 /* L1 cache */
2877
2878 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2879 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2880-#define L1_CACHE_BYTES 16 /* bytes per entry */
2881 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2882+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2883 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2884
2885 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2886diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2887index bcb5df2..84fabd2 100644
2888--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2889+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2890@@ -16,13 +16,15 @@
2891 #ifndef _ASM_PROC_CACHE_H
2892 #define _ASM_PROC_CACHE_H
2893
2894+#include <linux/const.h>
2895+
2896 /*
2897 * L1 cache
2898 */
2899 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2900 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2901-#define L1_CACHE_BYTES 32 /* bytes per entry */
2902 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2903+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2904 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2905
2906 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2907diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2908index 4ce7a01..449202a 100644
2909--- a/arch/openrisc/include/asm/cache.h
2910+++ b/arch/openrisc/include/asm/cache.h
2911@@ -19,11 +19,13 @@
2912 #ifndef __ASM_OPENRISC_CACHE_H
2913 #define __ASM_OPENRISC_CACHE_H
2914
2915+#include <linux/const.h>
2916+
2917 /* FIXME: How can we replace these with values from the CPU...
2918 * they shouldn't be hard-coded!
2919 */
2920
2921-#define L1_CACHE_BYTES 16
2922 #define L1_CACHE_SHIFT 4
2923+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2924
2925 #endif /* __ASM_OPENRISC_CACHE_H */
2926diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2927index 4054b31..a10c105 100644
2928--- a/arch/parisc/include/asm/atomic.h
2929+++ b/arch/parisc/include/asm/atomic.h
2930@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2931
2932 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2933
2934+#define atomic64_read_unchecked(v) atomic64_read(v)
2935+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2936+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2937+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2938+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2939+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2940+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2941+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2942+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2943+
2944 #endif /* !CONFIG_64BIT */
2945
2946
2947diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2948index 47f11c7..3420df2 100644
2949--- a/arch/parisc/include/asm/cache.h
2950+++ b/arch/parisc/include/asm/cache.h
2951@@ -5,6 +5,7 @@
2952 #ifndef __ARCH_PARISC_CACHE_H
2953 #define __ARCH_PARISC_CACHE_H
2954
2955+#include <linux/const.h>
2956
2957 /*
2958 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2959@@ -15,13 +16,13 @@
2960 * just ruin performance.
2961 */
2962 #ifdef CONFIG_PA20
2963-#define L1_CACHE_BYTES 64
2964 #define L1_CACHE_SHIFT 6
2965 #else
2966-#define L1_CACHE_BYTES 32
2967 #define L1_CACHE_SHIFT 5
2968 #endif
2969
2970+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2971+
2972 #ifndef __ASSEMBLY__
2973
2974 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2975diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2976index 19f6cb1..6c78cf2 100644
2977--- a/arch/parisc/include/asm/elf.h
2978+++ b/arch/parisc/include/asm/elf.h
2979@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2980
2981 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2982
2983+#ifdef CONFIG_PAX_ASLR
2984+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2985+
2986+#define PAX_DELTA_MMAP_LEN 16
2987+#define PAX_DELTA_STACK_LEN 16
2988+#endif
2989+
2990 /* This yields a mask that user programs can use to figure out what
2991 instruction set this CPU supports. This could be done in user space,
2992 but it's not easy, and we've already done it here. */
2993diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2994index 22dadeb..f6c2be4 100644
2995--- a/arch/parisc/include/asm/pgtable.h
2996+++ b/arch/parisc/include/asm/pgtable.h
2997@@ -210,6 +210,17 @@ struct vm_area_struct;
2998 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2999 #define PAGE_COPY PAGE_EXECREAD
3000 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3001+
3002+#ifdef CONFIG_PAX_PAGEEXEC
3003+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3004+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3005+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3006+#else
3007+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3008+# define PAGE_COPY_NOEXEC PAGE_COPY
3009+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3010+#endif
3011+
3012 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3013 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3014 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3015diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3016index 5e34ccf..672bc9c 100644
3017--- a/arch/parisc/kernel/module.c
3018+++ b/arch/parisc/kernel/module.c
3019@@ -98,16 +98,38 @@
3020
3021 /* three functions to determine where in the module core
3022 * or init pieces the location is */
3023+static inline int in_init_rx(struct module *me, void *loc)
3024+{
3025+ return (loc >= me->module_init_rx &&
3026+ loc < (me->module_init_rx + me->init_size_rx));
3027+}
3028+
3029+static inline int in_init_rw(struct module *me, void *loc)
3030+{
3031+ return (loc >= me->module_init_rw &&
3032+ loc < (me->module_init_rw + me->init_size_rw));
3033+}
3034+
3035 static inline int in_init(struct module *me, void *loc)
3036 {
3037- return (loc >= me->module_init &&
3038- loc <= (me->module_init + me->init_size));
3039+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3040+}
3041+
3042+static inline int in_core_rx(struct module *me, void *loc)
3043+{
3044+ return (loc >= me->module_core_rx &&
3045+ loc < (me->module_core_rx + me->core_size_rx));
3046+}
3047+
3048+static inline int in_core_rw(struct module *me, void *loc)
3049+{
3050+ return (loc >= me->module_core_rw &&
3051+ loc < (me->module_core_rw + me->core_size_rw));
3052 }
3053
3054 static inline int in_core(struct module *me, void *loc)
3055 {
3056- return (loc >= me->module_core &&
3057- loc <= (me->module_core + me->core_size));
3058+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3059 }
3060
3061 static inline int in_local(struct module *me, void *loc)
3062@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3063 }
3064
3065 /* align things a bit */
3066- me->core_size = ALIGN(me->core_size, 16);
3067- me->arch.got_offset = me->core_size;
3068- me->core_size += gots * sizeof(struct got_entry);
3069+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3070+ me->arch.got_offset = me->core_size_rw;
3071+ me->core_size_rw += gots * sizeof(struct got_entry);
3072
3073- me->core_size = ALIGN(me->core_size, 16);
3074- me->arch.fdesc_offset = me->core_size;
3075- me->core_size += fdescs * sizeof(Elf_Fdesc);
3076+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3077+ me->arch.fdesc_offset = me->core_size_rw;
3078+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3079
3080 me->arch.got_max = gots;
3081 me->arch.fdesc_max = fdescs;
3082@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3083
3084 BUG_ON(value == 0);
3085
3086- got = me->module_core + me->arch.got_offset;
3087+ got = me->module_core_rw + me->arch.got_offset;
3088 for (i = 0; got[i].addr; i++)
3089 if (got[i].addr == value)
3090 goto out;
3091@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3092 #ifdef CONFIG_64BIT
3093 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3094 {
3095- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3096+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3097
3098 if (!value) {
3099 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3100@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3101
3102 /* Create new one */
3103 fdesc->addr = value;
3104- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3105+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3106 return (Elf_Addr)fdesc;
3107 }
3108 #endif /* CONFIG_64BIT */
3109@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3110
3111 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3112 end = table + sechdrs[me->arch.unwind_section].sh_size;
3113- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3114+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3115
3116 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3117 me->arch.unwind_section, table, end, gp);
3118diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3119index c9b9322..02d8940 100644
3120--- a/arch/parisc/kernel/sys_parisc.c
3121+++ b/arch/parisc/kernel/sys_parisc.c
3122@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3123 /* At this point: (!vma || addr < vma->vm_end). */
3124 if (TASK_SIZE - len < addr)
3125 return -ENOMEM;
3126- if (!vma || addr + len <= vma->vm_start)
3127+ if (check_heap_stack_gap(vma, addr, len))
3128 return addr;
3129 addr = vma->vm_end;
3130 }
3131@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3132 /* At this point: (!vma || addr < vma->vm_end). */
3133 if (TASK_SIZE - len < addr)
3134 return -ENOMEM;
3135- if (!vma || addr + len <= vma->vm_start)
3136+ if (check_heap_stack_gap(vma, addr, len))
3137 return addr;
3138 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3139 if (addr < vma->vm_end) /* handle wraparound */
3140@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3141 if (flags & MAP_FIXED)
3142 return addr;
3143 if (!addr)
3144- addr = TASK_UNMAPPED_BASE;
3145+ addr = current->mm->mmap_base;
3146
3147 if (filp) {
3148 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3149diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3150index f19e660..414fe24 100644
3151--- a/arch/parisc/kernel/traps.c
3152+++ b/arch/parisc/kernel/traps.c
3153@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3154
3155 down_read(&current->mm->mmap_sem);
3156 vma = find_vma(current->mm,regs->iaoq[0]);
3157- if (vma && (regs->iaoq[0] >= vma->vm_start)
3158- && (vma->vm_flags & VM_EXEC)) {
3159-
3160+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3161 fault_address = regs->iaoq[0];
3162 fault_space = regs->iasq[0];
3163
3164diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3165index 18162ce..94de376 100644
3166--- a/arch/parisc/mm/fault.c
3167+++ b/arch/parisc/mm/fault.c
3168@@ -15,6 +15,7 @@
3169 #include <linux/sched.h>
3170 #include <linux/interrupt.h>
3171 #include <linux/module.h>
3172+#include <linux/unistd.h>
3173
3174 #include <asm/uaccess.h>
3175 #include <asm/traps.h>
3176@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3177 static unsigned long
3178 parisc_acctyp(unsigned long code, unsigned int inst)
3179 {
3180- if (code == 6 || code == 16)
3181+ if (code == 6 || code == 7 || code == 16)
3182 return VM_EXEC;
3183
3184 switch (inst & 0xf0000000) {
3185@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3186 }
3187 #endif
3188
3189+#ifdef CONFIG_PAX_PAGEEXEC
3190+/*
3191+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3192+ *
3193+ * returns 1 when task should be killed
3194+ * 2 when rt_sigreturn trampoline was detected
3195+ * 3 when unpatched PLT trampoline was detected
3196+ */
3197+static int pax_handle_fetch_fault(struct pt_regs *regs)
3198+{
3199+
3200+#ifdef CONFIG_PAX_EMUPLT
3201+ int err;
3202+
3203+ do { /* PaX: unpatched PLT emulation */
3204+ unsigned int bl, depwi;
3205+
3206+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3207+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3208+
3209+ if (err)
3210+ break;
3211+
3212+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3213+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3214+
3215+ err = get_user(ldw, (unsigned int *)addr);
3216+ err |= get_user(bv, (unsigned int *)(addr+4));
3217+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3218+
3219+ if (err)
3220+ break;
3221+
3222+ if (ldw == 0x0E801096U &&
3223+ bv == 0xEAC0C000U &&
3224+ ldw2 == 0x0E881095U)
3225+ {
3226+ unsigned int resolver, map;
3227+
3228+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3229+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3230+ if (err)
3231+ break;
3232+
3233+ regs->gr[20] = instruction_pointer(regs)+8;
3234+ regs->gr[21] = map;
3235+ regs->gr[22] = resolver;
3236+ regs->iaoq[0] = resolver | 3UL;
3237+ regs->iaoq[1] = regs->iaoq[0] + 4;
3238+ return 3;
3239+ }
3240+ }
3241+ } while (0);
3242+#endif
3243+
3244+#ifdef CONFIG_PAX_EMUTRAMP
3245+
3246+#ifndef CONFIG_PAX_EMUSIGRT
3247+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3248+ return 1;
3249+#endif
3250+
3251+ do { /* PaX: rt_sigreturn emulation */
3252+ unsigned int ldi1, ldi2, bel, nop;
3253+
3254+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3255+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3256+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3257+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3258+
3259+ if (err)
3260+ break;
3261+
3262+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3263+ ldi2 == 0x3414015AU &&
3264+ bel == 0xE4008200U &&
3265+ nop == 0x08000240U)
3266+ {
3267+ regs->gr[25] = (ldi1 & 2) >> 1;
3268+ regs->gr[20] = __NR_rt_sigreturn;
3269+ regs->gr[31] = regs->iaoq[1] + 16;
3270+ regs->sr[0] = regs->iasq[1];
3271+ regs->iaoq[0] = 0x100UL;
3272+ regs->iaoq[1] = regs->iaoq[0] + 4;
3273+ regs->iasq[0] = regs->sr[2];
3274+ regs->iasq[1] = regs->sr[2];
3275+ return 2;
3276+ }
3277+ } while (0);
3278+#endif
3279+
3280+ return 1;
3281+}
3282+
3283+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3284+{
3285+ unsigned long i;
3286+
3287+ printk(KERN_ERR "PAX: bytes at PC: ");
3288+ for (i = 0; i < 5; i++) {
3289+ unsigned int c;
3290+ if (get_user(c, (unsigned int *)pc+i))
3291+ printk(KERN_CONT "???????? ");
3292+ else
3293+ printk(KERN_CONT "%08x ", c);
3294+ }
3295+ printk("\n");
3296+}
3297+#endif
3298+
3299 int fixup_exception(struct pt_regs *regs)
3300 {
3301 const struct exception_table_entry *fix;
3302@@ -192,8 +303,33 @@ good_area:
3303
3304 acc_type = parisc_acctyp(code,regs->iir);
3305
3306- if ((vma->vm_flags & acc_type) != acc_type)
3307+ if ((vma->vm_flags & acc_type) != acc_type) {
3308+
3309+#ifdef CONFIG_PAX_PAGEEXEC
3310+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3311+ (address & ~3UL) == instruction_pointer(regs))
3312+ {
3313+ up_read(&mm->mmap_sem);
3314+ switch (pax_handle_fetch_fault(regs)) {
3315+
3316+#ifdef CONFIG_PAX_EMUPLT
3317+ case 3:
3318+ return;
3319+#endif
3320+
3321+#ifdef CONFIG_PAX_EMUTRAMP
3322+ case 2:
3323+ return;
3324+#endif
3325+
3326+ }
3327+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3328+ do_group_exit(SIGKILL);
3329+ }
3330+#endif
3331+
3332 goto bad_area;
3333+ }
3334
3335 /*
3336 * If for any reason at all we couldn't handle the fault, make
3337diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3338index 02e41b5..ec6e26c 100644
3339--- a/arch/powerpc/include/asm/atomic.h
3340+++ b/arch/powerpc/include/asm/atomic.h
3341@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3342
3343 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3344
3345+#define atomic64_read_unchecked(v) atomic64_read(v)
3346+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3347+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3348+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3349+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3350+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3351+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3352+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3353+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3354+
3355 #endif /* __powerpc64__ */
3356
3357 #endif /* __KERNEL__ */
3358diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3359index 4b50941..5605819 100644
3360--- a/arch/powerpc/include/asm/cache.h
3361+++ b/arch/powerpc/include/asm/cache.h
3362@@ -3,6 +3,7 @@
3363
3364 #ifdef __KERNEL__
3365
3366+#include <linux/const.h>
3367
3368 /* bytes per L1 cache line */
3369 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3370@@ -22,7 +23,7 @@
3371 #define L1_CACHE_SHIFT 7
3372 #endif
3373
3374-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3375+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3376
3377 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3378
3379diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3380index 3bf9cca..e7457d0 100644
3381--- a/arch/powerpc/include/asm/elf.h
3382+++ b/arch/powerpc/include/asm/elf.h
3383@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3384 the loader. We need to make sure that it is out of the way of the program
3385 that it will "exec", and that there is sufficient room for the brk. */
3386
3387-extern unsigned long randomize_et_dyn(unsigned long base);
3388-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3389+#define ELF_ET_DYN_BASE (0x20000000)
3390+
3391+#ifdef CONFIG_PAX_ASLR
3392+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3393+
3394+#ifdef __powerpc64__
3395+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3396+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3397+#else
3398+#define PAX_DELTA_MMAP_LEN 15
3399+#define PAX_DELTA_STACK_LEN 15
3400+#endif
3401+#endif
3402
3403 /*
3404 * Our registers are always unsigned longs, whether we're a 32 bit
3405@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3406 (0x7ff >> (PAGE_SHIFT - 12)) : \
3407 (0x3ffff >> (PAGE_SHIFT - 12)))
3408
3409-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3410-#define arch_randomize_brk arch_randomize_brk
3411-
3412 #endif /* __KERNEL__ */
3413
3414 /*
3415diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3416index bca8fdc..61e9580 100644
3417--- a/arch/powerpc/include/asm/kmap_types.h
3418+++ b/arch/powerpc/include/asm/kmap_types.h
3419@@ -27,6 +27,7 @@ enum km_type {
3420 KM_PPC_SYNC_PAGE,
3421 KM_PPC_SYNC_ICACHE,
3422 KM_KDB,
3423+ KM_CLEARPAGE,
3424 KM_TYPE_NR
3425 };
3426
3427diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3428index d4a7f64..451de1c 100644
3429--- a/arch/powerpc/include/asm/mman.h
3430+++ b/arch/powerpc/include/asm/mman.h
3431@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3432 }
3433 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3434
3435-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3436+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3437 {
3438 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3439 }
3440diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3441index f072e97..b436dee 100644
3442--- a/arch/powerpc/include/asm/page.h
3443+++ b/arch/powerpc/include/asm/page.h
3444@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3445 * and needs to be executable. This means the whole heap ends
3446 * up being executable.
3447 */
3448-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3449- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3450+#define VM_DATA_DEFAULT_FLAGS32 \
3451+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3452+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3453
3454 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3455 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3456@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3457 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3458 #endif
3459
3460+#define ktla_ktva(addr) (addr)
3461+#define ktva_ktla(addr) (addr)
3462+
3463 /*
3464 * Use the top bit of the higher-level page table entries to indicate whether
3465 * the entries we point to contain hugepages. This works because we know that
3466diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3467index fed85e6..da5c71b 100644
3468--- a/arch/powerpc/include/asm/page_64.h
3469+++ b/arch/powerpc/include/asm/page_64.h
3470@@ -146,15 +146,18 @@ do { \
3471 * stack by default, so in the absence of a PT_GNU_STACK program header
3472 * we turn execute permission off.
3473 */
3474-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3475- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3476+#define VM_STACK_DEFAULT_FLAGS32 \
3477+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3478+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3479
3480 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3481 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3482
3483+#ifndef CONFIG_PAX_PAGEEXEC
3484 #define VM_STACK_DEFAULT_FLAGS \
3485 (is_32bit_task() ? \
3486 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3487+#endif
3488
3489 #include <asm-generic/getorder.h>
3490
3491diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3492index 2e0e411..7899c68 100644
3493--- a/arch/powerpc/include/asm/pgtable.h
3494+++ b/arch/powerpc/include/asm/pgtable.h
3495@@ -2,6 +2,7 @@
3496 #define _ASM_POWERPC_PGTABLE_H
3497 #ifdef __KERNEL__
3498
3499+#include <linux/const.h>
3500 #ifndef __ASSEMBLY__
3501 #include <asm/processor.h> /* For TASK_SIZE */
3502 #include <asm/mmu.h>
3503diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3504index 4aad413..85d86bf 100644
3505--- a/arch/powerpc/include/asm/pte-hash32.h
3506+++ b/arch/powerpc/include/asm/pte-hash32.h
3507@@ -21,6 +21,7 @@
3508 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3509 #define _PAGE_USER 0x004 /* usermode access allowed */
3510 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3511+#define _PAGE_EXEC _PAGE_GUARDED
3512 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3513 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3514 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3515diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3516index 7fdc2c0..e47a9b02d3 100644
3517--- a/arch/powerpc/include/asm/reg.h
3518+++ b/arch/powerpc/include/asm/reg.h
3519@@ -212,6 +212,7 @@
3520 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3521 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3522 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3523+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3524 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3525 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3526 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3527diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3528index c377457..3c69fbc 100644
3529--- a/arch/powerpc/include/asm/system.h
3530+++ b/arch/powerpc/include/asm/system.h
3531@@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3532 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3533 #endif
3534
3535-extern unsigned long arch_align_stack(unsigned long sp);
3536+#define arch_align_stack(x) ((x) & ~0xfUL)
3537
3538 /* Used in very early kernel initialization. */
3539 extern unsigned long reloc_offset(void);
3540diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3541index bd0fb84..a42a14b 100644
3542--- a/arch/powerpc/include/asm/uaccess.h
3543+++ b/arch/powerpc/include/asm/uaccess.h
3544@@ -13,6 +13,8 @@
3545 #define VERIFY_READ 0
3546 #define VERIFY_WRITE 1
3547
3548+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3549+
3550 /*
3551 * The fs value determines whether argument validity checking should be
3552 * performed or not. If get_fs() == USER_DS, checking is performed, with
3553@@ -327,52 +329,6 @@ do { \
3554 extern unsigned long __copy_tofrom_user(void __user *to,
3555 const void __user *from, unsigned long size);
3556
3557-#ifndef __powerpc64__
3558-
3559-static inline unsigned long copy_from_user(void *to,
3560- const void __user *from, unsigned long n)
3561-{
3562- unsigned long over;
3563-
3564- if (access_ok(VERIFY_READ, from, n))
3565- return __copy_tofrom_user((__force void __user *)to, from, n);
3566- if ((unsigned long)from < TASK_SIZE) {
3567- over = (unsigned long)from + n - TASK_SIZE;
3568- return __copy_tofrom_user((__force void __user *)to, from,
3569- n - over) + over;
3570- }
3571- return n;
3572-}
3573-
3574-static inline unsigned long copy_to_user(void __user *to,
3575- const void *from, unsigned long n)
3576-{
3577- unsigned long over;
3578-
3579- if (access_ok(VERIFY_WRITE, to, n))
3580- return __copy_tofrom_user(to, (__force void __user *)from, n);
3581- if ((unsigned long)to < TASK_SIZE) {
3582- over = (unsigned long)to + n - TASK_SIZE;
3583- return __copy_tofrom_user(to, (__force void __user *)from,
3584- n - over) + over;
3585- }
3586- return n;
3587-}
3588-
3589-#else /* __powerpc64__ */
3590-
3591-#define __copy_in_user(to, from, size) \
3592- __copy_tofrom_user((to), (from), (size))
3593-
3594-extern unsigned long copy_from_user(void *to, const void __user *from,
3595- unsigned long n);
3596-extern unsigned long copy_to_user(void __user *to, const void *from,
3597- unsigned long n);
3598-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3599- unsigned long n);
3600-
3601-#endif /* __powerpc64__ */
3602-
3603 static inline unsigned long __copy_from_user_inatomic(void *to,
3604 const void __user *from, unsigned long n)
3605 {
3606@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3607 if (ret == 0)
3608 return 0;
3609 }
3610+
3611+ if (!__builtin_constant_p(n))
3612+ check_object_size(to, n, false);
3613+
3614 return __copy_tofrom_user((__force void __user *)to, from, n);
3615 }
3616
3617@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3618 if (ret == 0)
3619 return 0;
3620 }
3621+
3622+ if (!__builtin_constant_p(n))
3623+ check_object_size(from, n, true);
3624+
3625 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3626 }
3627
3628@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3629 return __copy_to_user_inatomic(to, from, size);
3630 }
3631
3632+#ifndef __powerpc64__
3633+
3634+static inline unsigned long __must_check copy_from_user(void *to,
3635+ const void __user *from, unsigned long n)
3636+{
3637+ unsigned long over;
3638+
3639+ if ((long)n < 0)
3640+ return n;
3641+
3642+ if (access_ok(VERIFY_READ, from, n)) {
3643+ if (!__builtin_constant_p(n))
3644+ check_object_size(to, n, false);
3645+ return __copy_tofrom_user((__force void __user *)to, from, n);
3646+ }
3647+ if ((unsigned long)from < TASK_SIZE) {
3648+ over = (unsigned long)from + n - TASK_SIZE;
3649+ if (!__builtin_constant_p(n - over))
3650+ check_object_size(to, n - over, false);
3651+ return __copy_tofrom_user((__force void __user *)to, from,
3652+ n - over) + over;
3653+ }
3654+ return n;
3655+}
3656+
3657+static inline unsigned long __must_check copy_to_user(void __user *to,
3658+ const void *from, unsigned long n)
3659+{
3660+ unsigned long over;
3661+
3662+ if ((long)n < 0)
3663+ return n;
3664+
3665+ if (access_ok(VERIFY_WRITE, to, n)) {
3666+ if (!__builtin_constant_p(n))
3667+ check_object_size(from, n, true);
3668+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3669+ }
3670+ if ((unsigned long)to < TASK_SIZE) {
3671+ over = (unsigned long)to + n - TASK_SIZE;
 3672+ if (!__builtin_constant_p(n - over))
3673+ check_object_size(from, n - over, true);
3674+ return __copy_tofrom_user(to, (__force void __user *)from,
3675+ n - over) + over;
3676+ }
3677+ return n;
3678+}
3679+
3680+#else /* __powerpc64__ */
3681+
3682+#define __copy_in_user(to, from, size) \
3683+ __copy_tofrom_user((to), (from), (size))
3684+
3685+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3686+{
3687+ if ((long)n < 0 || n > INT_MAX)
3688+ return n;
3689+
3690+ if (!__builtin_constant_p(n))
3691+ check_object_size(to, n, false);
3692+
3693+ if (likely(access_ok(VERIFY_READ, from, n)))
3694+ n = __copy_from_user(to, from, n);
3695+ else
3696+ memset(to, 0, n);
3697+ return n;
3698+}
3699+
3700+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3701+{
3702+ if ((long)n < 0 || n > INT_MAX)
3703+ return n;
3704+
3705+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3706+ if (!__builtin_constant_p(n))
3707+ check_object_size(from, n, true);
3708+ n = __copy_to_user(to, from, n);
3709+ }
3710+ return n;
3711+}
3712+
3713+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3714+ unsigned long n);
3715+
3716+#endif /* __powerpc64__ */
3717+
3718 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3719
3720 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3721diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3722index 429983c..7af363b 100644
3723--- a/arch/powerpc/kernel/exceptions-64e.S
3724+++ b/arch/powerpc/kernel/exceptions-64e.S
3725@@ -587,6 +587,7 @@ storage_fault_common:
3726 std r14,_DAR(r1)
3727 std r15,_DSISR(r1)
3728 addi r3,r1,STACK_FRAME_OVERHEAD
3729+ bl .save_nvgprs
3730 mr r4,r14
3731 mr r5,r15
3732 ld r14,PACA_EXGEN+EX_R14(r13)
3733@@ -596,8 +597,7 @@ storage_fault_common:
3734 cmpdi r3,0
3735 bne- 1f
3736 b .ret_from_except_lite
3737-1: bl .save_nvgprs
3738- mr r5,r3
3739+1: mr r5,r3
3740 addi r3,r1,STACK_FRAME_OVERHEAD
3741 ld r4,_DAR(r1)
3742 bl .bad_page_fault
3743diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3744index 15c5a4f..22a4000 100644
3745--- a/arch/powerpc/kernel/exceptions-64s.S
3746+++ b/arch/powerpc/kernel/exceptions-64s.S
3747@@ -1004,10 +1004,10 @@ handle_page_fault:
3748 11: ld r4,_DAR(r1)
3749 ld r5,_DSISR(r1)
3750 addi r3,r1,STACK_FRAME_OVERHEAD
3751+ bl .save_nvgprs
3752 bl .do_page_fault
3753 cmpdi r3,0
3754 beq+ 13f
3755- bl .save_nvgprs
3756 mr r5,r3
3757 addi r3,r1,STACK_FRAME_OVERHEAD
3758 lwz r4,_DAR(r1)
3759diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3760index 01e2877..a1ba360 100644
3761--- a/arch/powerpc/kernel/irq.c
3762+++ b/arch/powerpc/kernel/irq.c
3763@@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3764 host->ops = ops;
3765 host->of_node = of_node_get(of_node);
3766
3767- if (host->ops->match == NULL)
3768- host->ops->match = default_irq_host_match;
3769-
3770 raw_spin_lock_irqsave(&irq_big_lock, flags);
3771
3772 /* If it's a legacy controller, check for duplicates and
3773@@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3774 */
3775 raw_spin_lock_irqsave(&irq_big_lock, flags);
3776 list_for_each_entry(h, &irq_hosts, link)
3777- if (h->ops->match(h, node)) {
3778+ if (h->ops->match) {
3779+ if (h->ops->match(h, node)) {
3780+ found = h;
3781+ break;
3782+ }
3783+ } else if (default_irq_host_match(h, node)) {
3784 found = h;
3785 break;
3786 }
3787diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3788index 0b6d796..d760ddb 100644
3789--- a/arch/powerpc/kernel/module_32.c
3790+++ b/arch/powerpc/kernel/module_32.c
3791@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3792 me->arch.core_plt_section = i;
3793 }
3794 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3795- printk("Module doesn't contain .plt or .init.plt sections.\n");
3796+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3797 return -ENOEXEC;
3798 }
3799
3800@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3801
3802 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3803 /* Init, or core PLT? */
3804- if (location >= mod->module_core
3805- && location < mod->module_core + mod->core_size)
3806+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3807+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3808 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3809- else
3810+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3811+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3812 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3813+ else {
3814+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3815+ return ~0UL;
3816+ }
3817
3818 /* Find this entry, or if that fails, the next avail. entry */
3819 while (entry->jump[0]) {
3820diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3821index d817ab0..b23b18e 100644
3822--- a/arch/powerpc/kernel/process.c
3823+++ b/arch/powerpc/kernel/process.c
3824@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3825 * Lookup NIP late so we have the best change of getting the
3826 * above info out without failing
3827 */
3828- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3829- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3830+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3831+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3832 #endif
3833 show_stack(current, (unsigned long *) regs->gpr[1]);
3834 if (!user_mode(regs))
3835@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3836 newsp = stack[0];
3837 ip = stack[STACK_FRAME_LR_SAVE];
3838 if (!firstframe || ip != lr) {
3839- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3840+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3842 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3843- printk(" (%pS)",
3844+ printk(" (%pA)",
3845 (void *)current->ret_stack[curr_frame].ret);
3846 curr_frame--;
3847 }
3848@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3849 struct pt_regs *regs = (struct pt_regs *)
3850 (sp + STACK_FRAME_OVERHEAD);
3851 lr = regs->link;
3852- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3853+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3854 regs->trap, (void *)regs->nip, (void *)lr);
3855 firstframe = 1;
3856 }
3857@@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
3858 }
3859
3860 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3861-
3862-unsigned long arch_align_stack(unsigned long sp)
3863-{
3864- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3865- sp -= get_random_int() & ~PAGE_MASK;
3866- return sp & ~0xf;
3867-}
3868-
3869-static inline unsigned long brk_rnd(void)
3870-{
3871- unsigned long rnd = 0;
3872-
3873- /* 8MB for 32bit, 1GB for 64bit */
3874- if (is_32bit_task())
3875- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3876- else
3877- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3878-
3879- return rnd << PAGE_SHIFT;
3880-}
3881-
3882-unsigned long arch_randomize_brk(struct mm_struct *mm)
3883-{
3884- unsigned long base = mm->brk;
3885- unsigned long ret;
3886-
3887-#ifdef CONFIG_PPC_STD_MMU_64
3888- /*
3889- * If we are using 1TB segments and we are allowed to randomise
3890- * the heap, we can put it above 1TB so it is backed by a 1TB
3891- * segment. Otherwise the heap will be in the bottom 1TB
3892- * which always uses 256MB segments and this may result in a
3893- * performance penalty.
3894- */
3895- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3896- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3897-#endif
3898-
3899- ret = PAGE_ALIGN(base + brk_rnd());
3900-
3901- if (ret < mm->brk)
3902- return mm->brk;
3903-
3904- return ret;
3905-}
3906-
3907-unsigned long randomize_et_dyn(unsigned long base)
3908-{
3909- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3910-
3911- if (ret < base)
3912- return base;
3913-
3914- return ret;
3915-}
3916diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3917index 836a5a1..27289a3 100644
3918--- a/arch/powerpc/kernel/signal_32.c
3919+++ b/arch/powerpc/kernel/signal_32.c
3920@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3921 /* Save user registers on the stack */
3922 frame = &rt_sf->uc.uc_mcontext;
3923 addr = frame;
3924- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3925+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3926 if (save_user_regs(regs, frame, 0, 1))
3927 goto badframe;
3928 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3929diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3930index a50b5ec..547078a 100644
3931--- a/arch/powerpc/kernel/signal_64.c
3932+++ b/arch/powerpc/kernel/signal_64.c
3933@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3934 current->thread.fpscr.val = 0;
3935
3936 /* Set up to return from userspace. */
3937- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3938+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3939 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3940 } else {
3941 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3942diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3943index c091527..5592625 100644
3944--- a/arch/powerpc/kernel/traps.c
3945+++ b/arch/powerpc/kernel/traps.c
3946@@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
3947 return flags;
3948 }
3949
3950+extern void gr_handle_kernel_exploit(void);
3951+
3952 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3953 int signr)
3954 {
3955@@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3956 panic("Fatal exception in interrupt");
3957 if (panic_on_oops)
3958 panic("Fatal exception");
3959+
3960+ gr_handle_kernel_exploit();
3961+
3962 do_exit(signr);
3963 }
3964
3965diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3966index 7d14bb6..1305601 100644
3967--- a/arch/powerpc/kernel/vdso.c
3968+++ b/arch/powerpc/kernel/vdso.c
3969@@ -35,6 +35,7 @@
3970 #include <asm/firmware.h>
3971 #include <asm/vdso.h>
3972 #include <asm/vdso_datapage.h>
3973+#include <asm/mman.h>
3974
3975 #include "setup.h"
3976
3977@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3978 vdso_base = VDSO32_MBASE;
3979 #endif
3980
3981- current->mm->context.vdso_base = 0;
3982+ current->mm->context.vdso_base = ~0UL;
3983
3984 /* vDSO has a problem and was disabled, just don't "enable" it for the
3985 * process
3986@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3987 vdso_base = get_unmapped_area(NULL, vdso_base,
3988 (vdso_pages << PAGE_SHIFT) +
3989 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3990- 0, 0);
3991+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3992 if (IS_ERR_VALUE(vdso_base)) {
3993 rc = vdso_base;
3994 goto fail_mmapsem;
3995diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3996index 5eea6f3..5d10396 100644
3997--- a/arch/powerpc/lib/usercopy_64.c
3998+++ b/arch/powerpc/lib/usercopy_64.c
3999@@ -9,22 +9,6 @@
4000 #include <linux/module.h>
4001 #include <asm/uaccess.h>
4002
4003-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4004-{
4005- if (likely(access_ok(VERIFY_READ, from, n)))
4006- n = __copy_from_user(to, from, n);
4007- else
4008- memset(to, 0, n);
4009- return n;
4010-}
4011-
4012-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4013-{
4014- if (likely(access_ok(VERIFY_WRITE, to, n)))
4015- n = __copy_to_user(to, from, n);
4016- return n;
4017-}
4018-
4019 unsigned long copy_in_user(void __user *to, const void __user *from,
4020 unsigned long n)
4021 {
4022@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4023 return n;
4024 }
4025
4026-EXPORT_SYMBOL(copy_from_user);
4027-EXPORT_SYMBOL(copy_to_user);
4028 EXPORT_SYMBOL(copy_in_user);
4029
4030diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4031index 2f0d1b0..36fb5cc 100644
4032--- a/arch/powerpc/mm/fault.c
4033+++ b/arch/powerpc/mm/fault.c
4034@@ -32,6 +32,10 @@
4035 #include <linux/perf_event.h>
4036 #include <linux/magic.h>
4037 #include <linux/ratelimit.h>
4038+#include <linux/slab.h>
4039+#include <linux/pagemap.h>
4040+#include <linux/compiler.h>
4041+#include <linux/unistd.h>
4042
4043 #include <asm/firmware.h>
4044 #include <asm/page.h>
4045@@ -43,6 +47,7 @@
4046 #include <asm/tlbflush.h>
4047 #include <asm/siginfo.h>
4048 #include <mm/mmu_decl.h>
4049+#include <asm/ptrace.h>
4050
4051 #include "icswx.h"
4052
4053@@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4054 }
4055 #endif
4056
4057+#ifdef CONFIG_PAX_PAGEEXEC
4058+/*
4059+ * PaX: decide what to do with offenders (regs->nip = fault address)
4060+ *
4061+ * returns 1 when task should be killed
4062+ */
4063+static int pax_handle_fetch_fault(struct pt_regs *regs)
4064+{
4065+ return 1;
4066+}
4067+
4068+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4069+{
4070+ unsigned long i;
4071+
4072+ printk(KERN_ERR "PAX: bytes at PC: ");
4073+ for (i = 0; i < 5; i++) {
4074+ unsigned int c;
4075+ if (get_user(c, (unsigned int __user *)pc+i))
4076+ printk(KERN_CONT "???????? ");
4077+ else
4078+ printk(KERN_CONT "%08x ", c);
4079+ }
4080+ printk("\n");
4081+}
4082+#endif
4083+
4084 /*
4085 * Check whether the instruction at regs->nip is a store using
4086 * an update addressing form which will update r1.
4087@@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4088 * indicate errors in DSISR but can validly be set in SRR1.
4089 */
4090 if (trap == 0x400)
4091- error_code &= 0x48200000;
4092+ error_code &= 0x58200000;
4093 else
4094 is_write = error_code & DSISR_ISSTORE;
4095 #else
4096@@ -276,7 +308,7 @@ good_area:
4097 * "undefined". Of those that can be set, this is the only
4098 * one which seems bad.
4099 */
4100- if (error_code & 0x10000000)
4101+ if (error_code & DSISR_GUARDED)
4102 /* Guarded storage error. */
4103 goto bad_area;
4104 #endif /* CONFIG_8xx */
4105@@ -291,7 +323,7 @@ good_area:
4106 * processors use the same I/D cache coherency mechanism
4107 * as embedded.
4108 */
4109- if (error_code & DSISR_PROTFAULT)
4110+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4111 goto bad_area;
4112 #endif /* CONFIG_PPC_STD_MMU */
4113
4114@@ -360,6 +392,23 @@ bad_area:
4115 bad_area_nosemaphore:
4116 /* User mode accesses cause a SIGSEGV */
4117 if (user_mode(regs)) {
4118+
4119+#ifdef CONFIG_PAX_PAGEEXEC
4120+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4121+#ifdef CONFIG_PPC_STD_MMU
4122+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4123+#else
4124+ if (is_exec && regs->nip == address) {
4125+#endif
4126+ switch (pax_handle_fetch_fault(regs)) {
4127+ }
4128+
4129+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4130+ do_group_exit(SIGKILL);
4131+ }
4132+ }
4133+#endif
4134+
4135 _exception(SIGSEGV, regs, code, address);
4136 return 0;
4137 }
4138diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4139index 67a42ed..1c7210c 100644
4140--- a/arch/powerpc/mm/mmap_64.c
4141+++ b/arch/powerpc/mm/mmap_64.c
4142@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4143 */
4144 if (mmap_is_legacy()) {
4145 mm->mmap_base = TASK_UNMAPPED_BASE;
4146+
4147+#ifdef CONFIG_PAX_RANDMMAP
4148+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4149+ mm->mmap_base += mm->delta_mmap;
4150+#endif
4151+
4152 mm->get_unmapped_area = arch_get_unmapped_area;
4153 mm->unmap_area = arch_unmap_area;
4154 } else {
4155 mm->mmap_base = mmap_base();
4156+
4157+#ifdef CONFIG_PAX_RANDMMAP
4158+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4159+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4160+#endif
4161+
4162 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4163 mm->unmap_area = arch_unmap_area_topdown;
4164 }
4165diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4166index 73709f7..6b90313 100644
4167--- a/arch/powerpc/mm/slice.c
4168+++ b/arch/powerpc/mm/slice.c
4169@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4170 if ((mm->task_size - len) < addr)
4171 return 0;
4172 vma = find_vma(mm, addr);
4173- return (!vma || (addr + len) <= vma->vm_start);
4174+ return check_heap_stack_gap(vma, addr, len);
4175 }
4176
4177 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4178@@ -256,7 +256,7 @@ full_search:
4179 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4180 continue;
4181 }
4182- if (!vma || addr + len <= vma->vm_start) {
4183+ if (check_heap_stack_gap(vma, addr, len)) {
4184 /*
4185 * Remember the place where we stopped the search:
4186 */
4187@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4188 }
4189 }
4190
4191- addr = mm->mmap_base;
4192- while (addr > len) {
4193+ if (mm->mmap_base < len)
4194+ addr = -ENOMEM;
4195+ else
4196+ addr = mm->mmap_base - len;
4197+
4198+ while (!IS_ERR_VALUE(addr)) {
4199 /* Go down by chunk size */
4200- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4201+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4202
4203 /* Check for hit with different page size */
4204 mask = slice_range_to_mask(addr, len);
4205@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4206 * return with success:
4207 */
4208 vma = find_vma(mm, addr);
4209- if (!vma || (addr + len) <= vma->vm_start) {
4210+ if (check_heap_stack_gap(vma, addr, len)) {
4211 /* remember the address as a hint for next time */
4212 if (use_cache)
4213 mm->free_area_cache = addr;
4214@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4215 mm->cached_hole_size = vma->vm_start - addr;
4216
4217 /* try just below the current vma->vm_start */
4218- addr = vma->vm_start;
4219+ addr = skip_heap_stack_gap(vma, len);
4220 }
4221
4222 /*
4223@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4224 if (fixed && addr > (mm->task_size - len))
4225 return -EINVAL;
4226
4227+#ifdef CONFIG_PAX_RANDMMAP
4228+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4229+ addr = 0;
4230+#endif
4231+
4232 /* If hint, make sure it matches our alignment restrictions */
4233 if (!fixed && addr) {
4234 addr = _ALIGN_UP(addr, 1ul << pshift);
4235diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4236index 8517d2a..d2738d4 100644
4237--- a/arch/s390/include/asm/atomic.h
4238+++ b/arch/s390/include/asm/atomic.h
4239@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4240 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4241 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4242
4243+#define atomic64_read_unchecked(v) atomic64_read(v)
4244+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4245+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4246+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4247+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4248+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4249+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4250+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4251+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4252+
4253 #define smp_mb__before_atomic_dec() smp_mb()
4254 #define smp_mb__after_atomic_dec() smp_mb()
4255 #define smp_mb__before_atomic_inc() smp_mb()
4256diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4257index 2a30d5a..5e5586f 100644
4258--- a/arch/s390/include/asm/cache.h
4259+++ b/arch/s390/include/asm/cache.h
4260@@ -11,8 +11,10 @@
4261 #ifndef __ARCH_S390_CACHE_H
4262 #define __ARCH_S390_CACHE_H
4263
4264-#define L1_CACHE_BYTES 256
4265+#include <linux/const.h>
4266+
4267 #define L1_CACHE_SHIFT 8
4268+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4269 #define NET_SKB_PAD 32
4270
4271 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4272diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4273index 547f1a6..0b22b53 100644
4274--- a/arch/s390/include/asm/elf.h
4275+++ b/arch/s390/include/asm/elf.h
4276@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4277 the loader. We need to make sure that it is out of the way of the program
4278 that it will "exec", and that there is sufficient room for the brk. */
4279
4280-extern unsigned long randomize_et_dyn(unsigned long base);
4281-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4282+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4283+
4284+#ifdef CONFIG_PAX_ASLR
4285+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4286+
4287+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4288+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4289+#endif
4290
4291 /* This yields a mask that user programs can use to figure out what
4292 instruction set this CPU supports. */
4293@@ -211,7 +217,4 @@ struct linux_binprm;
4294 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4295 int arch_setup_additional_pages(struct linux_binprm *, int);
4296
4297-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4298-#define arch_randomize_brk arch_randomize_brk
4299-
4300 #endif
4301diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4302index d73cc6b..1a296ad 100644
4303--- a/arch/s390/include/asm/system.h
4304+++ b/arch/s390/include/asm/system.h
4305@@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4306 extern void (*_machine_halt)(void);
4307 extern void (*_machine_power_off)(void);
4308
4309-extern unsigned long arch_align_stack(unsigned long sp);
4310+#define arch_align_stack(x) ((x) & ~0xfUL)
4311
4312 static inline int tprot(unsigned long addr)
4313 {
4314diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4315index 2b23885..e136e31 100644
4316--- a/arch/s390/include/asm/uaccess.h
4317+++ b/arch/s390/include/asm/uaccess.h
4318@@ -235,6 +235,10 @@ static inline unsigned long __must_check
4319 copy_to_user(void __user *to, const void *from, unsigned long n)
4320 {
4321 might_fault();
4322+
4323+ if ((long)n < 0)
4324+ return n;
4325+
4326 if (access_ok(VERIFY_WRITE, to, n))
4327 n = __copy_to_user(to, from, n);
4328 return n;
4329@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4330 static inline unsigned long __must_check
4331 __copy_from_user(void *to, const void __user *from, unsigned long n)
4332 {
4333+ if ((long)n < 0)
4334+ return n;
4335+
4336 if (__builtin_constant_p(n) && (n <= 256))
4337 return uaccess.copy_from_user_small(n, from, to);
4338 else
4339@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4340 unsigned int sz = __compiletime_object_size(to);
4341
4342 might_fault();
4343+
4344+ if ((long)n < 0)
4345+ return n;
4346+
4347 if (unlikely(sz != -1 && sz < n)) {
4348 copy_from_user_overflow();
4349 return n;
4350diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4351index dfcb343..eda788a 100644
4352--- a/arch/s390/kernel/module.c
4353+++ b/arch/s390/kernel/module.c
4354@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4355
4356 /* Increase core size by size of got & plt and set start
4357 offsets for got and plt. */
4358- me->core_size = ALIGN(me->core_size, 4);
4359- me->arch.got_offset = me->core_size;
4360- me->core_size += me->arch.got_size;
4361- me->arch.plt_offset = me->core_size;
4362- me->core_size += me->arch.plt_size;
4363+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4364+ me->arch.got_offset = me->core_size_rw;
4365+ me->core_size_rw += me->arch.got_size;
4366+ me->arch.plt_offset = me->core_size_rx;
4367+ me->core_size_rx += me->arch.plt_size;
4368 return 0;
4369 }
4370
4371@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4372 if (info->got_initialized == 0) {
4373 Elf_Addr *gotent;
4374
4375- gotent = me->module_core + me->arch.got_offset +
4376+ gotent = me->module_core_rw + me->arch.got_offset +
4377 info->got_offset;
4378 *gotent = val;
4379 info->got_initialized = 1;
4380@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4381 else if (r_type == R_390_GOTENT ||
4382 r_type == R_390_GOTPLTENT)
4383 *(unsigned int *) loc =
4384- (val + (Elf_Addr) me->module_core - loc) >> 1;
4385+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4386 else if (r_type == R_390_GOT64 ||
4387 r_type == R_390_GOTPLT64)
4388 *(unsigned long *) loc = val;
4389@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4390 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4391 if (info->plt_initialized == 0) {
4392 unsigned int *ip;
4393- ip = me->module_core + me->arch.plt_offset +
4394+ ip = me->module_core_rx + me->arch.plt_offset +
4395 info->plt_offset;
4396 #ifndef CONFIG_64BIT
4397 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4398@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4399 val - loc + 0xffffUL < 0x1ffffeUL) ||
4400 (r_type == R_390_PLT32DBL &&
4401 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4402- val = (Elf_Addr) me->module_core +
4403+ val = (Elf_Addr) me->module_core_rx +
4404 me->arch.plt_offset +
4405 info->plt_offset;
4406 val += rela->r_addend - loc;
4407@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4408 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4409 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4410 val = val + rela->r_addend -
4411- ((Elf_Addr) me->module_core + me->arch.got_offset);
4412+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4413 if (r_type == R_390_GOTOFF16)
4414 *(unsigned short *) loc = val;
4415 else if (r_type == R_390_GOTOFF32)
4416@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4417 break;
4418 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4419 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4420- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4421+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4422 rela->r_addend - loc;
4423 if (r_type == R_390_GOTPC)
4424 *(unsigned int *) loc = val;
4425diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4426index e795933..b32563c 100644
4427--- a/arch/s390/kernel/process.c
4428+++ b/arch/s390/kernel/process.c
4429@@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4430 }
4431 return 0;
4432 }
4433-
4434-unsigned long arch_align_stack(unsigned long sp)
4435-{
4436- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4437- sp -= get_random_int() & ~PAGE_MASK;
4438- return sp & ~0xf;
4439-}
4440-
4441-static inline unsigned long brk_rnd(void)
4442-{
4443- /* 8MB for 32bit, 1GB for 64bit */
4444- if (is_32bit_task())
4445- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4446- else
4447- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4448-}
4449-
4450-unsigned long arch_randomize_brk(struct mm_struct *mm)
4451-{
4452- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4453-
4454- if (ret < mm->brk)
4455- return mm->brk;
4456- return ret;
4457-}
4458-
4459-unsigned long randomize_et_dyn(unsigned long base)
4460-{
4461- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4462-
4463- if (!(current->flags & PF_RANDOMIZE))
4464- return base;
4465- if (ret < base)
4466- return base;
4467- return ret;
4468-}
4469diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4470index a0155c0..34cc491 100644
4471--- a/arch/s390/mm/mmap.c
4472+++ b/arch/s390/mm/mmap.c
4473@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4474 */
4475 if (mmap_is_legacy()) {
4476 mm->mmap_base = TASK_UNMAPPED_BASE;
4477+
4478+#ifdef CONFIG_PAX_RANDMMAP
4479+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4480+ mm->mmap_base += mm->delta_mmap;
4481+#endif
4482+
4483 mm->get_unmapped_area = arch_get_unmapped_area;
4484 mm->unmap_area = arch_unmap_area;
4485 } else {
4486 mm->mmap_base = mmap_base();
4487+
4488+#ifdef CONFIG_PAX_RANDMMAP
4489+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4490+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4491+#endif
4492+
4493 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4494 mm->unmap_area = arch_unmap_area_topdown;
4495 }
4496@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4497 */
4498 if (mmap_is_legacy()) {
4499 mm->mmap_base = TASK_UNMAPPED_BASE;
4500+
4501+#ifdef CONFIG_PAX_RANDMMAP
4502+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4503+ mm->mmap_base += mm->delta_mmap;
4504+#endif
4505+
4506 mm->get_unmapped_area = s390_get_unmapped_area;
4507 mm->unmap_area = arch_unmap_area;
4508 } else {
4509 mm->mmap_base = mmap_base();
4510+
4511+#ifdef CONFIG_PAX_RANDMMAP
4512+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4513+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4514+#endif
4515+
4516 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4517 mm->unmap_area = arch_unmap_area_topdown;
4518 }
4519diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4520index ae3d59f..f65f075 100644
4521--- a/arch/score/include/asm/cache.h
4522+++ b/arch/score/include/asm/cache.h
4523@@ -1,7 +1,9 @@
4524 #ifndef _ASM_SCORE_CACHE_H
4525 #define _ASM_SCORE_CACHE_H
4526
4527+#include <linux/const.h>
4528+
4529 #define L1_CACHE_SHIFT 4
4530-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4531+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4532
4533 #endif /* _ASM_SCORE_CACHE_H */
4534diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4535index 589d5c7..669e274 100644
4536--- a/arch/score/include/asm/system.h
4537+++ b/arch/score/include/asm/system.h
4538@@ -17,7 +17,7 @@ do { \
4539 #define finish_arch_switch(prev) do {} while (0)
4540
4541 typedef void (*vi_handler_t)(void);
4542-extern unsigned long arch_align_stack(unsigned long sp);
4543+#define arch_align_stack(x) (x)
4544
4545 #define mb() barrier()
4546 #define rmb() barrier()
4547diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4548index 25d0803..d6c8e36 100644
4549--- a/arch/score/kernel/process.c
4550+++ b/arch/score/kernel/process.c
4551@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4552
4553 return task_pt_regs(task)->cp0_epc;
4554 }
4555-
4556-unsigned long arch_align_stack(unsigned long sp)
4557-{
4558- return sp;
4559-}
4560diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4561index ef9e555..331bd29 100644
4562--- a/arch/sh/include/asm/cache.h
4563+++ b/arch/sh/include/asm/cache.h
4564@@ -9,10 +9,11 @@
4565 #define __ASM_SH_CACHE_H
4566 #ifdef __KERNEL__
4567
4568+#include <linux/const.h>
4569 #include <linux/init.h>
4570 #include <cpu/cache.h>
4571
4572-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4573+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4574
4575 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4576
4577diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4578index afeb710..d1d1289 100644
4579--- a/arch/sh/mm/mmap.c
4580+++ b/arch/sh/mm/mmap.c
4581@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4582 addr = PAGE_ALIGN(addr);
4583
4584 vma = find_vma(mm, addr);
4585- if (TASK_SIZE - len >= addr &&
4586- (!vma || addr + len <= vma->vm_start))
4587+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4588 return addr;
4589 }
4590
4591@@ -106,7 +105,7 @@ full_search:
4592 }
4593 return -ENOMEM;
4594 }
4595- if (likely(!vma || addr + len <= vma->vm_start)) {
4596+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4597 /*
4598 * Remember the place where we stopped the search:
4599 */
4600@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4601 addr = PAGE_ALIGN(addr);
4602
4603 vma = find_vma(mm, addr);
4604- if (TASK_SIZE - len >= addr &&
4605- (!vma || addr + len <= vma->vm_start))
4606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4607 return addr;
4608 }
4609
4610@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4611 /* make sure it can fit in the remaining address space */
4612 if (likely(addr > len)) {
4613 vma = find_vma(mm, addr-len);
4614- if (!vma || addr <= vma->vm_start) {
4615+ if (check_heap_stack_gap(vma, addr - len, len)) {
4616 /* remember the address as a hint for next time */
4617 return (mm->free_area_cache = addr-len);
4618 }
4619@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4620 if (unlikely(mm->mmap_base < len))
4621 goto bottomup;
4622
4623- addr = mm->mmap_base-len;
4624- if (do_colour_align)
4625- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4626+ addr = mm->mmap_base - len;
4627
4628 do {
4629+ if (do_colour_align)
4630+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4631 /*
4632 * Lookup failure means no vma is above this address,
4633 * else if new region fits below vma->vm_start,
4634 * return with success:
4635 */
4636 vma = find_vma(mm, addr);
4637- if (likely(!vma || addr+len <= vma->vm_start)) {
4638+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4639 /* remember the address as a hint for next time */
4640 return (mm->free_area_cache = addr);
4641 }
4642@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4643 mm->cached_hole_size = vma->vm_start - addr;
4644
4645 /* try just below the current vma->vm_start */
4646- addr = vma->vm_start-len;
4647- if (do_colour_align)
4648- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4649- } while (likely(len < vma->vm_start));
4650+ addr = skip_heap_stack_gap(vma, len);
4651+ } while (!IS_ERR_VALUE(addr));
4652
4653 bottomup:
4654 /*
4655diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4656index eddcfb3..b117d90 100644
4657--- a/arch/sparc/Makefile
4658+++ b/arch/sparc/Makefile
4659@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4660 # Export what is needed by arch/sparc/boot/Makefile
4661 export VMLINUX_INIT VMLINUX_MAIN
4662 VMLINUX_INIT := $(head-y) $(init-y)
4663-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4664+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4665 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4666 VMLINUX_MAIN += $(drivers-y) $(net-y)
4667
4668diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4669index 9f421df..b81fc12 100644
4670--- a/arch/sparc/include/asm/atomic_64.h
4671+++ b/arch/sparc/include/asm/atomic_64.h
4672@@ -14,18 +14,40 @@
4673 #define ATOMIC64_INIT(i) { (i) }
4674
4675 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4676+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4677+{
4678+ return v->counter;
4679+}
4680 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4681+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4682+{
4683+ return v->counter;
4684+}
4685
4686 #define atomic_set(v, i) (((v)->counter) = i)
4687+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4688+{
4689+ v->counter = i;
4690+}
4691 #define atomic64_set(v, i) (((v)->counter) = i)
4692+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4693+{
4694+ v->counter = i;
4695+}
4696
4697 extern void atomic_add(int, atomic_t *);
4698+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4699 extern void atomic64_add(long, atomic64_t *);
4700+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4701 extern void atomic_sub(int, atomic_t *);
4702+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4703 extern void atomic64_sub(long, atomic64_t *);
4704+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4705
4706 extern int atomic_add_ret(int, atomic_t *);
4707+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4708 extern long atomic64_add_ret(long, atomic64_t *);
4709+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4710 extern int atomic_sub_ret(int, atomic_t *);
4711 extern long atomic64_sub_ret(long, atomic64_t *);
4712
4713@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4714 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4715
4716 #define atomic_inc_return(v) atomic_add_ret(1, v)
4717+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4718+{
4719+ return atomic_add_ret_unchecked(1, v);
4720+}
4721 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4722+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4723+{
4724+ return atomic64_add_ret_unchecked(1, v);
4725+}
4726
4727 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4728 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4729
4730 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4731+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4732+{
4733+ return atomic_add_ret_unchecked(i, v);
4734+}
4735 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4736+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4737+{
4738+ return atomic64_add_ret_unchecked(i, v);
4739+}
4740
4741 /*
4742 * atomic_inc_and_test - increment and test
4743@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4744 * other cases.
4745 */
4746 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4747+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4748+{
4749+ return atomic_inc_return_unchecked(v) == 0;
4750+}
4751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4752
4753 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4754@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4755 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4756
4757 #define atomic_inc(v) atomic_add(1, v)
4758+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4759+{
4760+ atomic_add_unchecked(1, v);
4761+}
4762 #define atomic64_inc(v) atomic64_add(1, v)
4763+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4764+{
4765+ atomic64_add_unchecked(1, v);
4766+}
4767
4768 #define atomic_dec(v) atomic_sub(1, v)
4769+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4770+{
4771+ atomic_sub_unchecked(1, v);
4772+}
4773 #define atomic64_dec(v) atomic64_sub(1, v)
4774+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4775+{
4776+ atomic64_sub_unchecked(1, v);
4777+}
4778
4779 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4780 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4781
4782 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4783+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4784+{
4785+ return cmpxchg(&v->counter, old, new);
4786+}
4787 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4788+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4789+{
4790+ return xchg(&v->counter, new);
4791+}
4792
4793 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4794 {
4795- int c, old;
4796+ int c, old, new;
4797 c = atomic_read(v);
4798 for (;;) {
4799- if (unlikely(c == (u)))
4800+ if (unlikely(c == u))
4801 break;
4802- old = atomic_cmpxchg((v), c, c + (a));
4803+
4804+ asm volatile("addcc %2, %0, %0\n"
4805+
4806+#ifdef CONFIG_PAX_REFCOUNT
4807+ "tvs %%icc, 6\n"
4808+#endif
4809+
4810+ : "=r" (new)
4811+ : "0" (c), "ir" (a)
4812+ : "cc");
4813+
4814+ old = atomic_cmpxchg(v, c, new);
4815 if (likely(old == c))
4816 break;
4817 c = old;
4818@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4819 #define atomic64_cmpxchg(v, o, n) \
4820 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4821 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4822+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4823+{
4824+ return xchg(&v->counter, new);
4825+}
4826
4827 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4828 {
4829- long c, old;
4830+ long c, old, new;
4831 c = atomic64_read(v);
4832 for (;;) {
4833- if (unlikely(c == (u)))
4834+ if (unlikely(c == u))
4835 break;
4836- old = atomic64_cmpxchg((v), c, c + (a));
4837+
4838+ asm volatile("addcc %2, %0, %0\n"
4839+
4840+#ifdef CONFIG_PAX_REFCOUNT
4841+ "tvs %%xcc, 6\n"
4842+#endif
4843+
4844+ : "=r" (new)
4845+ : "0" (c), "ir" (a)
4846+ : "cc");
4847+
4848+ old = atomic64_cmpxchg(v, c, new);
4849 if (likely(old == c))
4850 break;
4851 c = old;
4852 }
4853- return c != (u);
4854+ return c != u;
4855 }
4856
4857 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4858diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4859index 69358b5..9d0d492 100644
4860--- a/arch/sparc/include/asm/cache.h
4861+++ b/arch/sparc/include/asm/cache.h
4862@@ -7,10 +7,12 @@
4863 #ifndef _SPARC_CACHE_H
4864 #define _SPARC_CACHE_H
4865
4866+#include <linux/const.h>
4867+
4868 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4869
4870 #define L1_CACHE_SHIFT 5
4871-#define L1_CACHE_BYTES 32
4872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4873
4874 #ifdef CONFIG_SPARC32
4875 #define SMP_CACHE_BYTES_SHIFT 5
4876diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4877index 4269ca6..e3da77f 100644
4878--- a/arch/sparc/include/asm/elf_32.h
4879+++ b/arch/sparc/include/asm/elf_32.h
4880@@ -114,6 +114,13 @@ typedef struct {
4881
4882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4883
4884+#ifdef CONFIG_PAX_ASLR
4885+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4886+
4887+#define PAX_DELTA_MMAP_LEN 16
4888+#define PAX_DELTA_STACK_LEN 16
4889+#endif
4890+
4891 /* This yields a mask that user programs can use to figure out what
4892 instruction set this cpu supports. This can NOT be done in userspace
4893 on Sparc. */
4894diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4895index 7df8b7f..4946269 100644
4896--- a/arch/sparc/include/asm/elf_64.h
4897+++ b/arch/sparc/include/asm/elf_64.h
4898@@ -180,6 +180,13 @@ typedef struct {
4899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4901
4902+#ifdef CONFIG_PAX_ASLR
4903+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4904+
4905+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4906+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4907+#endif
4908+
4909 extern unsigned long sparc64_elf_hwcap;
4910 #define ELF_HWCAP sparc64_elf_hwcap
4911
4912diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4913index a790cc6..091ed94 100644
4914--- a/arch/sparc/include/asm/pgtable_32.h
4915+++ b/arch/sparc/include/asm/pgtable_32.h
4916@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4917 BTFIXUPDEF_INT(page_none)
4918 BTFIXUPDEF_INT(page_copy)
4919 BTFIXUPDEF_INT(page_readonly)
4920+
4921+#ifdef CONFIG_PAX_PAGEEXEC
4922+BTFIXUPDEF_INT(page_shared_noexec)
4923+BTFIXUPDEF_INT(page_copy_noexec)
4924+BTFIXUPDEF_INT(page_readonly_noexec)
4925+#endif
4926+
4927 BTFIXUPDEF_INT(page_kernel)
4928
4929 #define PMD_SHIFT SUN4C_PMD_SHIFT
4930@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4931 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4932 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4933
4934+#ifdef CONFIG_PAX_PAGEEXEC
4935+extern pgprot_t PAGE_SHARED_NOEXEC;
4936+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4937+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4938+#else
4939+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4940+# define PAGE_COPY_NOEXEC PAGE_COPY
4941+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4942+#endif
4943+
4944 extern unsigned long page_kernel;
4945
4946 #ifdef MODULE
4947diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4948index f6ae2b2..b03ffc7 100644
4949--- a/arch/sparc/include/asm/pgtsrmmu.h
4950+++ b/arch/sparc/include/asm/pgtsrmmu.h
4951@@ -115,6 +115,13 @@
4952 SRMMU_EXEC | SRMMU_REF)
4953 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4954 SRMMU_EXEC | SRMMU_REF)
4955+
4956+#ifdef CONFIG_PAX_PAGEEXEC
4957+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4958+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4959+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4960+#endif
4961+
4962 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4963 SRMMU_DIRTY | SRMMU_REF)
4964
4965diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4966index 9689176..63c18ea 100644
4967--- a/arch/sparc/include/asm/spinlock_64.h
4968+++ b/arch/sparc/include/asm/spinlock_64.h
4969@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4970
4971 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4972
4973-static void inline arch_read_lock(arch_rwlock_t *lock)
4974+static inline void arch_read_lock(arch_rwlock_t *lock)
4975 {
4976 unsigned long tmp1, tmp2;
4977
4978 __asm__ __volatile__ (
4979 "1: ldsw [%2], %0\n"
4980 " brlz,pn %0, 2f\n"
4981-"4: add %0, 1, %1\n"
4982+"4: addcc %0, 1, %1\n"
4983+
4984+#ifdef CONFIG_PAX_REFCOUNT
4985+" tvs %%icc, 6\n"
4986+#endif
4987+
4988 " cas [%2], %0, %1\n"
4989 " cmp %0, %1\n"
4990 " bne,pn %%icc, 1b\n"
4991@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
4992 " .previous"
4993 : "=&r" (tmp1), "=&r" (tmp2)
4994 : "r" (lock)
4995- : "memory");
4996+ : "memory", "cc");
4997 }
4998
4999-static int inline arch_read_trylock(arch_rwlock_t *lock)
5000+static inline int arch_read_trylock(arch_rwlock_t *lock)
5001 {
5002 int tmp1, tmp2;
5003
5004@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5005 "1: ldsw [%2], %0\n"
5006 " brlz,a,pn %0, 2f\n"
5007 " mov 0, %0\n"
5008-" add %0, 1, %1\n"
5009+" addcc %0, 1, %1\n"
5010+
5011+#ifdef CONFIG_PAX_REFCOUNT
5012+" tvs %%icc, 6\n"
5013+#endif
5014+
5015 " cas [%2], %0, %1\n"
5016 " cmp %0, %1\n"
5017 " bne,pn %%icc, 1b\n"
5018@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5019 return tmp1;
5020 }
5021
5022-static void inline arch_read_unlock(arch_rwlock_t *lock)
5023+static inline void arch_read_unlock(arch_rwlock_t *lock)
5024 {
5025 unsigned long tmp1, tmp2;
5026
5027 __asm__ __volatile__(
5028 "1: lduw [%2], %0\n"
5029-" sub %0, 1, %1\n"
5030+" subcc %0, 1, %1\n"
5031+
5032+#ifdef CONFIG_PAX_REFCOUNT
5033+" tvs %%icc, 6\n"
5034+#endif
5035+
5036 " cas [%2], %0, %1\n"
5037 " cmp %0, %1\n"
5038 " bne,pn %%xcc, 1b\n"
5039@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5040 : "memory");
5041 }
5042
5043-static void inline arch_write_lock(arch_rwlock_t *lock)
5044+static inline void arch_write_lock(arch_rwlock_t *lock)
5045 {
5046 unsigned long mask, tmp1, tmp2;
5047
5048@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5049 : "memory");
5050 }
5051
5052-static void inline arch_write_unlock(arch_rwlock_t *lock)
5053+static inline void arch_write_unlock(arch_rwlock_t *lock)
5054 {
5055 __asm__ __volatile__(
5056 " stw %%g0, [%0]"
5057@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5058 : "memory");
5059 }
5060
5061-static int inline arch_write_trylock(arch_rwlock_t *lock)
5062+static inline int arch_write_trylock(arch_rwlock_t *lock)
5063 {
5064 unsigned long mask, tmp1, tmp2, result;
5065
5066diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5067index c2a1080..21ed218 100644
5068--- a/arch/sparc/include/asm/thread_info_32.h
5069+++ b/arch/sparc/include/asm/thread_info_32.h
5070@@ -50,6 +50,8 @@ struct thread_info {
5071 unsigned long w_saved;
5072
5073 struct restart_block restart_block;
5074+
5075+ unsigned long lowest_stack;
5076 };
5077
5078 /*
5079diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5080index 01d057f..0a02f7e 100644
5081--- a/arch/sparc/include/asm/thread_info_64.h
5082+++ b/arch/sparc/include/asm/thread_info_64.h
5083@@ -63,6 +63,8 @@ struct thread_info {
5084 struct pt_regs *kern_una_regs;
5085 unsigned int kern_una_insn;
5086
5087+ unsigned long lowest_stack;
5088+
5089 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5090 };
5091
5092diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5093index e88fbe5..96b0ce5 100644
5094--- a/arch/sparc/include/asm/uaccess.h
5095+++ b/arch/sparc/include/asm/uaccess.h
5096@@ -1,5 +1,13 @@
5097 #ifndef ___ASM_SPARC_UACCESS_H
5098 #define ___ASM_SPARC_UACCESS_H
5099+
5100+#ifdef __KERNEL__
5101+#ifndef __ASSEMBLY__
5102+#include <linux/types.h>
5103+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5104+#endif
5105+#endif
5106+
5107 #if defined(__sparc__) && defined(__arch64__)
5108 #include <asm/uaccess_64.h>
5109 #else
5110diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5111index 8303ac4..07f333d 100644
5112--- a/arch/sparc/include/asm/uaccess_32.h
5113+++ b/arch/sparc/include/asm/uaccess_32.h
5114@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5115
5116 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5117 {
5118- if (n && __access_ok((unsigned long) to, n))
5119+ if ((long)n < 0)
5120+ return n;
5121+
5122+ if (n && __access_ok((unsigned long) to, n)) {
5123+ if (!__builtin_constant_p(n))
5124+ check_object_size(from, n, true);
5125 return __copy_user(to, (__force void __user *) from, n);
5126- else
5127+ } else
5128 return n;
5129 }
5130
5131 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5132 {
5133+ if ((long)n < 0)
5134+ return n;
5135+
5136+ if (!__builtin_constant_p(n))
5137+ check_object_size(from, n, true);
5138+
5139 return __copy_user(to, (__force void __user *) from, n);
5140 }
5141
5142 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5143 {
5144- if (n && __access_ok((unsigned long) from, n))
5145+ if ((long)n < 0)
5146+ return n;
5147+
5148+ if (n && __access_ok((unsigned long) from, n)) {
5149+ if (!__builtin_constant_p(n))
5150+ check_object_size(to, n, false);
5151 return __copy_user((__force void __user *) to, from, n);
5152- else
5153+ } else
5154 return n;
5155 }
5156
5157 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5158 {
5159+ if ((long)n < 0)
5160+ return n;
5161+
5162 return __copy_user((__force void __user *) to, from, n);
5163 }
5164
5165diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5166index 3e1449f..5293a0e 100644
5167--- a/arch/sparc/include/asm/uaccess_64.h
5168+++ b/arch/sparc/include/asm/uaccess_64.h
5169@@ -10,6 +10,7 @@
5170 #include <linux/compiler.h>
5171 #include <linux/string.h>
5172 #include <linux/thread_info.h>
5173+#include <linux/kernel.h>
5174 #include <asm/asi.h>
5175 #include <asm/system.h>
5176 #include <asm/spitfire.h>
5177@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5178 static inline unsigned long __must_check
5179 copy_from_user(void *to, const void __user *from, unsigned long size)
5180 {
5181- unsigned long ret = ___copy_from_user(to, from, size);
5182+ unsigned long ret;
5183
5184+ if ((long)size < 0 || size > INT_MAX)
5185+ return size;
5186+
5187+ if (!__builtin_constant_p(size))
5188+ check_object_size(to, size, false);
5189+
5190+ ret = ___copy_from_user(to, from, size);
5191 if (unlikely(ret))
5192 ret = copy_from_user_fixup(to, from, size);
5193
5194@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5195 static inline unsigned long __must_check
5196 copy_to_user(void __user *to, const void *from, unsigned long size)
5197 {
5198- unsigned long ret = ___copy_to_user(to, from, size);
5199+ unsigned long ret;
5200
5201+ if ((long)size < 0 || size > INT_MAX)
5202+ return size;
5203+
5204+ if (!__builtin_constant_p(size))
5205+ check_object_size(from, size, true);
5206+
5207+ ret = ___copy_to_user(to, from, size);
5208 if (unlikely(ret))
5209 ret = copy_to_user_fixup(to, from, size);
5210 return ret;
5211diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5212index cb85458..e063f17 100644
5213--- a/arch/sparc/kernel/Makefile
5214+++ b/arch/sparc/kernel/Makefile
5215@@ -3,7 +3,7 @@
5216 #
5217
5218 asflags-y := -ansi
5219-ccflags-y := -Werror
5220+#ccflags-y := -Werror
5221
5222 extra-y := head_$(BITS).o
5223 extra-y += init_task.o
5224diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5225index f793742..4d880af 100644
5226--- a/arch/sparc/kernel/process_32.c
5227+++ b/arch/sparc/kernel/process_32.c
5228@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5229 rw->ins[4], rw->ins[5],
5230 rw->ins[6],
5231 rw->ins[7]);
5232- printk("%pS\n", (void *) rw->ins[7]);
5233+ printk("%pA\n", (void *) rw->ins[7]);
5234 rw = (struct reg_window32 *) rw->ins[6];
5235 }
5236 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5237@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5238
5239 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5240 r->psr, r->pc, r->npc, r->y, print_tainted());
5241- printk("PC: <%pS>\n", (void *) r->pc);
5242+ printk("PC: <%pA>\n", (void *) r->pc);
5243 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5244 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5245 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5246 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5247 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5248 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5249- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5250+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5251
5252 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5253 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5254@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5255 rw = (struct reg_window32 *) fp;
5256 pc = rw->ins[7];
5257 printk("[%08lx : ", pc);
5258- printk("%pS ] ", (void *) pc);
5259+ printk("%pA ] ", (void *) pc);
5260 fp = rw->ins[6];
5261 } while (++count < 16);
5262 printk("\n");
5263diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5264index 39d8b05..d1a7d90 100644
5265--- a/arch/sparc/kernel/process_64.c
5266+++ b/arch/sparc/kernel/process_64.c
5267@@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5268 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5269 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5270 if (regs->tstate & TSTATE_PRIV)
5271- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5272+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5273 }
5274
5275 void show_regs(struct pt_regs *regs)
5276 {
5277 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5278 regs->tpc, regs->tnpc, regs->y, print_tainted());
5279- printk("TPC: <%pS>\n", (void *) regs->tpc);
5280+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5281 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5282 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5283 regs->u_regs[3]);
5284@@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5285 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5286 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5287 regs->u_regs[15]);
5288- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5289+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5290 show_regwindow(regs);
5291 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5292 }
5293@@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5294 ((tp && tp->task) ? tp->task->pid : -1));
5295
5296 if (gp->tstate & TSTATE_PRIV) {
5297- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5298+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5299 (void *) gp->tpc,
5300 (void *) gp->o7,
5301 (void *) gp->i7,
5302diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5303index 42b282f..28ce9f2 100644
5304--- a/arch/sparc/kernel/sys_sparc_32.c
5305+++ b/arch/sparc/kernel/sys_sparc_32.c
5306@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5307 if (ARCH_SUN4C && len > 0x20000000)
5308 return -ENOMEM;
5309 if (!addr)
5310- addr = TASK_UNMAPPED_BASE;
5311+ addr = current->mm->mmap_base;
5312
5313 if (flags & MAP_SHARED)
5314 addr = COLOUR_ALIGN(addr);
5315@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5316 }
5317 if (TASK_SIZE - PAGE_SIZE - len < addr)
5318 return -ENOMEM;
5319- if (!vmm || addr + len <= vmm->vm_start)
5320+ if (check_heap_stack_gap(vmm, addr, len))
5321 return addr;
5322 addr = vmm->vm_end;
5323 if (flags & MAP_SHARED)
5324diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5325index 232df99..cee1f9c 100644
5326--- a/arch/sparc/kernel/sys_sparc_64.c
5327+++ b/arch/sparc/kernel/sys_sparc_64.c
5328@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5329 /* We do not accept a shared mapping if it would violate
5330 * cache aliasing constraints.
5331 */
5332- if ((flags & MAP_SHARED) &&
5333+ if ((filp || (flags & MAP_SHARED)) &&
5334 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5335 return -EINVAL;
5336 return addr;
5337@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 if (filp || (flags & MAP_SHARED))
5339 do_color_align = 1;
5340
5341+#ifdef CONFIG_PAX_RANDMMAP
5342+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5343+#endif
5344+
5345 if (addr) {
5346 if (do_color_align)
5347 addr = COLOUR_ALIGN(addr, pgoff);
5348@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5349 addr = PAGE_ALIGN(addr);
5350
5351 vma = find_vma(mm, addr);
5352- if (task_size - len >= addr &&
5353- (!vma || addr + len <= vma->vm_start))
5354+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5355 return addr;
5356 }
5357
5358 if (len > mm->cached_hole_size) {
5359- start_addr = addr = mm->free_area_cache;
5360+ start_addr = addr = mm->free_area_cache;
5361 } else {
5362- start_addr = addr = TASK_UNMAPPED_BASE;
5363+ start_addr = addr = mm->mmap_base;
5364 mm->cached_hole_size = 0;
5365 }
5366
5367@@ -174,14 +177,14 @@ full_search:
5368 vma = find_vma(mm, VA_EXCLUDE_END);
5369 }
5370 if (unlikely(task_size < addr)) {
5371- if (start_addr != TASK_UNMAPPED_BASE) {
5372- start_addr = addr = TASK_UNMAPPED_BASE;
5373+ if (start_addr != mm->mmap_base) {
5374+ start_addr = addr = mm->mmap_base;
5375 mm->cached_hole_size = 0;
5376 goto full_search;
5377 }
5378 return -ENOMEM;
5379 }
5380- if (likely(!vma || addr + len <= vma->vm_start)) {
5381+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5382 /*
5383 * Remember the place where we stopped the search:
5384 */
5385@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5386 /* We do not accept a shared mapping if it would violate
5387 * cache aliasing constraints.
5388 */
5389- if ((flags & MAP_SHARED) &&
5390+ if ((filp || (flags & MAP_SHARED)) &&
5391 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5392 return -EINVAL;
5393 return addr;
5394@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 addr = PAGE_ALIGN(addr);
5396
5397 vma = find_vma(mm, addr);
5398- if (task_size - len >= addr &&
5399- (!vma || addr + len <= vma->vm_start))
5400+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5401 return addr;
5402 }
5403
5404@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5405 /* make sure it can fit in the remaining address space */
5406 if (likely(addr > len)) {
5407 vma = find_vma(mm, addr-len);
5408- if (!vma || addr <= vma->vm_start) {
5409+ if (check_heap_stack_gap(vma, addr - len, len)) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr-len);
5412 }
5413@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 if (unlikely(mm->mmap_base < len))
5415 goto bottomup;
5416
5417- addr = mm->mmap_base-len;
5418- if (do_color_align)
5419- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5420+ addr = mm->mmap_base - len;
5421
5422 do {
5423+ if (do_color_align)
5424+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5425 /*
5426 * Lookup failure means no vma is above this address,
5427 * else if new region fits below vma->vm_start,
5428 * return with success:
5429 */
5430 vma = find_vma(mm, addr);
5431- if (likely(!vma || addr+len <= vma->vm_start)) {
5432+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5433 /* remember the address as a hint for next time */
5434 return (mm->free_area_cache = addr);
5435 }
5436@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5437 mm->cached_hole_size = vma->vm_start - addr;
5438
5439 /* try just below the current vma->vm_start */
5440- addr = vma->vm_start-len;
5441- if (do_color_align)
5442- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5443- } while (likely(len < vma->vm_start));
5444+ addr = skip_heap_stack_gap(vma, len);
5445+ } while (!IS_ERR_VALUE(addr));
5446
5447 bottomup:
5448 /*
5449@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5450 gap == RLIM_INFINITY ||
5451 sysctl_legacy_va_layout) {
5452 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5453+
5454+#ifdef CONFIG_PAX_RANDMMAP
5455+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5456+ mm->mmap_base += mm->delta_mmap;
5457+#endif
5458+
5459 mm->get_unmapped_area = arch_get_unmapped_area;
5460 mm->unmap_area = arch_unmap_area;
5461 } else {
5462@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5463 gap = (task_size / 6 * 5);
5464
5465 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5466+
5467+#ifdef CONFIG_PAX_RANDMMAP
5468+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5469+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5470+#endif
5471+
5472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5473 mm->unmap_area = arch_unmap_area_topdown;
5474 }
5475diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5476index 591f20c..0f1b925 100644
5477--- a/arch/sparc/kernel/traps_32.c
5478+++ b/arch/sparc/kernel/traps_32.c
5479@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5480 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5481 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5482
5483+extern void gr_handle_kernel_exploit(void);
5484+
5485 void die_if_kernel(char *str, struct pt_regs *regs)
5486 {
5487 static int die_counter;
5488@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5489 count++ < 30 &&
5490 (((unsigned long) rw) >= PAGE_OFFSET) &&
5491 !(((unsigned long) rw) & 0x7)) {
5492- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5493+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5494 (void *) rw->ins[7]);
5495 rw = (struct reg_window32 *)rw->ins[6];
5496 }
5497 }
5498 printk("Instruction DUMP:");
5499 instruction_dump ((unsigned long *) regs->pc);
5500- if(regs->psr & PSR_PS)
5501+ if(regs->psr & PSR_PS) {
5502+ gr_handle_kernel_exploit();
5503 do_exit(SIGKILL);
5504+ }
5505 do_exit(SIGSEGV);
5506 }
5507
5508diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5509index 0cbdaa4..438e4c9 100644
5510--- a/arch/sparc/kernel/traps_64.c
5511+++ b/arch/sparc/kernel/traps_64.c
5512@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5513 i + 1,
5514 p->trapstack[i].tstate, p->trapstack[i].tpc,
5515 p->trapstack[i].tnpc, p->trapstack[i].tt);
5516- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5517+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5518 }
5519 }
5520
5521@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5522
5523 lvl -= 0x100;
5524 if (regs->tstate & TSTATE_PRIV) {
5525+
5526+#ifdef CONFIG_PAX_REFCOUNT
5527+ if (lvl == 6)
5528+ pax_report_refcount_overflow(regs);
5529+#endif
5530+
5531 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5532 die_if_kernel(buffer, regs);
5533 }
5534@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5535 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5536 {
5537 char buffer[32];
5538-
5539+
5540 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5541 0, lvl, SIGTRAP) == NOTIFY_STOP)
5542 return;
5543
5544+#ifdef CONFIG_PAX_REFCOUNT
5545+ if (lvl == 6)
5546+ pax_report_refcount_overflow(regs);
5547+#endif
5548+
5549 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5550
5551 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5552@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5553 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5554 printk("%s" "ERROR(%d): ",
5555 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5556- printk("TPC<%pS>\n", (void *) regs->tpc);
5557+ printk("TPC<%pA>\n", (void *) regs->tpc);
5558 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5559 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5560 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5561@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5562 smp_processor_id(),
5563 (type & 0x1) ? 'I' : 'D',
5564 regs->tpc);
5565- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5566+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5567 panic("Irrecoverable Cheetah+ parity error.");
5568 }
5569
5570@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5571 smp_processor_id(),
5572 (type & 0x1) ? 'I' : 'D',
5573 regs->tpc);
5574- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5575+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5576 }
5577
5578 struct sun4v_error_entry {
5579@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5580
5581 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5582 regs->tpc, tl);
5583- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5584+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5585 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5586- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5587+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5588 (void *) regs->u_regs[UREG_I7]);
5589 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5590 "pte[%lx] error[%lx]\n",
5591@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5592
5593 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5594 regs->tpc, tl);
5595- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5596+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5597 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5598- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5599+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5600 (void *) regs->u_regs[UREG_I7]);
5601 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5602 "pte[%lx] error[%lx]\n",
5603@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5604 fp = (unsigned long)sf->fp + STACK_BIAS;
5605 }
5606
5607- printk(" [%016lx] %pS\n", pc, (void *) pc);
5608+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5609 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5610 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5611 int index = tsk->curr_ret_stack;
5612 if (tsk->ret_stack && index >= graph) {
5613 pc = tsk->ret_stack[index - graph].ret;
5614- printk(" [%016lx] %pS\n", pc, (void *) pc);
5615+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5616 graph++;
5617 }
5618 }
5619@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5620 return (struct reg_window *) (fp + STACK_BIAS);
5621 }
5622
5623+extern void gr_handle_kernel_exploit(void);
5624+
5625 void die_if_kernel(char *str, struct pt_regs *regs)
5626 {
5627 static int die_counter;
5628@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5629 while (rw &&
5630 count++ < 30 &&
5631 kstack_valid(tp, (unsigned long) rw)) {
5632- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5633+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5634 (void *) rw->ins[7]);
5635
5636 rw = kernel_stack_up(rw);
5637@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5638 }
5639 user_instruction_dump ((unsigned int __user *) regs->tpc);
5640 }
5641- if (regs->tstate & TSTATE_PRIV)
5642+ if (regs->tstate & TSTATE_PRIV) {
5643+ gr_handle_kernel_exploit();
5644 do_exit(SIGKILL);
5645+ }
5646 do_exit(SIGSEGV);
5647 }
5648 EXPORT_SYMBOL(die_if_kernel);
5649diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5650index 76e4ac1..78f8bb1 100644
5651--- a/arch/sparc/kernel/unaligned_64.c
5652+++ b/arch/sparc/kernel/unaligned_64.c
5653@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5654 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5655
5656 if (__ratelimit(&ratelimit)) {
5657- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5658+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5659 regs->tpc, (void *) regs->tpc);
5660 }
5661 }
5662diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5663index a3fc437..fea9957 100644
5664--- a/arch/sparc/lib/Makefile
5665+++ b/arch/sparc/lib/Makefile
5666@@ -2,7 +2,7 @@
5667 #
5668
5669 asflags-y := -ansi -DST_DIV0=0x02
5670-ccflags-y := -Werror
5671+#ccflags-y := -Werror
5672
5673 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5674 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5675diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5676index 59186e0..f747d7a 100644
5677--- a/arch/sparc/lib/atomic_64.S
5678+++ b/arch/sparc/lib/atomic_64.S
5679@@ -18,7 +18,12 @@
5680 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683- add %g1, %o0, %g7
5684+ addcc %g1, %o0, %g7
5685+
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+ tvs %icc, 6
5688+#endif
5689+
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5693@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_add, .-atomic_add
5696
5697+ .globl atomic_add_unchecked
5698+ .type atomic_add_unchecked,#function
5699+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5700+ BACKOFF_SETUP(%o2)
5701+1: lduw [%o1], %g1
5702+ add %g1, %o0, %g7
5703+ cas [%o1], %g1, %g7
5704+ cmp %g1, %g7
5705+ bne,pn %icc, 2f
5706+ nop
5707+ retl
5708+ nop
5709+2: BACKOFF_SPIN(%o2, %o3, 1b)
5710+ .size atomic_add_unchecked, .-atomic_add_unchecked
5711+
5712 .globl atomic_sub
5713 .type atomic_sub,#function
5714 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717- sub %g1, %o0, %g7
5718+ subcc %g1, %o0, %g7
5719+
5720+#ifdef CONFIG_PAX_REFCOUNT
5721+ tvs %icc, 6
5722+#endif
5723+
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5727@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_sub, .-atomic_sub
5730
5731+ .globl atomic_sub_unchecked
5732+ .type atomic_sub_unchecked,#function
5733+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5734+ BACKOFF_SETUP(%o2)
5735+1: lduw [%o1], %g1
5736+ sub %g1, %o0, %g7
5737+ cas [%o1], %g1, %g7
5738+ cmp %g1, %g7
5739+ bne,pn %icc, 2f
5740+ nop
5741+ retl
5742+ nop
5743+2: BACKOFF_SPIN(%o2, %o3, 1b)
5744+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5745+
5746 .globl atomic_add_ret
5747 .type atomic_add_ret,#function
5748 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5749 BACKOFF_SETUP(%o2)
5750 1: lduw [%o1], %g1
5751- add %g1, %o0, %g7
5752+ addcc %g1, %o0, %g7
5753+
5754+#ifdef CONFIG_PAX_REFCOUNT
5755+ tvs %icc, 6
5756+#endif
5757+
5758 cas [%o1], %g1, %g7
5759 cmp %g1, %g7
5760 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5761@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5762 2: BACKOFF_SPIN(%o2, %o3, 1b)
5763 .size atomic_add_ret, .-atomic_add_ret
5764
5765+ .globl atomic_add_ret_unchecked
5766+ .type atomic_add_ret_unchecked,#function
5767+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5768+ BACKOFF_SETUP(%o2)
5769+1: lduw [%o1], %g1
5770+ addcc %g1, %o0, %g7
5771+ cas [%o1], %g1, %g7
5772+ cmp %g1, %g7
5773+ bne,pn %icc, 2f
5774+ add %g7, %o0, %g7
5775+ sra %g7, 0, %o0
5776+ retl
5777+ nop
5778+2: BACKOFF_SPIN(%o2, %o3, 1b)
5779+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5780+
5781 .globl atomic_sub_ret
5782 .type atomic_sub_ret,#function
5783 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5784 BACKOFF_SETUP(%o2)
5785 1: lduw [%o1], %g1
5786- sub %g1, %o0, %g7
5787+ subcc %g1, %o0, %g7
5788+
5789+#ifdef CONFIG_PAX_REFCOUNT
5790+ tvs %icc, 6
5791+#endif
5792+
5793 cas [%o1], %g1, %g7
5794 cmp %g1, %g7
5795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5796@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5797 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800- add %g1, %o0, %g7
5801+ addcc %g1, %o0, %g7
5802+
5803+#ifdef CONFIG_PAX_REFCOUNT
5804+ tvs %xcc, 6
5805+#endif
5806+
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5810@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_add, .-atomic64_add
5813
5814+ .globl atomic64_add_unchecked
5815+ .type atomic64_add_unchecked,#function
5816+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5817+ BACKOFF_SETUP(%o2)
5818+1: ldx [%o1], %g1
5819+ addcc %g1, %o0, %g7
5820+ casx [%o1], %g1, %g7
5821+ cmp %g1, %g7
5822+ bne,pn %xcc, 2f
5823+ nop
5824+ retl
5825+ nop
5826+2: BACKOFF_SPIN(%o2, %o3, 1b)
5827+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5828+
5829 .globl atomic64_sub
5830 .type atomic64_sub,#function
5831 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834- sub %g1, %o0, %g7
5835+ subcc %g1, %o0, %g7
5836+
5837+#ifdef CONFIG_PAX_REFCOUNT
5838+ tvs %xcc, 6
5839+#endif
5840+
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5844@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_sub, .-atomic64_sub
5847
5848+ .globl atomic64_sub_unchecked
5849+ .type atomic64_sub_unchecked,#function
5850+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5851+ BACKOFF_SETUP(%o2)
5852+1: ldx [%o1], %g1
5853+ subcc %g1, %o0, %g7
5854+ casx [%o1], %g1, %g7
5855+ cmp %g1, %g7
5856+ bne,pn %xcc, 2f
5857+ nop
5858+ retl
5859+ nop
5860+2: BACKOFF_SPIN(%o2, %o3, 1b)
5861+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5862+
5863 .globl atomic64_add_ret
5864 .type atomic64_add_ret,#function
5865 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5866 BACKOFF_SETUP(%o2)
5867 1: ldx [%o1], %g1
5868- add %g1, %o0, %g7
5869+ addcc %g1, %o0, %g7
5870+
5871+#ifdef CONFIG_PAX_REFCOUNT
5872+ tvs %xcc, 6
5873+#endif
5874+
5875 casx [%o1], %g1, %g7
5876 cmp %g1, %g7
5877 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5878@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5879 2: BACKOFF_SPIN(%o2, %o3, 1b)
5880 .size atomic64_add_ret, .-atomic64_add_ret
5881
5882+ .globl atomic64_add_ret_unchecked
5883+ .type atomic64_add_ret_unchecked,#function
5884+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5885+ BACKOFF_SETUP(%o2)
5886+1: ldx [%o1], %g1
5887+ addcc %g1, %o0, %g7
5888+ casx [%o1], %g1, %g7
5889+ cmp %g1, %g7
5890+ bne,pn %xcc, 2f
5891+ add %g7, %o0, %g7
5892+ mov %g7, %o0
5893+ retl
5894+ nop
5895+2: BACKOFF_SPIN(%o2, %o3, 1b)
5896+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5897+
5898 .globl atomic64_sub_ret
5899 .type atomic64_sub_ret,#function
5900 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5901 BACKOFF_SETUP(%o2)
5902 1: ldx [%o1], %g1
5903- sub %g1, %o0, %g7
5904+ subcc %g1, %o0, %g7
5905+
5906+#ifdef CONFIG_PAX_REFCOUNT
5907+ tvs %xcc, 6
5908+#endif
5909+
5910 casx [%o1], %g1, %g7
5911 cmp %g1, %g7
5912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5913diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5914index f73c224..662af10 100644
5915--- a/arch/sparc/lib/ksyms.c
5916+++ b/arch/sparc/lib/ksyms.c
5917@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
5918
5919 /* Atomic counter implementation. */
5920 EXPORT_SYMBOL(atomic_add);
5921+EXPORT_SYMBOL(atomic_add_unchecked);
5922 EXPORT_SYMBOL(atomic_add_ret);
5923+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5924 EXPORT_SYMBOL(atomic_sub);
5925+EXPORT_SYMBOL(atomic_sub_unchecked);
5926 EXPORT_SYMBOL(atomic_sub_ret);
5927 EXPORT_SYMBOL(atomic64_add);
5928+EXPORT_SYMBOL(atomic64_add_unchecked);
5929 EXPORT_SYMBOL(atomic64_add_ret);
5930+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5931 EXPORT_SYMBOL(atomic64_sub);
5932+EXPORT_SYMBOL(atomic64_sub_unchecked);
5933 EXPORT_SYMBOL(atomic64_sub_ret);
5934
5935 /* Atomic bit operations. */
5936diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5937index 301421c..e2535d1 100644
5938--- a/arch/sparc/mm/Makefile
5939+++ b/arch/sparc/mm/Makefile
5940@@ -2,7 +2,7 @@
5941 #
5942
5943 asflags-y := -ansi
5944-ccflags-y := -Werror
5945+#ccflags-y := -Werror
5946
5947 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5948 obj-y += fault_$(BITS).o
5949diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5950index 8023fd7..c8e89e9 100644
5951--- a/arch/sparc/mm/fault_32.c
5952+++ b/arch/sparc/mm/fault_32.c
5953@@ -21,6 +21,9 @@
5954 #include <linux/perf_event.h>
5955 #include <linux/interrupt.h>
5956 #include <linux/kdebug.h>
5957+#include <linux/slab.h>
5958+#include <linux/pagemap.h>
5959+#include <linux/compiler.h>
5960
5961 #include <asm/system.h>
5962 #include <asm/page.h>
5963@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5964 return safe_compute_effective_address(regs, insn);
5965 }
5966
5967+#ifdef CONFIG_PAX_PAGEEXEC
5968+#ifdef CONFIG_PAX_DLRESOLVE
5969+static void pax_emuplt_close(struct vm_area_struct *vma)
5970+{
5971+ vma->vm_mm->call_dl_resolve = 0UL;
5972+}
5973+
5974+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5975+{
5976+ unsigned int *kaddr;
5977+
5978+ vmf->page = alloc_page(GFP_HIGHUSER);
5979+ if (!vmf->page)
5980+ return VM_FAULT_OOM;
5981+
5982+ kaddr = kmap(vmf->page);
5983+ memset(kaddr, 0, PAGE_SIZE);
5984+ kaddr[0] = 0x9DE3BFA8U; /* save */
5985+ flush_dcache_page(vmf->page);
5986+ kunmap(vmf->page);
5987+ return VM_FAULT_MAJOR;
5988+}
5989+
5990+static const struct vm_operations_struct pax_vm_ops = {
5991+ .close = pax_emuplt_close,
5992+ .fault = pax_emuplt_fault
5993+};
5994+
5995+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5996+{
5997+ int ret;
5998+
5999+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6000+ vma->vm_mm = current->mm;
6001+ vma->vm_start = addr;
6002+ vma->vm_end = addr + PAGE_SIZE;
6003+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6004+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6005+ vma->vm_ops = &pax_vm_ops;
6006+
6007+ ret = insert_vm_struct(current->mm, vma);
6008+ if (ret)
6009+ return ret;
6010+
6011+ ++current->mm->total_vm;
6012+ return 0;
6013+}
6014+#endif
6015+
6016+/*
6017+ * PaX: decide what to do with offenders (regs->pc = fault address)
6018+ *
6019+ * returns 1 when task should be killed
6020+ * 2 when patched PLT trampoline was detected
6021+ * 3 when unpatched PLT trampoline was detected
6022+ */
6023+static int pax_handle_fetch_fault(struct pt_regs *regs)
6024+{
6025+
6026+#ifdef CONFIG_PAX_EMUPLT
6027+ int err;
6028+
6029+ do { /* PaX: patched PLT emulation #1 */
6030+ unsigned int sethi1, sethi2, jmpl;
6031+
6032+ err = get_user(sethi1, (unsigned int *)regs->pc);
6033+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6034+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6035+
6036+ if (err)
6037+ break;
6038+
6039+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6040+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6041+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6042+ {
6043+ unsigned int addr;
6044+
6045+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6046+ addr = regs->u_regs[UREG_G1];
6047+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6048+ regs->pc = addr;
6049+ regs->npc = addr+4;
6050+ return 2;
6051+ }
6052+ } while (0);
6053+
6054+ { /* PaX: patched PLT emulation #2 */
6055+ unsigned int ba;
6056+
6057+ err = get_user(ba, (unsigned int *)regs->pc);
6058+
6059+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6060+ unsigned int addr;
6061+
6062+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6063+ regs->pc = addr;
6064+ regs->npc = addr+4;
6065+ return 2;
6066+ }
6067+ }
6068+
6069+ do { /* PaX: patched PLT emulation #3 */
6070+ unsigned int sethi, jmpl, nop;
6071+
6072+ err = get_user(sethi, (unsigned int *)regs->pc);
6073+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6074+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6075+
6076+ if (err)
6077+ break;
6078+
6079+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6080+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6081+ nop == 0x01000000U)
6082+ {
6083+ unsigned int addr;
6084+
6085+ addr = (sethi & 0x003FFFFFU) << 10;
6086+ regs->u_regs[UREG_G1] = addr;
6087+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6088+ regs->pc = addr;
6089+ regs->npc = addr+4;
6090+ return 2;
6091+ }
6092+ } while (0);
6093+
6094+ do { /* PaX: unpatched PLT emulation step 1 */
6095+ unsigned int sethi, ba, nop;
6096+
6097+ err = get_user(sethi, (unsigned int *)regs->pc);
6098+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6099+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6100+
6101+ if (err)
6102+ break;
6103+
6104+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6105+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6106+ nop == 0x01000000U)
6107+ {
6108+ unsigned int addr, save, call;
6109+
6110+ if ((ba & 0xFFC00000U) == 0x30800000U)
6111+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6112+ else
6113+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6114+
6115+ err = get_user(save, (unsigned int *)addr);
6116+ err |= get_user(call, (unsigned int *)(addr+4));
6117+ err |= get_user(nop, (unsigned int *)(addr+8));
6118+ if (err)
6119+ break;
6120+
6121+#ifdef CONFIG_PAX_DLRESOLVE
6122+ if (save == 0x9DE3BFA8U &&
6123+ (call & 0xC0000000U) == 0x40000000U &&
6124+ nop == 0x01000000U)
6125+ {
6126+ struct vm_area_struct *vma;
6127+ unsigned long call_dl_resolve;
6128+
6129+ down_read(&current->mm->mmap_sem);
6130+ call_dl_resolve = current->mm->call_dl_resolve;
6131+ up_read(&current->mm->mmap_sem);
6132+ if (likely(call_dl_resolve))
6133+ goto emulate;
6134+
6135+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6136+
6137+ down_write(&current->mm->mmap_sem);
6138+ if (current->mm->call_dl_resolve) {
6139+ call_dl_resolve = current->mm->call_dl_resolve;
6140+ up_write(&current->mm->mmap_sem);
6141+ if (vma)
6142+ kmem_cache_free(vm_area_cachep, vma);
6143+ goto emulate;
6144+ }
6145+
6146+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6147+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6148+ up_write(&current->mm->mmap_sem);
6149+ if (vma)
6150+ kmem_cache_free(vm_area_cachep, vma);
6151+ return 1;
6152+ }
6153+
6154+ if (pax_insert_vma(vma, call_dl_resolve)) {
6155+ up_write(&current->mm->mmap_sem);
6156+ kmem_cache_free(vm_area_cachep, vma);
6157+ return 1;
6158+ }
6159+
6160+ current->mm->call_dl_resolve = call_dl_resolve;
6161+ up_write(&current->mm->mmap_sem);
6162+
6163+emulate:
6164+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6165+ regs->pc = call_dl_resolve;
6166+ regs->npc = addr+4;
6167+ return 3;
6168+ }
6169+#endif
6170+
6171+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6172+ if ((save & 0xFFC00000U) == 0x05000000U &&
6173+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6174+ nop == 0x01000000U)
6175+ {
6176+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6177+ regs->u_regs[UREG_G2] = addr + 4;
6178+ addr = (save & 0x003FFFFFU) << 10;
6179+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6180+ regs->pc = addr;
6181+ regs->npc = addr+4;
6182+ return 3;
6183+ }
6184+ }
6185+ } while (0);
6186+
6187+ do { /* PaX: unpatched PLT emulation step 2 */
6188+ unsigned int save, call, nop;
6189+
6190+ err = get_user(save, (unsigned int *)(regs->pc-4));
6191+ err |= get_user(call, (unsigned int *)regs->pc);
6192+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6193+ if (err)
6194+ break;
6195+
6196+ if (save == 0x9DE3BFA8U &&
6197+ (call & 0xC0000000U) == 0x40000000U &&
6198+ nop == 0x01000000U)
6199+ {
6200+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6201+
6202+ regs->u_regs[UREG_RETPC] = regs->pc;
6203+ regs->pc = dl_resolve;
6204+ regs->npc = dl_resolve+4;
6205+ return 3;
6206+ }
6207+ } while (0);
6208+#endif
6209+
6210+ return 1;
6211+}
6212+
6213+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6214+{
6215+ unsigned long i;
6216+
6217+ printk(KERN_ERR "PAX: bytes at PC: ");
6218+ for (i = 0; i < 8; i++) {
6219+ unsigned int c;
6220+ if (get_user(c, (unsigned int *)pc+i))
6221+ printk(KERN_CONT "???????? ");
6222+ else
6223+ printk(KERN_CONT "%08x ", c);
6224+ }
6225+ printk("\n");
6226+}
6227+#endif
6228+
6229 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6230 int text_fault)
6231 {
6232@@ -280,6 +545,24 @@ good_area:
6233 if(!(vma->vm_flags & VM_WRITE))
6234 goto bad_area;
6235 } else {
6236+
6237+#ifdef CONFIG_PAX_PAGEEXEC
6238+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6239+ up_read(&mm->mmap_sem);
6240+ switch (pax_handle_fetch_fault(regs)) {
6241+
6242+#ifdef CONFIG_PAX_EMUPLT
6243+ case 2:
6244+ case 3:
6245+ return;
6246+#endif
6247+
6248+ }
6249+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6250+ do_group_exit(SIGKILL);
6251+ }
6252+#endif
6253+
6254 /* Allow reads even for write-only mappings */
6255 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6256 goto bad_area;
6257diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6258index 504c062..6fcb9c6 100644
6259--- a/arch/sparc/mm/fault_64.c
6260+++ b/arch/sparc/mm/fault_64.c
6261@@ -21,6 +21,9 @@
6262 #include <linux/kprobes.h>
6263 #include <linux/kdebug.h>
6264 #include <linux/percpu.h>
6265+#include <linux/slab.h>
6266+#include <linux/pagemap.h>
6267+#include <linux/compiler.h>
6268
6269 #include <asm/page.h>
6270 #include <asm/pgtable.h>
6271@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6272 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6273 regs->tpc);
6274 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6275- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6276+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6277 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6278 dump_stack();
6279 unhandled_fault(regs->tpc, current, regs);
6280@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6281 show_regs(regs);
6282 }
6283
6284+#ifdef CONFIG_PAX_PAGEEXEC
6285+#ifdef CONFIG_PAX_DLRESOLVE
6286+static void pax_emuplt_close(struct vm_area_struct *vma)
6287+{
6288+ vma->vm_mm->call_dl_resolve = 0UL;
6289+}
6290+
6291+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6292+{
6293+ unsigned int *kaddr;
6294+
6295+ vmf->page = alloc_page(GFP_HIGHUSER);
6296+ if (!vmf->page)
6297+ return VM_FAULT_OOM;
6298+
6299+ kaddr = kmap(vmf->page);
6300+ memset(kaddr, 0, PAGE_SIZE);
6301+ kaddr[0] = 0x9DE3BFA8U; /* save */
6302+ flush_dcache_page(vmf->page);
6303+ kunmap(vmf->page);
6304+ return VM_FAULT_MAJOR;
6305+}
6306+
6307+static const struct vm_operations_struct pax_vm_ops = {
6308+ .close = pax_emuplt_close,
6309+ .fault = pax_emuplt_fault
6310+};
6311+
6312+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6313+{
6314+ int ret;
6315+
6316+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6317+ vma->vm_mm = current->mm;
6318+ vma->vm_start = addr;
6319+ vma->vm_end = addr + PAGE_SIZE;
6320+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6321+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6322+ vma->vm_ops = &pax_vm_ops;
6323+
6324+ ret = insert_vm_struct(current->mm, vma);
6325+ if (ret)
6326+ return ret;
6327+
6328+ ++current->mm->total_vm;
6329+ return 0;
6330+}
6331+#endif
6332+
6333+/*
6334+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6335+ *
6336+ * returns 1 when task should be killed
6337+ * 2 when patched PLT trampoline was detected
6338+ * 3 when unpatched PLT trampoline was detected
6339+ */
6340+static int pax_handle_fetch_fault(struct pt_regs *regs)
6341+{
6342+
6343+#ifdef CONFIG_PAX_EMUPLT
6344+ int err;
6345+
6346+ do { /* PaX: patched PLT emulation #1 */
6347+ unsigned int sethi1, sethi2, jmpl;
6348+
6349+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6350+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6351+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6352+
6353+ if (err)
6354+ break;
6355+
6356+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6357+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6358+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6359+ {
6360+ unsigned long addr;
6361+
6362+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6363+ addr = regs->u_regs[UREG_G1];
6364+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6365+
6366+ if (test_thread_flag(TIF_32BIT))
6367+ addr &= 0xFFFFFFFFUL;
6368+
6369+ regs->tpc = addr;
6370+ regs->tnpc = addr+4;
6371+ return 2;
6372+ }
6373+ } while (0);
6374+
6375+ { /* PaX: patched PLT emulation #2 */
6376+ unsigned int ba;
6377+
6378+ err = get_user(ba, (unsigned int *)regs->tpc);
6379+
6380+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6381+ unsigned long addr;
6382+
6383+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384+
6385+ if (test_thread_flag(TIF_32BIT))
6386+ addr &= 0xFFFFFFFFUL;
6387+
6388+ regs->tpc = addr;
6389+ regs->tnpc = addr+4;
6390+ return 2;
6391+ }
6392+ }
6393+
6394+ do { /* PaX: patched PLT emulation #3 */
6395+ unsigned int sethi, jmpl, nop;
6396+
6397+ err = get_user(sethi, (unsigned int *)regs->tpc);
6398+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6399+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6400+
6401+ if (err)
6402+ break;
6403+
6404+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6405+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6406+ nop == 0x01000000U)
6407+ {
6408+ unsigned long addr;
6409+
6410+ addr = (sethi & 0x003FFFFFU) << 10;
6411+ regs->u_regs[UREG_G1] = addr;
6412+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6413+
6414+ if (test_thread_flag(TIF_32BIT))
6415+ addr &= 0xFFFFFFFFUL;
6416+
6417+ regs->tpc = addr;
6418+ regs->tnpc = addr+4;
6419+ return 2;
6420+ }
6421+ } while (0);
6422+
6423+ do { /* PaX: patched PLT emulation #4 */
6424+ unsigned int sethi, mov1, call, mov2;
6425+
6426+ err = get_user(sethi, (unsigned int *)regs->tpc);
6427+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6428+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6429+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6430+
6431+ if (err)
6432+ break;
6433+
6434+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6435+ mov1 == 0x8210000FU &&
6436+ (call & 0xC0000000U) == 0x40000000U &&
6437+ mov2 == 0x9E100001U)
6438+ {
6439+ unsigned long addr;
6440+
6441+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6442+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6443+
6444+ if (test_thread_flag(TIF_32BIT))
6445+ addr &= 0xFFFFFFFFUL;
6446+
6447+ regs->tpc = addr;
6448+ regs->tnpc = addr+4;
6449+ return 2;
6450+ }
6451+ } while (0);
6452+
6453+ do { /* PaX: patched PLT emulation #5 */
6454+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6455+
6456+ err = get_user(sethi, (unsigned int *)regs->tpc);
6457+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6458+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6459+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6460+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6461+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6462+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6463+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6464+
6465+ if (err)
6466+ break;
6467+
6468+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6469+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6470+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6471+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6472+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6473+ sllx == 0x83287020U &&
6474+ jmpl == 0x81C04005U &&
6475+ nop == 0x01000000U)
6476+ {
6477+ unsigned long addr;
6478+
6479+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6480+ regs->u_regs[UREG_G1] <<= 32;
6481+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6482+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6483+ regs->tpc = addr;
6484+ regs->tnpc = addr+4;
6485+ return 2;
6486+ }
6487+ } while (0);
6488+
6489+ do { /* PaX: patched PLT emulation #6 */
6490+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6491+
6492+ err = get_user(sethi, (unsigned int *)regs->tpc);
6493+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6494+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6495+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6496+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6497+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6498+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6499+
6500+ if (err)
6501+ break;
6502+
6503+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6504+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6505+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6506+ sllx == 0x83287020U &&
6507+ (or & 0xFFFFE000U) == 0x8A116000U &&
6508+ jmpl == 0x81C04005U &&
6509+ nop == 0x01000000U)
6510+ {
6511+ unsigned long addr;
6512+
6513+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6514+ regs->u_regs[UREG_G1] <<= 32;
6515+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6516+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6517+ regs->tpc = addr;
6518+ regs->tnpc = addr+4;
6519+ return 2;
6520+ }
6521+ } while (0);
6522+
6523+ do { /* PaX: unpatched PLT emulation step 1 */
6524+ unsigned int sethi, ba, nop;
6525+
6526+ err = get_user(sethi, (unsigned int *)regs->tpc);
6527+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6528+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6529+
6530+ if (err)
6531+ break;
6532+
6533+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6534+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6535+ nop == 0x01000000U)
6536+ {
6537+ unsigned long addr;
6538+ unsigned int save, call;
6539+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6540+
6541+ if ((ba & 0xFFC00000U) == 0x30800000U)
6542+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6543+ else
6544+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545+
6546+ if (test_thread_flag(TIF_32BIT))
6547+ addr &= 0xFFFFFFFFUL;
6548+
6549+ err = get_user(save, (unsigned int *)addr);
6550+ err |= get_user(call, (unsigned int *)(addr+4));
6551+ err |= get_user(nop, (unsigned int *)(addr+8));
6552+ if (err)
6553+ break;
6554+
6555+#ifdef CONFIG_PAX_DLRESOLVE
6556+ if (save == 0x9DE3BFA8U &&
6557+ (call & 0xC0000000U) == 0x40000000U &&
6558+ nop == 0x01000000U)
6559+ {
6560+ struct vm_area_struct *vma;
6561+ unsigned long call_dl_resolve;
6562+
6563+ down_read(&current->mm->mmap_sem);
6564+ call_dl_resolve = current->mm->call_dl_resolve;
6565+ up_read(&current->mm->mmap_sem);
6566+ if (likely(call_dl_resolve))
6567+ goto emulate;
6568+
6569+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6570+
6571+ down_write(&current->mm->mmap_sem);
6572+ if (current->mm->call_dl_resolve) {
6573+ call_dl_resolve = current->mm->call_dl_resolve;
6574+ up_write(&current->mm->mmap_sem);
6575+ if (vma)
6576+ kmem_cache_free(vm_area_cachep, vma);
6577+ goto emulate;
6578+ }
6579+
6580+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6581+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6582+ up_write(&current->mm->mmap_sem);
6583+ if (vma)
6584+ kmem_cache_free(vm_area_cachep, vma);
6585+ return 1;
6586+ }
6587+
6588+ if (pax_insert_vma(vma, call_dl_resolve)) {
6589+ up_write(&current->mm->mmap_sem);
6590+ kmem_cache_free(vm_area_cachep, vma);
6591+ return 1;
6592+ }
6593+
6594+ current->mm->call_dl_resolve = call_dl_resolve;
6595+ up_write(&current->mm->mmap_sem);
6596+
6597+emulate:
6598+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6599+ regs->tpc = call_dl_resolve;
6600+ regs->tnpc = addr+4;
6601+ return 3;
6602+ }
6603+#endif
6604+
6605+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6606+ if ((save & 0xFFC00000U) == 0x05000000U &&
6607+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6608+ nop == 0x01000000U)
6609+ {
6610+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6611+ regs->u_regs[UREG_G2] = addr + 4;
6612+ addr = (save & 0x003FFFFFU) << 10;
6613+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6614+
6615+ if (test_thread_flag(TIF_32BIT))
6616+ addr &= 0xFFFFFFFFUL;
6617+
6618+ regs->tpc = addr;
6619+ regs->tnpc = addr+4;
6620+ return 3;
6621+ }
6622+
6623+ /* PaX: 64-bit PLT stub */
6624+ err = get_user(sethi1, (unsigned int *)addr);
6625+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6626+ err |= get_user(or1, (unsigned int *)(addr+8));
6627+ err |= get_user(or2, (unsigned int *)(addr+12));
6628+ err |= get_user(sllx, (unsigned int *)(addr+16));
6629+ err |= get_user(add, (unsigned int *)(addr+20));
6630+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6631+ err |= get_user(nop, (unsigned int *)(addr+28));
6632+ if (err)
6633+ break;
6634+
6635+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6636+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6637+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6638+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6639+ sllx == 0x89293020U &&
6640+ add == 0x8A010005U &&
6641+ jmpl == 0x89C14000U &&
6642+ nop == 0x01000000U)
6643+ {
6644+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6645+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6646+ regs->u_regs[UREG_G4] <<= 32;
6647+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6648+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6649+ regs->u_regs[UREG_G4] = addr + 24;
6650+ addr = regs->u_regs[UREG_G5];
6651+ regs->tpc = addr;
6652+ regs->tnpc = addr+4;
6653+ return 3;
6654+ }
6655+ }
6656+ } while (0);
6657+
6658+#ifdef CONFIG_PAX_DLRESOLVE
6659+ do { /* PaX: unpatched PLT emulation step 2 */
6660+ unsigned int save, call, nop;
6661+
6662+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6663+ err |= get_user(call, (unsigned int *)regs->tpc);
6664+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6665+ if (err)
6666+ break;
6667+
6668+ if (save == 0x9DE3BFA8U &&
6669+ (call & 0xC0000000U) == 0x40000000U &&
6670+ nop == 0x01000000U)
6671+ {
6672+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6673+
6674+ if (test_thread_flag(TIF_32BIT))
6675+ dl_resolve &= 0xFFFFFFFFUL;
6676+
6677+ regs->u_regs[UREG_RETPC] = regs->tpc;
6678+ regs->tpc = dl_resolve;
6679+ regs->tnpc = dl_resolve+4;
6680+ return 3;
6681+ }
6682+ } while (0);
6683+#endif
6684+
6685+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6686+ unsigned int sethi, ba, nop;
6687+
6688+ err = get_user(sethi, (unsigned int *)regs->tpc);
6689+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6690+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6691+
6692+ if (err)
6693+ break;
6694+
6695+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6696+ (ba & 0xFFF00000U) == 0x30600000U &&
6697+ nop == 0x01000000U)
6698+ {
6699+ unsigned long addr;
6700+
6701+ addr = (sethi & 0x003FFFFFU) << 10;
6702+ regs->u_regs[UREG_G1] = addr;
6703+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6704+
6705+ if (test_thread_flag(TIF_32BIT))
6706+ addr &= 0xFFFFFFFFUL;
6707+
6708+ regs->tpc = addr;
6709+ regs->tnpc = addr+4;
6710+ return 2;
6711+ }
6712+ } while (0);
6713+
6714+#endif
6715+
6716+ return 1;
6717+}
6718+
6719+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6720+{
6721+ unsigned long i;
6722+
6723+ printk(KERN_ERR "PAX: bytes at PC: ");
6724+ for (i = 0; i < 8; i++) {
6725+ unsigned int c;
6726+ if (get_user(c, (unsigned int *)pc+i))
6727+ printk(KERN_CONT "???????? ");
6728+ else
6729+ printk(KERN_CONT "%08x ", c);
6730+ }
6731+ printk("\n");
6732+}
6733+#endif
6734+
6735 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6736 {
6737 struct mm_struct *mm = current->mm;
6738@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6739 if (!vma)
6740 goto bad_area;
6741
6742+#ifdef CONFIG_PAX_PAGEEXEC
6743+ /* PaX: detect ITLB misses on non-exec pages */
6744+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6745+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6746+ {
6747+ if (address != regs->tpc)
6748+ goto good_area;
6749+
6750+ up_read(&mm->mmap_sem);
6751+ switch (pax_handle_fetch_fault(regs)) {
6752+
6753+#ifdef CONFIG_PAX_EMUPLT
6754+ case 2:
6755+ case 3:
6756+ return;
6757+#endif
6758+
6759+ }
6760+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6761+ do_group_exit(SIGKILL);
6762+ }
6763+#endif
6764+
6765 /* Pure DTLB misses do not tell us whether the fault causing
6766 * load/store/atomic was a write or not, it only says that there
6767 * was no match. So in such a case we (carefully) read the
6768diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6769index 07e1453..0a7d9e9 100644
6770--- a/arch/sparc/mm/hugetlbpage.c
6771+++ b/arch/sparc/mm/hugetlbpage.c
6772@@ -67,7 +67,7 @@ full_search:
6773 }
6774 return -ENOMEM;
6775 }
6776- if (likely(!vma || addr + len <= vma->vm_start)) {
6777+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6778 /*
6779 * Remember the place where we stopped the search:
6780 */
6781@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6782 /* make sure it can fit in the remaining address space */
6783 if (likely(addr > len)) {
6784 vma = find_vma(mm, addr-len);
6785- if (!vma || addr <= vma->vm_start) {
6786+ if (check_heap_stack_gap(vma, addr - len, len)) {
6787 /* remember the address as a hint for next time */
6788 return (mm->free_area_cache = addr-len);
6789 }
6790@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6791 if (unlikely(mm->mmap_base < len))
6792 goto bottomup;
6793
6794- addr = (mm->mmap_base-len) & HPAGE_MASK;
6795+ addr = mm->mmap_base - len;
6796
6797 do {
6798+ addr &= HPAGE_MASK;
6799 /*
6800 * Lookup failure means no vma is above this address,
6801 * else if new region fits below vma->vm_start,
6802 * return with success:
6803 */
6804 vma = find_vma(mm, addr);
6805- if (likely(!vma || addr+len <= vma->vm_start)) {
6806+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6807 /* remember the address as a hint for next time */
6808 return (mm->free_area_cache = addr);
6809 }
6810@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6811 mm->cached_hole_size = vma->vm_start - addr;
6812
6813 /* try just below the current vma->vm_start */
6814- addr = (vma->vm_start-len) & HPAGE_MASK;
6815- } while (likely(len < vma->vm_start));
6816+ addr = skip_heap_stack_gap(vma, len);
6817+ } while (!IS_ERR_VALUE(addr));
6818
6819 bottomup:
6820 /*
6821@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6822 if (addr) {
6823 addr = ALIGN(addr, HPAGE_SIZE);
6824 vma = find_vma(mm, addr);
6825- if (task_size - len >= addr &&
6826- (!vma || addr + len <= vma->vm_start))
6827+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6828 return addr;
6829 }
6830 if (mm->get_unmapped_area == arch_get_unmapped_area)
6831diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6832index 7b00de6..78239f4 100644
6833--- a/arch/sparc/mm/init_32.c
6834+++ b/arch/sparc/mm/init_32.c
6835@@ -316,6 +316,9 @@ extern void device_scan(void);
6836 pgprot_t PAGE_SHARED __read_mostly;
6837 EXPORT_SYMBOL(PAGE_SHARED);
6838
6839+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6840+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6841+
6842 void __init paging_init(void)
6843 {
6844 switch(sparc_cpu_model) {
6845@@ -344,17 +347,17 @@ void __init paging_init(void)
6846
6847 /* Initialize the protection map with non-constant, MMU dependent values. */
6848 protection_map[0] = PAGE_NONE;
6849- protection_map[1] = PAGE_READONLY;
6850- protection_map[2] = PAGE_COPY;
6851- protection_map[3] = PAGE_COPY;
6852+ protection_map[1] = PAGE_READONLY_NOEXEC;
6853+ protection_map[2] = PAGE_COPY_NOEXEC;
6854+ protection_map[3] = PAGE_COPY_NOEXEC;
6855 protection_map[4] = PAGE_READONLY;
6856 protection_map[5] = PAGE_READONLY;
6857 protection_map[6] = PAGE_COPY;
6858 protection_map[7] = PAGE_COPY;
6859 protection_map[8] = PAGE_NONE;
6860- protection_map[9] = PAGE_READONLY;
6861- protection_map[10] = PAGE_SHARED;
6862- protection_map[11] = PAGE_SHARED;
6863+ protection_map[9] = PAGE_READONLY_NOEXEC;
6864+ protection_map[10] = PAGE_SHARED_NOEXEC;
6865+ protection_map[11] = PAGE_SHARED_NOEXEC;
6866 protection_map[12] = PAGE_READONLY;
6867 protection_map[13] = PAGE_READONLY;
6868 protection_map[14] = PAGE_SHARED;
6869diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6870index cbef74e..c38fead 100644
6871--- a/arch/sparc/mm/srmmu.c
6872+++ b/arch/sparc/mm/srmmu.c
6873@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6874 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6875 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6876 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6877+
6878+#ifdef CONFIG_PAX_PAGEEXEC
6879+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6880+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6881+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6882+#endif
6883+
6884 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6885 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6886
6887diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6888index 27fe667..36d474c 100644
6889--- a/arch/tile/include/asm/atomic_64.h
6890+++ b/arch/tile/include/asm/atomic_64.h
6891@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6892
6893 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6894
6895+#define atomic64_read_unchecked(v) atomic64_read(v)
6896+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6897+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6898+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6899+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6900+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6901+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6902+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6903+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6904+
6905 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6906 #define smp_mb__before_atomic_dec() smp_mb()
6907 #define smp_mb__after_atomic_dec() smp_mb()
6908diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6909index 392e533..536b092 100644
6910--- a/arch/tile/include/asm/cache.h
6911+++ b/arch/tile/include/asm/cache.h
6912@@ -15,11 +15,12 @@
6913 #ifndef _ASM_TILE_CACHE_H
6914 #define _ASM_TILE_CACHE_H
6915
6916+#include <linux/const.h>
6917 #include <arch/chip.h>
6918
6919 /* bytes per L1 data cache line */
6920 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6921-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6922+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6923
6924 /* bytes per L2 cache line */
6925 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6926diff --git a/arch/um/Makefile b/arch/um/Makefile
6927index 28688e6..4c0aa1c 100644
6928--- a/arch/um/Makefile
6929+++ b/arch/um/Makefile
6930@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6931 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6932 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6933
6934+ifdef CONSTIFY_PLUGIN
6935+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6936+endif
6937+
6938 #This will adjust *FLAGS accordingly to the platform.
6939 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6940
6941diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6942index 19e1bdd..3665b77 100644
6943--- a/arch/um/include/asm/cache.h
6944+++ b/arch/um/include/asm/cache.h
6945@@ -1,6 +1,7 @@
6946 #ifndef __UM_CACHE_H
6947 #define __UM_CACHE_H
6948
6949+#include <linux/const.h>
6950
6951 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6952 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6953@@ -12,6 +13,6 @@
6954 # define L1_CACHE_SHIFT 5
6955 #endif
6956
6957-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6958+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6959
6960 #endif
6961diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6962index 6c03acd..a5e0215 100644
6963--- a/arch/um/include/asm/kmap_types.h
6964+++ b/arch/um/include/asm/kmap_types.h
6965@@ -23,6 +23,7 @@ enum km_type {
6966 KM_IRQ1,
6967 KM_SOFTIRQ0,
6968 KM_SOFTIRQ1,
6969+ KM_CLEARPAGE,
6970 KM_TYPE_NR
6971 };
6972
6973diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6974index 7cfc3ce..cbd1a58 100644
6975--- a/arch/um/include/asm/page.h
6976+++ b/arch/um/include/asm/page.h
6977@@ -14,6 +14,9 @@
6978 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6979 #define PAGE_MASK (~(PAGE_SIZE-1))
6980
6981+#define ktla_ktva(addr) (addr)
6982+#define ktva_ktla(addr) (addr)
6983+
6984 #ifndef __ASSEMBLY__
6985
6986 struct page;
6987diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6988index 69f2490..2634831 100644
6989--- a/arch/um/kernel/process.c
6990+++ b/arch/um/kernel/process.c
6991@@ -408,22 +408,6 @@ int singlestepping(void * t)
6992 return 2;
6993 }
6994
6995-/*
6996- * Only x86 and x86_64 have an arch_align_stack().
6997- * All other arches have "#define arch_align_stack(x) (x)"
6998- * in their asm/system.h
6999- * As this is included in UML from asm-um/system-generic.h,
7000- * we can use it to behave as the subarch does.
7001- */
7002-#ifndef arch_align_stack
7003-unsigned long arch_align_stack(unsigned long sp)
7004-{
7005- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7006- sp -= get_random_int() % 8192;
7007- return sp & ~0xf;
7008-}
7009-#endif
7010-
7011 unsigned long get_wchan(struct task_struct *p)
7012 {
7013 unsigned long stack_page, sp, ip;
7014diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7015index ad8f795..2c7eec6 100644
7016--- a/arch/unicore32/include/asm/cache.h
7017+++ b/arch/unicore32/include/asm/cache.h
7018@@ -12,8 +12,10 @@
7019 #ifndef __UNICORE_CACHE_H__
7020 #define __UNICORE_CACHE_H__
7021
7022-#define L1_CACHE_SHIFT (5)
7023-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7024+#include <linux/const.h>
7025+
7026+#define L1_CACHE_SHIFT 5
7027+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7028
7029 /*
7030 * Memory returned by kmalloc() may be used for DMA, so we must make
7031diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7032index 5bed94e..fbcf200 100644
7033--- a/arch/x86/Kconfig
7034+++ b/arch/x86/Kconfig
7035@@ -226,7 +226,7 @@ config X86_HT
7036
7037 config X86_32_LAZY_GS
7038 def_bool y
7039- depends on X86_32 && !CC_STACKPROTECTOR
7040+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7041
7042 config ARCH_HWEIGHT_CFLAGS
7043 string
7044@@ -1058,7 +1058,7 @@ choice
7045
7046 config NOHIGHMEM
7047 bool "off"
7048- depends on !X86_NUMAQ
7049+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7050 ---help---
7051 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7052 However, the address space of 32-bit x86 processors is only 4
7053@@ -1095,7 +1095,7 @@ config NOHIGHMEM
7054
7055 config HIGHMEM4G
7056 bool "4GB"
7057- depends on !X86_NUMAQ
7058+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7059 ---help---
7060 Select this if you have a 32-bit processor and between 1 and 4
7061 gigabytes of physical RAM.
7062@@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7063 hex
7064 default 0xB0000000 if VMSPLIT_3G_OPT
7065 default 0x80000000 if VMSPLIT_2G
7066- default 0x78000000 if VMSPLIT_2G_OPT
7067+ default 0x70000000 if VMSPLIT_2G_OPT
7068 default 0x40000000 if VMSPLIT_1G
7069 default 0xC0000000
7070 depends on X86_32
7071@@ -1539,6 +1539,7 @@ config SECCOMP
7072
7073 config CC_STACKPROTECTOR
7074 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7075+ depends on X86_64 || !PAX_MEMORY_UDEREF
7076 ---help---
7077 This option turns on the -fstack-protector GCC feature. This
7078 feature puts, at the beginning of functions, a canary value on
7079@@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7080 config PHYSICAL_START
7081 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7082 default "0x1000000"
7083+ range 0x400000 0x40000000
7084 ---help---
7085 This gives the physical address where the kernel is loaded.
7086
7087@@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7088 config PHYSICAL_ALIGN
7089 hex "Alignment value to which kernel should be aligned" if X86_32
7090 default "0x1000000"
7091+ range 0x400000 0x1000000 if PAX_KERNEXEC
7092 range 0x2000 0x1000000
7093 ---help---
7094 This value puts the alignment restrictions on physical address
7095@@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7096 Say N if you want to disable CPU hotplug.
7097
7098 config COMPAT_VDSO
7099- def_bool y
7100+ def_bool n
7101 prompt "Compat VDSO support"
7102 depends on X86_32 || IA32_EMULATION
7103+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7104 ---help---
7105 Map the 32-bit VDSO to the predictable old-style address too.
7106
7107diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7108index 3c57033..22d44aa 100644
7109--- a/arch/x86/Kconfig.cpu
7110+++ b/arch/x86/Kconfig.cpu
7111@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7112
7113 config X86_F00F_BUG
7114 def_bool y
7115- depends on M586MMX || M586TSC || M586 || M486 || M386
7116+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7117
7118 config X86_INVD_BUG
7119 def_bool y
7120@@ -359,7 +359,7 @@ config X86_POPAD_OK
7121
7122 config X86_ALIGNMENT_16
7123 def_bool y
7124- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7125+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7126
7127 config X86_INTEL_USERCOPY
7128 def_bool y
7129@@ -405,7 +405,7 @@ config X86_CMPXCHG64
7130 # generates cmov.
7131 config X86_CMOV
7132 def_bool y
7133- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7134+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7135
7136 config X86_MINIMUM_CPU_FAMILY
7137 int
7138diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7139index e46c214..7c72b55 100644
7140--- a/arch/x86/Kconfig.debug
7141+++ b/arch/x86/Kconfig.debug
7142@@ -84,7 +84,7 @@ config X86_PTDUMP
7143 config DEBUG_RODATA
7144 bool "Write protect kernel read-only data structures"
7145 default y
7146- depends on DEBUG_KERNEL
7147+ depends on DEBUG_KERNEL && BROKEN
7148 ---help---
7149 Mark the kernel read-only data as write-protected in the pagetables,
7150 in order to catch accidental (and incorrect) writes to such const
7151@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7152
7153 config DEBUG_SET_MODULE_RONX
7154 bool "Set loadable kernel module data as NX and text as RO"
7155- depends on MODULES
7156+ depends on MODULES && BROKEN
7157 ---help---
7158 This option helps catch unintended modifications to loadable
7159 kernel module's text and read-only data. It also prevents execution
7160diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7161index 209ba12..15140db 100644
7162--- a/arch/x86/Makefile
7163+++ b/arch/x86/Makefile
7164@@ -46,6 +46,7 @@ else
7165 UTS_MACHINE := x86_64
7166 CHECKFLAGS += -D__x86_64__ -m64
7167
7168+ biarch := $(call cc-option,-m64)
7169 KBUILD_AFLAGS += -m64
7170 KBUILD_CFLAGS += -m64
7171
7172@@ -201,3 +202,12 @@ define archhelp
7173 echo ' FDARGS="..." arguments for the booted kernel'
7174 echo ' FDINITRD=file initrd for the booted kernel'
7175 endef
7176+
7177+define OLD_LD
7178+
7179+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7180+*** Please upgrade your binutils to 2.18 or newer
7181+endef
7182+
7183+archprepare:
7184+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7185diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7186index 95365a8..52f857b 100644
7187--- a/arch/x86/boot/Makefile
7188+++ b/arch/x86/boot/Makefile
7189@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7190 $(call cc-option, -fno-stack-protector) \
7191 $(call cc-option, -mpreferred-stack-boundary=2)
7192 KBUILD_CFLAGS += $(call cc-option, -m32)
7193+ifdef CONSTIFY_PLUGIN
7194+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7195+endif
7196 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7197 GCOV_PROFILE := n
7198
7199diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7200index 878e4b9..20537ab 100644
7201--- a/arch/x86/boot/bitops.h
7202+++ b/arch/x86/boot/bitops.h
7203@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7204 u8 v;
7205 const u32 *p = (const u32 *)addr;
7206
7207- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7208+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7209 return v;
7210 }
7211
7212@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7213
7214 static inline void set_bit(int nr, void *addr)
7215 {
7216- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7217+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7218 }
7219
7220 #endif /* BOOT_BITOPS_H */
7221diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7222index c7093bd..d4247ffe0 100644
7223--- a/arch/x86/boot/boot.h
7224+++ b/arch/x86/boot/boot.h
7225@@ -85,7 +85,7 @@ static inline void io_delay(void)
7226 static inline u16 ds(void)
7227 {
7228 u16 seg;
7229- asm("movw %%ds,%0" : "=rm" (seg));
7230+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7231 return seg;
7232 }
7233
7234@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7235 static inline int memcmp(const void *s1, const void *s2, size_t len)
7236 {
7237 u8 diff;
7238- asm("repe; cmpsb; setnz %0"
7239+ asm volatile("repe; cmpsb; setnz %0"
7240 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7241 return diff;
7242 }
7243diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7244index b123b9a..2cf2f23 100644
7245--- a/arch/x86/boot/compressed/Makefile
7246+++ b/arch/x86/boot/compressed/Makefile
7247@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7248 KBUILD_CFLAGS += $(cflags-y)
7249 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7250 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7251+ifdef CONSTIFY_PLUGIN
7252+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7253+endif
7254
7255 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7256 GCOV_PROFILE := n
7257diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7258index a055993..47e126c 100644
7259--- a/arch/x86/boot/compressed/head_32.S
7260+++ b/arch/x86/boot/compressed/head_32.S
7261@@ -98,7 +98,7 @@ preferred_addr:
7262 notl %eax
7263 andl %eax, %ebx
7264 #else
7265- movl $LOAD_PHYSICAL_ADDR, %ebx
7266+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7267 #endif
7268
7269 /* Target address to relocate to for decompression */
7270@@ -184,7 +184,7 @@ relocated:
7271 * and where it was actually loaded.
7272 */
7273 movl %ebp, %ebx
7274- subl $LOAD_PHYSICAL_ADDR, %ebx
7275+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7276 jz 2f /* Nothing to be done if loaded at compiled addr. */
7277 /*
7278 * Process relocations.
7279@@ -192,8 +192,7 @@ relocated:
7280
7281 1: subl $4, %edi
7282 movl (%edi), %ecx
7283- testl %ecx, %ecx
7284- jz 2f
7285+ jecxz 2f
7286 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7287 jmp 1b
7288 2:
7289diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7290index 558d76c..606aa24 100644
7291--- a/arch/x86/boot/compressed/head_64.S
7292+++ b/arch/x86/boot/compressed/head_64.S
7293@@ -91,7 +91,7 @@ ENTRY(startup_32)
7294 notl %eax
7295 andl %eax, %ebx
7296 #else
7297- movl $LOAD_PHYSICAL_ADDR, %ebx
7298+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7299 #endif
7300
7301 /* Target address to relocate to for decompression */
7302@@ -253,7 +253,7 @@ preferred_addr:
7303 notq %rax
7304 andq %rax, %rbp
7305 #else
7306- movq $LOAD_PHYSICAL_ADDR, %rbp
7307+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7308 #endif
7309
7310 /* Target address to relocate to for decompression */
7311diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7312index 7116dcb..d9ae1d7 100644
7313--- a/arch/x86/boot/compressed/misc.c
7314+++ b/arch/x86/boot/compressed/misc.c
7315@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7316 case PT_LOAD:
7317 #ifdef CONFIG_RELOCATABLE
7318 dest = output;
7319- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7320+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7321 #else
7322 dest = (void *)(phdr->p_paddr);
7323 #endif
7324@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7325 error("Destination address too large");
7326 #endif
7327 #ifndef CONFIG_RELOCATABLE
7328- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7329+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7330 error("Wrong destination address");
7331 #endif
7332
7333diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7334index 89bbf4e..869908e 100644
7335--- a/arch/x86/boot/compressed/relocs.c
7336+++ b/arch/x86/boot/compressed/relocs.c
7337@@ -13,8 +13,11 @@
7338
7339 static void die(char *fmt, ...);
7340
7341+#include "../../../../include/generated/autoconf.h"
7342+
7343 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7344 static Elf32_Ehdr ehdr;
7345+static Elf32_Phdr *phdr;
7346 static unsigned long reloc_count, reloc_idx;
7347 static unsigned long *relocs;
7348
7349@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7350 }
7351 }
7352
7353+static void read_phdrs(FILE *fp)
7354+{
7355+ unsigned int i;
7356+
7357+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7358+ if (!phdr) {
7359+ die("Unable to allocate %d program headers\n",
7360+ ehdr.e_phnum);
7361+ }
7362+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7363+ die("Seek to %d failed: %s\n",
7364+ ehdr.e_phoff, strerror(errno));
7365+ }
7366+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7367+ die("Cannot read ELF program headers: %s\n",
7368+ strerror(errno));
7369+ }
7370+ for(i = 0; i < ehdr.e_phnum; i++) {
7371+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7372+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7373+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7374+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7375+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7376+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7377+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7378+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7379+ }
7380+
7381+}
7382+
7383 static void read_shdrs(FILE *fp)
7384 {
7385- int i;
7386+ unsigned int i;
7387 Elf32_Shdr shdr;
7388
7389 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7390@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7391
7392 static void read_strtabs(FILE *fp)
7393 {
7394- int i;
7395+ unsigned int i;
7396 for (i = 0; i < ehdr.e_shnum; i++) {
7397 struct section *sec = &secs[i];
7398 if (sec->shdr.sh_type != SHT_STRTAB) {
7399@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7400
7401 static void read_symtabs(FILE *fp)
7402 {
7403- int i,j;
7404+ unsigned int i,j;
7405 for (i = 0; i < ehdr.e_shnum; i++) {
7406 struct section *sec = &secs[i];
7407 if (sec->shdr.sh_type != SHT_SYMTAB) {
7408@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7409
7410 static void read_relocs(FILE *fp)
7411 {
7412- int i,j;
7413+ unsigned int i,j;
7414+ uint32_t base;
7415+
7416 for (i = 0; i < ehdr.e_shnum; i++) {
7417 struct section *sec = &secs[i];
7418 if (sec->shdr.sh_type != SHT_REL) {
7419@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7420 die("Cannot read symbol table: %s\n",
7421 strerror(errno));
7422 }
7423+ base = 0;
7424+ for (j = 0; j < ehdr.e_phnum; j++) {
7425+ if (phdr[j].p_type != PT_LOAD )
7426+ continue;
7427+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7428+ continue;
7429+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7430+ break;
7431+ }
7432 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7433 Elf32_Rel *rel = &sec->reltab[j];
7434- rel->r_offset = elf32_to_cpu(rel->r_offset);
7435+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7436 rel->r_info = elf32_to_cpu(rel->r_info);
7437 }
7438 }
7439@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7440
7441 static void print_absolute_symbols(void)
7442 {
7443- int i;
7444+ unsigned int i;
7445 printf("Absolute symbols\n");
7446 printf(" Num: Value Size Type Bind Visibility Name\n");
7447 for (i = 0; i < ehdr.e_shnum; i++) {
7448 struct section *sec = &secs[i];
7449 char *sym_strtab;
7450 Elf32_Sym *sh_symtab;
7451- int j;
7452+ unsigned int j;
7453
7454 if (sec->shdr.sh_type != SHT_SYMTAB) {
7455 continue;
7456@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7457
7458 static void print_absolute_relocs(void)
7459 {
7460- int i, printed = 0;
7461+ unsigned int i, printed = 0;
7462
7463 for (i = 0; i < ehdr.e_shnum; i++) {
7464 struct section *sec = &secs[i];
7465 struct section *sec_applies, *sec_symtab;
7466 char *sym_strtab;
7467 Elf32_Sym *sh_symtab;
7468- int j;
7469+ unsigned int j;
7470 if (sec->shdr.sh_type != SHT_REL) {
7471 continue;
7472 }
7473@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7474
7475 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7476 {
7477- int i;
7478+ unsigned int i;
7479 /* Walk through the relocations */
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 char *sym_strtab;
7482 Elf32_Sym *sh_symtab;
7483 struct section *sec_applies, *sec_symtab;
7484- int j;
7485+ unsigned int j;
7486 struct section *sec = &secs[i];
7487
7488 if (sec->shdr.sh_type != SHT_REL) {
7489@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7490 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7491 continue;
7492 }
7493+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7494+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7495+ continue;
7496+
7497+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7498+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7499+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7500+ continue;
7501+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7502+ continue;
7503+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7504+ continue;
7505+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7506+ continue;
7507+#endif
7508+
7509 switch (r_type) {
7510 case R_386_NONE:
7511 case R_386_PC32:
7512@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7513
7514 static void emit_relocs(int as_text)
7515 {
7516- int i;
7517+ unsigned int i;
7518 /* Count how many relocations I have and allocate space for them. */
7519 reloc_count = 0;
7520 walk_relocs(count_reloc);
7521@@ -665,6 +725,7 @@ int main(int argc, char **argv)
7522 fname, strerror(errno));
7523 }
7524 read_ehdr(fp);
7525+ read_phdrs(fp);
7526 read_shdrs(fp);
7527 read_strtabs(fp);
7528 read_symtabs(fp);
7529diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7530index 4d3ff03..e4972ff 100644
7531--- a/arch/x86/boot/cpucheck.c
7532+++ b/arch/x86/boot/cpucheck.c
7533@@ -74,7 +74,7 @@ static int has_fpu(void)
7534 u16 fcw = -1, fsw = -1;
7535 u32 cr0;
7536
7537- asm("movl %%cr0,%0" : "=r" (cr0));
7538+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7539 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7540 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7541 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7542@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7543 {
7544 u32 f0, f1;
7545
7546- asm("pushfl ; "
7547+ asm volatile("pushfl ; "
7548 "pushfl ; "
7549 "popl %0 ; "
7550 "movl %0,%1 ; "
7551@@ -115,7 +115,7 @@ static void get_flags(void)
7552 set_bit(X86_FEATURE_FPU, cpu.flags);
7553
7554 if (has_eflag(X86_EFLAGS_ID)) {
7555- asm("cpuid"
7556+ asm volatile("cpuid"
7557 : "=a" (max_intel_level),
7558 "=b" (cpu_vendor[0]),
7559 "=d" (cpu_vendor[1]),
7560@@ -124,7 +124,7 @@ static void get_flags(void)
7561
7562 if (max_intel_level >= 0x00000001 &&
7563 max_intel_level <= 0x0000ffff) {
7564- asm("cpuid"
7565+ asm volatile("cpuid"
7566 : "=a" (tfms),
7567 "=c" (cpu.flags[4]),
7568 "=d" (cpu.flags[0])
7569@@ -136,7 +136,7 @@ static void get_flags(void)
7570 cpu.model += ((tfms >> 16) & 0xf) << 4;
7571 }
7572
7573- asm("cpuid"
7574+ asm volatile("cpuid"
7575 : "=a" (max_amd_level)
7576 : "a" (0x80000000)
7577 : "ebx", "ecx", "edx");
7578@@ -144,7 +144,7 @@ static void get_flags(void)
7579 if (max_amd_level >= 0x80000001 &&
7580 max_amd_level <= 0x8000ffff) {
7581 u32 eax = 0x80000001;
7582- asm("cpuid"
7583+ asm volatile("cpuid"
7584 : "+a" (eax),
7585 "=c" (cpu.flags[6]),
7586 "=d" (cpu.flags[1])
7587@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7588 u32 ecx = MSR_K7_HWCR;
7589 u32 eax, edx;
7590
7591- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7592+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7593 eax &= ~(1 << 15);
7594- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7595+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7596
7597 get_flags(); /* Make sure it really did something */
7598 err = check_flags();
7599@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7600 u32 ecx = MSR_VIA_FCR;
7601 u32 eax, edx;
7602
7603- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7604+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7605 eax |= (1<<1)|(1<<7);
7606- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7607+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7608
7609 set_bit(X86_FEATURE_CX8, cpu.flags);
7610 err = check_flags();
7611@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7612 u32 eax, edx;
7613 u32 level = 1;
7614
7615- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7616- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7617- asm("cpuid"
7618+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7619+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7620+ asm volatile("cpuid"
7621 : "+a" (level), "=d" (cpu.flags[0])
7622 : : "ecx", "ebx");
7623- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7624+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7625
7626 err = check_flags();
7627 }
7628diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7629index f1bbeeb..aff09cb 100644
7630--- a/arch/x86/boot/header.S
7631+++ b/arch/x86/boot/header.S
7632@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7633 # single linked list of
7634 # struct setup_data
7635
7636-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7637+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7638
7639 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7640 #define VO_INIT_SIZE (VO__end - VO__text)
7641diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7642index db75d07..8e6d0af 100644
7643--- a/arch/x86/boot/memory.c
7644+++ b/arch/x86/boot/memory.c
7645@@ -19,7 +19,7 @@
7646
7647 static int detect_memory_e820(void)
7648 {
7649- int count = 0;
7650+ unsigned int count = 0;
7651 struct biosregs ireg, oreg;
7652 struct e820entry *desc = boot_params.e820_map;
7653 static struct e820entry buf; /* static so it is zeroed */
7654diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7655index 11e8c6e..fdbb1ed 100644
7656--- a/arch/x86/boot/video-vesa.c
7657+++ b/arch/x86/boot/video-vesa.c
7658@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7659
7660 boot_params.screen_info.vesapm_seg = oreg.es;
7661 boot_params.screen_info.vesapm_off = oreg.di;
7662+ boot_params.screen_info.vesapm_size = oreg.cx;
7663 }
7664
7665 /*
7666diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7667index 43eda28..5ab5fdb 100644
7668--- a/arch/x86/boot/video.c
7669+++ b/arch/x86/boot/video.c
7670@@ -96,7 +96,7 @@ static void store_mode_params(void)
7671 static unsigned int get_entry(void)
7672 {
7673 char entry_buf[4];
7674- int i, len = 0;
7675+ unsigned int i, len = 0;
7676 int key;
7677 unsigned int v;
7678
7679diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7680index 5b577d5..3c1fed4 100644
7681--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7682+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7683@@ -8,6 +8,8 @@
7684 * including this sentence is retained in full.
7685 */
7686
7687+#include <asm/alternative-asm.h>
7688+
7689 .extern crypto_ft_tab
7690 .extern crypto_it_tab
7691 .extern crypto_fl_tab
7692@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7693 je B192; \
7694 leaq 32(r9),r9;
7695
7696+#define ret pax_force_retaddr 0, 1; ret
7697+
7698 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7699 movq r1,r2; \
7700 movq r3,r4; \
7701diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7702index be6d9e3..21fbbca 100644
7703--- a/arch/x86/crypto/aesni-intel_asm.S
7704+++ b/arch/x86/crypto/aesni-intel_asm.S
7705@@ -31,6 +31,7 @@
7706
7707 #include <linux/linkage.h>
7708 #include <asm/inst.h>
7709+#include <asm/alternative-asm.h>
7710
7711 #ifdef __x86_64__
7712 .data
7713@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7714 pop %r14
7715 pop %r13
7716 pop %r12
7717+ pax_force_retaddr 0, 1
7718 ret
7719+ENDPROC(aesni_gcm_dec)
7720
7721
7722 /*****************************************************************************
7723@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7724 pop %r14
7725 pop %r13
7726 pop %r12
7727+ pax_force_retaddr 0, 1
7728 ret
7729+ENDPROC(aesni_gcm_enc)
7730
7731 #endif
7732
7733@@ -1714,6 +1719,7 @@ _key_expansion_256a:
7734 pxor %xmm1, %xmm0
7735 movaps %xmm0, (TKEYP)
7736 add $0x10, TKEYP
7737+ pax_force_retaddr_bts
7738 ret
7739
7740 .align 4
7741@@ -1738,6 +1744,7 @@ _key_expansion_192a:
7742 shufps $0b01001110, %xmm2, %xmm1
7743 movaps %xmm1, 0x10(TKEYP)
7744 add $0x20, TKEYP
7745+ pax_force_retaddr_bts
7746 ret
7747
7748 .align 4
7749@@ -1757,6 +1764,7 @@ _key_expansion_192b:
7750
7751 movaps %xmm0, (TKEYP)
7752 add $0x10, TKEYP
7753+ pax_force_retaddr_bts
7754 ret
7755
7756 .align 4
7757@@ -1769,6 +1777,7 @@ _key_expansion_256b:
7758 pxor %xmm1, %xmm2
7759 movaps %xmm2, (TKEYP)
7760 add $0x10, TKEYP
7761+ pax_force_retaddr_bts
7762 ret
7763
7764 /*
7765@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7766 #ifndef __x86_64__
7767 popl KEYP
7768 #endif
7769+ pax_force_retaddr 0, 1
7770 ret
7771+ENDPROC(aesni_set_key)
7772
7773 /*
7774 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7775@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7776 popl KLEN
7777 popl KEYP
7778 #endif
7779+ pax_force_retaddr 0, 1
7780 ret
7781+ENDPROC(aesni_enc)
7782
7783 /*
7784 * _aesni_enc1: internal ABI
7785@@ -1959,6 +1972,7 @@ _aesni_enc1:
7786 AESENC KEY STATE
7787 movaps 0x70(TKEYP), KEY
7788 AESENCLAST KEY STATE
7789+ pax_force_retaddr_bts
7790 ret
7791
7792 /*
7793@@ -2067,6 +2081,7 @@ _aesni_enc4:
7794 AESENCLAST KEY STATE2
7795 AESENCLAST KEY STATE3
7796 AESENCLAST KEY STATE4
7797+ pax_force_retaddr_bts
7798 ret
7799
7800 /*
7801@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7802 popl KLEN
7803 popl KEYP
7804 #endif
7805+ pax_force_retaddr 0, 1
7806 ret
7807+ENDPROC(aesni_dec)
7808
7809 /*
7810 * _aesni_dec1: internal ABI
7811@@ -2146,6 +2163,7 @@ _aesni_dec1:
7812 AESDEC KEY STATE
7813 movaps 0x70(TKEYP), KEY
7814 AESDECLAST KEY STATE
7815+ pax_force_retaddr_bts
7816 ret
7817
7818 /*
7819@@ -2254,6 +2272,7 @@ _aesni_dec4:
7820 AESDECLAST KEY STATE2
7821 AESDECLAST KEY STATE3
7822 AESDECLAST KEY STATE4
7823+ pax_force_retaddr_bts
7824 ret
7825
7826 /*
7827@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7828 popl KEYP
7829 popl LEN
7830 #endif
7831+ pax_force_retaddr 0, 1
7832 ret
7833+ENDPROC(aesni_ecb_enc)
7834
7835 /*
7836 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7837@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7838 popl KEYP
7839 popl LEN
7840 #endif
7841+ pax_force_retaddr 0, 1
7842 ret
7843+ENDPROC(aesni_ecb_dec)
7844
7845 /*
7846 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7847@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7848 popl LEN
7849 popl IVP
7850 #endif
7851+ pax_force_retaddr 0, 1
7852 ret
7853+ENDPROC(aesni_cbc_enc)
7854
7855 /*
7856 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7857@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7858 popl LEN
7859 popl IVP
7860 #endif
7861+ pax_force_retaddr 0, 1
7862 ret
7863+ENDPROC(aesni_cbc_dec)
7864
7865 #ifdef __x86_64__
7866 .align 16
7867@@ -2524,6 +2551,7 @@ _aesni_inc_init:
7868 mov $1, TCTR_LOW
7869 MOVQ_R64_XMM TCTR_LOW INC
7870 MOVQ_R64_XMM CTR TCTR_LOW
7871+ pax_force_retaddr_bts
7872 ret
7873
7874 /*
7875@@ -2552,6 +2580,7 @@ _aesni_inc:
7876 .Linc_low:
7877 movaps CTR, IV
7878 PSHUFB_XMM BSWAP_MASK IV
7879+ pax_force_retaddr_bts
7880 ret
7881
7882 /*
7883@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7884 .Lctr_enc_ret:
7885 movups IV, (IVP)
7886 .Lctr_enc_just_ret:
7887+ pax_force_retaddr 0, 1
7888 ret
7889+ENDPROC(aesni_ctr_enc)
7890 #endif
7891diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7892index 391d245..67f35c2 100644
7893--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7894+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7895@@ -20,6 +20,8 @@
7896 *
7897 */
7898
7899+#include <asm/alternative-asm.h>
7900+
7901 .file "blowfish-x86_64-asm.S"
7902 .text
7903
7904@@ -151,9 +153,11 @@ __blowfish_enc_blk:
7905 jnz __enc_xor;
7906
7907 write_block();
7908+ pax_force_retaddr 0, 1
7909 ret;
7910 __enc_xor:
7911 xor_block();
7912+ pax_force_retaddr 0, 1
7913 ret;
7914
7915 .align 8
7916@@ -188,6 +192,7 @@ blowfish_dec_blk:
7917
7918 movq %r11, %rbp;
7919
7920+ pax_force_retaddr 0, 1
7921 ret;
7922
7923 /**********************************************************************
7924@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7925
7926 popq %rbx;
7927 popq %rbp;
7928+ pax_force_retaddr 0, 1
7929 ret;
7930
7931 __enc_xor4:
7932@@ -349,6 +355,7 @@ __enc_xor4:
7933
7934 popq %rbx;
7935 popq %rbp;
7936+ pax_force_retaddr 0, 1
7937 ret;
7938
7939 .align 8
7940@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7941 popq %rbx;
7942 popq %rbp;
7943
7944+ pax_force_retaddr 0, 1
7945 ret;
7946
7947diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7948index 6214a9b..1f4fc9a 100644
7949--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7950+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7951@@ -1,3 +1,5 @@
7952+#include <asm/alternative-asm.h>
7953+
7954 # enter ECRYPT_encrypt_bytes
7955 .text
7956 .p2align 5
7957@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7958 add %r11,%rsp
7959 mov %rdi,%rax
7960 mov %rsi,%rdx
7961+ pax_force_retaddr 0, 1
7962 ret
7963 # bytesatleast65:
7964 ._bytesatleast65:
7965@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7966 add %r11,%rsp
7967 mov %rdi,%rax
7968 mov %rsi,%rdx
7969+ pax_force_retaddr
7970 ret
7971 # enter ECRYPT_ivsetup
7972 .text
7973@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7974 add %r11,%rsp
7975 mov %rdi,%rax
7976 mov %rsi,%rdx
7977+ pax_force_retaddr
7978 ret
7979diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7980index 7f24a15..9cd3ffe 100644
7981--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7982+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7983@@ -24,6 +24,8 @@
7984 *
7985 */
7986
7987+#include <asm/alternative-asm.h>
7988+
7989 .file "serpent-sse2-x86_64-asm_64.S"
7990 .text
7991
7992@@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
7993 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
7994 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
7995
7996+ pax_force_retaddr
7997 ret;
7998
7999 __enc_xor8:
8000 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8001 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8002
8003+ pax_force_retaddr
8004 ret;
8005
8006 .align 8
8007@@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8008 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8009 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8010
8011+ pax_force_retaddr
8012 ret;
8013diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8014index b2c2f57..8470cab 100644
8015--- a/arch/x86/crypto/sha1_ssse3_asm.S
8016+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8017@@ -28,6 +28,8 @@
8018 * (at your option) any later version.
8019 */
8020
8021+#include <asm/alternative-asm.h>
8022+
8023 #define CTX %rdi // arg1
8024 #define BUF %rsi // arg2
8025 #define CNT %rdx // arg3
8026@@ -104,6 +106,7 @@
8027 pop %r12
8028 pop %rbp
8029 pop %rbx
8030+ pax_force_retaddr 0, 1
8031 ret
8032
8033 .size \name, .-\name
8034diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8035index 5b012a2..36d5364 100644
8036--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8037+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8038@@ -20,6 +20,8 @@
8039 *
8040 */
8041
8042+#include <asm/alternative-asm.h>
8043+
8044 .file "twofish-x86_64-asm-3way.S"
8045 .text
8046
8047@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8048 popq %r13;
8049 popq %r14;
8050 popq %r15;
8051+ pax_force_retaddr 0, 1
8052 ret;
8053
8054 __enc_xor3:
8055@@ -271,6 +274,7 @@ __enc_xor3:
8056 popq %r13;
8057 popq %r14;
8058 popq %r15;
8059+ pax_force_retaddr 0, 1
8060 ret;
8061
8062 .global twofish_dec_blk_3way
8063@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8064 popq %r13;
8065 popq %r14;
8066 popq %r15;
8067+ pax_force_retaddr 0, 1
8068 ret;
8069
8070diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8071index 7bcf3fc..f53832f 100644
8072--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8073+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8074@@ -21,6 +21,7 @@
8075 .text
8076
8077 #include <asm/asm-offsets.h>
8078+#include <asm/alternative-asm.h>
8079
8080 #define a_offset 0
8081 #define b_offset 4
8082@@ -268,6 +269,7 @@ twofish_enc_blk:
8083
8084 popq R1
8085 movq $1,%rax
8086+ pax_force_retaddr 0, 1
8087 ret
8088
8089 twofish_dec_blk:
8090@@ -319,4 +321,5 @@ twofish_dec_blk:
8091
8092 popq R1
8093 movq $1,%rax
8094+ pax_force_retaddr 0, 1
8095 ret
8096diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8097index 39e4909..887aa7e 100644
8098--- a/arch/x86/ia32/ia32_aout.c
8099+++ b/arch/x86/ia32/ia32_aout.c
8100@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8101 unsigned long dump_start, dump_size;
8102 struct user32 dump;
8103
8104+ memset(&dump, 0, sizeof(dump));
8105+
8106 fs = get_fs();
8107 set_fs(KERNEL_DS);
8108 has_dumped = 1;
8109diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8110index 6557769..ef6ae89 100644
8111--- a/arch/x86/ia32/ia32_signal.c
8112+++ b/arch/x86/ia32/ia32_signal.c
8113@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8114 }
8115 seg = get_fs();
8116 set_fs(KERNEL_DS);
8117- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8118+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8119 set_fs(seg);
8120 if (ret >= 0 && uoss_ptr) {
8121 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8122@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8123 */
8124 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8125 size_t frame_size,
8126- void **fpstate)
8127+ void __user **fpstate)
8128 {
8129 unsigned long sp;
8130
8131@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8132
8133 if (used_math()) {
8134 sp = sp - sig_xstate_ia32_size;
8135- *fpstate = (struct _fpstate_ia32 *) sp;
8136+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8137 if (save_i387_xstate_ia32(*fpstate) < 0)
8138 return (void __user *) -1L;
8139 }
8140@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8141 sp -= frame_size;
8142 /* Align the stack pointer according to the i386 ABI,
8143 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8144- sp = ((sp + 4) & -16ul) - 4;
8145+ sp = ((sp - 12) & -16ul) - 4;
8146 return (void __user *) sp;
8147 }
8148
8149@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8150 * These are actually not used anymore, but left because some
8151 * gdb versions depend on them as a marker.
8152 */
8153- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8154+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8155 } put_user_catch(err);
8156
8157 if (err)
8158@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8159 0xb8,
8160 __NR_ia32_rt_sigreturn,
8161 0x80cd,
8162- 0,
8163+ 0
8164 };
8165
8166 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8167@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8168
8169 if (ka->sa.sa_flags & SA_RESTORER)
8170 restorer = ka->sa.sa_restorer;
8171+ else if (current->mm->context.vdso)
8172+ /* Return stub is in 32bit vsyscall page */
8173+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8174 else
8175- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8176- rt_sigreturn);
8177+ restorer = &frame->retcode;
8178 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8179
8180 /*
8181 * Not actually used anymore, but left because some gdb
8182 * versions need it.
8183 */
8184- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8185+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8186 } put_user_catch(err);
8187
8188 if (err)
8189diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8190index e3e7340..05ed805 100644
8191--- a/arch/x86/ia32/ia32entry.S
8192+++ b/arch/x86/ia32/ia32entry.S
8193@@ -13,8 +13,10 @@
8194 #include <asm/thread_info.h>
8195 #include <asm/segment.h>
8196 #include <asm/irqflags.h>
8197+#include <asm/pgtable.h>
8198 #include <linux/linkage.h>
8199 #include <linux/err.h>
8200+#include <asm/alternative-asm.h>
8201
8202 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8203 #include <linux/elf-em.h>
8204@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8205 ENDPROC(native_irq_enable_sysexit)
8206 #endif
8207
8208+ .macro pax_enter_kernel_user
8209+ pax_set_fptr_mask
8210+#ifdef CONFIG_PAX_MEMORY_UDEREF
8211+ call pax_enter_kernel_user
8212+#endif
8213+ .endm
8214+
8215+ .macro pax_exit_kernel_user
8216+#ifdef CONFIG_PAX_MEMORY_UDEREF
8217+ call pax_exit_kernel_user
8218+#endif
8219+#ifdef CONFIG_PAX_RANDKSTACK
8220+ pushq %rax
8221+ pushq %r11
8222+ call pax_randomize_kstack
8223+ popq %r11
8224+ popq %rax
8225+#endif
8226+ .endm
8227+
8228+.macro pax_erase_kstack
8229+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8230+ call pax_erase_kstack
8231+#endif
8232+.endm
8233+
8234 /*
8235 * 32bit SYSENTER instruction entry.
8236 *
8237@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8238 CFI_REGISTER rsp,rbp
8239 SWAPGS_UNSAFE_STACK
8240 movq PER_CPU_VAR(kernel_stack), %rsp
8241- addq $(KERNEL_STACK_OFFSET),%rsp
8242- /*
8243- * No need to follow this irqs on/off section: the syscall
8244- * disabled irqs, here we enable it straight after entry:
8245- */
8246- ENABLE_INTERRUPTS(CLBR_NONE)
8247 movl %ebp,%ebp /* zero extension */
8248 pushq_cfi $__USER32_DS
8249 /*CFI_REL_OFFSET ss,0*/
8250@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8251 CFI_REL_OFFSET rsp,0
8252 pushfq_cfi
8253 /*CFI_REL_OFFSET rflags,0*/
8254- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8255- CFI_REGISTER rip,r10
8256+ orl $X86_EFLAGS_IF,(%rsp)
8257+ GET_THREAD_INFO(%r11)
8258+ movl TI_sysenter_return(%r11), %r11d
8259+ CFI_REGISTER rip,r11
8260 pushq_cfi $__USER32_CS
8261 /*CFI_REL_OFFSET cs,0*/
8262 movl %eax, %eax
8263- pushq_cfi %r10
8264+ pushq_cfi %r11
8265 CFI_REL_OFFSET rip,0
8266 pushq_cfi %rax
8267 cld
8268 SAVE_ARGS 0,1,0
8269+ pax_enter_kernel_user
8270+ /*
8271+ * No need to follow this irqs on/off section: the syscall
8272+ * disabled irqs, here we enable it straight after entry:
8273+ */
8274+ ENABLE_INTERRUPTS(CLBR_NONE)
8275 /* no need to do an access_ok check here because rbp has been
8276 32bit zero extended */
8277+
8278+#ifdef CONFIG_PAX_MEMORY_UDEREF
8279+ mov $PAX_USER_SHADOW_BASE,%r11
8280+ add %r11,%rbp
8281+#endif
8282+
8283 1: movl (%rbp),%ebp
8284 .section __ex_table,"a"
8285 .quad 1b,ia32_badarg
8286 .previous
8287- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8288- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8289+ GET_THREAD_INFO(%r11)
8290+ orl $TS_COMPAT,TI_status(%r11)
8291+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8292 CFI_REMEMBER_STATE
8293 jnz sysenter_tracesys
8294 cmpq $(IA32_NR_syscalls-1),%rax
8295@@ -160,12 +197,15 @@ sysenter_do_call:
8296 sysenter_dispatch:
8297 call *ia32_sys_call_table(,%rax,8)
8298 movq %rax,RAX-ARGOFFSET(%rsp)
8299+ GET_THREAD_INFO(%r11)
8300 DISABLE_INTERRUPTS(CLBR_NONE)
8301 TRACE_IRQS_OFF
8302- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8303+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8304 jnz sysexit_audit
8305 sysexit_from_sys_call:
8306- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8307+ pax_exit_kernel_user
8308+ pax_erase_kstack
8309+ andl $~TS_COMPAT,TI_status(%r11)
8310 /* clear IF, that popfq doesn't enable interrupts early */
8311 andl $~0x200,EFLAGS-R11(%rsp)
8312 movl RIP-R11(%rsp),%edx /* User %eip */
8313@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8314 movl %eax,%esi /* 2nd arg: syscall number */
8315 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8316 call __audit_syscall_entry
8317+
8318+ pax_erase_kstack
8319+
8320 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8321 cmpq $(IA32_NR_syscalls-1),%rax
8322 ja ia32_badsys
8323@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8324 .endm
8325
8326 .macro auditsys_exit exit
8327- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8328+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8329 jnz ia32_ret_from_sys_call
8330 TRACE_IRQS_ON
8331 sti
8332@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8333 1: setbe %al /* 1 if error, 0 if not */
8334 movzbl %al,%edi /* zero-extend that into %edi */
8335 call __audit_syscall_exit
8336+ GET_THREAD_INFO(%r11)
8337 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8338 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8339 cli
8340 TRACE_IRQS_OFF
8341- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8342+ testl %edi,TI_flags(%r11)
8343 jz \exit
8344 CLEAR_RREGS -ARGOFFSET
8345 jmp int_with_check
8346@@ -235,7 +279,7 @@ sysexit_audit:
8347
8348 sysenter_tracesys:
8349 #ifdef CONFIG_AUDITSYSCALL
8350- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8351+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8352 jz sysenter_auditsys
8353 #endif
8354 SAVE_REST
8355@@ -243,6 +287,9 @@ sysenter_tracesys:
8356 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8357 movq %rsp,%rdi /* &pt_regs -> arg1 */
8358 call syscall_trace_enter
8359+
8360+ pax_erase_kstack
8361+
8362 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8363 RESTORE_REST
8364 cmpq $(IA32_NR_syscalls-1),%rax
8365@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8366 ENTRY(ia32_cstar_target)
8367 CFI_STARTPROC32 simple
8368 CFI_SIGNAL_FRAME
8369- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8370+ CFI_DEF_CFA rsp,0
8371 CFI_REGISTER rip,rcx
8372 /*CFI_REGISTER rflags,r11*/
8373 SWAPGS_UNSAFE_STACK
8374 movl %esp,%r8d
8375 CFI_REGISTER rsp,r8
8376 movq PER_CPU_VAR(kernel_stack),%rsp
8377+ SAVE_ARGS 8*6,0,0
8378+ pax_enter_kernel_user
8379 /*
8380 * No need to follow this irqs on/off section: the syscall
8381 * disabled irqs and here we enable it straight after entry:
8382 */
8383 ENABLE_INTERRUPTS(CLBR_NONE)
8384- SAVE_ARGS 8,0,0
8385 movl %eax,%eax /* zero extension */
8386 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8387 movq %rcx,RIP-ARGOFFSET(%rsp)
8388@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8389 /* no need to do an access_ok check here because r8 has been
8390 32bit zero extended */
8391 /* hardware stack frame is complete now */
8392+
8393+#ifdef CONFIG_PAX_MEMORY_UDEREF
8394+ mov $PAX_USER_SHADOW_BASE,%r11
8395+ add %r11,%r8
8396+#endif
8397+
8398 1: movl (%r8),%r9d
8399 .section __ex_table,"a"
8400 .quad 1b,ia32_badarg
8401 .previous
8402- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8403- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8404+ GET_THREAD_INFO(%r11)
8405+ orl $TS_COMPAT,TI_status(%r11)
8406+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8407 CFI_REMEMBER_STATE
8408 jnz cstar_tracesys
8409 cmpq $IA32_NR_syscalls-1,%rax
8410@@ -317,12 +372,15 @@ cstar_do_call:
8411 cstar_dispatch:
8412 call *ia32_sys_call_table(,%rax,8)
8413 movq %rax,RAX-ARGOFFSET(%rsp)
8414+ GET_THREAD_INFO(%r11)
8415 DISABLE_INTERRUPTS(CLBR_NONE)
8416 TRACE_IRQS_OFF
8417- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8418+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8419 jnz sysretl_audit
8420 sysretl_from_sys_call:
8421- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8422+ pax_exit_kernel_user
8423+ pax_erase_kstack
8424+ andl $~TS_COMPAT,TI_status(%r11)
8425 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8426 movl RIP-ARGOFFSET(%rsp),%ecx
8427 CFI_REGISTER rip,rcx
8428@@ -350,7 +408,7 @@ sysretl_audit:
8429
8430 cstar_tracesys:
8431 #ifdef CONFIG_AUDITSYSCALL
8432- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8433+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8434 jz cstar_auditsys
8435 #endif
8436 xchgl %r9d,%ebp
8437@@ -359,6 +417,9 @@ cstar_tracesys:
8438 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8439 movq %rsp,%rdi /* &pt_regs -> arg1 */
8440 call syscall_trace_enter
8441+
8442+ pax_erase_kstack
8443+
8444 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8445 RESTORE_REST
8446 xchgl %ebp,%r9d
8447@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8448 CFI_REL_OFFSET rip,RIP-RIP
8449 PARAVIRT_ADJUST_EXCEPTION_FRAME
8450 SWAPGS
8451- /*
8452- * No need to follow this irqs on/off section: the syscall
8453- * disabled irqs and here we enable it straight after entry:
8454- */
8455- ENABLE_INTERRUPTS(CLBR_NONE)
8456 movl %eax,%eax
8457 pushq_cfi %rax
8458 cld
8459 /* note the registers are not zero extended to the sf.
8460 this could be a problem. */
8461 SAVE_ARGS 0,1,0
8462- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8463- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8464+ pax_enter_kernel_user
8465+ /*
8466+ * No need to follow this irqs on/off section: the syscall
8467+ * disabled irqs and here we enable it straight after entry:
8468+ */
8469+ ENABLE_INTERRUPTS(CLBR_NONE)
8470+ GET_THREAD_INFO(%r11)
8471+ orl $TS_COMPAT,TI_status(%r11)
8472+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8473 jnz ia32_tracesys
8474 cmpq $(IA32_NR_syscalls-1),%rax
8475 ja ia32_badsys
8476@@ -435,6 +498,9 @@ ia32_tracesys:
8477 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8478 movq %rsp,%rdi /* &pt_regs -> arg1 */
8479 call syscall_trace_enter
8480+
8481+ pax_erase_kstack
8482+
8483 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8484 RESTORE_REST
8485 cmpq $(IA32_NR_syscalls-1),%rax
8486diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8487index f6f5c53..b358b28 100644
8488--- a/arch/x86/ia32/sys_ia32.c
8489+++ b/arch/x86/ia32/sys_ia32.c
8490@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8491 */
8492 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8493 {
8494- typeof(ubuf->st_uid) uid = 0;
8495- typeof(ubuf->st_gid) gid = 0;
8496+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8497+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8498 SET_UID(uid, stat->uid);
8499 SET_GID(gid, stat->gid);
8500 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8501@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8502 }
8503 set_fs(KERNEL_DS);
8504 ret = sys_rt_sigprocmask(how,
8505- set ? (sigset_t __user *)&s : NULL,
8506- oset ? (sigset_t __user *)&s : NULL,
8507+ set ? (sigset_t __force_user *)&s : NULL,
8508+ oset ? (sigset_t __force_user *)&s : NULL,
8509 sigsetsize);
8510 set_fs(old_fs);
8511 if (ret)
8512@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8513 return alarm_setitimer(seconds);
8514 }
8515
8516-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8517+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8518 int options)
8519 {
8520 return compat_sys_wait4(pid, stat_addr, options, NULL);
8521@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8522 mm_segment_t old_fs = get_fs();
8523
8524 set_fs(KERNEL_DS);
8525- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8526+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8527 set_fs(old_fs);
8528 if (put_compat_timespec(&t, interval))
8529 return -EFAULT;
8530@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8531 mm_segment_t old_fs = get_fs();
8532
8533 set_fs(KERNEL_DS);
8534- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8535+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8536 set_fs(old_fs);
8537 if (!ret) {
8538 switch (_NSIG_WORDS) {
8539@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8540 if (copy_siginfo_from_user32(&info, uinfo))
8541 return -EFAULT;
8542 set_fs(KERNEL_DS);
8543- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8544+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8545 set_fs(old_fs);
8546 return ret;
8547 }
8548@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8549 return -EFAULT;
8550
8551 set_fs(KERNEL_DS);
8552- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8553+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8554 count);
8555 set_fs(old_fs);
8556
8557diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8558index 952bd01..7692c6f 100644
8559--- a/arch/x86/include/asm/alternative-asm.h
8560+++ b/arch/x86/include/asm/alternative-asm.h
8561@@ -15,6 +15,45 @@
8562 .endm
8563 #endif
8564
8565+#ifdef KERNEXEC_PLUGIN
8566+ .macro pax_force_retaddr_bts rip=0
8567+ btsq $63,\rip(%rsp)
8568+ .endm
8569+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8570+ .macro pax_force_retaddr rip=0, reload=0
8571+ btsq $63,\rip(%rsp)
8572+ .endm
8573+ .macro pax_force_fptr ptr
8574+ btsq $63,\ptr
8575+ .endm
8576+ .macro pax_set_fptr_mask
8577+ .endm
8578+#endif
8579+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8580+ .macro pax_force_retaddr rip=0, reload=0
8581+ .if \reload
8582+ pax_set_fptr_mask
8583+ .endif
8584+ orq %r10,\rip(%rsp)
8585+ .endm
8586+ .macro pax_force_fptr ptr
8587+ orq %r10,\ptr
8588+ .endm
8589+ .macro pax_set_fptr_mask
8590+ movabs $0x8000000000000000,%r10
8591+ .endm
8592+#endif
8593+#else
8594+ .macro pax_force_retaddr rip=0, reload=0
8595+ .endm
8596+ .macro pax_force_fptr ptr
8597+ .endm
8598+ .macro pax_force_retaddr_bts rip=0
8599+ .endm
8600+ .macro pax_set_fptr_mask
8601+ .endm
8602+#endif
8603+
8604 .macro altinstruction_entry orig alt feature orig_len alt_len
8605 .long \orig - .
8606 .long \alt - .
8607diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8608index 37ad100..7d47faa 100644
8609--- a/arch/x86/include/asm/alternative.h
8610+++ b/arch/x86/include/asm/alternative.h
8611@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8612 ".section .discard,\"aw\",@progbits\n" \
8613 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8614 ".previous\n" \
8615- ".section .altinstr_replacement, \"ax\"\n" \
8616+ ".section .altinstr_replacement, \"a\"\n" \
8617 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8618 ".previous"
8619
8620diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8621index 3ab9bdd..238033e 100644
8622--- a/arch/x86/include/asm/apic.h
8623+++ b/arch/x86/include/asm/apic.h
8624@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8625
8626 #ifdef CONFIG_X86_LOCAL_APIC
8627
8628-extern unsigned int apic_verbosity;
8629+extern int apic_verbosity;
8630 extern int local_apic_timer_c2_ok;
8631
8632 extern int disable_apic;
8633diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8634index 20370c6..a2eb9b0 100644
8635--- a/arch/x86/include/asm/apm.h
8636+++ b/arch/x86/include/asm/apm.h
8637@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8638 __asm__ __volatile__(APM_DO_ZERO_SEGS
8639 "pushl %%edi\n\t"
8640 "pushl %%ebp\n\t"
8641- "lcall *%%cs:apm_bios_entry\n\t"
8642+ "lcall *%%ss:apm_bios_entry\n\t"
8643 "setc %%al\n\t"
8644 "popl %%ebp\n\t"
8645 "popl %%edi\n\t"
8646@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8647 __asm__ __volatile__(APM_DO_ZERO_SEGS
8648 "pushl %%edi\n\t"
8649 "pushl %%ebp\n\t"
8650- "lcall *%%cs:apm_bios_entry\n\t"
8651+ "lcall *%%ss:apm_bios_entry\n\t"
8652 "setc %%bl\n\t"
8653 "popl %%ebp\n\t"
8654 "popl %%edi\n\t"
8655diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8656index 58cb6d4..ca9010d 100644
8657--- a/arch/x86/include/asm/atomic.h
8658+++ b/arch/x86/include/asm/atomic.h
8659@@ -22,7 +22,18 @@
8660 */
8661 static inline int atomic_read(const atomic_t *v)
8662 {
8663- return (*(volatile int *)&(v)->counter);
8664+ return (*(volatile const int *)&(v)->counter);
8665+}
8666+
8667+/**
8668+ * atomic_read_unchecked - read atomic variable
8669+ * @v: pointer of type atomic_unchecked_t
8670+ *
8671+ * Atomically reads the value of @v.
8672+ */
8673+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8674+{
8675+ return (*(volatile const int *)&(v)->counter);
8676 }
8677
8678 /**
8679@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8680 }
8681
8682 /**
8683+ * atomic_set_unchecked - set atomic variable
8684+ * @v: pointer of type atomic_unchecked_t
8685+ * @i: required value
8686+ *
8687+ * Atomically sets the value of @v to @i.
8688+ */
8689+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8690+{
8691+ v->counter = i;
8692+}
8693+
8694+/**
8695 * atomic_add - add integer to atomic variable
8696 * @i: integer value to add
8697 * @v: pointer of type atomic_t
8698@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8699 */
8700 static inline void atomic_add(int i, atomic_t *v)
8701 {
8702- asm volatile(LOCK_PREFIX "addl %1,%0"
8703+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8704+
8705+#ifdef CONFIG_PAX_REFCOUNT
8706+ "jno 0f\n"
8707+ LOCK_PREFIX "subl %1,%0\n"
8708+ "int $4\n0:\n"
8709+ _ASM_EXTABLE(0b, 0b)
8710+#endif
8711+
8712+ : "+m" (v->counter)
8713+ : "ir" (i));
8714+}
8715+
8716+/**
8717+ * atomic_add_unchecked - add integer to atomic variable
8718+ * @i: integer value to add
8719+ * @v: pointer of type atomic_unchecked_t
8720+ *
8721+ * Atomically adds @i to @v.
8722+ */
8723+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8724+{
8725+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8726 : "+m" (v->counter)
8727 : "ir" (i));
8728 }
8729@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8730 */
8731 static inline void atomic_sub(int i, atomic_t *v)
8732 {
8733- asm volatile(LOCK_PREFIX "subl %1,%0"
8734+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8735+
8736+#ifdef CONFIG_PAX_REFCOUNT
8737+ "jno 0f\n"
8738+ LOCK_PREFIX "addl %1,%0\n"
8739+ "int $4\n0:\n"
8740+ _ASM_EXTABLE(0b, 0b)
8741+#endif
8742+
8743+ : "+m" (v->counter)
8744+ : "ir" (i));
8745+}
8746+
8747+/**
8748+ * atomic_sub_unchecked - subtract integer from atomic variable
8749+ * @i: integer value to subtract
8750+ * @v: pointer of type atomic_unchecked_t
8751+ *
8752+ * Atomically subtracts @i from @v.
8753+ */
8754+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8755+{
8756+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8757 : "+m" (v->counter)
8758 : "ir" (i));
8759 }
8760@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8761 {
8762 unsigned char c;
8763
8764- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8765+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8766+
8767+#ifdef CONFIG_PAX_REFCOUNT
8768+ "jno 0f\n"
8769+ LOCK_PREFIX "addl %2,%0\n"
8770+ "int $4\n0:\n"
8771+ _ASM_EXTABLE(0b, 0b)
8772+#endif
8773+
8774+ "sete %1\n"
8775 : "+m" (v->counter), "=qm" (c)
8776 : "ir" (i) : "memory");
8777 return c;
8778@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8779 */
8780 static inline void atomic_inc(atomic_t *v)
8781 {
8782- asm volatile(LOCK_PREFIX "incl %0"
8783+ asm volatile(LOCK_PREFIX "incl %0\n"
8784+
8785+#ifdef CONFIG_PAX_REFCOUNT
8786+ "jno 0f\n"
8787+ LOCK_PREFIX "decl %0\n"
8788+ "int $4\n0:\n"
8789+ _ASM_EXTABLE(0b, 0b)
8790+#endif
8791+
8792+ : "+m" (v->counter));
8793+}
8794+
8795+/**
8796+ * atomic_inc_unchecked - increment atomic variable
8797+ * @v: pointer of type atomic_unchecked_t
8798+ *
8799+ * Atomically increments @v by 1.
8800+ */
8801+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8802+{
8803+ asm volatile(LOCK_PREFIX "incl %0\n"
8804 : "+m" (v->counter));
8805 }
8806
8807@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8808 */
8809 static inline void atomic_dec(atomic_t *v)
8810 {
8811- asm volatile(LOCK_PREFIX "decl %0"
8812+ asm volatile(LOCK_PREFIX "decl %0\n"
8813+
8814+#ifdef CONFIG_PAX_REFCOUNT
8815+ "jno 0f\n"
8816+ LOCK_PREFIX "incl %0\n"
8817+ "int $4\n0:\n"
8818+ _ASM_EXTABLE(0b, 0b)
8819+#endif
8820+
8821+ : "+m" (v->counter));
8822+}
8823+
8824+/**
8825+ * atomic_dec_unchecked - decrement atomic variable
8826+ * @v: pointer of type atomic_unchecked_t
8827+ *
8828+ * Atomically decrements @v by 1.
8829+ */
8830+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8831+{
8832+ asm volatile(LOCK_PREFIX "decl %0\n"
8833 : "+m" (v->counter));
8834 }
8835
8836@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8837 {
8838 unsigned char c;
8839
8840- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8841+ asm volatile(LOCK_PREFIX "decl %0\n"
8842+
8843+#ifdef CONFIG_PAX_REFCOUNT
8844+ "jno 0f\n"
8845+ LOCK_PREFIX "incl %0\n"
8846+ "int $4\n0:\n"
8847+ _ASM_EXTABLE(0b, 0b)
8848+#endif
8849+
8850+ "sete %1\n"
8851 : "+m" (v->counter), "=qm" (c)
8852 : : "memory");
8853 return c != 0;
8854@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8855 {
8856 unsigned char c;
8857
8858- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8859+ asm volatile(LOCK_PREFIX "incl %0\n"
8860+
8861+#ifdef CONFIG_PAX_REFCOUNT
8862+ "jno 0f\n"
8863+ LOCK_PREFIX "decl %0\n"
8864+ "int $4\n0:\n"
8865+ _ASM_EXTABLE(0b, 0b)
8866+#endif
8867+
8868+ "sete %1\n"
8869+ : "+m" (v->counter), "=qm" (c)
8870+ : : "memory");
8871+ return c != 0;
8872+}
8873+
8874+/**
8875+ * atomic_inc_and_test_unchecked - increment and test
8876+ * @v: pointer of type atomic_unchecked_t
8877+ *
8878+ * Atomically increments @v by 1
8879+ * and returns true if the result is zero, or false for all
8880+ * other cases.
8881+ */
8882+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8883+{
8884+ unsigned char c;
8885+
8886+ asm volatile(LOCK_PREFIX "incl %0\n"
8887+ "sete %1\n"
8888 : "+m" (v->counter), "=qm" (c)
8889 : : "memory");
8890 return c != 0;
8891@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8892 {
8893 unsigned char c;
8894
8895- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8896+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8897+
8898+#ifdef CONFIG_PAX_REFCOUNT
8899+ "jno 0f\n"
8900+ LOCK_PREFIX "subl %2,%0\n"
8901+ "int $4\n0:\n"
8902+ _ASM_EXTABLE(0b, 0b)
8903+#endif
8904+
8905+ "sets %1\n"
8906 : "+m" (v->counter), "=qm" (c)
8907 : "ir" (i) : "memory");
8908 return c;
8909@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8910 goto no_xadd;
8911 #endif
8912 /* Modern 486+ processor */
8913- return i + xadd(&v->counter, i);
8914+ return i + xadd_check_overflow(&v->counter, i);
8915
8916 #ifdef CONFIG_M386
8917 no_xadd: /* Legacy 386 processor */
8918@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8919 }
8920
8921 /**
8922+ * atomic_add_return_unchecked - add integer and return
8923+ * @i: integer value to add
8924+ * @v: pointer of type atomic_unchecked_t
8925+ *
8926+ * Atomically adds @i to @v and returns @i + @v
8927+ */
8928+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8929+{
8930+#ifdef CONFIG_M386
8931+ int __i;
8932+ unsigned long flags;
8933+ if (unlikely(boot_cpu_data.x86 <= 3))
8934+ goto no_xadd;
8935+#endif
8936+ /* Modern 486+ processor */
8937+ return i + xadd(&v->counter, i);
8938+
8939+#ifdef CONFIG_M386
8940+no_xadd: /* Legacy 386 processor */
8941+ raw_local_irq_save(flags);
8942+ __i = atomic_read_unchecked(v);
8943+ atomic_set_unchecked(v, i + __i);
8944+ raw_local_irq_restore(flags);
8945+ return i + __i;
8946+#endif
8947+}
8948+
8949+/**
8950 * atomic_sub_return - subtract integer and return
8951 * @v: pointer of type atomic_t
8952 * @i: integer value to subtract
8953@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8954 }
8955
8956 #define atomic_inc_return(v) (atomic_add_return(1, v))
8957+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8958+{
8959+ return atomic_add_return_unchecked(1, v);
8960+}
8961 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8962
8963 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8964@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8965 return cmpxchg(&v->counter, old, new);
8966 }
8967
8968+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8969+{
8970+ return cmpxchg(&v->counter, old, new);
8971+}
8972+
8973 static inline int atomic_xchg(atomic_t *v, int new)
8974 {
8975 return xchg(&v->counter, new);
8976 }
8977
8978+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8979+{
8980+ return xchg(&v->counter, new);
8981+}
8982+
8983 /**
8984 * __atomic_add_unless - add unless the number is already a given value
8985 * @v: pointer of type atomic_t
8986@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
8987 */
8988 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8989 {
8990- int c, old;
8991+ int c, old, new;
8992 c = atomic_read(v);
8993 for (;;) {
8994- if (unlikely(c == (u)))
8995+ if (unlikely(c == u))
8996 break;
8997- old = atomic_cmpxchg((v), c, c + (a));
8998+
8999+ asm volatile("addl %2,%0\n"
9000+
9001+#ifdef CONFIG_PAX_REFCOUNT
9002+ "jno 0f\n"
9003+ "subl %2,%0\n"
9004+ "int $4\n0:\n"
9005+ _ASM_EXTABLE(0b, 0b)
9006+#endif
9007+
9008+ : "=r" (new)
9009+ : "0" (c), "ir" (a));
9010+
9011+ old = atomic_cmpxchg(v, c, new);
9012 if (likely(old == c))
9013 break;
9014 c = old;
9015@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9016 return c;
9017 }
9018
9019+/**
9020+ * atomic_inc_not_zero_hint - increment if not null
9021+ * @v: pointer of type atomic_t
9022+ * @hint: probable value of the atomic before the increment
9023+ *
9024+ * This version of atomic_inc_not_zero() gives a hint of probable
9025+ * value of the atomic. This helps processor to not read the memory
9026+ * before doing the atomic read/modify/write cycle, lowering
9027+ * number of bus transactions on some arches.
9028+ *
9029+ * Returns: 0 if increment was not done, 1 otherwise.
9030+ */
9031+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9032+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9033+{
9034+ int val, c = hint, new;
9035+
9036+ /* sanity test, should be removed by compiler if hint is a constant */
9037+ if (!hint)
9038+ return __atomic_add_unless(v, 1, 0);
9039+
9040+ do {
9041+ asm volatile("incl %0\n"
9042+
9043+#ifdef CONFIG_PAX_REFCOUNT
9044+ "jno 0f\n"
9045+ "decl %0\n"
9046+ "int $4\n0:\n"
9047+ _ASM_EXTABLE(0b, 0b)
9048+#endif
9049+
9050+ : "=r" (new)
9051+ : "0" (c));
9052+
9053+ val = atomic_cmpxchg(v, c, new);
9054+ if (val == c)
9055+ return 1;
9056+ c = val;
9057+ } while (c);
9058+
9059+ return 0;
9060+}
9061
9062 /*
9063 * atomic_dec_if_positive - decrement by 1 if old value positive
9064diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9065index fa13f0e..27c2e08 100644
9066--- a/arch/x86/include/asm/atomic64_32.h
9067+++ b/arch/x86/include/asm/atomic64_32.h
9068@@ -12,6 +12,14 @@ typedef struct {
9069 u64 __aligned(8) counter;
9070 } atomic64_t;
9071
9072+#ifdef CONFIG_PAX_REFCOUNT
9073+typedef struct {
9074+ u64 __aligned(8) counter;
9075+} atomic64_unchecked_t;
9076+#else
9077+typedef atomic64_t atomic64_unchecked_t;
9078+#endif
9079+
9080 #define ATOMIC64_INIT(val) { (val) }
9081
9082 #ifdef CONFIG_X86_CMPXCHG64
9083@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9084 }
9085
9086 /**
9087+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9088+ * @p: pointer to type atomic64_unchecked_t
9089+ * @o: expected value
9090+ * @n: new value
9091+ *
9092+ * Atomically sets @v to @n if it was equal to @o and returns
9093+ * the old value.
9094+ */
9095+
9096+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9097+{
9098+ return cmpxchg64(&v->counter, o, n);
9099+}
9100+
9101+/**
9102 * atomic64_xchg - xchg atomic64 variable
9103 * @v: pointer to type atomic64_t
9104 * @n: value to assign
9105@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9106 }
9107
9108 /**
9109+ * atomic64_set_unchecked - set atomic64 variable
9110+ * @v: pointer to type atomic64_unchecked_t
9111+ * @n: value to assign
9112+ *
9113+ * Atomically sets the value of @v to @n.
9114+ */
9115+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9116+{
9117+ unsigned high = (unsigned)(i >> 32);
9118+ unsigned low = (unsigned)i;
9119+ asm volatile(ATOMIC64_ALTERNATIVE(set)
9120+ : "+b" (low), "+c" (high)
9121+ : "S" (v)
9122+ : "eax", "edx", "memory"
9123+ );
9124+}
9125+
9126+/**
9127 * atomic64_read - read atomic64 variable
9128 * @v: pointer to type atomic64_t
9129 *
9130@@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134+ * atomic64_read_unchecked - read atomic64 variable
9135+ * @v: pointer to type atomic64_unchecked_t
9136+ *
9137+ * Atomically reads the value of @v and returns it.
9138+ */
9139+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9140+{
9141+ long long r;
9142+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9143+ : "=A" (r), "+c" (v)
9144+ : : "memory"
9145+ );
9146+ return r;
9147+ }
9148+
9149+/**
9150 * atomic64_add_return - add and return
9151 * @i: integer value to add
9152 * @v: pointer to type atomic64_t
9153@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9154 return i;
9155 }
9156
9157+/**
9158+ * atomic64_add_return_unchecked - add and return
9159+ * @i: integer value to add
9160+ * @v: pointer to type atomic64_unchecked_t
9161+ *
9162+ * Atomically adds @i to @v and returns @i + *@v
9163+ */
9164+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9165+{
9166+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9167+ : "+A" (i), "+c" (v)
9168+ : : "memory"
9169+ );
9170+ return i;
9171+}
9172+
9173 /*
9174 * Other variants with different arithmetic operators:
9175 */
9176@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9177 return a;
9178 }
9179
9180+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9181+{
9182+ long long a;
9183+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9184+ : "=A" (a)
9185+ : "S" (v)
9186+ : "memory", "ecx"
9187+ );
9188+ return a;
9189+}
9190+
9191 static inline long long atomic64_dec_return(atomic64_t *v)
9192 {
9193 long long a;
9194@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9195 }
9196
9197 /**
9198+ * atomic64_add_unchecked - add integer to atomic64 variable
9199+ * @i: integer value to add
9200+ * @v: pointer to type atomic64_unchecked_t
9201+ *
9202+ * Atomically adds @i to @v.
9203+ */
9204+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9205+{
9206+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9207+ : "+A" (i), "+c" (v)
9208+ : : "memory"
9209+ );
9210+ return i;
9211+}
9212+
9213+/**
9214 * atomic64_sub - subtract the atomic64 variable
9215 * @i: integer value to subtract
9216 * @v: pointer to type atomic64_t
9217diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9218index 0e1cbfc..5623683 100644
9219--- a/arch/x86/include/asm/atomic64_64.h
9220+++ b/arch/x86/include/asm/atomic64_64.h
9221@@ -18,7 +18,19 @@
9222 */
9223 static inline long atomic64_read(const atomic64_t *v)
9224 {
9225- return (*(volatile long *)&(v)->counter);
9226+ return (*(volatile const long *)&(v)->counter);
9227+}
9228+
9229+/**
9230+ * atomic64_read_unchecked - read atomic64 variable
9231+ * @v: pointer of type atomic64_unchecked_t
9232+ *
9233+ * Atomically reads the value of @v.
9234+ * Doesn't imply a read memory barrier.
9235+ */
9236+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9237+{
9238+ return (*(volatile const long *)&(v)->counter);
9239 }
9240
9241 /**
9242@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9243 }
9244
9245 /**
9246+ * atomic64_set_unchecked - set atomic64 variable
9247+ * @v: pointer to type atomic64_unchecked_t
9248+ * @i: required value
9249+ *
9250+ * Atomically sets the value of @v to @i.
9251+ */
9252+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9253+{
9254+ v->counter = i;
9255+}
9256+
9257+/**
9258 * atomic64_add - add integer to atomic64 variable
9259 * @i: integer value to add
9260 * @v: pointer to type atomic64_t
9261@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9262 */
9263 static inline void atomic64_add(long i, atomic64_t *v)
9264 {
9265+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "subq %1,%0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "er" (i), "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_add_unchecked - add integer to atomic64 variable
9280+ * @i: integer value to add
9281+ * @v: pointer to type atomic64_unchecked_t
9282+ *
9283+ * Atomically adds @i to @v.
9284+ */
9285+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9286+{
9287 asm volatile(LOCK_PREFIX "addq %1,%0"
9288 : "=m" (v->counter)
9289 : "er" (i), "m" (v->counter));
9290@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9291 */
9292 static inline void atomic64_sub(long i, atomic64_t *v)
9293 {
9294- asm volatile(LOCK_PREFIX "subq %1,%0"
9295+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9296+
9297+#ifdef CONFIG_PAX_REFCOUNT
9298+ "jno 0f\n"
9299+ LOCK_PREFIX "addq %1,%0\n"
9300+ "int $4\n0:\n"
9301+ _ASM_EXTABLE(0b, 0b)
9302+#endif
9303+
9304+ : "=m" (v->counter)
9305+ : "er" (i), "m" (v->counter));
9306+}
9307+
9308+/**
9309+ * atomic64_sub_unchecked - subtract the atomic64 variable
9310+ * @i: integer value to subtract
9311+ * @v: pointer to type atomic64_unchecked_t
9312+ *
9313+ * Atomically subtracts @i from @v.
9314+ */
9315+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9316+{
9317+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9318 : "=m" (v->counter)
9319 : "er" (i), "m" (v->counter));
9320 }
9321@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9322 {
9323 unsigned char c;
9324
9325- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9326+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9327+
9328+#ifdef CONFIG_PAX_REFCOUNT
9329+ "jno 0f\n"
9330+ LOCK_PREFIX "addq %2,%0\n"
9331+ "int $4\n0:\n"
9332+ _ASM_EXTABLE(0b, 0b)
9333+#endif
9334+
9335+ "sete %1\n"
9336 : "=m" (v->counter), "=qm" (c)
9337 : "er" (i), "m" (v->counter) : "memory");
9338 return c;
9339@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9340 */
9341 static inline void atomic64_inc(atomic64_t *v)
9342 {
9343+ asm volatile(LOCK_PREFIX "incq %0\n"
9344+
9345+#ifdef CONFIG_PAX_REFCOUNT
9346+ "jno 0f\n"
9347+ LOCK_PREFIX "decq %0\n"
9348+ "int $4\n0:\n"
9349+ _ASM_EXTABLE(0b, 0b)
9350+#endif
9351+
9352+ : "=m" (v->counter)
9353+ : "m" (v->counter));
9354+}
9355+
9356+/**
9357+ * atomic64_inc_unchecked - increment atomic64 variable
9358+ * @v: pointer to type atomic64_unchecked_t
9359+ *
9360+ * Atomically increments @v by 1.
9361+ */
9362+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9363+{
9364 asm volatile(LOCK_PREFIX "incq %0"
9365 : "=m" (v->counter)
9366 : "m" (v->counter));
9367@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9368 */
9369 static inline void atomic64_dec(atomic64_t *v)
9370 {
9371- asm volatile(LOCK_PREFIX "decq %0"
9372+ asm volatile(LOCK_PREFIX "decq %0\n"
9373+
9374+#ifdef CONFIG_PAX_REFCOUNT
9375+ "jno 0f\n"
9376+ LOCK_PREFIX "incq %0\n"
9377+ "int $4\n0:\n"
9378+ _ASM_EXTABLE(0b, 0b)
9379+#endif
9380+
9381+ : "=m" (v->counter)
9382+ : "m" (v->counter));
9383+}
9384+
9385+/**
9386+ * atomic64_dec_unchecked - decrement atomic64 variable
9387+ * @v: pointer to type atomic64_t
9388+ *
9389+ * Atomically decrements @v by 1.
9390+ */
9391+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9392+{
9393+ asm volatile(LOCK_PREFIX "decq %0\n"
9394 : "=m" (v->counter)
9395 : "m" (v->counter));
9396 }
9397@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9398 {
9399 unsigned char c;
9400
9401- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9402+ asm volatile(LOCK_PREFIX "decq %0\n"
9403+
9404+#ifdef CONFIG_PAX_REFCOUNT
9405+ "jno 0f\n"
9406+ LOCK_PREFIX "incq %0\n"
9407+ "int $4\n0:\n"
9408+ _ASM_EXTABLE(0b, 0b)
9409+#endif
9410+
9411+ "sete %1\n"
9412 : "=m" (v->counter), "=qm" (c)
9413 : "m" (v->counter) : "memory");
9414 return c != 0;
9415@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9416 {
9417 unsigned char c;
9418
9419- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9420+ asm volatile(LOCK_PREFIX "incq %0\n"
9421+
9422+#ifdef CONFIG_PAX_REFCOUNT
9423+ "jno 0f\n"
9424+ LOCK_PREFIX "decq %0\n"
9425+ "int $4\n0:\n"
9426+ _ASM_EXTABLE(0b, 0b)
9427+#endif
9428+
9429+ "sete %1\n"
9430 : "=m" (v->counter), "=qm" (c)
9431 : "m" (v->counter) : "memory");
9432 return c != 0;
9433@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9434 {
9435 unsigned char c;
9436
9437- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9438+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9439+
9440+#ifdef CONFIG_PAX_REFCOUNT
9441+ "jno 0f\n"
9442+ LOCK_PREFIX "subq %2,%0\n"
9443+ "int $4\n0:\n"
9444+ _ASM_EXTABLE(0b, 0b)
9445+#endif
9446+
9447+ "sets %1\n"
9448 : "=m" (v->counter), "=qm" (c)
9449 : "er" (i), "m" (v->counter) : "memory");
9450 return c;
9451@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9452 */
9453 static inline long atomic64_add_return(long i, atomic64_t *v)
9454 {
9455+ return i + xadd_check_overflow(&v->counter, i);
9456+}
9457+
9458+/**
9459+ * atomic64_add_return_unchecked - add and return
9460+ * @i: integer value to add
9461+ * @v: pointer to type atomic64_unchecked_t
9462+ *
9463+ * Atomically adds @i to @v and returns @i + @v
9464+ */
9465+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9466+{
9467 return i + xadd(&v->counter, i);
9468 }
9469
9470@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9471 }
9472
9473 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9474+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9475+{
9476+ return atomic64_add_return_unchecked(1, v);
9477+}
9478 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9479
9480 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9481@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9482 return cmpxchg(&v->counter, old, new);
9483 }
9484
9485+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9486+{
9487+ return cmpxchg(&v->counter, old, new);
9488+}
9489+
9490 static inline long atomic64_xchg(atomic64_t *v, long new)
9491 {
9492 return xchg(&v->counter, new);
9493@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9494 */
9495 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9496 {
9497- long c, old;
9498+ long c, old, new;
9499 c = atomic64_read(v);
9500 for (;;) {
9501- if (unlikely(c == (u)))
9502+ if (unlikely(c == u))
9503 break;
9504- old = atomic64_cmpxchg((v), c, c + (a));
9505+
9506+ asm volatile("add %2,%0\n"
9507+
9508+#ifdef CONFIG_PAX_REFCOUNT
9509+ "jno 0f\n"
9510+ "sub %2,%0\n"
9511+ "int $4\n0:\n"
9512+ _ASM_EXTABLE(0b, 0b)
9513+#endif
9514+
9515+ : "=r" (new)
9516+ : "0" (c), "ir" (a));
9517+
9518+ old = atomic64_cmpxchg(v, c, new);
9519 if (likely(old == c))
9520 break;
9521 c = old;
9522 }
9523- return c != (u);
9524+ return c != u;
9525 }
9526
9527 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9528diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9529index b97596e..9bd48b06 100644
9530--- a/arch/x86/include/asm/bitops.h
9531+++ b/arch/x86/include/asm/bitops.h
9532@@ -38,7 +38,7 @@
9533 * a mask operation on a byte.
9534 */
9535 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9536-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9537+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9538 #define CONST_MASK(nr) (1 << ((nr) & 7))
9539
9540 /**
9541diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9542index 5e1a2ee..c9f9533 100644
9543--- a/arch/x86/include/asm/boot.h
9544+++ b/arch/x86/include/asm/boot.h
9545@@ -11,10 +11,15 @@
9546 #include <asm/pgtable_types.h>
9547
9548 /* Physical address where kernel should be loaded. */
9549-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9550+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9552 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9553
9554+#ifndef __ASSEMBLY__
9555+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9556+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9557+#endif
9558+
9559 /* Minimum kernel alignment, as a power of two */
9560 #ifdef CONFIG_X86_64
9561 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9562diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9563index 48f99f1..d78ebf9 100644
9564--- a/arch/x86/include/asm/cache.h
9565+++ b/arch/x86/include/asm/cache.h
9566@@ -5,12 +5,13 @@
9567
9568 /* L1 cache line size */
9569 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9570-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9571+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9572
9573 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9574+#define __read_only __attribute__((__section__(".data..read_only")))
9575
9576 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9577-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9578+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9579
9580 #ifdef CONFIG_X86_VSMP
9581 #ifdef CONFIG_SMP
9582diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9583index 4e12668..501d239 100644
9584--- a/arch/x86/include/asm/cacheflush.h
9585+++ b/arch/x86/include/asm/cacheflush.h
9586@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9587 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9588
9589 if (pg_flags == _PGMT_DEFAULT)
9590- return -1;
9591+ return ~0UL;
9592 else if (pg_flags == _PGMT_WC)
9593 return _PAGE_CACHE_WC;
9594 else if (pg_flags == _PGMT_UC_MINUS)
9595diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9596index 46fc474..b02b0f9 100644
9597--- a/arch/x86/include/asm/checksum_32.h
9598+++ b/arch/x86/include/asm/checksum_32.h
9599@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9600 int len, __wsum sum,
9601 int *src_err_ptr, int *dst_err_ptr);
9602
9603+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9604+ int len, __wsum sum,
9605+ int *src_err_ptr, int *dst_err_ptr);
9606+
9607+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9608+ int len, __wsum sum,
9609+ int *src_err_ptr, int *dst_err_ptr);
9610+
9611 /*
9612 * Note: when you get a NULL pointer exception here this means someone
9613 * passed in an incorrect kernel address to one of these functions.
9614@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9615 int *err_ptr)
9616 {
9617 might_sleep();
9618- return csum_partial_copy_generic((__force void *)src, dst,
9619+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9620 len, sum, err_ptr, NULL);
9621 }
9622
9623@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9624 {
9625 might_sleep();
9626 if (access_ok(VERIFY_WRITE, dst, len))
9627- return csum_partial_copy_generic(src, (__force void *)dst,
9628+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9629 len, sum, NULL, err_ptr);
9630
9631 if (len)
9632diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9633index b3b7332..3935f40 100644
9634--- a/arch/x86/include/asm/cmpxchg.h
9635+++ b/arch/x86/include/asm/cmpxchg.h
9636@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9637 __compiletime_error("Bad argument size for cmpxchg");
9638 extern void __xadd_wrong_size(void)
9639 __compiletime_error("Bad argument size for xadd");
9640+extern void __xadd_check_overflow_wrong_size(void)
9641+ __compiletime_error("Bad argument size for xadd_check_overflow");
9642 extern void __add_wrong_size(void)
9643 __compiletime_error("Bad argument size for add");
9644+extern void __add_check_overflow_wrong_size(void)
9645+ __compiletime_error("Bad argument size for add_check_overflow");
9646
9647 /*
9648 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9649@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9650 __ret; \
9651 })
9652
9653+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9654+ ({ \
9655+ __typeof__ (*(ptr)) __ret = (arg); \
9656+ switch (sizeof(*(ptr))) { \
9657+ case __X86_CASE_L: \
9658+ asm volatile (lock #op "l %0, %1\n" \
9659+ "jno 0f\n" \
9660+ "mov %0,%1\n" \
9661+ "int $4\n0:\n" \
9662+ _ASM_EXTABLE(0b, 0b) \
9663+ : "+r" (__ret), "+m" (*(ptr)) \
9664+ : : "memory", "cc"); \
9665+ break; \
9666+ case __X86_CASE_Q: \
9667+ asm volatile (lock #op "q %q0, %1\n" \
9668+ "jno 0f\n" \
9669+ "mov %0,%1\n" \
9670+ "int $4\n0:\n" \
9671+ _ASM_EXTABLE(0b, 0b) \
9672+ : "+r" (__ret), "+m" (*(ptr)) \
9673+ : : "memory", "cc"); \
9674+ break; \
9675+ default: \
9676+ __ ## op ## _check_overflow_wrong_size(); \
9677+ } \
9678+ __ret; \
9679+ })
9680+
9681 /*
9682 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9683 * Since this is generally used to protect other memory information, we
9684@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9685 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9686 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9687
9688+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9689+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9690+
9691 #define __add(ptr, inc, lock) \
9692 ({ \
9693 __typeof__ (*(ptr)) __ret = (inc); \
9694diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9695index 8d67d42..183d0eb 100644
9696--- a/arch/x86/include/asm/cpufeature.h
9697+++ b/arch/x86/include/asm/cpufeature.h
9698@@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9699 ".section .discard,\"aw\",@progbits\n"
9700 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9701 ".previous\n"
9702- ".section .altinstr_replacement,\"ax\"\n"
9703+ ".section .altinstr_replacement,\"a\"\n"
9704 "3: movb $1,%0\n"
9705 "4:\n"
9706 ".previous\n"
9707diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9708index e95822d..a90010e 100644
9709--- a/arch/x86/include/asm/desc.h
9710+++ b/arch/x86/include/asm/desc.h
9711@@ -4,6 +4,7 @@
9712 #include <asm/desc_defs.h>
9713 #include <asm/ldt.h>
9714 #include <asm/mmu.h>
9715+#include <asm/pgtable.h>
9716
9717 #include <linux/smp.h>
9718
9719@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9720
9721 desc->type = (info->read_exec_only ^ 1) << 1;
9722 desc->type |= info->contents << 2;
9723+ desc->type |= info->seg_not_present ^ 1;
9724
9725 desc->s = 1;
9726 desc->dpl = 0x3;
9727@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9728 }
9729
9730 extern struct desc_ptr idt_descr;
9731-extern gate_desc idt_table[];
9732 extern struct desc_ptr nmi_idt_descr;
9733-extern gate_desc nmi_idt_table[];
9734-
9735-struct gdt_page {
9736- struct desc_struct gdt[GDT_ENTRIES];
9737-} __attribute__((aligned(PAGE_SIZE)));
9738-
9739-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9740+extern gate_desc idt_table[256];
9741+extern gate_desc nmi_idt_table[256];
9742
9743+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9744 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9745 {
9746- return per_cpu(gdt_page, cpu).gdt;
9747+ return cpu_gdt_table[cpu];
9748 }
9749
9750 #ifdef CONFIG_X86_64
9751@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9752 unsigned long base, unsigned dpl, unsigned flags,
9753 unsigned short seg)
9754 {
9755- gate->a = (seg << 16) | (base & 0xffff);
9756- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9757+ gate->gate.offset_low = base;
9758+ gate->gate.seg = seg;
9759+ gate->gate.reserved = 0;
9760+ gate->gate.type = type;
9761+ gate->gate.s = 0;
9762+ gate->gate.dpl = dpl;
9763+ gate->gate.p = 1;
9764+ gate->gate.offset_high = base >> 16;
9765 }
9766
9767 #endif
9768@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9769
9770 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9771 {
9772+ pax_open_kernel();
9773 memcpy(&idt[entry], gate, sizeof(*gate));
9774+ pax_close_kernel();
9775 }
9776
9777 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9778 {
9779+ pax_open_kernel();
9780 memcpy(&ldt[entry], desc, 8);
9781+ pax_close_kernel();
9782 }
9783
9784 static inline void
9785@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9786 default: size = sizeof(*gdt); break;
9787 }
9788
9789+ pax_open_kernel();
9790 memcpy(&gdt[entry], desc, size);
9791+ pax_close_kernel();
9792 }
9793
9794 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9795@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9796
9797 static inline void native_load_tr_desc(void)
9798 {
9799+ pax_open_kernel();
9800 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9801+ pax_close_kernel();
9802 }
9803
9804 static inline void native_load_gdt(const struct desc_ptr *dtr)
9805@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9806 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9807 unsigned int i;
9808
9809+ pax_open_kernel();
9810 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9811 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9812+ pax_close_kernel();
9813 }
9814
9815 #define _LDT_empty(info) \
9816@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9817 }
9818
9819 #ifdef CONFIG_X86_64
9820-static inline void set_nmi_gate(int gate, void *addr)
9821+static inline void set_nmi_gate(int gate, const void *addr)
9822 {
9823 gate_desc s;
9824
9825@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
9826 }
9827 #endif
9828
9829-static inline void _set_gate(int gate, unsigned type, void *addr,
9830+static inline void _set_gate(int gate, unsigned type, const void *addr,
9831 unsigned dpl, unsigned ist, unsigned seg)
9832 {
9833 gate_desc s;
9834@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9835 * Pentium F0 0F bugfix can have resulted in the mapped
9836 * IDT being write-protected.
9837 */
9838-static inline void set_intr_gate(unsigned int n, void *addr)
9839+static inline void set_intr_gate(unsigned int n, const void *addr)
9840 {
9841 BUG_ON((unsigned)n > 0xFF);
9842 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9843@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9844 /*
9845 * This routine sets up an interrupt gate at directory privilege level 3.
9846 */
9847-static inline void set_system_intr_gate(unsigned int n, void *addr)
9848+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9849 {
9850 BUG_ON((unsigned)n > 0xFF);
9851 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9852 }
9853
9854-static inline void set_system_trap_gate(unsigned int n, void *addr)
9855+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9856 {
9857 BUG_ON((unsigned)n > 0xFF);
9858 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9859 }
9860
9861-static inline void set_trap_gate(unsigned int n, void *addr)
9862+static inline void set_trap_gate(unsigned int n, const void *addr)
9863 {
9864 BUG_ON((unsigned)n > 0xFF);
9865 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9866@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9867 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9868 {
9869 BUG_ON((unsigned)n > 0xFF);
9870- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9871+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9872 }
9873
9874-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9875+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9876 {
9877 BUG_ON((unsigned)n > 0xFF);
9878 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9879 }
9880
9881-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9882+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9883 {
9884 BUG_ON((unsigned)n > 0xFF);
9885 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9886 }
9887
9888+#ifdef CONFIG_X86_32
9889+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9890+{
9891+ struct desc_struct d;
9892+
9893+ if (likely(limit))
9894+ limit = (limit - 1UL) >> PAGE_SHIFT;
9895+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9896+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9897+}
9898+#endif
9899+
9900 #endif /* _ASM_X86_DESC_H */
9901diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9902index 278441f..b95a174 100644
9903--- a/arch/x86/include/asm/desc_defs.h
9904+++ b/arch/x86/include/asm/desc_defs.h
9905@@ -31,6 +31,12 @@ struct desc_struct {
9906 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9907 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9908 };
9909+ struct {
9910+ u16 offset_low;
9911+ u16 seg;
9912+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9913+ unsigned offset_high: 16;
9914+ } gate;
9915 };
9916 } __attribute__((packed));
9917
9918diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9919index 3778256..c5d4fce 100644
9920--- a/arch/x86/include/asm/e820.h
9921+++ b/arch/x86/include/asm/e820.h
9922@@ -69,7 +69,7 @@ struct e820map {
9923 #define ISA_START_ADDRESS 0xa0000
9924 #define ISA_END_ADDRESS 0x100000
9925
9926-#define BIOS_BEGIN 0x000a0000
9927+#define BIOS_BEGIN 0x000c0000
9928 #define BIOS_END 0x00100000
9929
9930 #define BIOS_ROM_BASE 0xffe00000
9931diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9932index 5f962df..7289f09 100644
9933--- a/arch/x86/include/asm/elf.h
9934+++ b/arch/x86/include/asm/elf.h
9935@@ -238,7 +238,25 @@ extern int force_personality32;
9936 the loader. We need to make sure that it is out of the way of the program
9937 that it will "exec", and that there is sufficient room for the brk. */
9938
9939+#ifdef CONFIG_PAX_SEGMEXEC
9940+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9941+#else
9942 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9943+#endif
9944+
9945+#ifdef CONFIG_PAX_ASLR
9946+#ifdef CONFIG_X86_32
9947+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9948+
9949+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9950+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9951+#else
9952+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9953+
9954+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9955+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9956+#endif
9957+#endif
9958
9959 /* This yields a mask that user programs can use to figure out what
9960 instruction set this CPU supports. This could be done in user space,
9961@@ -291,9 +309,7 @@ do { \
9962
9963 #define ARCH_DLINFO \
9964 do { \
9965- if (vdso_enabled) \
9966- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9967- (unsigned long)current->mm->context.vdso); \
9968+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9969 } while (0)
9970
9971 #define AT_SYSINFO 32
9972@@ -304,7 +320,7 @@ do { \
9973
9974 #endif /* !CONFIG_X86_32 */
9975
9976-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9977+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9978
9979 #define VDSO_ENTRY \
9980 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9981@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9982 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9983 #define compat_arch_setup_additional_pages syscall32_setup_pages
9984
9985-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9986-#define arch_randomize_brk arch_randomize_brk
9987-
9988 /*
9989 * True on X86_32 or when emulating IA32 on X86_64
9990 */
9991diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9992index cc70c1c..d96d011 100644
9993--- a/arch/x86/include/asm/emergency-restart.h
9994+++ b/arch/x86/include/asm/emergency-restart.h
9995@@ -15,6 +15,6 @@ enum reboot_type {
9996
9997 extern enum reboot_type reboot_type;
9998
9999-extern void machine_emergency_restart(void);
10000+extern void machine_emergency_restart(void) __noreturn;
10001
10002 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10003diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10004index d09bb03..4ea4194 100644
10005--- a/arch/x86/include/asm/futex.h
10006+++ b/arch/x86/include/asm/futex.h
10007@@ -12,16 +12,18 @@
10008 #include <asm/system.h>
10009
10010 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10011+ typecheck(u32 __user *, uaddr); \
10012 asm volatile("1:\t" insn "\n" \
10013 "2:\t.section .fixup,\"ax\"\n" \
10014 "3:\tmov\t%3, %1\n" \
10015 "\tjmp\t2b\n" \
10016 "\t.previous\n" \
10017 _ASM_EXTABLE(1b, 3b) \
10018- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10019+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10020 : "i" (-EFAULT), "0" (oparg), "1" (0))
10021
10022 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10023+ typecheck(u32 __user *, uaddr); \
10024 asm volatile("1:\tmovl %2, %0\n" \
10025 "\tmovl\t%0, %3\n" \
10026 "\t" insn "\n" \
10027@@ -34,7 +36,7 @@
10028 _ASM_EXTABLE(1b, 4b) \
10029 _ASM_EXTABLE(2b, 4b) \
10030 : "=&a" (oldval), "=&r" (ret), \
10031- "+m" (*uaddr), "=&r" (tem) \
10032+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10033 : "r" (oparg), "i" (-EFAULT), "1" (0))
10034
10035 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10036@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10037
10038 switch (op) {
10039 case FUTEX_OP_SET:
10040- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10041+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10042 break;
10043 case FUTEX_OP_ADD:
10044- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10045+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10046 uaddr, oparg);
10047 break;
10048 case FUTEX_OP_OR:
10049@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10050 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10051 return -EFAULT;
10052
10053- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10054+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10055 "2:\t.section .fixup, \"ax\"\n"
10056 "3:\tmov %3, %0\n"
10057 "\tjmp 2b\n"
10058 "\t.previous\n"
10059 _ASM_EXTABLE(1b, 3b)
10060- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10061+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10062 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10063 : "memory"
10064 );
10065diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10066index eb92a6e..b98b2f4 100644
10067--- a/arch/x86/include/asm/hw_irq.h
10068+++ b/arch/x86/include/asm/hw_irq.h
10069@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10070 extern void enable_IO_APIC(void);
10071
10072 /* Statistics */
10073-extern atomic_t irq_err_count;
10074-extern atomic_t irq_mis_count;
10075+extern atomic_unchecked_t irq_err_count;
10076+extern atomic_unchecked_t irq_mis_count;
10077
10078 /* EISA */
10079 extern void eisa_set_level_irq(unsigned int irq);
10080diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10081index 2479049..3fb9795 100644
10082--- a/arch/x86/include/asm/i387.h
10083+++ b/arch/x86/include/asm/i387.h
10084@@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10085 {
10086 int err;
10087
10088+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10089+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10090+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10091+#endif
10092+
10093 /* See comment in fxsave() below. */
10094 #ifdef CONFIG_AS_FXSAVEQ
10095 asm volatile("1: fxrstorq %[fx]\n\t"
10096@@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10097 {
10098 int err;
10099
10100+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10101+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10102+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10103+#endif
10104+
10105 /*
10106 * Clear the bytes not touched by the fxsave and reserved
10107 * for the SW usage.
10108@@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10109 "emms\n\t" /* clear stack tags */
10110 "fildl %P[addr]", /* set F?P to defined value */
10111 X86_FEATURE_FXSAVE_LEAK,
10112- [addr] "m" (tsk->thread.fpu.has_fpu));
10113+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10114
10115 return fpu_restore_checking(&tsk->thread.fpu);
10116 }
10117@@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10118 static inline bool interrupted_user_mode(void)
10119 {
10120 struct pt_regs *regs = get_irq_regs();
10121- return regs && user_mode_vm(regs);
10122+ return regs && user_mode(regs);
10123 }
10124
10125 /*
10126diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10127index d8e8eef..99f81ae 100644
10128--- a/arch/x86/include/asm/io.h
10129+++ b/arch/x86/include/asm/io.h
10130@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10131
10132 #include <linux/vmalloc.h>
10133
10134+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10135+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10136+{
10137+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10138+}
10139+
10140+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10141+{
10142+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10143+}
10144+
10145 /*
10146 * Convert a virtual cached pointer to an uncached pointer
10147 */
10148diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10149index bba3cf8..06bc8da 100644
10150--- a/arch/x86/include/asm/irqflags.h
10151+++ b/arch/x86/include/asm/irqflags.h
10152@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10153 sti; \
10154 sysexit
10155
10156+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10157+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10158+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10159+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10160+
10161 #else
10162 #define INTERRUPT_RETURN iret
10163 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10164diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10165index 5478825..839e88c 100644
10166--- a/arch/x86/include/asm/kprobes.h
10167+++ b/arch/x86/include/asm/kprobes.h
10168@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10169 #define RELATIVEJUMP_SIZE 5
10170 #define RELATIVECALL_OPCODE 0xe8
10171 #define RELATIVE_ADDR_SIZE 4
10172-#define MAX_STACK_SIZE 64
10173-#define MIN_STACK_SIZE(ADDR) \
10174- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10175- THREAD_SIZE - (unsigned long)(ADDR))) \
10176- ? (MAX_STACK_SIZE) \
10177- : (((unsigned long)current_thread_info()) + \
10178- THREAD_SIZE - (unsigned long)(ADDR)))
10179+#define MAX_STACK_SIZE 64UL
10180+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10181
10182 #define flush_insn_slot(p) do { } while (0)
10183
10184diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10185index 52d6640..a013b87 100644
10186--- a/arch/x86/include/asm/kvm_host.h
10187+++ b/arch/x86/include/asm/kvm_host.h
10188@@ -663,7 +663,7 @@ struct kvm_x86_ops {
10189 int (*check_intercept)(struct kvm_vcpu *vcpu,
10190 struct x86_instruction_info *info,
10191 enum x86_intercept_stage stage);
10192-};
10193+} __do_const;
10194
10195 struct kvm_arch_async_pf {
10196 u32 token;
10197diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10198index 9cdae5d..300d20f 100644
10199--- a/arch/x86/include/asm/local.h
10200+++ b/arch/x86/include/asm/local.h
10201@@ -18,26 +18,58 @@ typedef struct {
10202
10203 static inline void local_inc(local_t *l)
10204 {
10205- asm volatile(_ASM_INC "%0"
10206+ asm volatile(_ASM_INC "%0\n"
10207+
10208+#ifdef CONFIG_PAX_REFCOUNT
10209+ "jno 0f\n"
10210+ _ASM_DEC "%0\n"
10211+ "int $4\n0:\n"
10212+ _ASM_EXTABLE(0b, 0b)
10213+#endif
10214+
10215 : "+m" (l->a.counter));
10216 }
10217
10218 static inline void local_dec(local_t *l)
10219 {
10220- asm volatile(_ASM_DEC "%0"
10221+ asm volatile(_ASM_DEC "%0\n"
10222+
10223+#ifdef CONFIG_PAX_REFCOUNT
10224+ "jno 0f\n"
10225+ _ASM_INC "%0\n"
10226+ "int $4\n0:\n"
10227+ _ASM_EXTABLE(0b, 0b)
10228+#endif
10229+
10230 : "+m" (l->a.counter));
10231 }
10232
10233 static inline void local_add(long i, local_t *l)
10234 {
10235- asm volatile(_ASM_ADD "%1,%0"
10236+ asm volatile(_ASM_ADD "%1,%0\n"
10237+
10238+#ifdef CONFIG_PAX_REFCOUNT
10239+ "jno 0f\n"
10240+ _ASM_SUB "%1,%0\n"
10241+ "int $4\n0:\n"
10242+ _ASM_EXTABLE(0b, 0b)
10243+#endif
10244+
10245 : "+m" (l->a.counter)
10246 : "ir" (i));
10247 }
10248
10249 static inline void local_sub(long i, local_t *l)
10250 {
10251- asm volatile(_ASM_SUB "%1,%0"
10252+ asm volatile(_ASM_SUB "%1,%0\n"
10253+
10254+#ifdef CONFIG_PAX_REFCOUNT
10255+ "jno 0f\n"
10256+ _ASM_ADD "%1,%0\n"
10257+ "int $4\n0:\n"
10258+ _ASM_EXTABLE(0b, 0b)
10259+#endif
10260+
10261 : "+m" (l->a.counter)
10262 : "ir" (i));
10263 }
10264@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10265 {
10266 unsigned char c;
10267
10268- asm volatile(_ASM_SUB "%2,%0; sete %1"
10269+ asm volatile(_ASM_SUB "%2,%0\n"
10270+
10271+#ifdef CONFIG_PAX_REFCOUNT
10272+ "jno 0f\n"
10273+ _ASM_ADD "%2,%0\n"
10274+ "int $4\n0:\n"
10275+ _ASM_EXTABLE(0b, 0b)
10276+#endif
10277+
10278+ "sete %1\n"
10279 : "+m" (l->a.counter), "=qm" (c)
10280 : "ir" (i) : "memory");
10281 return c;
10282@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10283 {
10284 unsigned char c;
10285
10286- asm volatile(_ASM_DEC "%0; sete %1"
10287+ asm volatile(_ASM_DEC "%0\n"
10288+
10289+#ifdef CONFIG_PAX_REFCOUNT
10290+ "jno 0f\n"
10291+ _ASM_INC "%0\n"
10292+ "int $4\n0:\n"
10293+ _ASM_EXTABLE(0b, 0b)
10294+#endif
10295+
10296+ "sete %1\n"
10297 : "+m" (l->a.counter), "=qm" (c)
10298 : : "memory");
10299 return c != 0;
10300@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10301 {
10302 unsigned char c;
10303
10304- asm volatile(_ASM_INC "%0; sete %1"
10305+ asm volatile(_ASM_INC "%0\n"
10306+
10307+#ifdef CONFIG_PAX_REFCOUNT
10308+ "jno 0f\n"
10309+ _ASM_DEC "%0\n"
10310+ "int $4\n0:\n"
10311+ _ASM_EXTABLE(0b, 0b)
10312+#endif
10313+
10314+ "sete %1\n"
10315 : "+m" (l->a.counter), "=qm" (c)
10316 : : "memory");
10317 return c != 0;
10318@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10319 {
10320 unsigned char c;
10321
10322- asm volatile(_ASM_ADD "%2,%0; sets %1"
10323+ asm volatile(_ASM_ADD "%2,%0\n"
10324+
10325+#ifdef CONFIG_PAX_REFCOUNT
10326+ "jno 0f\n"
10327+ _ASM_SUB "%2,%0\n"
10328+ "int $4\n0:\n"
10329+ _ASM_EXTABLE(0b, 0b)
10330+#endif
10331+
10332+ "sets %1\n"
10333 : "+m" (l->a.counter), "=qm" (c)
10334 : "ir" (i) : "memory");
10335 return c;
10336@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10337 #endif
10338 /* Modern 486+ processor */
10339 __i = i;
10340- asm volatile(_ASM_XADD "%0, %1;"
10341+ asm volatile(_ASM_XADD "%0, %1\n"
10342+
10343+#ifdef CONFIG_PAX_REFCOUNT
10344+ "jno 0f\n"
10345+ _ASM_MOV "%0,%1\n"
10346+ "int $4\n0:\n"
10347+ _ASM_EXTABLE(0b, 0b)
10348+#endif
10349+
10350 : "+r" (i), "+m" (l->a.counter)
10351 : : "memory");
10352 return i + __i;
10353diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10354index 593e51d..fa69c9a 100644
10355--- a/arch/x86/include/asm/mman.h
10356+++ b/arch/x86/include/asm/mman.h
10357@@ -5,4 +5,14 @@
10358
10359 #include <asm-generic/mman.h>
10360
10361+#ifdef __KERNEL__
10362+#ifndef __ASSEMBLY__
10363+#ifdef CONFIG_X86_32
10364+#define arch_mmap_check i386_mmap_check
10365+int i386_mmap_check(unsigned long addr, unsigned long len,
10366+ unsigned long flags);
10367+#endif
10368+#endif
10369+#endif
10370+
10371 #endif /* _ASM_X86_MMAN_H */
10372diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10373index 5f55e69..e20bfb1 100644
10374--- a/arch/x86/include/asm/mmu.h
10375+++ b/arch/x86/include/asm/mmu.h
10376@@ -9,7 +9,7 @@
10377 * we put the segment information here.
10378 */
10379 typedef struct {
10380- void *ldt;
10381+ struct desc_struct *ldt;
10382 int size;
10383
10384 #ifdef CONFIG_X86_64
10385@@ -18,7 +18,19 @@ typedef struct {
10386 #endif
10387
10388 struct mutex lock;
10389- void *vdso;
10390+ unsigned long vdso;
10391+
10392+#ifdef CONFIG_X86_32
10393+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10394+ unsigned long user_cs_base;
10395+ unsigned long user_cs_limit;
10396+
10397+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10398+ cpumask_t cpu_user_cs_mask;
10399+#endif
10400+
10401+#endif
10402+#endif
10403 } mm_context_t;
10404
10405 #ifdef CONFIG_SMP
10406diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10407index 6902152..399f3a2 100644
10408--- a/arch/x86/include/asm/mmu_context.h
10409+++ b/arch/x86/include/asm/mmu_context.h
10410@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10411
10412 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10413 {
10414+
10415+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10416+ unsigned int i;
10417+ pgd_t *pgd;
10418+
10419+ pax_open_kernel();
10420+ pgd = get_cpu_pgd(smp_processor_id());
10421+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10422+ set_pgd_batched(pgd+i, native_make_pgd(0));
10423+ pax_close_kernel();
10424+#endif
10425+
10426 #ifdef CONFIG_SMP
10427 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10428 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10429@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10430 struct task_struct *tsk)
10431 {
10432 unsigned cpu = smp_processor_id();
10433+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10434+ int tlbstate = TLBSTATE_OK;
10435+#endif
10436
10437 if (likely(prev != next)) {
10438 #ifdef CONFIG_SMP
10439+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10440+ tlbstate = percpu_read(cpu_tlbstate.state);
10441+#endif
10442 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10443 percpu_write(cpu_tlbstate.active_mm, next);
10444 #endif
10445 cpumask_set_cpu(cpu, mm_cpumask(next));
10446
10447 /* Re-load page tables */
10448+#ifdef CONFIG_PAX_PER_CPU_PGD
10449+ pax_open_kernel();
10450+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10451+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10452+ pax_close_kernel();
10453+ load_cr3(get_cpu_pgd(cpu));
10454+#else
10455 load_cr3(next->pgd);
10456+#endif
10457
10458 /* stop flush ipis for the previous mm */
10459 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10460@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10461 */
10462 if (unlikely(prev->context.ldt != next->context.ldt))
10463 load_LDT_nolock(&next->context);
10464- }
10465+
10466+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10467+ if (!(__supported_pte_mask & _PAGE_NX)) {
10468+ smp_mb__before_clear_bit();
10469+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10470+ smp_mb__after_clear_bit();
10471+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10472+ }
10473+#endif
10474+
10475+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10476+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10477+ prev->context.user_cs_limit != next->context.user_cs_limit))
10478+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10479 #ifdef CONFIG_SMP
10480+ else if (unlikely(tlbstate != TLBSTATE_OK))
10481+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10482+#endif
10483+#endif
10484+
10485+ }
10486 else {
10487+
10488+#ifdef CONFIG_PAX_PER_CPU_PGD
10489+ pax_open_kernel();
10490+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10491+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10492+ pax_close_kernel();
10493+ load_cr3(get_cpu_pgd(cpu));
10494+#endif
10495+
10496+#ifdef CONFIG_SMP
10497 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10498 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10499
10500@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10501 * tlb flush IPI delivery. We must reload CR3
10502 * to make sure to use no freed page tables.
10503 */
10504+
10505+#ifndef CONFIG_PAX_PER_CPU_PGD
10506 load_cr3(next->pgd);
10507+#endif
10508+
10509 load_LDT_nolock(&next->context);
10510+
10511+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10512+ if (!(__supported_pte_mask & _PAGE_NX))
10513+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10514+#endif
10515+
10516+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10517+#ifdef CONFIG_PAX_PAGEEXEC
10518+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10519+#endif
10520+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10521+#endif
10522+
10523 }
10524+#endif
10525 }
10526-#endif
10527 }
10528
10529 #define activate_mm(prev, next) \
10530diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10531index 9eae775..c914fea 100644
10532--- a/arch/x86/include/asm/module.h
10533+++ b/arch/x86/include/asm/module.h
10534@@ -5,6 +5,7 @@
10535
10536 #ifdef CONFIG_X86_64
10537 /* X86_64 does not define MODULE_PROC_FAMILY */
10538+#define MODULE_PROC_FAMILY ""
10539 #elif defined CONFIG_M386
10540 #define MODULE_PROC_FAMILY "386 "
10541 #elif defined CONFIG_M486
10542@@ -59,8 +60,20 @@
10543 #error unknown processor family
10544 #endif
10545
10546-#ifdef CONFIG_X86_32
10547-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10548+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10549+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10550+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10551+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10552+#else
10553+#define MODULE_PAX_KERNEXEC ""
10554 #endif
10555
10556+#ifdef CONFIG_PAX_MEMORY_UDEREF
10557+#define MODULE_PAX_UDEREF "UDEREF "
10558+#else
10559+#define MODULE_PAX_UDEREF ""
10560+#endif
10561+
10562+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10563+
10564 #endif /* _ASM_X86_MODULE_H */
10565diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10566index 7639dbf..e08a58c 100644
10567--- a/arch/x86/include/asm/page_64_types.h
10568+++ b/arch/x86/include/asm/page_64_types.h
10569@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10570
10571 /* duplicated to the one in bootmem.h */
10572 extern unsigned long max_pfn;
10573-extern unsigned long phys_base;
10574+extern const unsigned long phys_base;
10575
10576 extern unsigned long __phys_addr(unsigned long);
10577 #define __phys_reloc_hide(x) (x)
10578diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10579index a7d2db9..edb023e 100644
10580--- a/arch/x86/include/asm/paravirt.h
10581+++ b/arch/x86/include/asm/paravirt.h
10582@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10583 val);
10584 }
10585
10586+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10587+{
10588+ pgdval_t val = native_pgd_val(pgd);
10589+
10590+ if (sizeof(pgdval_t) > sizeof(long))
10591+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10592+ val, (u64)val >> 32);
10593+ else
10594+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10595+ val);
10596+}
10597+
10598 static inline void pgd_clear(pgd_t *pgdp)
10599 {
10600 set_pgd(pgdp, __pgd(0));
10601@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10602 pv_mmu_ops.set_fixmap(idx, phys, flags);
10603 }
10604
10605+#ifdef CONFIG_PAX_KERNEXEC
10606+static inline unsigned long pax_open_kernel(void)
10607+{
10608+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10609+}
10610+
10611+static inline unsigned long pax_close_kernel(void)
10612+{
10613+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10614+}
10615+#else
10616+static inline unsigned long pax_open_kernel(void) { return 0; }
10617+static inline unsigned long pax_close_kernel(void) { return 0; }
10618+#endif
10619+
10620 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10621
10622 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10623@@ -964,7 +991,7 @@ extern void default_banner(void);
10624
10625 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10626 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10627-#define PARA_INDIRECT(addr) *%cs:addr
10628+#define PARA_INDIRECT(addr) *%ss:addr
10629 #endif
10630
10631 #define INTERRUPT_RETURN \
10632@@ -1041,6 +1068,21 @@ extern void default_banner(void);
10633 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10634 CLBR_NONE, \
10635 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10636+
10637+#define GET_CR0_INTO_RDI \
10638+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10639+ mov %rax,%rdi
10640+
10641+#define SET_RDI_INTO_CR0 \
10642+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10643+
10644+#define GET_CR3_INTO_RDI \
10645+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10646+ mov %rax,%rdi
10647+
10648+#define SET_RDI_INTO_CR3 \
10649+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10650+
10651 #endif /* CONFIG_X86_32 */
10652
10653 #endif /* __ASSEMBLY__ */
10654diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10655index 8e8b9a4..f07d725 100644
10656--- a/arch/x86/include/asm/paravirt_types.h
10657+++ b/arch/x86/include/asm/paravirt_types.h
10658@@ -84,20 +84,20 @@ struct pv_init_ops {
10659 */
10660 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10661 unsigned long addr, unsigned len);
10662-};
10663+} __no_const;
10664
10665
10666 struct pv_lazy_ops {
10667 /* Set deferred update mode, used for batching operations. */
10668 void (*enter)(void);
10669 void (*leave)(void);
10670-};
10671+} __no_const;
10672
10673 struct pv_time_ops {
10674 unsigned long long (*sched_clock)(void);
10675 unsigned long long (*steal_clock)(int cpu);
10676 unsigned long (*get_tsc_khz)(void);
10677-};
10678+} __no_const;
10679
10680 struct pv_cpu_ops {
10681 /* hooks for various privileged instructions */
10682@@ -193,7 +193,7 @@ struct pv_cpu_ops {
10683
10684 void (*start_context_switch)(struct task_struct *prev);
10685 void (*end_context_switch)(struct task_struct *next);
10686-};
10687+} __no_const;
10688
10689 struct pv_irq_ops {
10690 /*
10691@@ -224,7 +224,7 @@ struct pv_apic_ops {
10692 unsigned long start_eip,
10693 unsigned long start_esp);
10694 #endif
10695-};
10696+} __no_const;
10697
10698 struct pv_mmu_ops {
10699 unsigned long (*read_cr2)(void);
10700@@ -313,6 +313,7 @@ struct pv_mmu_ops {
10701 struct paravirt_callee_save make_pud;
10702
10703 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10704+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10705 #endif /* PAGETABLE_LEVELS == 4 */
10706 #endif /* PAGETABLE_LEVELS >= 3 */
10707
10708@@ -324,6 +325,12 @@ struct pv_mmu_ops {
10709 an mfn. We can tell which is which from the index. */
10710 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10711 phys_addr_t phys, pgprot_t flags);
10712+
10713+#ifdef CONFIG_PAX_KERNEXEC
10714+ unsigned long (*pax_open_kernel)(void);
10715+ unsigned long (*pax_close_kernel)(void);
10716+#endif
10717+
10718 };
10719
10720 struct arch_spinlock;
10721@@ -334,7 +341,7 @@ struct pv_lock_ops {
10722 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10723 int (*spin_trylock)(struct arch_spinlock *lock);
10724 void (*spin_unlock)(struct arch_spinlock *lock);
10725-};
10726+} __no_const;
10727
10728 /* This contains all the paravirt structures: we get a convenient
10729 * number for each function using the offset which we use to indicate
10730diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10731index b4389a4..b7ff22c 100644
10732--- a/arch/x86/include/asm/pgalloc.h
10733+++ b/arch/x86/include/asm/pgalloc.h
10734@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10735 pmd_t *pmd, pte_t *pte)
10736 {
10737 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10738+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10739+}
10740+
10741+static inline void pmd_populate_user(struct mm_struct *mm,
10742+ pmd_t *pmd, pte_t *pte)
10743+{
10744+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10745 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10746 }
10747
10748diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10749index 98391db..8f6984e 100644
10750--- a/arch/x86/include/asm/pgtable-2level.h
10751+++ b/arch/x86/include/asm/pgtable-2level.h
10752@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10753
10754 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10755 {
10756+ pax_open_kernel();
10757 *pmdp = pmd;
10758+ pax_close_kernel();
10759 }
10760
10761 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10762diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10763index effff47..f9e4035 100644
10764--- a/arch/x86/include/asm/pgtable-3level.h
10765+++ b/arch/x86/include/asm/pgtable-3level.h
10766@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10767
10768 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10769 {
10770+ pax_open_kernel();
10771 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10772+ pax_close_kernel();
10773 }
10774
10775 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10776 {
10777+ pax_open_kernel();
10778 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10779+ pax_close_kernel();
10780 }
10781
10782 /*
10783diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10784index 49afb3f..ed14d07 100644
10785--- a/arch/x86/include/asm/pgtable.h
10786+++ b/arch/x86/include/asm/pgtable.h
10787@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10788
10789 #ifndef __PAGETABLE_PUD_FOLDED
10790 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10791+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10792 #define pgd_clear(pgd) native_pgd_clear(pgd)
10793 #endif
10794
10795@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10796
10797 #define arch_end_context_switch(prev) do {} while(0)
10798
10799+#define pax_open_kernel() native_pax_open_kernel()
10800+#define pax_close_kernel() native_pax_close_kernel()
10801 #endif /* CONFIG_PARAVIRT */
10802
10803+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10804+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10805+
10806+#ifdef CONFIG_PAX_KERNEXEC
10807+static inline unsigned long native_pax_open_kernel(void)
10808+{
10809+ unsigned long cr0;
10810+
10811+ preempt_disable();
10812+ barrier();
10813+ cr0 = read_cr0() ^ X86_CR0_WP;
10814+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
10815+ write_cr0(cr0);
10816+ return cr0 ^ X86_CR0_WP;
10817+}
10818+
10819+static inline unsigned long native_pax_close_kernel(void)
10820+{
10821+ unsigned long cr0;
10822+
10823+ cr0 = read_cr0() ^ X86_CR0_WP;
10824+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10825+ write_cr0(cr0);
10826+ barrier();
10827+ preempt_enable_no_resched();
10828+ return cr0 ^ X86_CR0_WP;
10829+}
10830+#else
10831+static inline unsigned long native_pax_open_kernel(void) { return 0; }
10832+static inline unsigned long native_pax_close_kernel(void) { return 0; }
10833+#endif
10834+
10835 /*
10836 * The following only work if pte_present() is true.
10837 * Undefined behaviour if not..
10838 */
10839+static inline int pte_user(pte_t pte)
10840+{
10841+ return pte_val(pte) & _PAGE_USER;
10842+}
10843+
10844 static inline int pte_dirty(pte_t pte)
10845 {
10846 return pte_flags(pte) & _PAGE_DIRTY;
10847@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10848 return pte_clear_flags(pte, _PAGE_RW);
10849 }
10850
10851+static inline pte_t pte_mkread(pte_t pte)
10852+{
10853+ return __pte(pte_val(pte) | _PAGE_USER);
10854+}
10855+
10856 static inline pte_t pte_mkexec(pte_t pte)
10857 {
10858- return pte_clear_flags(pte, _PAGE_NX);
10859+#ifdef CONFIG_X86_PAE
10860+ if (__supported_pte_mask & _PAGE_NX)
10861+ return pte_clear_flags(pte, _PAGE_NX);
10862+ else
10863+#endif
10864+ return pte_set_flags(pte, _PAGE_USER);
10865+}
10866+
10867+static inline pte_t pte_exprotect(pte_t pte)
10868+{
10869+#ifdef CONFIG_X86_PAE
10870+ if (__supported_pte_mask & _PAGE_NX)
10871+ return pte_set_flags(pte, _PAGE_NX);
10872+ else
10873+#endif
10874+ return pte_clear_flags(pte, _PAGE_USER);
10875 }
10876
10877 static inline pte_t pte_mkdirty(pte_t pte)
10878@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10879 #endif
10880
10881 #ifndef __ASSEMBLY__
10882+
10883+#ifdef CONFIG_PAX_PER_CPU_PGD
10884+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10885+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10886+{
10887+ return cpu_pgd[cpu];
10888+}
10889+#endif
10890+
10891 #include <linux/mm_types.h>
10892
10893 static inline int pte_none(pte_t pte)
10894@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10895
10896 static inline int pgd_bad(pgd_t pgd)
10897 {
10898- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10899+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10900 }
10901
10902 static inline int pgd_none(pgd_t pgd)
10903@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10904 * pgd_offset() returns a (pgd_t *)
10905 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10906 */
10907-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10908+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10909+
10910+#ifdef CONFIG_PAX_PER_CPU_PGD
10911+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10912+#endif
10913+
10914 /*
10915 * a shortcut which implies the use of the kernel's pgd, instead
10916 * of a process's
10917@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10918 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10919 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10920
10921+#ifdef CONFIG_X86_32
10922+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10923+#else
10924+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10925+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10926+
10927+#ifdef CONFIG_PAX_MEMORY_UDEREF
10928+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10929+#else
10930+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10931+#endif
10932+
10933+#endif
10934+
10935 #ifndef __ASSEMBLY__
10936
10937 extern int direct_gbpages;
10938@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10939 * dst and src can be on the same page, but the range must not overlap,
10940 * and must not cross a page boundary.
10941 */
10942-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10943+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10944 {
10945- memcpy(dst, src, count * sizeof(pgd_t));
10946+ pax_open_kernel();
10947+ while (count--)
10948+ *dst++ = *src++;
10949+ pax_close_kernel();
10950 }
10951
10952+#ifdef CONFIG_PAX_PER_CPU_PGD
10953+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10954+#endif
10955+
10956+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10957+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10958+#else
10959+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10960+#endif
10961
10962 #include <asm-generic/pgtable.h>
10963 #endif /* __ASSEMBLY__ */
10964diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
10965index 0c92113..34a77c6 100644
10966--- a/arch/x86/include/asm/pgtable_32.h
10967+++ b/arch/x86/include/asm/pgtable_32.h
10968@@ -25,9 +25,6 @@
10969 struct mm_struct;
10970 struct vm_area_struct;
10971
10972-extern pgd_t swapper_pg_dir[1024];
10973-extern pgd_t initial_page_table[1024];
10974-
10975 static inline void pgtable_cache_init(void) { }
10976 static inline void check_pgt_cache(void) { }
10977 void paging_init(void);
10978@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10979 # include <asm/pgtable-2level.h>
10980 #endif
10981
10982+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
10983+extern pgd_t initial_page_table[PTRS_PER_PGD];
10984+#ifdef CONFIG_X86_PAE
10985+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
10986+#endif
10987+
10988 #if defined(CONFIG_HIGHPTE)
10989 #define pte_offset_map(dir, address) \
10990 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
10991@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10992 /* Clear a kernel PTE and flush it from the TLB */
10993 #define kpte_clear_flush(ptep, vaddr) \
10994 do { \
10995+ pax_open_kernel(); \
10996 pte_clear(&init_mm, (vaddr), (ptep)); \
10997+ pax_close_kernel(); \
10998 __flush_tlb_one((vaddr)); \
10999 } while (0)
11000
11001@@ -74,6 +79,9 @@ do { \
11002
11003 #endif /* !__ASSEMBLY__ */
11004
11005+#define HAVE_ARCH_UNMAPPED_AREA
11006+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11007+
11008 /*
11009 * kern_addr_valid() is (1) for FLATMEM and (0) for
11010 * SPARSEMEM and DISCONTIGMEM
11011diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11012index ed5903b..c7fe163 100644
11013--- a/arch/x86/include/asm/pgtable_32_types.h
11014+++ b/arch/x86/include/asm/pgtable_32_types.h
11015@@ -8,7 +8,7 @@
11016 */
11017 #ifdef CONFIG_X86_PAE
11018 # include <asm/pgtable-3level_types.h>
11019-# define PMD_SIZE (1UL << PMD_SHIFT)
11020+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11021 # define PMD_MASK (~(PMD_SIZE - 1))
11022 #else
11023 # include <asm/pgtable-2level_types.h>
11024@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11025 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11026 #endif
11027
11028+#ifdef CONFIG_PAX_KERNEXEC
11029+#ifndef __ASSEMBLY__
11030+extern unsigned char MODULES_EXEC_VADDR[];
11031+extern unsigned char MODULES_EXEC_END[];
11032+#endif
11033+#include <asm/boot.h>
11034+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11035+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11036+#else
11037+#define ktla_ktva(addr) (addr)
11038+#define ktva_ktla(addr) (addr)
11039+#endif
11040+
11041 #define MODULES_VADDR VMALLOC_START
11042 #define MODULES_END VMALLOC_END
11043 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11044diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11045index 975f709..107976d 100644
11046--- a/arch/x86/include/asm/pgtable_64.h
11047+++ b/arch/x86/include/asm/pgtable_64.h
11048@@ -16,10 +16,14 @@
11049
11050 extern pud_t level3_kernel_pgt[512];
11051 extern pud_t level3_ident_pgt[512];
11052+extern pud_t level3_vmalloc_start_pgt[512];
11053+extern pud_t level3_vmalloc_end_pgt[512];
11054+extern pud_t level3_vmemmap_pgt[512];
11055+extern pud_t level2_vmemmap_pgt[512];
11056 extern pmd_t level2_kernel_pgt[512];
11057 extern pmd_t level2_fixmap_pgt[512];
11058-extern pmd_t level2_ident_pgt[512];
11059-extern pgd_t init_level4_pgt[];
11060+extern pmd_t level2_ident_pgt[512*2];
11061+extern pgd_t init_level4_pgt[512];
11062
11063 #define swapper_pg_dir init_level4_pgt
11064
11065@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11066
11067 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11068 {
11069+ pax_open_kernel();
11070 *pmdp = pmd;
11071+ pax_close_kernel();
11072 }
11073
11074 static inline void native_pmd_clear(pmd_t *pmd)
11075@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11076
11077 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11078 {
11079+ pax_open_kernel();
11080+ *pgdp = pgd;
11081+ pax_close_kernel();
11082+}
11083+
11084+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11085+{
11086 *pgdp = pgd;
11087 }
11088
11089diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11090index 766ea16..5b96cb3 100644
11091--- a/arch/x86/include/asm/pgtable_64_types.h
11092+++ b/arch/x86/include/asm/pgtable_64_types.h
11093@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11094 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11095 #define MODULES_END _AC(0xffffffffff000000, UL)
11096 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11097+#define MODULES_EXEC_VADDR MODULES_VADDR
11098+#define MODULES_EXEC_END MODULES_END
11099+
11100+#define ktla_ktva(addr) (addr)
11101+#define ktva_ktla(addr) (addr)
11102
11103 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11104diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11105index 013286a..8b42f4f 100644
11106--- a/arch/x86/include/asm/pgtable_types.h
11107+++ b/arch/x86/include/asm/pgtable_types.h
11108@@ -16,13 +16,12 @@
11109 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11110 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11111 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11112-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11113+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11114 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11115 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11116 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11117-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11118-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11119-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11120+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11121+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11122 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11123
11124 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11125@@ -40,7 +39,6 @@
11126 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11127 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11128 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11129-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11130 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11131 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11132 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11133@@ -57,8 +55,10 @@
11134
11135 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11136 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11137-#else
11138+#elif defined(CONFIG_KMEMCHECK)
11139 #define _PAGE_NX (_AT(pteval_t, 0))
11140+#else
11141+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11142 #endif
11143
11144 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11145@@ -96,6 +96,9 @@
11146 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11147 _PAGE_ACCESSED)
11148
11149+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11150+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11151+
11152 #define __PAGE_KERNEL_EXEC \
11153 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11154 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11155@@ -106,7 +109,7 @@
11156 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11157 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11158 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11159-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11160+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11161 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11162 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11163 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11164@@ -168,8 +171,8 @@
11165 * bits are combined, this will alow user to access the high address mapped
11166 * VDSO in the presence of CONFIG_COMPAT_VDSO
11167 */
11168-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11169-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11170+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11171+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11172 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11173 #endif
11174
11175@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11176 {
11177 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11178 }
11179+#endif
11180
11181+#if PAGETABLE_LEVELS == 3
11182+#include <asm-generic/pgtable-nopud.h>
11183+#endif
11184+
11185+#if PAGETABLE_LEVELS == 2
11186+#include <asm-generic/pgtable-nopmd.h>
11187+#endif
11188+
11189+#ifndef __ASSEMBLY__
11190 #if PAGETABLE_LEVELS > 3
11191 typedef struct { pudval_t pud; } pud_t;
11192
11193@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11194 return pud.pud;
11195 }
11196 #else
11197-#include <asm-generic/pgtable-nopud.h>
11198-
11199 static inline pudval_t native_pud_val(pud_t pud)
11200 {
11201 return native_pgd_val(pud.pgd);
11202@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11203 return pmd.pmd;
11204 }
11205 #else
11206-#include <asm-generic/pgtable-nopmd.h>
11207-
11208 static inline pmdval_t native_pmd_val(pmd_t pmd)
11209 {
11210 return native_pgd_val(pmd.pud.pgd);
11211@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11212
11213 extern pteval_t __supported_pte_mask;
11214 extern void set_nx(void);
11215-extern int nx_enabled;
11216
11217 #define pgprot_writecombine pgprot_writecombine
11218 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11219diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11220index 58545c9..fe6fc38e 100644
11221--- a/arch/x86/include/asm/processor.h
11222+++ b/arch/x86/include/asm/processor.h
11223@@ -266,7 +266,7 @@ struct tss_struct {
11224
11225 } ____cacheline_aligned;
11226
11227-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11228+extern struct tss_struct init_tss[NR_CPUS];
11229
11230 /*
11231 * Save the original ist values for checking stack pointers during debugging
11232@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11233 */
11234 #define TASK_SIZE PAGE_OFFSET
11235 #define TASK_SIZE_MAX TASK_SIZE
11236+
11237+#ifdef CONFIG_PAX_SEGMEXEC
11238+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11239+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11240+#else
11241 #define STACK_TOP TASK_SIZE
11242-#define STACK_TOP_MAX STACK_TOP
11243+#endif
11244+
11245+#define STACK_TOP_MAX TASK_SIZE
11246
11247 #define INIT_THREAD { \
11248- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11249+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11250 .vm86_info = NULL, \
11251 .sysenter_cs = __KERNEL_CS, \
11252 .io_bitmap_ptr = NULL, \
11253@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11254 */
11255 #define INIT_TSS { \
11256 .x86_tss = { \
11257- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11258+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11259 .ss0 = __KERNEL_DS, \
11260 .ss1 = __KERNEL_CS, \
11261 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11262@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11263 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11264
11265 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11266-#define KSTK_TOP(info) \
11267-({ \
11268- unsigned long *__ptr = (unsigned long *)(info); \
11269- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11270-})
11271+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11272
11273 /*
11274 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11275@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11276 #define task_pt_regs(task) \
11277 ({ \
11278 struct pt_regs *__regs__; \
11279- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11280+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11281 __regs__ - 1; \
11282 })
11283
11284@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11285 /*
11286 * User space process size. 47bits minus one guard page.
11287 */
11288-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11289+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11290
11291 /* This decides where the kernel will search for a free chunk of vm
11292 * space during mmap's.
11293 */
11294 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11295- 0xc0000000 : 0xFFFFe000)
11296+ 0xc0000000 : 0xFFFFf000)
11297
11298 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11299 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11300@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11301 #define STACK_TOP_MAX TASK_SIZE_MAX
11302
11303 #define INIT_THREAD { \
11304- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11305+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11306 }
11307
11308 #define INIT_TSS { \
11309- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11310+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11311 }
11312
11313 /*
11314@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11315 */
11316 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11317
11318+#ifdef CONFIG_PAX_SEGMEXEC
11319+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11320+#endif
11321+
11322 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11323
11324 /* Get/set a process' ability to use the timestamp counter instruction */
11325diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11326index 3566454..4bdfb8c 100644
11327--- a/arch/x86/include/asm/ptrace.h
11328+++ b/arch/x86/include/asm/ptrace.h
11329@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11330 }
11331
11332 /*
11333- * user_mode_vm(regs) determines whether a register set came from user mode.
11334+ * user_mode(regs) determines whether a register set came from user mode.
11335 * This is true if V8086 mode was enabled OR if the register set was from
11336 * protected mode with RPL-3 CS value. This tricky test checks that with
11337 * one comparison. Many places in the kernel can bypass this full check
11338- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11339+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11340+ * be used.
11341 */
11342-static inline int user_mode(struct pt_regs *regs)
11343+static inline int user_mode_novm(struct pt_regs *regs)
11344 {
11345 #ifdef CONFIG_X86_32
11346 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11347 #else
11348- return !!(regs->cs & 3);
11349+ return !!(regs->cs & SEGMENT_RPL_MASK);
11350 #endif
11351 }
11352
11353-static inline int user_mode_vm(struct pt_regs *regs)
11354+static inline int user_mode(struct pt_regs *regs)
11355 {
11356 #ifdef CONFIG_X86_32
11357 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11358 USER_RPL;
11359 #else
11360- return user_mode(regs);
11361+ return user_mode_novm(regs);
11362 #endif
11363 }
11364
11365@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11366 #ifdef CONFIG_X86_64
11367 static inline bool user_64bit_mode(struct pt_regs *regs)
11368 {
11369+ unsigned long cs = regs->cs & 0xffff;
11370 #ifndef CONFIG_PARAVIRT
11371 /*
11372 * On non-paravirt systems, this is the only long mode CPL 3
11373 * selector. We do not allow long mode selectors in the LDT.
11374 */
11375- return regs->cs == __USER_CS;
11376+ return cs == __USER_CS;
11377 #else
11378 /* Headers are too twisted for this to go in paravirt.h. */
11379- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11380+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11381 #endif
11382 }
11383 #endif
11384diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11385index 92f29706..a79cbbb 100644
11386--- a/arch/x86/include/asm/reboot.h
11387+++ b/arch/x86/include/asm/reboot.h
11388@@ -6,19 +6,19 @@
11389 struct pt_regs;
11390
11391 struct machine_ops {
11392- void (*restart)(char *cmd);
11393- void (*halt)(void);
11394- void (*power_off)(void);
11395+ void (* __noreturn restart)(char *cmd);
11396+ void (* __noreturn halt)(void);
11397+ void (* __noreturn power_off)(void);
11398 void (*shutdown)(void);
11399 void (*crash_shutdown)(struct pt_regs *);
11400- void (*emergency_restart)(void);
11401-};
11402+ void (* __noreturn emergency_restart)(void);
11403+} __no_const;
11404
11405 extern struct machine_ops machine_ops;
11406
11407 void native_machine_crash_shutdown(struct pt_regs *regs);
11408 void native_machine_shutdown(void);
11409-void machine_real_restart(unsigned int type);
11410+void machine_real_restart(unsigned int type) __noreturn;
11411 /* These must match dispatch_table in reboot_32.S */
11412 #define MRR_BIOS 0
11413 #define MRR_APM 1
11414diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11415index 2dbe4a7..ce1db00 100644
11416--- a/arch/x86/include/asm/rwsem.h
11417+++ b/arch/x86/include/asm/rwsem.h
11418@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11419 {
11420 asm volatile("# beginning down_read\n\t"
11421 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11422+
11423+#ifdef CONFIG_PAX_REFCOUNT
11424+ "jno 0f\n"
11425+ LOCK_PREFIX _ASM_DEC "(%1)\n"
11426+ "int $4\n0:\n"
11427+ _ASM_EXTABLE(0b, 0b)
11428+#endif
11429+
11430 /* adds 0x00000001 */
11431 " jns 1f\n"
11432 " call call_rwsem_down_read_failed\n"
11433@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11434 "1:\n\t"
11435 " mov %1,%2\n\t"
11436 " add %3,%2\n\t"
11437+
11438+#ifdef CONFIG_PAX_REFCOUNT
11439+ "jno 0f\n"
11440+ "sub %3,%2\n"
11441+ "int $4\n0:\n"
11442+ _ASM_EXTABLE(0b, 0b)
11443+#endif
11444+
11445 " jle 2f\n\t"
11446 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11447 " jnz 1b\n\t"
11448@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11449 long tmp;
11450 asm volatile("# beginning down_write\n\t"
11451 LOCK_PREFIX " xadd %1,(%2)\n\t"
11452+
11453+#ifdef CONFIG_PAX_REFCOUNT
11454+ "jno 0f\n"
11455+ "mov %1,(%2)\n"
11456+ "int $4\n0:\n"
11457+ _ASM_EXTABLE(0b, 0b)
11458+#endif
11459+
11460 /* adds 0xffff0001, returns the old value */
11461 " test %1,%1\n\t"
11462 /* was the count 0 before? */
11463@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11464 long tmp;
11465 asm volatile("# beginning __up_read\n\t"
11466 LOCK_PREFIX " xadd %1,(%2)\n\t"
11467+
11468+#ifdef CONFIG_PAX_REFCOUNT
11469+ "jno 0f\n"
11470+ "mov %1,(%2)\n"
11471+ "int $4\n0:\n"
11472+ _ASM_EXTABLE(0b, 0b)
11473+#endif
11474+
11475 /* subtracts 1, returns the old value */
11476 " jns 1f\n\t"
11477 " call call_rwsem_wake\n" /* expects old value in %edx */
11478@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11479 long tmp;
11480 asm volatile("# beginning __up_write\n\t"
11481 LOCK_PREFIX " xadd %1,(%2)\n\t"
11482+
11483+#ifdef CONFIG_PAX_REFCOUNT
11484+ "jno 0f\n"
11485+ "mov %1,(%2)\n"
11486+ "int $4\n0:\n"
11487+ _ASM_EXTABLE(0b, 0b)
11488+#endif
11489+
11490 /* subtracts 0xffff0001, returns the old value */
11491 " jns 1f\n\t"
11492 " call call_rwsem_wake\n" /* expects old value in %edx */
11493@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11494 {
11495 asm volatile("# beginning __downgrade_write\n\t"
11496 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11497+
11498+#ifdef CONFIG_PAX_REFCOUNT
11499+ "jno 0f\n"
11500+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11501+ "int $4\n0:\n"
11502+ _ASM_EXTABLE(0b, 0b)
11503+#endif
11504+
11505 /*
11506 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11507 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11508@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11509 */
11510 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11511 {
11512- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11513+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11514+
11515+#ifdef CONFIG_PAX_REFCOUNT
11516+ "jno 0f\n"
11517+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11518+ "int $4\n0:\n"
11519+ _ASM_EXTABLE(0b, 0b)
11520+#endif
11521+
11522 : "+m" (sem->count)
11523 : "er" (delta));
11524 }
11525@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11526 */
11527 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11528 {
11529- return delta + xadd(&sem->count, delta);
11530+ return delta + xadd_check_overflow(&sem->count, delta);
11531 }
11532
11533 #endif /* __KERNEL__ */
11534diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11535index 5e64171..f58957e 100644
11536--- a/arch/x86/include/asm/segment.h
11537+++ b/arch/x86/include/asm/segment.h
11538@@ -64,10 +64,15 @@
11539 * 26 - ESPFIX small SS
11540 * 27 - per-cpu [ offset to per-cpu data area ]
11541 * 28 - stack_canary-20 [ for stack protector ]
11542- * 29 - unused
11543- * 30 - unused
11544+ * 29 - PCI BIOS CS
11545+ * 30 - PCI BIOS DS
11546 * 31 - TSS for double fault handler
11547 */
11548+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11549+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11550+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11551+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11552+
11553 #define GDT_ENTRY_TLS_MIN 6
11554 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11555
11556@@ -79,6 +84,8 @@
11557
11558 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11559
11560+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11561+
11562 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11563
11564 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11565@@ -104,6 +111,12 @@
11566 #define __KERNEL_STACK_CANARY 0
11567 #endif
11568
11569+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11570+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11571+
11572+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11573+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11574+
11575 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11576
11577 /*
11578@@ -141,7 +154,7 @@
11579 */
11580
11581 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11582-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11583+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11584
11585
11586 #else
11587@@ -165,6 +178,8 @@
11588 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11589 #define __USER32_DS __USER_DS
11590
11591+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11592+
11593 #define GDT_ENTRY_TSS 8 /* needs two entries */
11594 #define GDT_ENTRY_LDT 10 /* needs two entries */
11595 #define GDT_ENTRY_TLS_MIN 12
11596@@ -185,6 +200,7 @@
11597 #endif
11598
11599 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11600+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11601 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11602 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11603 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11604diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11605index 0434c40..1714bf0 100644
11606--- a/arch/x86/include/asm/smp.h
11607+++ b/arch/x86/include/asm/smp.h
11608@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11609 /* cpus sharing the last level cache: */
11610 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11611 DECLARE_PER_CPU(u16, cpu_llc_id);
11612-DECLARE_PER_CPU(int, cpu_number);
11613+DECLARE_PER_CPU(unsigned int, cpu_number);
11614
11615 static inline struct cpumask *cpu_sibling_mask(int cpu)
11616 {
11617@@ -77,7 +77,7 @@ struct smp_ops {
11618
11619 void (*send_call_func_ipi)(const struct cpumask *mask);
11620 void (*send_call_func_single_ipi)(int cpu);
11621-};
11622+} __no_const;
11623
11624 /* Globals due to paravirt */
11625 extern void set_cpu_sibling_map(int cpu);
11626@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11627 extern int safe_smp_processor_id(void);
11628
11629 #elif defined(CONFIG_X86_64_SMP)
11630-#define raw_smp_processor_id() (percpu_read(cpu_number))
11631-
11632-#define stack_smp_processor_id() \
11633-({ \
11634- struct thread_info *ti; \
11635- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11636- ti->cpu; \
11637-})
11638+#define raw_smp_processor_id() (percpu_read(cpu_number))
11639+#define stack_smp_processor_id() raw_smp_processor_id()
11640 #define safe_smp_processor_id() smp_processor_id()
11641
11642 #endif
11643diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11644index a82c2bf..2198f61 100644
11645--- a/arch/x86/include/asm/spinlock.h
11646+++ b/arch/x86/include/asm/spinlock.h
11647@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11648 static inline void arch_read_lock(arch_rwlock_t *rw)
11649 {
11650 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11651+
11652+#ifdef CONFIG_PAX_REFCOUNT
11653+ "jno 0f\n"
11654+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11655+ "int $4\n0:\n"
11656+ _ASM_EXTABLE(0b, 0b)
11657+#endif
11658+
11659 "jns 1f\n"
11660 "call __read_lock_failed\n\t"
11661 "1:\n"
11662@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11663 static inline void arch_write_lock(arch_rwlock_t *rw)
11664 {
11665 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11666+
11667+#ifdef CONFIG_PAX_REFCOUNT
11668+ "jno 0f\n"
11669+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11670+ "int $4\n0:\n"
11671+ _ASM_EXTABLE(0b, 0b)
11672+#endif
11673+
11674 "jz 1f\n"
11675 "call __write_lock_failed\n\t"
11676 "1:\n"
11677@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11678
11679 static inline void arch_read_unlock(arch_rwlock_t *rw)
11680 {
11681- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11682+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11683+
11684+#ifdef CONFIG_PAX_REFCOUNT
11685+ "jno 0f\n"
11686+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11687+ "int $4\n0:\n"
11688+ _ASM_EXTABLE(0b, 0b)
11689+#endif
11690+
11691 :"+m" (rw->lock) : : "memory");
11692 }
11693
11694 static inline void arch_write_unlock(arch_rwlock_t *rw)
11695 {
11696- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11697+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11698+
11699+#ifdef CONFIG_PAX_REFCOUNT
11700+ "jno 0f\n"
11701+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11702+ "int $4\n0:\n"
11703+ _ASM_EXTABLE(0b, 0b)
11704+#endif
11705+
11706 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11707 }
11708
11709diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11710index 1575177..cb23f52 100644
11711--- a/arch/x86/include/asm/stackprotector.h
11712+++ b/arch/x86/include/asm/stackprotector.h
11713@@ -48,7 +48,7 @@
11714 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11715 */
11716 #define GDT_STACK_CANARY_INIT \
11717- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11718+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11719
11720 /*
11721 * Initialize the stackprotector canary value.
11722@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11723
11724 static inline void load_stack_canary_segment(void)
11725 {
11726-#ifdef CONFIG_X86_32
11727+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11728 asm volatile ("mov %0, %%gs" : : "r" (0));
11729 #endif
11730 }
11731diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11732index 70bbe39..4ae2bd4 100644
11733--- a/arch/x86/include/asm/stacktrace.h
11734+++ b/arch/x86/include/asm/stacktrace.h
11735@@ -11,28 +11,20 @@
11736
11737 extern int kstack_depth_to_print;
11738
11739-struct thread_info;
11740+struct task_struct;
11741 struct stacktrace_ops;
11742
11743-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11744- unsigned long *stack,
11745- unsigned long bp,
11746- const struct stacktrace_ops *ops,
11747- void *data,
11748- unsigned long *end,
11749- int *graph);
11750+typedef unsigned long walk_stack_t(struct task_struct *task,
11751+ void *stack_start,
11752+ unsigned long *stack,
11753+ unsigned long bp,
11754+ const struct stacktrace_ops *ops,
11755+ void *data,
11756+ unsigned long *end,
11757+ int *graph);
11758
11759-extern unsigned long
11760-print_context_stack(struct thread_info *tinfo,
11761- unsigned long *stack, unsigned long bp,
11762- const struct stacktrace_ops *ops, void *data,
11763- unsigned long *end, int *graph);
11764-
11765-extern unsigned long
11766-print_context_stack_bp(struct thread_info *tinfo,
11767- unsigned long *stack, unsigned long bp,
11768- const struct stacktrace_ops *ops, void *data,
11769- unsigned long *end, int *graph);
11770+extern walk_stack_t print_context_stack;
11771+extern walk_stack_t print_context_stack_bp;
11772
11773 /* Generic stack tracer with callbacks */
11774
11775@@ -40,7 +32,7 @@ struct stacktrace_ops {
11776 void (*address)(void *data, unsigned long address, int reliable);
11777 /* On negative return stop dumping */
11778 int (*stack)(void *data, char *name);
11779- walk_stack_t walk_stack;
11780+ walk_stack_t *walk_stack;
11781 };
11782
11783 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11784diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11785index cb23852..2dde194 100644
11786--- a/arch/x86/include/asm/sys_ia32.h
11787+++ b/arch/x86/include/asm/sys_ia32.h
11788@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11789 compat_sigset_t __user *, unsigned int);
11790 asmlinkage long sys32_alarm(unsigned int);
11791
11792-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11793+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11794 asmlinkage long sys32_sysfs(int, u32, u32);
11795
11796 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11797diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11798index 2d2f01c..f985723 100644
11799--- a/arch/x86/include/asm/system.h
11800+++ b/arch/x86/include/asm/system.h
11801@@ -129,7 +129,7 @@ do { \
11802 "call __switch_to\n\t" \
11803 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11804 __switch_canary \
11805- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11806+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11807 "movq %%rax,%%rdi\n\t" \
11808 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11809 "jnz ret_from_fork\n\t" \
11810@@ -140,7 +140,7 @@ do { \
11811 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11812 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11813 [_tif_fork] "i" (_TIF_FORK), \
11814- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11815+ [thread_info] "m" (current_tinfo), \
11816 [current_task] "m" (current_task) \
11817 __switch_canary_iparam \
11818 : "memory", "cc" __EXTRA_CLOBBER)
11819@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11820 {
11821 unsigned long __limit;
11822 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11823- return __limit + 1;
11824+ return __limit;
11825 }
11826
11827 static inline void native_clts(void)
11828@@ -397,13 +397,13 @@ void enable_hlt(void);
11829
11830 void cpu_idle_wait(void);
11831
11832-extern unsigned long arch_align_stack(unsigned long sp);
11833+#define arch_align_stack(x) ((x) & ~0xfUL)
11834 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11835
11836 void default_idle(void);
11837 bool set_pm_idle_to_default(void);
11838
11839-void stop_this_cpu(void *dummy);
11840+void stop_this_cpu(void *dummy) __noreturn;
11841
11842 /*
11843 * Force strict CPU ordering.
11844diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11845index cfd8144..1b1127d 100644
11846--- a/arch/x86/include/asm/thread_info.h
11847+++ b/arch/x86/include/asm/thread_info.h
11848@@ -10,6 +10,7 @@
11849 #include <linux/compiler.h>
11850 #include <asm/page.h>
11851 #include <asm/types.h>
11852+#include <asm/percpu.h>
11853
11854 /*
11855 * low level task data that entry.S needs immediate access to
11856@@ -24,7 +25,6 @@ struct exec_domain;
11857 #include <linux/atomic.h>
11858
11859 struct thread_info {
11860- struct task_struct *task; /* main task structure */
11861 struct exec_domain *exec_domain; /* execution domain */
11862 __u32 flags; /* low level flags */
11863 __u32 status; /* thread synchronous flags */
11864@@ -34,19 +34,13 @@ struct thread_info {
11865 mm_segment_t addr_limit;
11866 struct restart_block restart_block;
11867 void __user *sysenter_return;
11868-#ifdef CONFIG_X86_32
11869- unsigned long previous_esp; /* ESP of the previous stack in
11870- case of nested (IRQ) stacks
11871- */
11872- __u8 supervisor_stack[0];
11873-#endif
11874+ unsigned long lowest_stack;
11875 unsigned int sig_on_uaccess_error:1;
11876 unsigned int uaccess_err:1; /* uaccess failed */
11877 };
11878
11879-#define INIT_THREAD_INFO(tsk) \
11880+#define INIT_THREAD_INFO \
11881 { \
11882- .task = &tsk, \
11883 .exec_domain = &default_exec_domain, \
11884 .flags = 0, \
11885 .cpu = 0, \
11886@@ -57,7 +51,7 @@ struct thread_info {
11887 }, \
11888 }
11889
11890-#define init_thread_info (init_thread_union.thread_info)
11891+#define init_thread_info (init_thread_union.stack)
11892 #define init_stack (init_thread_union.stack)
11893
11894 #else /* !__ASSEMBLY__ */
11895@@ -169,45 +163,40 @@ struct thread_info {
11896 ret; \
11897 })
11898
11899-#ifdef CONFIG_X86_32
11900-
11901-#define STACK_WARN (THREAD_SIZE/8)
11902-/*
11903- * macros/functions for gaining access to the thread information structure
11904- *
11905- * preempt_count needs to be 1 initially, until the scheduler is functional.
11906- */
11907-#ifndef __ASSEMBLY__
11908-
11909-
11910-/* how to get the current stack pointer from C */
11911-register unsigned long current_stack_pointer asm("esp") __used;
11912-
11913-/* how to get the thread information struct from C */
11914-static inline struct thread_info *current_thread_info(void)
11915-{
11916- return (struct thread_info *)
11917- (current_stack_pointer & ~(THREAD_SIZE - 1));
11918-}
11919-
11920-#else /* !__ASSEMBLY__ */
11921-
11922+#ifdef __ASSEMBLY__
11923 /* how to get the thread information struct from ASM */
11924 #define GET_THREAD_INFO(reg) \
11925- movl $-THREAD_SIZE, reg; \
11926- andl %esp, reg
11927+ mov PER_CPU_VAR(current_tinfo), reg
11928
11929 /* use this one if reg already contains %esp */
11930-#define GET_THREAD_INFO_WITH_ESP(reg) \
11931- andl $-THREAD_SIZE, reg
11932+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11933+#else
11934+/* how to get the thread information struct from C */
11935+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
11936+
11937+static __always_inline struct thread_info *current_thread_info(void)
11938+{
11939+ return percpu_read_stable(current_tinfo);
11940+}
11941+#endif
11942+
11943+#ifdef CONFIG_X86_32
11944+
11945+#define STACK_WARN (THREAD_SIZE/8)
11946+/*
11947+ * macros/functions for gaining access to the thread information structure
11948+ *
11949+ * preempt_count needs to be 1 initially, until the scheduler is functional.
11950+ */
11951+#ifndef __ASSEMBLY__
11952+
11953+/* how to get the current stack pointer from C */
11954+register unsigned long current_stack_pointer asm("esp") __used;
11955
11956 #endif
11957
11958 #else /* X86_32 */
11959
11960-#include <asm/percpu.h>
11961-#define KERNEL_STACK_OFFSET (5*8)
11962-
11963 /*
11964 * macros/functions for gaining access to the thread information structure
11965 * preempt_count needs to be 1 initially, until the scheduler is functional.
11966@@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
11967 #ifndef __ASSEMBLY__
11968 DECLARE_PER_CPU(unsigned long, kernel_stack);
11969
11970-static inline struct thread_info *current_thread_info(void)
11971-{
11972- struct thread_info *ti;
11973- ti = (void *)(percpu_read_stable(kernel_stack) +
11974- KERNEL_STACK_OFFSET - THREAD_SIZE);
11975- return ti;
11976-}
11977-
11978-#else /* !__ASSEMBLY__ */
11979-
11980-/* how to get the thread information struct from ASM */
11981-#define GET_THREAD_INFO(reg) \
11982- movq PER_CPU_VAR(kernel_stack),reg ; \
11983- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
11984-
11985-/*
11986- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
11987- * a certain register (to be used in assembler memory operands).
11988- */
11989-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
11990-
11991+/* how to get the current stack pointer from C */
11992+register unsigned long current_stack_pointer asm("rsp") __used;
11993 #endif
11994
11995 #endif /* !X86_32 */
11996@@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
11997 extern void free_thread_info(struct thread_info *ti);
11998 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
11999 #define arch_task_cache_init arch_task_cache_init
12000+
12001+#define __HAVE_THREAD_FUNCTIONS
12002+#define task_thread_info(task) (&(task)->tinfo)
12003+#define task_stack_page(task) ((task)->stack)
12004+#define setup_thread_stack(p, org) do {} while (0)
12005+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12006+
12007+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12008+extern struct task_struct *alloc_task_struct_node(int node);
12009+extern void free_task_struct(struct task_struct *);
12010+
12011 #endif
12012 #endif /* _ASM_X86_THREAD_INFO_H */
12013diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12014index 8be5f54..7ae826d 100644
12015--- a/arch/x86/include/asm/uaccess.h
12016+++ b/arch/x86/include/asm/uaccess.h
12017@@ -7,12 +7,15 @@
12018 #include <linux/compiler.h>
12019 #include <linux/thread_info.h>
12020 #include <linux/string.h>
12021+#include <linux/sched.h>
12022 #include <asm/asm.h>
12023 #include <asm/page.h>
12024
12025 #define VERIFY_READ 0
12026 #define VERIFY_WRITE 1
12027
12028+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12029+
12030 /*
12031 * The fs value determines whether argument validity checking should be
12032 * performed or not. If get_fs() == USER_DS, checking is performed, with
12033@@ -28,7 +31,12 @@
12034
12035 #define get_ds() (KERNEL_DS)
12036 #define get_fs() (current_thread_info()->addr_limit)
12037+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12038+void __set_fs(mm_segment_t x);
12039+void set_fs(mm_segment_t x);
12040+#else
12041 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12042+#endif
12043
12044 #define segment_eq(a, b) ((a).seg == (b).seg)
12045
12046@@ -76,7 +84,33 @@
12047 * checks that the pointer is in the user space range - after calling
12048 * this function, memory access functions may still return -EFAULT.
12049 */
12050-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12051+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12052+#define access_ok(type, addr, size) \
12053+({ \
12054+ long __size = size; \
12055+ unsigned long __addr = (unsigned long)addr; \
12056+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12057+ unsigned long __end_ao = __addr + __size - 1; \
12058+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12059+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12060+ while(__addr_ao <= __end_ao) { \
12061+ char __c_ao; \
12062+ __addr_ao += PAGE_SIZE; \
12063+ if (__size > PAGE_SIZE) \
12064+ cond_resched(); \
12065+ if (__get_user(__c_ao, (char __user *)__addr)) \
12066+ break; \
12067+ if (type != VERIFY_WRITE) { \
12068+ __addr = __addr_ao; \
12069+ continue; \
12070+ } \
12071+ if (__put_user(__c_ao, (char __user *)__addr)) \
12072+ break; \
12073+ __addr = __addr_ao; \
12074+ } \
12075+ } \
12076+ __ret_ao; \
12077+})
12078
12079 /*
12080 * The exception table consists of pairs of addresses: the first is the
12081@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12082 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12083 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12084
12085-
12086+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12087+#define __copyuser_seg "gs;"
12088+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12089+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12090+#else
12091+#define __copyuser_seg
12092+#define __COPYUSER_SET_ES
12093+#define __COPYUSER_RESTORE_ES
12094+#endif
12095
12096 #ifdef CONFIG_X86_32
12097 #define __put_user_asm_u64(x, addr, err, errret) \
12098- asm volatile("1: movl %%eax,0(%2)\n" \
12099- "2: movl %%edx,4(%2)\n" \
12100+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12101+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12102 "3:\n" \
12103 ".section .fixup,\"ax\"\n" \
12104 "4: movl %3,%0\n" \
12105@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12106 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12107
12108 #define __put_user_asm_ex_u64(x, addr) \
12109- asm volatile("1: movl %%eax,0(%1)\n" \
12110- "2: movl %%edx,4(%1)\n" \
12111+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12112+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12113 "3:\n" \
12114 _ASM_EXTABLE(1b, 2b - 1b) \
12115 _ASM_EXTABLE(2b, 3b - 2b) \
12116@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12117 __typeof__(*(ptr)) __pu_val; \
12118 __chk_user_ptr(ptr); \
12119 might_fault(); \
12120- __pu_val = x; \
12121+ __pu_val = (x); \
12122 switch (sizeof(*(ptr))) { \
12123 case 1: \
12124 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12125@@ -373,7 +415,7 @@ do { \
12126 } while (0)
12127
12128 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12129- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12130+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12131 "2:\n" \
12132 ".section .fixup,\"ax\"\n" \
12133 "3: mov %3,%0\n" \
12134@@ -381,7 +423,7 @@ do { \
12135 " jmp 2b\n" \
12136 ".previous\n" \
12137 _ASM_EXTABLE(1b, 3b) \
12138- : "=r" (err), ltype(x) \
12139+ : "=r" (err), ltype (x) \
12140 : "m" (__m(addr)), "i" (errret), "0" (err))
12141
12142 #define __get_user_size_ex(x, ptr, size) \
12143@@ -406,7 +448,7 @@ do { \
12144 } while (0)
12145
12146 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12147- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12148+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12149 "2:\n" \
12150 _ASM_EXTABLE(1b, 2b - 1b) \
12151 : ltype(x) : "m" (__m(addr)))
12152@@ -423,13 +465,24 @@ do { \
12153 int __gu_err; \
12154 unsigned long __gu_val; \
12155 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12156- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12157+ (x) = (__typeof__(*(ptr)))__gu_val; \
12158 __gu_err; \
12159 })
12160
12161 /* FIXME: this hack is definitely wrong -AK */
12162 struct __large_struct { unsigned long buf[100]; };
12163-#define __m(x) (*(struct __large_struct __user *)(x))
12164+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12165+#define ____m(x) \
12166+({ \
12167+ unsigned long ____x = (unsigned long)(x); \
12168+ if (____x < PAX_USER_SHADOW_BASE) \
12169+ ____x += PAX_USER_SHADOW_BASE; \
12170+ (void __user *)____x; \
12171+})
12172+#else
12173+#define ____m(x) (x)
12174+#endif
12175+#define __m(x) (*(struct __large_struct __user *)____m(x))
12176
12177 /*
12178 * Tell gcc we read from memory instead of writing: this is because
12179@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12180 * aliasing issues.
12181 */
12182 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12183- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12184+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12185 "2:\n" \
12186 ".section .fixup,\"ax\"\n" \
12187 "3: mov %3,%0\n" \
12188@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12189 ".previous\n" \
12190 _ASM_EXTABLE(1b, 3b) \
12191 : "=r"(err) \
12192- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12193+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12194
12195 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12196- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12197+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12198 "2:\n" \
12199 _ASM_EXTABLE(1b, 2b - 1b) \
12200 : : ltype(x), "m" (__m(addr)))
12201@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12202 * On error, the variable @x is set to zero.
12203 */
12204
12205+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12206+#define __get_user(x, ptr) get_user((x), (ptr))
12207+#else
12208 #define __get_user(x, ptr) \
12209 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12210+#endif
12211
12212 /**
12213 * __put_user: - Write a simple value into user space, with less checking.
12214@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12215 * Returns zero on success, or -EFAULT on error.
12216 */
12217
12218+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12219+#define __put_user(x, ptr) put_user((x), (ptr))
12220+#else
12221 #define __put_user(x, ptr) \
12222 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12223+#endif
12224
12225 #define __get_user_unaligned __get_user
12226 #define __put_user_unaligned __put_user
12227@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12228 #define get_user_ex(x, ptr) do { \
12229 unsigned long __gue_val; \
12230 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12231- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12232+ (x) = (__typeof__(*(ptr)))__gue_val; \
12233 } while (0)
12234
12235 #ifdef CONFIG_X86_WP_WORKS_OK
12236diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12237index 566e803..b9521e9 100644
12238--- a/arch/x86/include/asm/uaccess_32.h
12239+++ b/arch/x86/include/asm/uaccess_32.h
12240@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12241 static __always_inline unsigned long __must_check
12242 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12243 {
12244+ if ((long)n < 0)
12245+ return n;
12246+
12247 if (__builtin_constant_p(n)) {
12248 unsigned long ret;
12249
12250@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12251 return ret;
12252 }
12253 }
12254+ if (!__builtin_constant_p(n))
12255+ check_object_size(from, n, true);
12256 return __copy_to_user_ll(to, from, n);
12257 }
12258
12259@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12260 __copy_to_user(void __user *to, const void *from, unsigned long n)
12261 {
12262 might_fault();
12263+
12264 return __copy_to_user_inatomic(to, from, n);
12265 }
12266
12267 static __always_inline unsigned long
12268 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12269 {
12270+ if ((long)n < 0)
12271+ return n;
12272+
12273 /* Avoid zeroing the tail if the copy fails..
12274 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12275 * but as the zeroing behaviour is only significant when n is not
12276@@ -137,6 +146,10 @@ static __always_inline unsigned long
12277 __copy_from_user(void *to, const void __user *from, unsigned long n)
12278 {
12279 might_fault();
12280+
12281+ if ((long)n < 0)
12282+ return n;
12283+
12284 if (__builtin_constant_p(n)) {
12285 unsigned long ret;
12286
12287@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12288 return ret;
12289 }
12290 }
12291+ if (!__builtin_constant_p(n))
12292+ check_object_size(to, n, false);
12293 return __copy_from_user_ll(to, from, n);
12294 }
12295
12296@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12297 const void __user *from, unsigned long n)
12298 {
12299 might_fault();
12300+
12301+ if ((long)n < 0)
12302+ return n;
12303+
12304 if (__builtin_constant_p(n)) {
12305 unsigned long ret;
12306
12307@@ -181,15 +200,19 @@ static __always_inline unsigned long
12308 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12309 unsigned long n)
12310 {
12311- return __copy_from_user_ll_nocache_nozero(to, from, n);
12312+ if ((long)n < 0)
12313+ return n;
12314+
12315+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12316 }
12317
12318-unsigned long __must_check copy_to_user(void __user *to,
12319- const void *from, unsigned long n);
12320-unsigned long __must_check _copy_from_user(void *to,
12321- const void __user *from,
12322- unsigned long n);
12323-
12324+extern void copy_to_user_overflow(void)
12325+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12326+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12327+#else
12328+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12329+#endif
12330+;
12331
12332 extern void copy_from_user_overflow(void)
12333 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12334@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12335 #endif
12336 ;
12337
12338-static inline unsigned long __must_check copy_from_user(void *to,
12339- const void __user *from,
12340- unsigned long n)
12341+/**
12342+ * copy_to_user: - Copy a block of data into user space.
12343+ * @to: Destination address, in user space.
12344+ * @from: Source address, in kernel space.
12345+ * @n: Number of bytes to copy.
12346+ *
12347+ * Context: User context only. This function may sleep.
12348+ *
12349+ * Copy data from kernel space to user space.
12350+ *
12351+ * Returns number of bytes that could not be copied.
12352+ * On success, this will be zero.
12353+ */
12354+static inline unsigned long __must_check
12355+copy_to_user(void __user *to, const void *from, unsigned long n)
12356+{
12357+ int sz = __compiletime_object_size(from);
12358+
12359+ if (unlikely(sz != -1 && sz < n))
12360+ copy_to_user_overflow();
12361+ else if (access_ok(VERIFY_WRITE, to, n))
12362+ n = __copy_to_user(to, from, n);
12363+ return n;
12364+}
12365+
12366+/**
12367+ * copy_from_user: - Copy a block of data from user space.
12368+ * @to: Destination address, in kernel space.
12369+ * @from: Source address, in user space.
12370+ * @n: Number of bytes to copy.
12371+ *
12372+ * Context: User context only. This function may sleep.
12373+ *
12374+ * Copy data from user space to kernel space.
12375+ *
12376+ * Returns number of bytes that could not be copied.
12377+ * On success, this will be zero.
12378+ *
12379+ * If some data could not be copied, this function will pad the copied
12380+ * data to the requested size using zero bytes.
12381+ */
12382+static inline unsigned long __must_check
12383+copy_from_user(void *to, const void __user *from, unsigned long n)
12384 {
12385 int sz = __compiletime_object_size(to);
12386
12387- if (likely(sz == -1 || sz >= n))
12388- n = _copy_from_user(to, from, n);
12389- else
12390+ if (unlikely(sz != -1 && sz < n))
12391 copy_from_user_overflow();
12392-
12393+ else if (access_ok(VERIFY_READ, from, n))
12394+ n = __copy_from_user(to, from, n);
12395+ else if ((long)n > 0) {
12396+ if (!__builtin_constant_p(n))
12397+ check_object_size(to, n, false);
12398+ memset(to, 0, n);
12399+ }
12400 return n;
12401 }
12402
12403diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12404index 1c66d30..e66922c 100644
12405--- a/arch/x86/include/asm/uaccess_64.h
12406+++ b/arch/x86/include/asm/uaccess_64.h
12407@@ -10,6 +10,9 @@
12408 #include <asm/alternative.h>
12409 #include <asm/cpufeature.h>
12410 #include <asm/page.h>
12411+#include <asm/pgtable.h>
12412+
12413+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12414
12415 /*
12416 * Copy To/From Userspace
12417@@ -17,12 +20,12 @@
12418
12419 /* Handles exceptions in both to and from, but doesn't do access_ok */
12420 __must_check unsigned long
12421-copy_user_generic_string(void *to, const void *from, unsigned len);
12422+copy_user_generic_string(void *to, const void *from, unsigned long len);
12423 __must_check unsigned long
12424-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12425+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
12426
12427 static __always_inline __must_check unsigned long
12428-copy_user_generic(void *to, const void *from, unsigned len)
12429+copy_user_generic(void *to, const void *from, unsigned long len)
12430 {
12431 unsigned ret;
12432
12433@@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
12434 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12435 "=d" (len)),
12436 "1" (to), "2" (from), "3" (len)
12437- : "memory", "rcx", "r8", "r9", "r10", "r11");
12438+ : "memory", "rcx", "r8", "r9", "r11");
12439 return ret;
12440 }
12441
12442+static __always_inline __must_check unsigned long
12443+__copy_to_user(void __user *to, const void *from, unsigned long len);
12444+static __always_inline __must_check unsigned long
12445+__copy_from_user(void *to, const void __user *from, unsigned long len);
12446 __must_check unsigned long
12447-_copy_to_user(void __user *to, const void *from, unsigned len);
12448-__must_check unsigned long
12449-_copy_from_user(void *to, const void __user *from, unsigned len);
12450-__must_check unsigned long
12451-copy_in_user(void __user *to, const void __user *from, unsigned len);
12452+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12453
12454 static inline unsigned long __must_check copy_from_user(void *to,
12455 const void __user *from,
12456 unsigned long n)
12457 {
12458- int sz = __compiletime_object_size(to);
12459-
12460 might_fault();
12461- if (likely(sz == -1 || sz >= n))
12462- n = _copy_from_user(to, from, n);
12463-#ifdef CONFIG_DEBUG_VM
12464- else
12465- WARN(1, "Buffer overflow detected!\n");
12466-#endif
12467+
12468+ if (access_ok(VERIFY_READ, from, n))
12469+ n = __copy_from_user(to, from, n);
12470+ else if (n < INT_MAX) {
12471+ if (!__builtin_constant_p(n))
12472+ check_object_size(to, n, false);
12473+ memset(to, 0, n);
12474+ }
12475 return n;
12476 }
12477
12478 static __always_inline __must_check
12479-int copy_to_user(void __user *dst, const void *src, unsigned size)
12480+int copy_to_user(void __user *dst, const void *src, unsigned long size)
12481 {
12482 might_fault();
12483
12484- return _copy_to_user(dst, src, size);
12485+ if (access_ok(VERIFY_WRITE, dst, size))
12486+ size = __copy_to_user(dst, src, size);
12487+ return size;
12488 }
12489
12490 static __always_inline __must_check
12491-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12492+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12493 {
12494- int ret = 0;
12495+ int sz = __compiletime_object_size(dst);
12496+ unsigned ret = 0;
12497
12498 might_fault();
12499- if (!__builtin_constant_p(size))
12500- return copy_user_generic(dst, (__force void *)src, size);
12501+
12502+ if (size > INT_MAX)
12503+ return size;
12504+
12505+#ifdef CONFIG_PAX_MEMORY_UDEREF
12506+ if (!__access_ok(VERIFY_READ, src, size))
12507+ return size;
12508+#endif
12509+
12510+ if (unlikely(sz != -1 && sz < size)) {
12511+#ifdef CONFIG_DEBUG_VM
12512+ WARN(1, "Buffer overflow detected!\n");
12513+#endif
12514+ return size;
12515+ }
12516+
12517+ if (!__builtin_constant_p(size)) {
12518+ check_object_size(dst, size, false);
12519+
12520+#ifdef CONFIG_PAX_MEMORY_UDEREF
12521+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12522+ src += PAX_USER_SHADOW_BASE;
12523+#endif
12524+
12525+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12526+ }
12527 switch (size) {
12528- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12529+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12530 ret, "b", "b", "=q", 1);
12531 return ret;
12532- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12533+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12534 ret, "w", "w", "=r", 2);
12535 return ret;
12536- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12537+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12538 ret, "l", "k", "=r", 4);
12539 return ret;
12540- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12541+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12542 ret, "q", "", "=r", 8);
12543 return ret;
12544 case 10:
12545- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12546+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12547 ret, "q", "", "=r", 10);
12548 if (unlikely(ret))
12549 return ret;
12550 __get_user_asm(*(u16 *)(8 + (char *)dst),
12551- (u16 __user *)(8 + (char __user *)src),
12552+ (const u16 __user *)(8 + (const char __user *)src),
12553 ret, "w", "w", "=r", 2);
12554 return ret;
12555 case 16:
12556- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12557+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12558 ret, "q", "", "=r", 16);
12559 if (unlikely(ret))
12560 return ret;
12561 __get_user_asm(*(u64 *)(8 + (char *)dst),
12562- (u64 __user *)(8 + (char __user *)src),
12563+ (const u64 __user *)(8 + (const char __user *)src),
12564 ret, "q", "", "=r", 8);
12565 return ret;
12566 default:
12567- return copy_user_generic(dst, (__force void *)src, size);
12568+
12569+#ifdef CONFIG_PAX_MEMORY_UDEREF
12570+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12571+ src += PAX_USER_SHADOW_BASE;
12572+#endif
12573+
12574+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12575 }
12576 }
12577
12578 static __always_inline __must_check
12579-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12580+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12581 {
12582- int ret = 0;
12583+ int sz = __compiletime_object_size(src);
12584+ unsigned ret = 0;
12585
12586 might_fault();
12587- if (!__builtin_constant_p(size))
12588- return copy_user_generic((__force void *)dst, src, size);
12589+
12590+ if (size > INT_MAX)
12591+ return size;
12592+
12593+#ifdef CONFIG_PAX_MEMORY_UDEREF
12594+ if (!__access_ok(VERIFY_WRITE, dst, size))
12595+ return size;
12596+#endif
12597+
12598+ if (unlikely(sz != -1 && sz < size)) {
12599+#ifdef CONFIG_DEBUG_VM
12600+ WARN(1, "Buffer overflow detected!\n");
12601+#endif
12602+ return size;
12603+ }
12604+
12605+ if (!__builtin_constant_p(size)) {
12606+ check_object_size(src, size, true);
12607+
12608+#ifdef CONFIG_PAX_MEMORY_UDEREF
12609+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12610+ dst += PAX_USER_SHADOW_BASE;
12611+#endif
12612+
12613+ return copy_user_generic((__force_kernel void *)dst, src, size);
12614+ }
12615 switch (size) {
12616- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12617+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12618 ret, "b", "b", "iq", 1);
12619 return ret;
12620- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12621+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12622 ret, "w", "w", "ir", 2);
12623 return ret;
12624- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12625+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12626 ret, "l", "k", "ir", 4);
12627 return ret;
12628- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12629+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12630 ret, "q", "", "er", 8);
12631 return ret;
12632 case 10:
12633- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12634+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12635 ret, "q", "", "er", 10);
12636 if (unlikely(ret))
12637 return ret;
12638 asm("":::"memory");
12639- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12640+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12641 ret, "w", "w", "ir", 2);
12642 return ret;
12643 case 16:
12644- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12645+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12646 ret, "q", "", "er", 16);
12647 if (unlikely(ret))
12648 return ret;
12649 asm("":::"memory");
12650- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12651+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12652 ret, "q", "", "er", 8);
12653 return ret;
12654 default:
12655- return copy_user_generic((__force void *)dst, src, size);
12656+
12657+#ifdef CONFIG_PAX_MEMORY_UDEREF
12658+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659+ dst += PAX_USER_SHADOW_BASE;
12660+#endif
12661+
12662+ return copy_user_generic((__force_kernel void *)dst, src, size);
12663 }
12664 }
12665
12666 static __always_inline __must_check
12667-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12668+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12669 {
12670- int ret = 0;
12671+ unsigned ret = 0;
12672
12673 might_fault();
12674- if (!__builtin_constant_p(size))
12675- return copy_user_generic((__force void *)dst,
12676- (__force void *)src, size);
12677+
12678+ if (size > INT_MAX)
12679+ return size;
12680+
12681+#ifdef CONFIG_PAX_MEMORY_UDEREF
12682+ if (!__access_ok(VERIFY_READ, src, size))
12683+ return size;
12684+ if (!__access_ok(VERIFY_WRITE, dst, size))
12685+ return size;
12686+#endif
12687+
12688+ if (!__builtin_constant_p(size)) {
12689+
12690+#ifdef CONFIG_PAX_MEMORY_UDEREF
12691+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12692+ src += PAX_USER_SHADOW_BASE;
12693+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12694+ dst += PAX_USER_SHADOW_BASE;
12695+#endif
12696+
12697+ return copy_user_generic((__force_kernel void *)dst,
12698+ (__force_kernel const void *)src, size);
12699+ }
12700 switch (size) {
12701 case 1: {
12702 u8 tmp;
12703- __get_user_asm(tmp, (u8 __user *)src,
12704+ __get_user_asm(tmp, (const u8 __user *)src,
12705 ret, "b", "b", "=q", 1);
12706 if (likely(!ret))
12707 __put_user_asm(tmp, (u8 __user *)dst,
12708@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12709 }
12710 case 2: {
12711 u16 tmp;
12712- __get_user_asm(tmp, (u16 __user *)src,
12713+ __get_user_asm(tmp, (const u16 __user *)src,
12714 ret, "w", "w", "=r", 2);
12715 if (likely(!ret))
12716 __put_user_asm(tmp, (u16 __user *)dst,
12717@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12718
12719 case 4: {
12720 u32 tmp;
12721- __get_user_asm(tmp, (u32 __user *)src,
12722+ __get_user_asm(tmp, (const u32 __user *)src,
12723 ret, "l", "k", "=r", 4);
12724 if (likely(!ret))
12725 __put_user_asm(tmp, (u32 __user *)dst,
12726@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12727 }
12728 case 8: {
12729 u64 tmp;
12730- __get_user_asm(tmp, (u64 __user *)src,
12731+ __get_user_asm(tmp, (const u64 __user *)src,
12732 ret, "q", "", "=r", 8);
12733 if (likely(!ret))
12734 __put_user_asm(tmp, (u64 __user *)dst,
12735@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12736 return ret;
12737 }
12738 default:
12739- return copy_user_generic((__force void *)dst,
12740- (__force void *)src, size);
12741+
12742+#ifdef CONFIG_PAX_MEMORY_UDEREF
12743+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12744+ src += PAX_USER_SHADOW_BASE;
12745+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12746+ dst += PAX_USER_SHADOW_BASE;
12747+#endif
12748+
12749+ return copy_user_generic((__force_kernel void *)dst,
12750+ (__force_kernel const void *)src, size);
12751 }
12752 }
12753
12754@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12755 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12756
12757 static __must_check __always_inline int
12758-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12759+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12760 {
12761- return copy_user_generic(dst, (__force const void *)src, size);
12762+ if (size > INT_MAX)
12763+ return size;
12764+
12765+#ifdef CONFIG_PAX_MEMORY_UDEREF
12766+ if (!__access_ok(VERIFY_READ, src, size))
12767+ return size;
12768+
12769+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12770+ src += PAX_USER_SHADOW_BASE;
12771+#endif
12772+
12773+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12774 }
12775
12776-static __must_check __always_inline int
12777-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12778+static __must_check __always_inline unsigned long
12779+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12780 {
12781- return copy_user_generic((__force void *)dst, src, size);
12782+ if (size > INT_MAX)
12783+ return size;
12784+
12785+#ifdef CONFIG_PAX_MEMORY_UDEREF
12786+ if (!__access_ok(VERIFY_WRITE, dst, size))
12787+ return size;
12788+
12789+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12790+ dst += PAX_USER_SHADOW_BASE;
12791+#endif
12792+
12793+ return copy_user_generic((__force_kernel void *)dst, src, size);
12794 }
12795
12796-extern long __copy_user_nocache(void *dst, const void __user *src,
12797- unsigned size, int zerorest);
12798+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12799+ unsigned long size, int zerorest);
12800
12801-static inline int
12802-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12803+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12804 {
12805 might_sleep();
12806+
12807+ if (size > INT_MAX)
12808+ return size;
12809+
12810+#ifdef CONFIG_PAX_MEMORY_UDEREF
12811+ if (!__access_ok(VERIFY_READ, src, size))
12812+ return size;
12813+#endif
12814+
12815 return __copy_user_nocache(dst, src, size, 1);
12816 }
12817
12818-static inline int
12819-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12820- unsigned size)
12821+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12822+ unsigned long size)
12823 {
12824+ if (size > INT_MAX)
12825+ return size;
12826+
12827+#ifdef CONFIG_PAX_MEMORY_UDEREF
12828+ if (!__access_ok(VERIFY_READ, src, size))
12829+ return size;
12830+#endif
12831+
12832 return __copy_user_nocache(dst, src, size, 0);
12833 }
12834
12835-unsigned long
12836-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12837+extern unsigned long
12838+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12839
12840 #endif /* _ASM_X86_UACCESS_64_H */
12841diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12842index bb05228..d763d5b 100644
12843--- a/arch/x86/include/asm/vdso.h
12844+++ b/arch/x86/include/asm/vdso.h
12845@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12846 #define VDSO32_SYMBOL(base, name) \
12847 ({ \
12848 extern const char VDSO32_##name[]; \
12849- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12850+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12851 })
12852 #endif
12853
12854diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12855index 517d476..a1cb4d9 100644
12856--- a/arch/x86/include/asm/x86_init.h
12857+++ b/arch/x86/include/asm/x86_init.h
12858@@ -29,7 +29,7 @@ struct x86_init_mpparse {
12859 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12860 void (*find_smp_config)(void);
12861 void (*get_smp_config)(unsigned int early);
12862-};
12863+} __no_const;
12864
12865 /**
12866 * struct x86_init_resources - platform specific resource related ops
12867@@ -43,7 +43,7 @@ struct x86_init_resources {
12868 void (*probe_roms)(void);
12869 void (*reserve_resources)(void);
12870 char *(*memory_setup)(void);
12871-};
12872+} __no_const;
12873
12874 /**
12875 * struct x86_init_irqs - platform specific interrupt setup
12876@@ -56,7 +56,7 @@ struct x86_init_irqs {
12877 void (*pre_vector_init)(void);
12878 void (*intr_init)(void);
12879 void (*trap_init)(void);
12880-};
12881+} __no_const;
12882
12883 /**
12884 * struct x86_init_oem - oem platform specific customizing functions
12885@@ -66,7 +66,7 @@ struct x86_init_irqs {
12886 struct x86_init_oem {
12887 void (*arch_setup)(void);
12888 void (*banner)(void);
12889-};
12890+} __no_const;
12891
12892 /**
12893 * struct x86_init_mapping - platform specific initial kernel pagetable setup
12894@@ -77,7 +77,7 @@ struct x86_init_oem {
12895 */
12896 struct x86_init_mapping {
12897 void (*pagetable_reserve)(u64 start, u64 end);
12898-};
12899+} __no_const;
12900
12901 /**
12902 * struct x86_init_paging - platform specific paging functions
12903@@ -87,7 +87,7 @@ struct x86_init_mapping {
12904 struct x86_init_paging {
12905 void (*pagetable_setup_start)(pgd_t *base);
12906 void (*pagetable_setup_done)(pgd_t *base);
12907-};
12908+} __no_const;
12909
12910 /**
12911 * struct x86_init_timers - platform specific timer setup
12912@@ -102,7 +102,7 @@ struct x86_init_timers {
12913 void (*tsc_pre_init)(void);
12914 void (*timer_init)(void);
12915 void (*wallclock_init)(void);
12916-};
12917+} __no_const;
12918
12919 /**
12920 * struct x86_init_iommu - platform specific iommu setup
12921@@ -110,7 +110,7 @@ struct x86_init_timers {
12922 */
12923 struct x86_init_iommu {
12924 int (*iommu_init)(void);
12925-};
12926+} __no_const;
12927
12928 /**
12929 * struct x86_init_pci - platform specific pci init functions
12930@@ -124,7 +124,7 @@ struct x86_init_pci {
12931 int (*init)(void);
12932 void (*init_irq)(void);
12933 void (*fixup_irqs)(void);
12934-};
12935+} __no_const;
12936
12937 /**
12938 * struct x86_init_ops - functions for platform specific setup
12939@@ -140,7 +140,7 @@ struct x86_init_ops {
12940 struct x86_init_timers timers;
12941 struct x86_init_iommu iommu;
12942 struct x86_init_pci pci;
12943-};
12944+} __no_const;
12945
12946 /**
12947 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
12948@@ -149,7 +149,7 @@ struct x86_init_ops {
12949 struct x86_cpuinit_ops {
12950 void (*setup_percpu_clockev)(void);
12951 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
12952-};
12953+} __no_const;
12954
12955 /**
12956 * struct x86_platform_ops - platform specific runtime functions
12957@@ -171,7 +171,7 @@ struct x86_platform_ops {
12958 void (*nmi_init)(void);
12959 unsigned char (*get_nmi_reason)(void);
12960 int (*i8042_detect)(void);
12961-};
12962+} __no_const;
12963
12964 struct pci_dev;
12965
12966@@ -180,7 +180,7 @@ struct x86_msi_ops {
12967 void (*teardown_msi_irq)(unsigned int irq);
12968 void (*teardown_msi_irqs)(struct pci_dev *dev);
12969 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
12970-};
12971+} __no_const;
12972
12973 extern struct x86_init_ops x86_init;
12974 extern struct x86_cpuinit_ops x86_cpuinit;
12975diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
12976index c6ce245..ffbdab7 100644
12977--- a/arch/x86/include/asm/xsave.h
12978+++ b/arch/x86/include/asm/xsave.h
12979@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12980 {
12981 int err;
12982
12983+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12984+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
12985+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
12986+#endif
12987+
12988 /*
12989 * Clear the xsave header first, so that reserved fields are
12990 * initialized to zero.
12991@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12992 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
12993 {
12994 int err;
12995- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
12996+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
12997 u32 lmask = mask;
12998 u32 hmask = mask >> 32;
12999
13000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13001+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13002+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13003+#endif
13004+
13005 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13006 "2:\n"
13007 ".section .fixup,\"ax\"\n"
13008diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13009index 6a564ac..9b1340c 100644
13010--- a/arch/x86/kernel/acpi/realmode/Makefile
13011+++ b/arch/x86/kernel/acpi/realmode/Makefile
13012@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13013 $(call cc-option, -fno-stack-protector) \
13014 $(call cc-option, -mpreferred-stack-boundary=2)
13015 KBUILD_CFLAGS += $(call cc-option, -m32)
13016+ifdef CONSTIFY_PLUGIN
13017+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13018+endif
13019 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13020 GCOV_PROFILE := n
13021
13022diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13023index b4fd836..4358fe3 100644
13024--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13025+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13026@@ -108,6 +108,9 @@ wakeup_code:
13027 /* Do any other stuff... */
13028
13029 #ifndef CONFIG_64BIT
13030+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13031+ call verify_cpu
13032+
13033 /* This could also be done in C code... */
13034 movl pmode_cr3, %eax
13035 movl %eax, %cr3
13036@@ -131,6 +134,7 @@ wakeup_code:
13037 movl pmode_cr0, %eax
13038 movl %eax, %cr0
13039 jmp pmode_return
13040+# include "../../verify_cpu.S"
13041 #else
13042 pushw $0
13043 pushw trampoline_segment
13044diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13045index 103b6ab..2004d0a 100644
13046--- a/arch/x86/kernel/acpi/sleep.c
13047+++ b/arch/x86/kernel/acpi/sleep.c
13048@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13049 header->trampoline_segment = trampoline_address() >> 4;
13050 #ifdef CONFIG_SMP
13051 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13052+
13053+ pax_open_kernel();
13054 early_gdt_descr.address =
13055 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13056+ pax_close_kernel();
13057+
13058 initial_gs = per_cpu_offset(smp_processor_id());
13059 #endif
13060 initial_code = (unsigned long)wakeup_long64;
13061diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13062index 13ab720..95d5442 100644
13063--- a/arch/x86/kernel/acpi/wakeup_32.S
13064+++ b/arch/x86/kernel/acpi/wakeup_32.S
13065@@ -30,13 +30,11 @@ wakeup_pmode_return:
13066 # and restore the stack ... but you need gdt for this to work
13067 movl saved_context_esp, %esp
13068
13069- movl %cs:saved_magic, %eax
13070- cmpl $0x12345678, %eax
13071+ cmpl $0x12345678, saved_magic
13072 jne bogus_magic
13073
13074 # jump to place where we left off
13075- movl saved_eip, %eax
13076- jmp *%eax
13077+ jmp *(saved_eip)
13078
13079 bogus_magic:
13080 jmp bogus_magic
13081diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13082index 1f84794..e23f862 100644
13083--- a/arch/x86/kernel/alternative.c
13084+++ b/arch/x86/kernel/alternative.c
13085@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13086 */
13087 for (a = start; a < end; a++) {
13088 instr = (u8 *)&a->instr_offset + a->instr_offset;
13089+
13090+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13091+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13092+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13093+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13094+#endif
13095+
13096 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13097 BUG_ON(a->replacementlen > a->instrlen);
13098 BUG_ON(a->instrlen > sizeof(insnbuf));
13099@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13100 for (poff = start; poff < end; poff++) {
13101 u8 *ptr = (u8 *)poff + *poff;
13102
13103+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13104+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13105+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13106+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13107+#endif
13108+
13109 if (!*poff || ptr < text || ptr >= text_end)
13110 continue;
13111 /* turn DS segment override prefix into lock prefix */
13112- if (*ptr == 0x3e)
13113+ if (*ktla_ktva(ptr) == 0x3e)
13114 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13115 };
13116 mutex_unlock(&text_mutex);
13117@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13118 for (poff = start; poff < end; poff++) {
13119 u8 *ptr = (u8 *)poff + *poff;
13120
13121+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13122+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13123+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13124+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13125+#endif
13126+
13127 if (!*poff || ptr < text || ptr >= text_end)
13128 continue;
13129 /* turn lock prefix into DS segment override prefix */
13130- if (*ptr == 0xf0)
13131+ if (*ktla_ktva(ptr) == 0xf0)
13132 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13133 };
13134 mutex_unlock(&text_mutex);
13135@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13136
13137 BUG_ON(p->len > MAX_PATCH_LEN);
13138 /* prep the buffer with the original instructions */
13139- memcpy(insnbuf, p->instr, p->len);
13140+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13141 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13142 (unsigned long)p->instr, p->len);
13143
13144@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13145 if (smp_alt_once)
13146 free_init_pages("SMP alternatives",
13147 (unsigned long)__smp_locks,
13148- (unsigned long)__smp_locks_end);
13149+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13150
13151 restart_nmi();
13152 }
13153@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13154 * instructions. And on the local CPU you need to be protected again NMI or MCE
13155 * handlers seeing an inconsistent instruction while you patch.
13156 */
13157-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13158+void *__kprobes text_poke_early(void *addr, const void *opcode,
13159 size_t len)
13160 {
13161 unsigned long flags;
13162 local_irq_save(flags);
13163- memcpy(addr, opcode, len);
13164+
13165+ pax_open_kernel();
13166+ memcpy(ktla_ktva(addr), opcode, len);
13167 sync_core();
13168+ pax_close_kernel();
13169+
13170 local_irq_restore(flags);
13171 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13172 that causes hangs on some VIA CPUs. */
13173@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13174 */
13175 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13176 {
13177- unsigned long flags;
13178- char *vaddr;
13179+ unsigned char *vaddr = ktla_ktva(addr);
13180 struct page *pages[2];
13181- int i;
13182+ size_t i;
13183
13184 if (!core_kernel_text((unsigned long)addr)) {
13185- pages[0] = vmalloc_to_page(addr);
13186- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13187+ pages[0] = vmalloc_to_page(vaddr);
13188+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13189 } else {
13190- pages[0] = virt_to_page(addr);
13191+ pages[0] = virt_to_page(vaddr);
13192 WARN_ON(!PageReserved(pages[0]));
13193- pages[1] = virt_to_page(addr + PAGE_SIZE);
13194+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13195 }
13196 BUG_ON(!pages[0]);
13197- local_irq_save(flags);
13198- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13199- if (pages[1])
13200- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13201- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13202- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13203- clear_fixmap(FIX_TEXT_POKE0);
13204- if (pages[1])
13205- clear_fixmap(FIX_TEXT_POKE1);
13206- local_flush_tlb();
13207- sync_core();
13208- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13209- that causes hangs on some VIA CPUs. */
13210+ text_poke_early(addr, opcode, len);
13211 for (i = 0; i < len; i++)
13212- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13213- local_irq_restore(flags);
13214+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13215 return addr;
13216 }
13217
13218diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13219index 2eec05b..fef012b 100644
13220--- a/arch/x86/kernel/apic/apic.c
13221+++ b/arch/x86/kernel/apic/apic.c
13222@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13223 /*
13224 * Debug level, exported for io_apic.c
13225 */
13226-unsigned int apic_verbosity;
13227+int apic_verbosity;
13228
13229 int pic_mode;
13230
13231@@ -1908,7 +1908,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13232 apic_write(APIC_ESR, 0);
13233 v1 = apic_read(APIC_ESR);
13234 ack_APIC_irq();
13235- atomic_inc(&irq_err_count);
13236+ atomic_inc_unchecked(&irq_err_count);
13237
13238 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13239 smp_processor_id(), v0 , v1);
13240diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13241index fb07275..e06bb59 100644
13242--- a/arch/x86/kernel/apic/io_apic.c
13243+++ b/arch/x86/kernel/apic/io_apic.c
13244@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13245 }
13246 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13247
13248-void lock_vector_lock(void)
13249+void lock_vector_lock(void) __acquires(vector_lock)
13250 {
13251 /* Used to the online set of cpus does not change
13252 * during assign_irq_vector.
13253@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13254 raw_spin_lock(&vector_lock);
13255 }
13256
13257-void unlock_vector_lock(void)
13258+void unlock_vector_lock(void) __releases(vector_lock)
13259 {
13260 raw_spin_unlock(&vector_lock);
13261 }
13262@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13263 ack_APIC_irq();
13264 }
13265
13266-atomic_t irq_mis_count;
13267+atomic_unchecked_t irq_mis_count;
13268
13269 static void ack_apic_level(struct irq_data *data)
13270 {
13271@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13272 * at the cpu.
13273 */
13274 if (!(v & (1 << (i & 0x1f)))) {
13275- atomic_inc(&irq_mis_count);
13276+ atomic_inc_unchecked(&irq_mis_count);
13277
13278 eoi_ioapic_irq(irq, cfg);
13279 }
13280diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13281index f76623c..aab694f 100644
13282--- a/arch/x86/kernel/apm_32.c
13283+++ b/arch/x86/kernel/apm_32.c
13284@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13285 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13286 * even though they are called in protected mode.
13287 */
13288-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13289+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13290 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13291
13292 static const char driver_version[] = "1.16ac"; /* no spaces */
13293@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13294 BUG_ON(cpu != 0);
13295 gdt = get_cpu_gdt_table(cpu);
13296 save_desc_40 = gdt[0x40 / 8];
13297+
13298+ pax_open_kernel();
13299 gdt[0x40 / 8] = bad_bios_desc;
13300+ pax_close_kernel();
13301
13302 apm_irq_save(flags);
13303 APM_DO_SAVE_SEGS;
13304@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13305 &call->esi);
13306 APM_DO_RESTORE_SEGS;
13307 apm_irq_restore(flags);
13308+
13309+ pax_open_kernel();
13310 gdt[0x40 / 8] = save_desc_40;
13311+ pax_close_kernel();
13312+
13313 put_cpu();
13314
13315 return call->eax & 0xff;
13316@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13317 BUG_ON(cpu != 0);
13318 gdt = get_cpu_gdt_table(cpu);
13319 save_desc_40 = gdt[0x40 / 8];
13320+
13321+ pax_open_kernel();
13322 gdt[0x40 / 8] = bad_bios_desc;
13323+ pax_close_kernel();
13324
13325 apm_irq_save(flags);
13326 APM_DO_SAVE_SEGS;
13327@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13328 &call->eax);
13329 APM_DO_RESTORE_SEGS;
13330 apm_irq_restore(flags);
13331+
13332+ pax_open_kernel();
13333 gdt[0x40 / 8] = save_desc_40;
13334+ pax_close_kernel();
13335+
13336 put_cpu();
13337 return error;
13338 }
13339@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13340 * code to that CPU.
13341 */
13342 gdt = get_cpu_gdt_table(0);
13343+
13344+ pax_open_kernel();
13345 set_desc_base(&gdt[APM_CS >> 3],
13346 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13347 set_desc_base(&gdt[APM_CS_16 >> 3],
13348 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13349 set_desc_base(&gdt[APM_DS >> 3],
13350 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13351+ pax_close_kernel();
13352
13353 proc_create("apm", 0, NULL, &apm_file_ops);
13354
13355diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13356index 68de2dc..1f3c720 100644
13357--- a/arch/x86/kernel/asm-offsets.c
13358+++ b/arch/x86/kernel/asm-offsets.c
13359@@ -33,6 +33,8 @@ void common(void) {
13360 OFFSET(TI_status, thread_info, status);
13361 OFFSET(TI_addr_limit, thread_info, addr_limit);
13362 OFFSET(TI_preempt_count, thread_info, preempt_count);
13363+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13364+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13365
13366 BLANK();
13367 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13368@@ -53,8 +55,26 @@ void common(void) {
13369 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13370 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13371 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13372+
13373+#ifdef CONFIG_PAX_KERNEXEC
13374+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13375 #endif
13376
13377+#ifdef CONFIG_PAX_MEMORY_UDEREF
13378+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13379+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13380+#ifdef CONFIG_X86_64
13381+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13382+#endif
13383+#endif
13384+
13385+#endif
13386+
13387+ BLANK();
13388+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13389+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13390+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13391+
13392 #ifdef CONFIG_XEN
13393 BLANK();
13394 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13395diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13396index 834e897..dacddc8 100644
13397--- a/arch/x86/kernel/asm-offsets_64.c
13398+++ b/arch/x86/kernel/asm-offsets_64.c
13399@@ -70,6 +70,7 @@ int main(void)
13400 BLANK();
13401 #undef ENTRY
13402
13403+ DEFINE(TSS_size, sizeof(struct tss_struct));
13404 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13405 BLANK();
13406
13407diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13408index 25f24dc..4094a7f 100644
13409--- a/arch/x86/kernel/cpu/Makefile
13410+++ b/arch/x86/kernel/cpu/Makefile
13411@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13412 CFLAGS_REMOVE_perf_event.o = -pg
13413 endif
13414
13415-# Make sure load_percpu_segment has no stackprotector
13416-nostackp := $(call cc-option, -fno-stack-protector)
13417-CFLAGS_common.o := $(nostackp)
13418-
13419 obj-y := intel_cacheinfo.o scattered.o topology.o
13420 obj-y += proc.o capflags.o powerflags.o common.o
13421 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13422diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13423index f4773f4..b3fb13c 100644
13424--- a/arch/x86/kernel/cpu/amd.c
13425+++ b/arch/x86/kernel/cpu/amd.c
13426@@ -669,7 +669,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13427 unsigned int size)
13428 {
13429 /* AMD errata T13 (order #21922) */
13430- if ((c->x86 == 6)) {
13431+ if (c->x86 == 6) {
13432 /* Duron Rev A0 */
13433 if (c->x86_model == 3 && c->x86_mask == 0)
13434 size = 64;
13435diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13436index c0f7d68..aa418f9 100644
13437--- a/arch/x86/kernel/cpu/common.c
13438+++ b/arch/x86/kernel/cpu/common.c
13439@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13440
13441 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13442
13443-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13444-#ifdef CONFIG_X86_64
13445- /*
13446- * We need valid kernel segments for data and code in long mode too
13447- * IRET will check the segment types kkeil 2000/10/28
13448- * Also sysret mandates a special GDT layout
13449- *
13450- * TLS descriptors are currently at a different place compared to i386.
13451- * Hopefully nobody expects them at a fixed place (Wine?)
13452- */
13453- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13454- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13455- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13456- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13457- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13458- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13459-#else
13460- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13461- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13462- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13463- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13464- /*
13465- * Segments used for calling PnP BIOS have byte granularity.
13466- * They code segments and data segments have fixed 64k limits,
13467- * the transfer segment sizes are set at run time.
13468- */
13469- /* 32-bit code */
13470- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13471- /* 16-bit code */
13472- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13473- /* 16-bit data */
13474- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13475- /* 16-bit data */
13476- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13477- /* 16-bit data */
13478- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13479- /*
13480- * The APM segments have byte granularity and their bases
13481- * are set at run time. All have 64k limits.
13482- */
13483- /* 32-bit code */
13484- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13485- /* 16-bit code */
13486- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13487- /* data */
13488- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13489-
13490- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13491- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13492- GDT_STACK_CANARY_INIT
13493-#endif
13494-} };
13495-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13496-
13497 static int __init x86_xsave_setup(char *s)
13498 {
13499 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13500@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13501 {
13502 struct desc_ptr gdt_descr;
13503
13504- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13505+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13506 gdt_descr.size = GDT_SIZE - 1;
13507 load_gdt(&gdt_descr);
13508 /* Reload the per-cpu base */
13509@@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13510 /* Filter out anything that depends on CPUID levels we don't have */
13511 filter_cpuid_features(c, true);
13512
13513+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13514+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13515+#endif
13516+
13517 /* If the model name is still unset, do table lookup. */
13518 if (!c->x86_model_id[0]) {
13519 const char *p;
13520@@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13521 }
13522 __setup("clearcpuid=", setup_disablecpuid);
13523
13524+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13525+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13526+
13527 #ifdef CONFIG_X86_64
13528 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13529-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13530- (unsigned long) nmi_idt_table };
13531+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13532
13533 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13534 irq_stack_union) __aligned(PAGE_SIZE);
13535@@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13536 EXPORT_PER_CPU_SYMBOL(current_task);
13537
13538 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13539- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13540+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13541 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13542
13543 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13544@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13545 {
13546 memset(regs, 0, sizeof(struct pt_regs));
13547 regs->fs = __KERNEL_PERCPU;
13548- regs->gs = __KERNEL_STACK_CANARY;
13549+ savesegment(gs, regs->gs);
13550
13551 return regs;
13552 }
13553@@ -1190,7 +1142,7 @@ void __cpuinit cpu_init(void)
13554 int i;
13555
13556 cpu = stack_smp_processor_id();
13557- t = &per_cpu(init_tss, cpu);
13558+ t = init_tss + cpu;
13559 oist = &per_cpu(orig_ist, cpu);
13560
13561 #ifdef CONFIG_NUMA
13562@@ -1216,7 +1168,7 @@ void __cpuinit cpu_init(void)
13563 switch_to_new_gdt(cpu);
13564 loadsegment(fs, 0);
13565
13566- load_idt((const struct desc_ptr *)&idt_descr);
13567+ load_idt(&idt_descr);
13568
13569 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13570 syscall_init();
13571@@ -1225,7 +1177,6 @@ void __cpuinit cpu_init(void)
13572 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13573 barrier();
13574
13575- x86_configure_nx();
13576 if (cpu != 0)
13577 enable_x2apic();
13578
13579@@ -1281,7 +1232,7 @@ void __cpuinit cpu_init(void)
13580 {
13581 int cpu = smp_processor_id();
13582 struct task_struct *curr = current;
13583- struct tss_struct *t = &per_cpu(init_tss, cpu);
13584+ struct tss_struct *t = init_tss + cpu;
13585 struct thread_struct *thread = &curr->thread;
13586
13587 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13588diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13589index 3e6ff6c..54b4992 100644
13590--- a/arch/x86/kernel/cpu/intel.c
13591+++ b/arch/x86/kernel/cpu/intel.c
13592@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13593 * Update the IDT descriptor and reload the IDT so that
13594 * it uses the read-only mapped virtual address.
13595 */
13596- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13597+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13598 load_idt(&idt_descr);
13599 }
13600 #endif
13601diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13602index 5a11ae2..a1a1c8a 100644
13603--- a/arch/x86/kernel/cpu/mcheck/mce.c
13604+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13605@@ -42,6 +42,7 @@
13606 #include <asm/processor.h>
13607 #include <asm/mce.h>
13608 #include <asm/msr.h>
13609+#include <asm/local.h>
13610
13611 #include "mce-internal.h"
13612
13613@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
13614 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13615 m->cs, m->ip);
13616
13617- if (m->cs == __KERNEL_CS)
13618+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13619 print_symbol("{%s}", m->ip);
13620 pr_cont("\n");
13621 }
13622@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
13623
13624 #define PANIC_TIMEOUT 5 /* 5 seconds */
13625
13626-static atomic_t mce_paniced;
13627+static atomic_unchecked_t mce_paniced;
13628
13629 static int fake_panic;
13630-static atomic_t mce_fake_paniced;
13631+static atomic_unchecked_t mce_fake_paniced;
13632
13633 /* Panic in progress. Enable interrupts and wait for final IPI */
13634 static void wait_for_panic(void)
13635@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13636 /*
13637 * Make sure only one CPU runs in machine check panic
13638 */
13639- if (atomic_inc_return(&mce_paniced) > 1)
13640+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13641 wait_for_panic();
13642 barrier();
13643
13644@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13645 console_verbose();
13646 } else {
13647 /* Don't log too much for fake panic */
13648- if (atomic_inc_return(&mce_fake_paniced) > 1)
13649+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13650 return;
13651 }
13652 /* First print corrected ones that are still unlogged */
13653@@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
13654 * might have been modified by someone else.
13655 */
13656 rmb();
13657- if (atomic_read(&mce_paniced))
13658+ if (atomic_read_unchecked(&mce_paniced))
13659 wait_for_panic();
13660 if (!monarch_timeout)
13661 goto out;
13662@@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13663 }
13664
13665 /* Call the installed machine check handler for this CPU setup. */
13666-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13667+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13668 unexpected_machine_check;
13669
13670 /*
13671@@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13672 return;
13673 }
13674
13675+ pax_open_kernel();
13676 machine_check_vector = do_machine_check;
13677+ pax_close_kernel();
13678
13679 __mcheck_cpu_init_generic();
13680 __mcheck_cpu_init_vendor(c);
13681@@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13682 */
13683
13684 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13685-static int mce_chrdev_open_count; /* #times opened */
13686+static local_t mce_chrdev_open_count; /* #times opened */
13687 static int mce_chrdev_open_exclu; /* already open exclusive? */
13688
13689 static int mce_chrdev_open(struct inode *inode, struct file *file)
13690@@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13691 spin_lock(&mce_chrdev_state_lock);
13692
13693 if (mce_chrdev_open_exclu ||
13694- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13695+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13696 spin_unlock(&mce_chrdev_state_lock);
13697
13698 return -EBUSY;
13699@@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13700
13701 if (file->f_flags & O_EXCL)
13702 mce_chrdev_open_exclu = 1;
13703- mce_chrdev_open_count++;
13704+ local_inc(&mce_chrdev_open_count);
13705
13706 spin_unlock(&mce_chrdev_state_lock);
13707
13708@@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13709 {
13710 spin_lock(&mce_chrdev_state_lock);
13711
13712- mce_chrdev_open_count--;
13713+ local_dec(&mce_chrdev_open_count);
13714 mce_chrdev_open_exclu = 0;
13715
13716 spin_unlock(&mce_chrdev_state_lock);
13717@@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
13718 static void mce_reset(void)
13719 {
13720 cpu_missing = 0;
13721- atomic_set(&mce_fake_paniced, 0);
13722+ atomic_set_unchecked(&mce_fake_paniced, 0);
13723 atomic_set(&mce_executing, 0);
13724 atomic_set(&mce_callin, 0);
13725 atomic_set(&global_nwo, 0);
13726diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13727index 5c0e653..0882b0a 100644
13728--- a/arch/x86/kernel/cpu/mcheck/p5.c
13729+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13730@@ -12,6 +12,7 @@
13731 #include <asm/system.h>
13732 #include <asm/mce.h>
13733 #include <asm/msr.h>
13734+#include <asm/pgtable.h>
13735
13736 /* By default disabled */
13737 int mce_p5_enabled __read_mostly;
13738@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13739 if (!cpu_has(c, X86_FEATURE_MCE))
13740 return;
13741
13742+ pax_open_kernel();
13743 machine_check_vector = pentium_machine_check;
13744+ pax_close_kernel();
13745 /* Make sure the vector pointer is visible before we enable MCEs: */
13746 wmb();
13747
13748diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13749index 54060f5..c1a7577 100644
13750--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13751+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13752@@ -11,6 +11,7 @@
13753 #include <asm/system.h>
13754 #include <asm/mce.h>
13755 #include <asm/msr.h>
13756+#include <asm/pgtable.h>
13757
13758 /* Machine check handler for WinChip C6: */
13759 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13760@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13761 {
13762 u32 lo, hi;
13763
13764+ pax_open_kernel();
13765 machine_check_vector = winchip_machine_check;
13766+ pax_close_kernel();
13767 /* Make sure the vector pointer is visible before we enable MCEs: */
13768 wmb();
13769
13770diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13771index 6b96110..0da73eb 100644
13772--- a/arch/x86/kernel/cpu/mtrr/main.c
13773+++ b/arch/x86/kernel/cpu/mtrr/main.c
13774@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13775 u64 size_or_mask, size_and_mask;
13776 static bool mtrr_aps_delayed_init;
13777
13778-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13779+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13780
13781 const struct mtrr_ops *mtrr_if;
13782
13783diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13784index df5e41f..816c719 100644
13785--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13786+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13787@@ -25,7 +25,7 @@ struct mtrr_ops {
13788 int (*validate_add_page)(unsigned long base, unsigned long size,
13789 unsigned int type);
13790 int (*have_wrcomb)(void);
13791-};
13792+} __do_const;
13793
13794 extern int generic_get_free_region(unsigned long base, unsigned long size,
13795 int replace_reg);
13796diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13797index 5adce10..99284ec 100644
13798--- a/arch/x86/kernel/cpu/perf_event.c
13799+++ b/arch/x86/kernel/cpu/perf_event.c
13800@@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13801 break;
13802
13803 perf_callchain_store(entry, frame.return_address);
13804- fp = frame.next_frame;
13805+ fp = (const void __force_user *)frame.next_frame;
13806 }
13807 }
13808
13809diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13810index 13ad899..f642b9a 100644
13811--- a/arch/x86/kernel/crash.c
13812+++ b/arch/x86/kernel/crash.c
13813@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13814 {
13815 #ifdef CONFIG_X86_32
13816 struct pt_regs fixed_regs;
13817-#endif
13818
13819-#ifdef CONFIG_X86_32
13820- if (!user_mode_vm(regs)) {
13821+ if (!user_mode(regs)) {
13822 crash_fixup_ss_esp(&fixed_regs, regs);
13823 regs = &fixed_regs;
13824 }
13825diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
13826index 37250fe..bf2ec74 100644
13827--- a/arch/x86/kernel/doublefault_32.c
13828+++ b/arch/x86/kernel/doublefault_32.c
13829@@ -11,7 +11,7 @@
13830
13831 #define DOUBLEFAULT_STACKSIZE (1024)
13832 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13833-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13834+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13835
13836 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13837
13838@@ -21,7 +21,7 @@ static void doublefault_fn(void)
13839 unsigned long gdt, tss;
13840
13841 store_gdt(&gdt_desc);
13842- gdt = gdt_desc.address;
13843+ gdt = (unsigned long)gdt_desc.address;
13844
13845 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13846
13847@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
13848 /* 0x2 bit is always set */
13849 .flags = X86_EFLAGS_SF | 0x2,
13850 .sp = STACK_START,
13851- .es = __USER_DS,
13852+ .es = __KERNEL_DS,
13853 .cs = __KERNEL_CS,
13854 .ss = __KERNEL_DS,
13855- .ds = __USER_DS,
13856+ .ds = __KERNEL_DS,
13857 .fs = __KERNEL_PERCPU,
13858
13859 .__cr3 = __pa_nodebug(swapper_pg_dir),
13860diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
13861index 4025fe4..d8451c6 100644
13862--- a/arch/x86/kernel/dumpstack.c
13863+++ b/arch/x86/kernel/dumpstack.c
13864@@ -2,6 +2,9 @@
13865 * Copyright (C) 1991, 1992 Linus Torvalds
13866 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13867 */
13868+#ifdef CONFIG_GRKERNSEC_HIDESYM
13869+#define __INCLUDED_BY_HIDESYM 1
13870+#endif
13871 #include <linux/kallsyms.h>
13872 #include <linux/kprobes.h>
13873 #include <linux/uaccess.h>
13874@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
13875 static void
13876 print_ftrace_graph_addr(unsigned long addr, void *data,
13877 const struct stacktrace_ops *ops,
13878- struct thread_info *tinfo, int *graph)
13879+ struct task_struct *task, int *graph)
13880 {
13881- struct task_struct *task = tinfo->task;
13882 unsigned long ret_addr;
13883 int index = task->curr_ret_stack;
13884
13885@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13886 static inline void
13887 print_ftrace_graph_addr(unsigned long addr, void *data,
13888 const struct stacktrace_ops *ops,
13889- struct thread_info *tinfo, int *graph)
13890+ struct task_struct *task, int *graph)
13891 { }
13892 #endif
13893
13894@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13895 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13896 */
13897
13898-static inline int valid_stack_ptr(struct thread_info *tinfo,
13899- void *p, unsigned int size, void *end)
13900+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13901 {
13902- void *t = tinfo;
13903 if (end) {
13904 if (p < end && p >= (end-THREAD_SIZE))
13905 return 1;
13906@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
13907 }
13908
13909 unsigned long
13910-print_context_stack(struct thread_info *tinfo,
13911+print_context_stack(struct task_struct *task, void *stack_start,
13912 unsigned long *stack, unsigned long bp,
13913 const struct stacktrace_ops *ops, void *data,
13914 unsigned long *end, int *graph)
13915 {
13916 struct stack_frame *frame = (struct stack_frame *)bp;
13917
13918- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13919+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13920 unsigned long addr;
13921
13922 addr = *stack;
13923@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
13924 } else {
13925 ops->address(data, addr, 0);
13926 }
13927- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13928+ print_ftrace_graph_addr(addr, data, ops, task, graph);
13929 }
13930 stack++;
13931 }
13932@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
13933 EXPORT_SYMBOL_GPL(print_context_stack);
13934
13935 unsigned long
13936-print_context_stack_bp(struct thread_info *tinfo,
13937+print_context_stack_bp(struct task_struct *task, void *stack_start,
13938 unsigned long *stack, unsigned long bp,
13939 const struct stacktrace_ops *ops, void *data,
13940 unsigned long *end, int *graph)
13941@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13942 struct stack_frame *frame = (struct stack_frame *)bp;
13943 unsigned long *ret_addr = &frame->return_address;
13944
13945- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
13946+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
13947 unsigned long addr = *ret_addr;
13948
13949 if (!__kernel_text_address(addr))
13950@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13951 ops->address(data, addr, 1);
13952 frame = frame->next_frame;
13953 ret_addr = &frame->return_address;
13954- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13955+ print_ftrace_graph_addr(addr, data, ops, task, graph);
13956 }
13957
13958 return (unsigned long)frame;
13959@@ -186,7 +186,7 @@ void dump_stack(void)
13960
13961 bp = stack_frame(current, NULL);
13962 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13963- current->pid, current->comm, print_tainted(),
13964+ task_pid_nr(current), current->comm, print_tainted(),
13965 init_utsname()->release,
13966 (int)strcspn(init_utsname()->version, " "),
13967 init_utsname()->version);
13968@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
13969 }
13970 EXPORT_SYMBOL_GPL(oops_begin);
13971
13972+extern void gr_handle_kernel_exploit(void);
13973+
13974 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13975 {
13976 if (regs && kexec_should_crash(current))
13977@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13978 panic("Fatal exception in interrupt");
13979 if (panic_on_oops)
13980 panic("Fatal exception");
13981- do_exit(signr);
13982+
13983+ gr_handle_kernel_exploit();
13984+
13985+ do_group_exit(signr);
13986 }
13987
13988 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13989@@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13990
13991 show_registers(regs);
13992 #ifdef CONFIG_X86_32
13993- if (user_mode_vm(regs)) {
13994+ if (user_mode(regs)) {
13995 sp = regs->sp;
13996 ss = regs->ss & 0xffff;
13997 } else {
13998@@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
13999 unsigned long flags = oops_begin();
14000 int sig = SIGSEGV;
14001
14002- if (!user_mode_vm(regs))
14003+ if (!user_mode(regs))
14004 report_bug(regs->ip, regs);
14005
14006 if (__die(str, regs, err))
14007diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14008index c99f9ed..2a15d80 100644
14009--- a/arch/x86/kernel/dumpstack_32.c
14010+++ b/arch/x86/kernel/dumpstack_32.c
14011@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14012 bp = stack_frame(task, regs);
14013
14014 for (;;) {
14015- struct thread_info *context;
14016+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14017
14018- context = (struct thread_info *)
14019- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14020- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14021+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14022
14023- stack = (unsigned long *)context->previous_esp;
14024- if (!stack)
14025+ if (stack_start == task_stack_page(task))
14026 break;
14027+ stack = *(unsigned long **)stack_start;
14028 if (ops->stack(data, "IRQ") < 0)
14029 break;
14030 touch_nmi_watchdog();
14031@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14032 * When in-kernel, we also print out the stack and code at the
14033 * time of the fault..
14034 */
14035- if (!user_mode_vm(regs)) {
14036+ if (!user_mode(regs)) {
14037 unsigned int code_prologue = code_bytes * 43 / 64;
14038 unsigned int code_len = code_bytes;
14039 unsigned char c;
14040 u8 *ip;
14041+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14042
14043 printk(KERN_EMERG "Stack:\n");
14044 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14045
14046 printk(KERN_EMERG "Code: ");
14047
14048- ip = (u8 *)regs->ip - code_prologue;
14049+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14050 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14051 /* try starting at IP */
14052- ip = (u8 *)regs->ip;
14053+ ip = (u8 *)regs->ip + cs_base;
14054 code_len = code_len - code_prologue + 1;
14055 }
14056 for (i = 0; i < code_len; i++, ip++) {
14057@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14058 printk(KERN_CONT " Bad EIP value.");
14059 break;
14060 }
14061- if (ip == (u8 *)regs->ip)
14062+ if (ip == (u8 *)regs->ip + cs_base)
14063 printk(KERN_CONT "<%02x> ", c);
14064 else
14065 printk(KERN_CONT "%02x ", c);
14066@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14067 {
14068 unsigned short ud2;
14069
14070+ ip = ktla_ktva(ip);
14071 if (ip < PAGE_OFFSET)
14072 return 0;
14073 if (probe_kernel_address((unsigned short *)ip, ud2))
14074@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14075
14076 return ud2 == 0x0b0f;
14077 }
14078+
14079+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14080+void pax_check_alloca(unsigned long size)
14081+{
14082+ unsigned long sp = (unsigned long)&sp, stack_left;
14083+
14084+ /* all kernel stacks are of the same size */
14085+ stack_left = sp & (THREAD_SIZE - 1);
14086+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14087+}
14088+EXPORT_SYMBOL(pax_check_alloca);
14089+#endif
14090diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14091index 17107bd..b2deecf 100644
14092--- a/arch/x86/kernel/dumpstack_64.c
14093+++ b/arch/x86/kernel/dumpstack_64.c
14094@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14095 unsigned long *irq_stack_end =
14096 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14097 unsigned used = 0;
14098- struct thread_info *tinfo;
14099 int graph = 0;
14100 unsigned long dummy;
14101+ void *stack_start;
14102
14103 if (!task)
14104 task = current;
14105@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14106 * current stack address. If the stacks consist of nested
14107 * exceptions
14108 */
14109- tinfo = task_thread_info(task);
14110 for (;;) {
14111 char *id;
14112 unsigned long *estack_end;
14113+
14114 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14115 &used, &id);
14116
14117@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14118 if (ops->stack(data, id) < 0)
14119 break;
14120
14121- bp = ops->walk_stack(tinfo, stack, bp, ops,
14122+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14123 data, estack_end, &graph);
14124 ops->stack(data, "<EOE>");
14125 /*
14126@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14127 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14128 if (ops->stack(data, "IRQ") < 0)
14129 break;
14130- bp = ops->walk_stack(tinfo, stack, bp,
14131+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14132 ops, data, irq_stack_end, &graph);
14133 /*
14134 * We link to the next stack (which would be
14135@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14136 /*
14137 * This handles the process stack:
14138 */
14139- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14140+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14141+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14142 put_cpu();
14143 }
14144 EXPORT_SYMBOL(dump_trace);
14145@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14146
14147 return ud2 == 0x0b0f;
14148 }
14149+
14150+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14151+void pax_check_alloca(unsigned long size)
14152+{
14153+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14154+ unsigned cpu, used;
14155+ char *id;
14156+
14157+ /* check the process stack first */
14158+ stack_start = (unsigned long)task_stack_page(current);
14159+ stack_end = stack_start + THREAD_SIZE;
14160+ if (likely(stack_start <= sp && sp < stack_end)) {
14161+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14162+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14163+ return;
14164+ }
14165+
14166+ cpu = get_cpu();
14167+
14168+ /* check the irq stacks */
14169+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14170+ stack_start = stack_end - IRQ_STACK_SIZE;
14171+ if (stack_start <= sp && sp < stack_end) {
14172+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14173+ put_cpu();
14174+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14175+ return;
14176+ }
14177+
14178+ /* check the exception stacks */
14179+ used = 0;
14180+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14181+ stack_start = stack_end - EXCEPTION_STKSZ;
14182+ if (stack_end && stack_start <= sp && sp < stack_end) {
14183+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14184+ put_cpu();
14185+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14186+ return;
14187+ }
14188+
14189+ put_cpu();
14190+
14191+ /* unknown stack */
14192+ BUG();
14193+}
14194+EXPORT_SYMBOL(pax_check_alloca);
14195+#endif
14196diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14197index 9b9f18b..9fcaa04 100644
14198--- a/arch/x86/kernel/early_printk.c
14199+++ b/arch/x86/kernel/early_printk.c
14200@@ -7,6 +7,7 @@
14201 #include <linux/pci_regs.h>
14202 #include <linux/pci_ids.h>
14203 #include <linux/errno.h>
14204+#include <linux/sched.h>
14205 #include <asm/io.h>
14206 #include <asm/processor.h>
14207 #include <asm/fcntl.h>
14208diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14209index 79d97e6..87ea048 100644
14210--- a/arch/x86/kernel/entry_32.S
14211+++ b/arch/x86/kernel/entry_32.S
14212@@ -185,13 +185,146 @@
14213 /*CFI_REL_OFFSET gs, PT_GS*/
14214 .endm
14215 .macro SET_KERNEL_GS reg
14216+
14217+#ifdef CONFIG_CC_STACKPROTECTOR
14218 movl $(__KERNEL_STACK_CANARY), \reg
14219+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14220+ movl $(__USER_DS), \reg
14221+#else
14222+ xorl \reg, \reg
14223+#endif
14224+
14225 movl \reg, %gs
14226 .endm
14227
14228 #endif /* CONFIG_X86_32_LAZY_GS */
14229
14230-.macro SAVE_ALL
14231+.macro pax_enter_kernel
14232+#ifdef CONFIG_PAX_KERNEXEC
14233+ call pax_enter_kernel
14234+#endif
14235+.endm
14236+
14237+.macro pax_exit_kernel
14238+#ifdef CONFIG_PAX_KERNEXEC
14239+ call pax_exit_kernel
14240+#endif
14241+.endm
14242+
14243+#ifdef CONFIG_PAX_KERNEXEC
14244+ENTRY(pax_enter_kernel)
14245+#ifdef CONFIG_PARAVIRT
14246+ pushl %eax
14247+ pushl %ecx
14248+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14249+ mov %eax, %esi
14250+#else
14251+ mov %cr0, %esi
14252+#endif
14253+ bts $16, %esi
14254+ jnc 1f
14255+ mov %cs, %esi
14256+ cmp $__KERNEL_CS, %esi
14257+ jz 3f
14258+ ljmp $__KERNEL_CS, $3f
14259+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14260+2:
14261+#ifdef CONFIG_PARAVIRT
14262+ mov %esi, %eax
14263+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14264+#else
14265+ mov %esi, %cr0
14266+#endif
14267+3:
14268+#ifdef CONFIG_PARAVIRT
14269+ popl %ecx
14270+ popl %eax
14271+#endif
14272+ ret
14273+ENDPROC(pax_enter_kernel)
14274+
14275+ENTRY(pax_exit_kernel)
14276+#ifdef CONFIG_PARAVIRT
14277+ pushl %eax
14278+ pushl %ecx
14279+#endif
14280+ mov %cs, %esi
14281+ cmp $__KERNEXEC_KERNEL_CS, %esi
14282+ jnz 2f
14283+#ifdef CONFIG_PARAVIRT
14284+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14285+ mov %eax, %esi
14286+#else
14287+ mov %cr0, %esi
14288+#endif
14289+ btr $16, %esi
14290+ ljmp $__KERNEL_CS, $1f
14291+1:
14292+#ifdef CONFIG_PARAVIRT
14293+ mov %esi, %eax
14294+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14295+#else
14296+ mov %esi, %cr0
14297+#endif
14298+2:
14299+#ifdef CONFIG_PARAVIRT
14300+ popl %ecx
14301+ popl %eax
14302+#endif
14303+ ret
14304+ENDPROC(pax_exit_kernel)
14305+#endif
14306+
14307+.macro pax_erase_kstack
14308+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14309+ call pax_erase_kstack
14310+#endif
14311+.endm
14312+
14313+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14314+/*
14315+ * ebp: thread_info
14316+ * ecx, edx: can be clobbered
14317+ */
14318+ENTRY(pax_erase_kstack)
14319+ pushl %edi
14320+ pushl %eax
14321+
14322+ mov TI_lowest_stack(%ebp), %edi
14323+ mov $-0xBEEF, %eax
14324+ std
14325+
14326+1: mov %edi, %ecx
14327+ and $THREAD_SIZE_asm - 1, %ecx
14328+ shr $2, %ecx
14329+ repne scasl
14330+ jecxz 2f
14331+
14332+ cmp $2*16, %ecx
14333+ jc 2f
14334+
14335+ mov $2*16, %ecx
14336+ repe scasl
14337+ jecxz 2f
14338+ jne 1b
14339+
14340+2: cld
14341+ mov %esp, %ecx
14342+ sub %edi, %ecx
14343+ shr $2, %ecx
14344+ rep stosl
14345+
14346+ mov TI_task_thread_sp0(%ebp), %edi
14347+ sub $128, %edi
14348+ mov %edi, TI_lowest_stack(%ebp)
14349+
14350+ popl %eax
14351+ popl %edi
14352+ ret
14353+ENDPROC(pax_erase_kstack)
14354+#endif
14355+
14356+.macro __SAVE_ALL _DS
14357 cld
14358 PUSH_GS
14359 pushl_cfi %fs
14360@@ -214,7 +347,7 @@
14361 CFI_REL_OFFSET ecx, 0
14362 pushl_cfi %ebx
14363 CFI_REL_OFFSET ebx, 0
14364- movl $(__USER_DS), %edx
14365+ movl $\_DS, %edx
14366 movl %edx, %ds
14367 movl %edx, %es
14368 movl $(__KERNEL_PERCPU), %edx
14369@@ -222,6 +355,15 @@
14370 SET_KERNEL_GS %edx
14371 .endm
14372
14373+.macro SAVE_ALL
14374+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14375+ __SAVE_ALL __KERNEL_DS
14376+ pax_enter_kernel
14377+#else
14378+ __SAVE_ALL __USER_DS
14379+#endif
14380+.endm
14381+
14382 .macro RESTORE_INT_REGS
14383 popl_cfi %ebx
14384 CFI_RESTORE ebx
14385@@ -307,7 +449,7 @@ ENTRY(ret_from_fork)
14386 popfl_cfi
14387 jmp syscall_exit
14388 CFI_ENDPROC
14389-END(ret_from_fork)
14390+ENDPROC(ret_from_fork)
14391
14392 /*
14393 * Interrupt exit functions should be protected against kprobes
14394@@ -332,7 +474,15 @@ check_userspace:
14395 movb PT_CS(%esp), %al
14396 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14397 cmpl $USER_RPL, %eax
14398+
14399+#ifdef CONFIG_PAX_KERNEXEC
14400+ jae resume_userspace
14401+
14402+ PAX_EXIT_KERNEL
14403+ jmp resume_kernel
14404+#else
14405 jb resume_kernel # not returning to v8086 or userspace
14406+#endif
14407
14408 ENTRY(resume_userspace)
14409 LOCKDEP_SYS_EXIT
14410@@ -344,8 +494,8 @@ ENTRY(resume_userspace)
14411 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14412 # int/exception return?
14413 jne work_pending
14414- jmp restore_all
14415-END(ret_from_exception)
14416+ jmp restore_all_pax
14417+ENDPROC(ret_from_exception)
14418
14419 #ifdef CONFIG_PREEMPT
14420 ENTRY(resume_kernel)
14421@@ -360,7 +510,7 @@ need_resched:
14422 jz restore_all
14423 call preempt_schedule_irq
14424 jmp need_resched
14425-END(resume_kernel)
14426+ENDPROC(resume_kernel)
14427 #endif
14428 CFI_ENDPROC
14429 /*
14430@@ -394,23 +544,34 @@ sysenter_past_esp:
14431 /*CFI_REL_OFFSET cs, 0*/
14432 /*
14433 * Push current_thread_info()->sysenter_return to the stack.
14434- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14435- * pushed above; +8 corresponds to copy_thread's esp0 setting.
14436 */
14437- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14438+ pushl_cfi $0
14439 CFI_REL_OFFSET eip, 0
14440
14441 pushl_cfi %eax
14442 SAVE_ALL
14443+ GET_THREAD_INFO(%ebp)
14444+ movl TI_sysenter_return(%ebp),%ebp
14445+ movl %ebp,PT_EIP(%esp)
14446 ENABLE_INTERRUPTS(CLBR_NONE)
14447
14448 /*
14449 * Load the potential sixth argument from user stack.
14450 * Careful about security.
14451 */
14452+ movl PT_OLDESP(%esp),%ebp
14453+
14454+#ifdef CONFIG_PAX_MEMORY_UDEREF
14455+ mov PT_OLDSS(%esp),%ds
14456+1: movl %ds:(%ebp),%ebp
14457+ push %ss
14458+ pop %ds
14459+#else
14460 cmpl $__PAGE_OFFSET-3,%ebp
14461 jae syscall_fault
14462 1: movl (%ebp),%ebp
14463+#endif
14464+
14465 movl %ebp,PT_EBP(%esp)
14466 .section __ex_table,"a"
14467 .align 4
14468@@ -433,12 +594,24 @@ sysenter_do_call:
14469 testl $_TIF_ALLWORK_MASK, %ecx
14470 jne sysexit_audit
14471 sysenter_exit:
14472+
14473+#ifdef CONFIG_PAX_RANDKSTACK
14474+ pushl_cfi %eax
14475+ movl %esp, %eax
14476+ call pax_randomize_kstack
14477+ popl_cfi %eax
14478+#endif
14479+
14480+ pax_erase_kstack
14481+
14482 /* if something modifies registers it must also disable sysexit */
14483 movl PT_EIP(%esp), %edx
14484 movl PT_OLDESP(%esp), %ecx
14485 xorl %ebp,%ebp
14486 TRACE_IRQS_ON
14487 1: mov PT_FS(%esp), %fs
14488+2: mov PT_DS(%esp), %ds
14489+3: mov PT_ES(%esp), %es
14490 PTGS_TO_GS
14491 ENABLE_INTERRUPTS_SYSEXIT
14492
14493@@ -455,6 +628,9 @@ sysenter_audit:
14494 movl %eax,%edx /* 2nd arg: syscall number */
14495 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14496 call __audit_syscall_entry
14497+
14498+ pax_erase_kstack
14499+
14500 pushl_cfi %ebx
14501 movl PT_EAX(%esp),%eax /* reload syscall number */
14502 jmp sysenter_do_call
14503@@ -480,11 +656,17 @@ sysexit_audit:
14504
14505 CFI_ENDPROC
14506 .pushsection .fixup,"ax"
14507-2: movl $0,PT_FS(%esp)
14508+4: movl $0,PT_FS(%esp)
14509+ jmp 1b
14510+5: movl $0,PT_DS(%esp)
14511+ jmp 1b
14512+6: movl $0,PT_ES(%esp)
14513 jmp 1b
14514 .section __ex_table,"a"
14515 .align 4
14516- .long 1b,2b
14517+ .long 1b,4b
14518+ .long 2b,5b
14519+ .long 3b,6b
14520 .popsection
14521 PTGS_TO_GS_EX
14522 ENDPROC(ia32_sysenter_target)
14523@@ -517,6 +699,15 @@ syscall_exit:
14524 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14525 jne syscall_exit_work
14526
14527+restore_all_pax:
14528+
14529+#ifdef CONFIG_PAX_RANDKSTACK
14530+ movl %esp, %eax
14531+ call pax_randomize_kstack
14532+#endif
14533+
14534+ pax_erase_kstack
14535+
14536 restore_all:
14537 TRACE_IRQS_IRET
14538 restore_all_notrace:
14539@@ -576,14 +767,34 @@ ldt_ss:
14540 * compensating for the offset by changing to the ESPFIX segment with
14541 * a base address that matches for the difference.
14542 */
14543-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14544+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14545 mov %esp, %edx /* load kernel esp */
14546 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14547 mov %dx, %ax /* eax: new kernel esp */
14548 sub %eax, %edx /* offset (low word is 0) */
14549+#ifdef CONFIG_SMP
14550+ movl PER_CPU_VAR(cpu_number), %ebx
14551+ shll $PAGE_SHIFT_asm, %ebx
14552+ addl $cpu_gdt_table, %ebx
14553+#else
14554+ movl $cpu_gdt_table, %ebx
14555+#endif
14556 shr $16, %edx
14557- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14558- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14559+
14560+#ifdef CONFIG_PAX_KERNEXEC
14561+ mov %cr0, %esi
14562+ btr $16, %esi
14563+ mov %esi, %cr0
14564+#endif
14565+
14566+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14567+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14568+
14569+#ifdef CONFIG_PAX_KERNEXEC
14570+ bts $16, %esi
14571+ mov %esi, %cr0
14572+#endif
14573+
14574 pushl_cfi $__ESPFIX_SS
14575 pushl_cfi %eax /* new kernel esp */
14576 /* Disable interrupts, but do not irqtrace this section: we
14577@@ -612,38 +823,30 @@ work_resched:
14578 movl TI_flags(%ebp), %ecx
14579 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14580 # than syscall tracing?
14581- jz restore_all
14582+ jz restore_all_pax
14583 testb $_TIF_NEED_RESCHED, %cl
14584 jnz work_resched
14585
14586 work_notifysig: # deal with pending signals and
14587 # notify-resume requests
14588+ movl %esp, %eax
14589 #ifdef CONFIG_VM86
14590 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14591- movl %esp, %eax
14592- jne work_notifysig_v86 # returning to kernel-space or
14593+ jz 1f # returning to kernel-space or
14594 # vm86-space
14595- TRACE_IRQS_ON
14596- ENABLE_INTERRUPTS(CLBR_NONE)
14597- xorl %edx, %edx
14598- call do_notify_resume
14599- jmp resume_userspace_sig
14600
14601- ALIGN
14602-work_notifysig_v86:
14603 pushl_cfi %ecx # save ti_flags for do_notify_resume
14604 call save_v86_state # %eax contains pt_regs pointer
14605 popl_cfi %ecx
14606 movl %eax, %esp
14607-#else
14608- movl %esp, %eax
14609+1:
14610 #endif
14611 TRACE_IRQS_ON
14612 ENABLE_INTERRUPTS(CLBR_NONE)
14613 xorl %edx, %edx
14614 call do_notify_resume
14615 jmp resume_userspace_sig
14616-END(work_pending)
14617+ENDPROC(work_pending)
14618
14619 # perform syscall exit tracing
14620 ALIGN
14621@@ -651,11 +854,14 @@ syscall_trace_entry:
14622 movl $-ENOSYS,PT_EAX(%esp)
14623 movl %esp, %eax
14624 call syscall_trace_enter
14625+
14626+ pax_erase_kstack
14627+
14628 /* What it returned is what we'll actually use. */
14629 cmpl $(NR_syscalls), %eax
14630 jnae syscall_call
14631 jmp syscall_exit
14632-END(syscall_trace_entry)
14633+ENDPROC(syscall_trace_entry)
14634
14635 # perform syscall exit tracing
14636 ALIGN
14637@@ -668,20 +874,24 @@ syscall_exit_work:
14638 movl %esp, %eax
14639 call syscall_trace_leave
14640 jmp resume_userspace
14641-END(syscall_exit_work)
14642+ENDPROC(syscall_exit_work)
14643 CFI_ENDPROC
14644
14645 RING0_INT_FRAME # can't unwind into user space anyway
14646 syscall_fault:
14647+#ifdef CONFIG_PAX_MEMORY_UDEREF
14648+ push %ss
14649+ pop %ds
14650+#endif
14651 GET_THREAD_INFO(%ebp)
14652 movl $-EFAULT,PT_EAX(%esp)
14653 jmp resume_userspace
14654-END(syscall_fault)
14655+ENDPROC(syscall_fault)
14656
14657 syscall_badsys:
14658 movl $-ENOSYS,PT_EAX(%esp)
14659 jmp resume_userspace
14660-END(syscall_badsys)
14661+ENDPROC(syscall_badsys)
14662 CFI_ENDPROC
14663 /*
14664 * End of kprobes section
14665@@ -753,6 +963,36 @@ ENTRY(ptregs_clone)
14666 CFI_ENDPROC
14667 ENDPROC(ptregs_clone)
14668
14669+ ALIGN;
14670+ENTRY(kernel_execve)
14671+ CFI_STARTPROC
14672+ pushl_cfi %ebp
14673+ sub $PT_OLDSS+4,%esp
14674+ pushl_cfi %edi
14675+ pushl_cfi %ecx
14676+ pushl_cfi %eax
14677+ lea 3*4(%esp),%edi
14678+ mov $PT_OLDSS/4+1,%ecx
14679+ xorl %eax,%eax
14680+ rep stosl
14681+ popl_cfi %eax
14682+ popl_cfi %ecx
14683+ popl_cfi %edi
14684+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14685+ pushl_cfi %esp
14686+ call sys_execve
14687+ add $4,%esp
14688+ CFI_ADJUST_CFA_OFFSET -4
14689+ GET_THREAD_INFO(%ebp)
14690+ test %eax,%eax
14691+ jz syscall_exit
14692+ add $PT_OLDSS+4,%esp
14693+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14694+ popl_cfi %ebp
14695+ ret
14696+ CFI_ENDPROC
14697+ENDPROC(kernel_execve)
14698+
14699 .macro FIXUP_ESPFIX_STACK
14700 /*
14701 * Switch back for ESPFIX stack to the normal zerobased stack
14702@@ -762,8 +1002,15 @@ ENDPROC(ptregs_clone)
14703 * normal stack and adjusts ESP with the matching offset.
14704 */
14705 /* fixup the stack */
14706- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14707- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14708+#ifdef CONFIG_SMP
14709+ movl PER_CPU_VAR(cpu_number), %ebx
14710+ shll $PAGE_SHIFT_asm, %ebx
14711+ addl $cpu_gdt_table, %ebx
14712+#else
14713+ movl $cpu_gdt_table, %ebx
14714+#endif
14715+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14716+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14717 shl $16, %eax
14718 addl %esp, %eax /* the adjusted stack pointer */
14719 pushl_cfi $__KERNEL_DS
14720@@ -816,7 +1063,7 @@ vector=vector+1
14721 .endr
14722 2: jmp common_interrupt
14723 .endr
14724-END(irq_entries_start)
14725+ENDPROC(irq_entries_start)
14726
14727 .previous
14728 END(interrupt)
14729@@ -864,7 +1111,7 @@ ENTRY(coprocessor_error)
14730 pushl_cfi $do_coprocessor_error
14731 jmp error_code
14732 CFI_ENDPROC
14733-END(coprocessor_error)
14734+ENDPROC(coprocessor_error)
14735
14736 ENTRY(simd_coprocessor_error)
14737 RING0_INT_FRAME
14738@@ -885,7 +1132,7 @@ ENTRY(simd_coprocessor_error)
14739 #endif
14740 jmp error_code
14741 CFI_ENDPROC
14742-END(simd_coprocessor_error)
14743+ENDPROC(simd_coprocessor_error)
14744
14745 ENTRY(device_not_available)
14746 RING0_INT_FRAME
14747@@ -893,7 +1140,7 @@ ENTRY(device_not_available)
14748 pushl_cfi $do_device_not_available
14749 jmp error_code
14750 CFI_ENDPROC
14751-END(device_not_available)
14752+ENDPROC(device_not_available)
14753
14754 #ifdef CONFIG_PARAVIRT
14755 ENTRY(native_iret)
14756@@ -902,12 +1149,12 @@ ENTRY(native_iret)
14757 .align 4
14758 .long native_iret, iret_exc
14759 .previous
14760-END(native_iret)
14761+ENDPROC(native_iret)
14762
14763 ENTRY(native_irq_enable_sysexit)
14764 sti
14765 sysexit
14766-END(native_irq_enable_sysexit)
14767+ENDPROC(native_irq_enable_sysexit)
14768 #endif
14769
14770 ENTRY(overflow)
14771@@ -916,7 +1163,7 @@ ENTRY(overflow)
14772 pushl_cfi $do_overflow
14773 jmp error_code
14774 CFI_ENDPROC
14775-END(overflow)
14776+ENDPROC(overflow)
14777
14778 ENTRY(bounds)
14779 RING0_INT_FRAME
14780@@ -924,7 +1171,7 @@ ENTRY(bounds)
14781 pushl_cfi $do_bounds
14782 jmp error_code
14783 CFI_ENDPROC
14784-END(bounds)
14785+ENDPROC(bounds)
14786
14787 ENTRY(invalid_op)
14788 RING0_INT_FRAME
14789@@ -932,7 +1179,7 @@ ENTRY(invalid_op)
14790 pushl_cfi $do_invalid_op
14791 jmp error_code
14792 CFI_ENDPROC
14793-END(invalid_op)
14794+ENDPROC(invalid_op)
14795
14796 ENTRY(coprocessor_segment_overrun)
14797 RING0_INT_FRAME
14798@@ -940,35 +1187,35 @@ ENTRY(coprocessor_segment_overrun)
14799 pushl_cfi $do_coprocessor_segment_overrun
14800 jmp error_code
14801 CFI_ENDPROC
14802-END(coprocessor_segment_overrun)
14803+ENDPROC(coprocessor_segment_overrun)
14804
14805 ENTRY(invalid_TSS)
14806 RING0_EC_FRAME
14807 pushl_cfi $do_invalid_TSS
14808 jmp error_code
14809 CFI_ENDPROC
14810-END(invalid_TSS)
14811+ENDPROC(invalid_TSS)
14812
14813 ENTRY(segment_not_present)
14814 RING0_EC_FRAME
14815 pushl_cfi $do_segment_not_present
14816 jmp error_code
14817 CFI_ENDPROC
14818-END(segment_not_present)
14819+ENDPROC(segment_not_present)
14820
14821 ENTRY(stack_segment)
14822 RING0_EC_FRAME
14823 pushl_cfi $do_stack_segment
14824 jmp error_code
14825 CFI_ENDPROC
14826-END(stack_segment)
14827+ENDPROC(stack_segment)
14828
14829 ENTRY(alignment_check)
14830 RING0_EC_FRAME
14831 pushl_cfi $do_alignment_check
14832 jmp error_code
14833 CFI_ENDPROC
14834-END(alignment_check)
14835+ENDPROC(alignment_check)
14836
14837 ENTRY(divide_error)
14838 RING0_INT_FRAME
14839@@ -976,7 +1223,7 @@ ENTRY(divide_error)
14840 pushl_cfi $do_divide_error
14841 jmp error_code
14842 CFI_ENDPROC
14843-END(divide_error)
14844+ENDPROC(divide_error)
14845
14846 #ifdef CONFIG_X86_MCE
14847 ENTRY(machine_check)
14848@@ -985,7 +1232,7 @@ ENTRY(machine_check)
14849 pushl_cfi machine_check_vector
14850 jmp error_code
14851 CFI_ENDPROC
14852-END(machine_check)
14853+ENDPROC(machine_check)
14854 #endif
14855
14856 ENTRY(spurious_interrupt_bug)
14857@@ -994,7 +1241,7 @@ ENTRY(spurious_interrupt_bug)
14858 pushl_cfi $do_spurious_interrupt_bug
14859 jmp error_code
14860 CFI_ENDPROC
14861-END(spurious_interrupt_bug)
14862+ENDPROC(spurious_interrupt_bug)
14863 /*
14864 * End of kprobes section
14865 */
14866@@ -1109,7 +1356,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
14867
14868 ENTRY(mcount)
14869 ret
14870-END(mcount)
14871+ENDPROC(mcount)
14872
14873 ENTRY(ftrace_caller)
14874 cmpl $0, function_trace_stop
14875@@ -1138,7 +1385,7 @@ ftrace_graph_call:
14876 .globl ftrace_stub
14877 ftrace_stub:
14878 ret
14879-END(ftrace_caller)
14880+ENDPROC(ftrace_caller)
14881
14882 #else /* ! CONFIG_DYNAMIC_FTRACE */
14883
14884@@ -1174,7 +1421,7 @@ trace:
14885 popl %ecx
14886 popl %eax
14887 jmp ftrace_stub
14888-END(mcount)
14889+ENDPROC(mcount)
14890 #endif /* CONFIG_DYNAMIC_FTRACE */
14891 #endif /* CONFIG_FUNCTION_TRACER */
14892
14893@@ -1195,7 +1442,7 @@ ENTRY(ftrace_graph_caller)
14894 popl %ecx
14895 popl %eax
14896 ret
14897-END(ftrace_graph_caller)
14898+ENDPROC(ftrace_graph_caller)
14899
14900 .globl return_to_handler
14901 return_to_handler:
14902@@ -1250,15 +1497,18 @@ error_code:
14903 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14904 REG_TO_PTGS %ecx
14905 SET_KERNEL_GS %ecx
14906- movl $(__USER_DS), %ecx
14907+ movl $(__KERNEL_DS), %ecx
14908 movl %ecx, %ds
14909 movl %ecx, %es
14910+
14911+ pax_enter_kernel
14912+
14913 TRACE_IRQS_OFF
14914 movl %esp,%eax # pt_regs pointer
14915 call *%edi
14916 jmp ret_from_exception
14917 CFI_ENDPROC
14918-END(page_fault)
14919+ENDPROC(page_fault)
14920
14921 /*
14922 * Debug traps and NMI can happen at the one SYSENTER instruction
14923@@ -1300,7 +1550,7 @@ debug_stack_correct:
14924 call do_debug
14925 jmp ret_from_exception
14926 CFI_ENDPROC
14927-END(debug)
14928+ENDPROC(debug)
14929
14930 /*
14931 * NMI is doubly nasty. It can happen _while_ we're handling
14932@@ -1337,6 +1587,9 @@ nmi_stack_correct:
14933 xorl %edx,%edx # zero error code
14934 movl %esp,%eax # pt_regs pointer
14935 call do_nmi
14936+
14937+ pax_exit_kernel
14938+
14939 jmp restore_all_notrace
14940 CFI_ENDPROC
14941
14942@@ -1373,12 +1626,15 @@ nmi_espfix_stack:
14943 FIXUP_ESPFIX_STACK # %eax == %esp
14944 xorl %edx,%edx # zero error code
14945 call do_nmi
14946+
14947+ pax_exit_kernel
14948+
14949 RESTORE_REGS
14950 lss 12+4(%esp), %esp # back to espfix stack
14951 CFI_ADJUST_CFA_OFFSET -24
14952 jmp irq_return
14953 CFI_ENDPROC
14954-END(nmi)
14955+ENDPROC(nmi)
14956
14957 ENTRY(int3)
14958 RING0_INT_FRAME
14959@@ -1390,14 +1646,14 @@ ENTRY(int3)
14960 call do_int3
14961 jmp ret_from_exception
14962 CFI_ENDPROC
14963-END(int3)
14964+ENDPROC(int3)
14965
14966 ENTRY(general_protection)
14967 RING0_EC_FRAME
14968 pushl_cfi $do_general_protection
14969 jmp error_code
14970 CFI_ENDPROC
14971-END(general_protection)
14972+ENDPROC(general_protection)
14973
14974 #ifdef CONFIG_KVM_GUEST
14975 ENTRY(async_page_fault)
14976@@ -1405,7 +1661,7 @@ ENTRY(async_page_fault)
14977 pushl_cfi $do_async_page_fault
14978 jmp error_code
14979 CFI_ENDPROC
14980-END(async_page_fault)
14981+ENDPROC(async_page_fault)
14982 #endif
14983
14984 /*
14985diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
14986index 1333d98..b340ca2 100644
14987--- a/arch/x86/kernel/entry_64.S
14988+++ b/arch/x86/kernel/entry_64.S
14989@@ -56,6 +56,8 @@
14990 #include <asm/ftrace.h>
14991 #include <asm/percpu.h>
14992 #include <linux/err.h>
14993+#include <asm/pgtable.h>
14994+#include <asm/alternative-asm.h>
14995
14996 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14997 #include <linux/elf-em.h>
14998@@ -69,8 +71,9 @@
14999 #ifdef CONFIG_FUNCTION_TRACER
15000 #ifdef CONFIG_DYNAMIC_FTRACE
15001 ENTRY(mcount)
15002+ pax_force_retaddr
15003 retq
15004-END(mcount)
15005+ENDPROC(mcount)
15006
15007 ENTRY(ftrace_caller)
15008 cmpl $0, function_trace_stop
15009@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15010 #endif
15011
15012 GLOBAL(ftrace_stub)
15013+ pax_force_retaddr
15014 retq
15015-END(ftrace_caller)
15016+ENDPROC(ftrace_caller)
15017
15018 #else /* ! CONFIG_DYNAMIC_FTRACE */
15019 ENTRY(mcount)
15020@@ -113,6 +117,7 @@ ENTRY(mcount)
15021 #endif
15022
15023 GLOBAL(ftrace_stub)
15024+ pax_force_retaddr
15025 retq
15026
15027 trace:
15028@@ -122,12 +127,13 @@ trace:
15029 movq 8(%rbp), %rsi
15030 subq $MCOUNT_INSN_SIZE, %rdi
15031
15032+ pax_force_fptr ftrace_trace_function
15033 call *ftrace_trace_function
15034
15035 MCOUNT_RESTORE_FRAME
15036
15037 jmp ftrace_stub
15038-END(mcount)
15039+ENDPROC(mcount)
15040 #endif /* CONFIG_DYNAMIC_FTRACE */
15041 #endif /* CONFIG_FUNCTION_TRACER */
15042
15043@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15044
15045 MCOUNT_RESTORE_FRAME
15046
15047+ pax_force_retaddr
15048 retq
15049-END(ftrace_graph_caller)
15050+ENDPROC(ftrace_graph_caller)
15051
15052 GLOBAL(return_to_handler)
15053 subq $24, %rsp
15054@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15055 movq 8(%rsp), %rdx
15056 movq (%rsp), %rax
15057 addq $24, %rsp
15058+ pax_force_fptr %rdi
15059 jmp *%rdi
15060 #endif
15061
15062@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15063 ENDPROC(native_usergs_sysret64)
15064 #endif /* CONFIG_PARAVIRT */
15065
15066+ .macro ljmpq sel, off
15067+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15068+ .byte 0x48; ljmp *1234f(%rip)
15069+ .pushsection .rodata
15070+ .align 16
15071+ 1234: .quad \off; .word \sel
15072+ .popsection
15073+#else
15074+ pushq $\sel
15075+ pushq $\off
15076+ lretq
15077+#endif
15078+ .endm
15079+
15080+ .macro pax_enter_kernel
15081+ pax_set_fptr_mask
15082+#ifdef CONFIG_PAX_KERNEXEC
15083+ call pax_enter_kernel
15084+#endif
15085+ .endm
15086+
15087+ .macro pax_exit_kernel
15088+#ifdef CONFIG_PAX_KERNEXEC
15089+ call pax_exit_kernel
15090+#endif
15091+ .endm
15092+
15093+#ifdef CONFIG_PAX_KERNEXEC
15094+ENTRY(pax_enter_kernel)
15095+ pushq %rdi
15096+
15097+#ifdef CONFIG_PARAVIRT
15098+ PV_SAVE_REGS(CLBR_RDI)
15099+#endif
15100+
15101+ GET_CR0_INTO_RDI
15102+ bts $16,%rdi
15103+ jnc 3f
15104+ mov %cs,%edi
15105+ cmp $__KERNEL_CS,%edi
15106+ jnz 2f
15107+1:
15108+
15109+#ifdef CONFIG_PARAVIRT
15110+ PV_RESTORE_REGS(CLBR_RDI)
15111+#endif
15112+
15113+ popq %rdi
15114+ pax_force_retaddr
15115+ retq
15116+
15117+2: ljmpq __KERNEL_CS,1f
15118+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15119+4: SET_RDI_INTO_CR0
15120+ jmp 1b
15121+ENDPROC(pax_enter_kernel)
15122+
15123+ENTRY(pax_exit_kernel)
15124+ pushq %rdi
15125+
15126+#ifdef CONFIG_PARAVIRT
15127+ PV_SAVE_REGS(CLBR_RDI)
15128+#endif
15129+
15130+ mov %cs,%rdi
15131+ cmp $__KERNEXEC_KERNEL_CS,%edi
15132+ jz 2f
15133+1:
15134+
15135+#ifdef CONFIG_PARAVIRT
15136+ PV_RESTORE_REGS(CLBR_RDI);
15137+#endif
15138+
15139+ popq %rdi
15140+ pax_force_retaddr
15141+ retq
15142+
15143+2: GET_CR0_INTO_RDI
15144+ btr $16,%rdi
15145+ ljmpq __KERNEL_CS,3f
15146+3: SET_RDI_INTO_CR0
15147+ jmp 1b
15148+#ifdef CONFIG_PARAVIRT
15149+ PV_RESTORE_REGS(CLBR_RDI);
15150+#endif
15151+
15152+ popq %rdi
15153+ pax_force_retaddr
15154+ retq
15155+ENDPROC(pax_exit_kernel)
15156+#endif
15157+
15158+ .macro pax_enter_kernel_user
15159+ pax_set_fptr_mask
15160+#ifdef CONFIG_PAX_MEMORY_UDEREF
15161+ call pax_enter_kernel_user
15162+#endif
15163+ .endm
15164+
15165+ .macro pax_exit_kernel_user
15166+#ifdef CONFIG_PAX_MEMORY_UDEREF
15167+ call pax_exit_kernel_user
15168+#endif
15169+#ifdef CONFIG_PAX_RANDKSTACK
15170+ pushq %rax
15171+ call pax_randomize_kstack
15172+ popq %rax
15173+#endif
15174+ .endm
15175+
15176+#ifdef CONFIG_PAX_MEMORY_UDEREF
15177+ENTRY(pax_enter_kernel_user)
15178+ pushq %rdi
15179+ pushq %rbx
15180+
15181+#ifdef CONFIG_PARAVIRT
15182+ PV_SAVE_REGS(CLBR_RDI)
15183+#endif
15184+
15185+ GET_CR3_INTO_RDI
15186+ mov %rdi,%rbx
15187+ add $__START_KERNEL_map,%rbx
15188+ sub phys_base(%rip),%rbx
15189+
15190+#ifdef CONFIG_PARAVIRT
15191+ pushq %rdi
15192+ cmpl $0, pv_info+PARAVIRT_enabled
15193+ jz 1f
15194+ i = 0
15195+ .rept USER_PGD_PTRS
15196+ mov i*8(%rbx),%rsi
15197+ mov $0,%sil
15198+ lea i*8(%rbx),%rdi
15199+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15200+ i = i + 1
15201+ .endr
15202+ jmp 2f
15203+1:
15204+#endif
15205+
15206+ i = 0
15207+ .rept USER_PGD_PTRS
15208+ movb $0,i*8(%rbx)
15209+ i = i + 1
15210+ .endr
15211+
15212+#ifdef CONFIG_PARAVIRT
15213+2: popq %rdi
15214+#endif
15215+ SET_RDI_INTO_CR3
15216+
15217+#ifdef CONFIG_PAX_KERNEXEC
15218+ GET_CR0_INTO_RDI
15219+ bts $16,%rdi
15220+ SET_RDI_INTO_CR0
15221+#endif
15222+
15223+#ifdef CONFIG_PARAVIRT
15224+ PV_RESTORE_REGS(CLBR_RDI)
15225+#endif
15226+
15227+ popq %rbx
15228+ popq %rdi
15229+ pax_force_retaddr
15230+ retq
15231+ENDPROC(pax_enter_kernel_user)
15232+
15233+ENTRY(pax_exit_kernel_user)
15234+ push %rdi
15235+
15236+#ifdef CONFIG_PARAVIRT
15237+ pushq %rbx
15238+ PV_SAVE_REGS(CLBR_RDI)
15239+#endif
15240+
15241+#ifdef CONFIG_PAX_KERNEXEC
15242+ GET_CR0_INTO_RDI
15243+ btr $16,%rdi
15244+ SET_RDI_INTO_CR0
15245+#endif
15246+
15247+ GET_CR3_INTO_RDI
15248+ add $__START_KERNEL_map,%rdi
15249+ sub phys_base(%rip),%rdi
15250+
15251+#ifdef CONFIG_PARAVIRT
15252+ cmpl $0, pv_info+PARAVIRT_enabled
15253+ jz 1f
15254+ mov %rdi,%rbx
15255+ i = 0
15256+ .rept USER_PGD_PTRS
15257+ mov i*8(%rbx),%rsi
15258+ mov $0x67,%sil
15259+ lea i*8(%rbx),%rdi
15260+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15261+ i = i + 1
15262+ .endr
15263+ jmp 2f
15264+1:
15265+#endif
15266+
15267+ i = 0
15268+ .rept USER_PGD_PTRS
15269+ movb $0x67,i*8(%rdi)
15270+ i = i + 1
15271+ .endr
15272+
15273+#ifdef CONFIG_PARAVIRT
15274+2: PV_RESTORE_REGS(CLBR_RDI)
15275+ popq %rbx
15276+#endif
15277+
15278+ popq %rdi
15279+ pax_force_retaddr
15280+ retq
15281+ENDPROC(pax_exit_kernel_user)
15282+#endif
15283+
15284+.macro pax_erase_kstack
15285+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15286+ call pax_erase_kstack
15287+#endif
15288+.endm
15289+
15290+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15291+/*
15292+ * r11: thread_info
15293+ * rcx, rdx: can be clobbered
15294+ */
15295+ENTRY(pax_erase_kstack)
15296+ pushq %rdi
15297+ pushq %rax
15298+ pushq %r11
15299+
15300+ GET_THREAD_INFO(%r11)
15301+ mov TI_lowest_stack(%r11), %rdi
15302+ mov $-0xBEEF, %rax
15303+ std
15304+
15305+1: mov %edi, %ecx
15306+ and $THREAD_SIZE_asm - 1, %ecx
15307+ shr $3, %ecx
15308+ repne scasq
15309+ jecxz 2f
15310+
15311+ cmp $2*8, %ecx
15312+ jc 2f
15313+
15314+ mov $2*8, %ecx
15315+ repe scasq
15316+ jecxz 2f
15317+ jne 1b
15318+
15319+2: cld
15320+ mov %esp, %ecx
15321+ sub %edi, %ecx
15322+
15323+ cmp $THREAD_SIZE_asm, %rcx
15324+ jb 3f
15325+ ud2
15326+3:
15327+
15328+ shr $3, %ecx
15329+ rep stosq
15330+
15331+ mov TI_task_thread_sp0(%r11), %rdi
15332+ sub $256, %rdi
15333+ mov %rdi, TI_lowest_stack(%r11)
15334+
15335+ popq %r11
15336+ popq %rax
15337+ popq %rdi
15338+ pax_force_retaddr
15339+ ret
15340+ENDPROC(pax_erase_kstack)
15341+#endif
15342
15343 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15344 #ifdef CONFIG_TRACE_IRQFLAGS
15345@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15346 .endm
15347
15348 .macro UNFAKE_STACK_FRAME
15349- addq $8*6, %rsp
15350- CFI_ADJUST_CFA_OFFSET -(6*8)
15351+ addq $8*6 + ARG_SKIP, %rsp
15352+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15353 .endm
15354
15355 /*
15356@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15357 movq %rsp, %rsi
15358
15359 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15360- testl $3, CS(%rdi)
15361+ testb $3, CS(%rdi)
15362 je 1f
15363 SWAPGS
15364 /*
15365@@ -356,9 +640,10 @@ ENTRY(save_rest)
15366 movq_cfi r15, R15+16
15367 movq %r11, 8(%rsp) /* return address */
15368 FIXUP_TOP_OF_STACK %r11, 16
15369+ pax_force_retaddr
15370 ret
15371 CFI_ENDPROC
15372-END(save_rest)
15373+ENDPROC(save_rest)
15374
15375 /* save complete stack frame */
15376 .pushsection .kprobes.text, "ax"
15377@@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15378 js 1f /* negative -> in kernel */
15379 SWAPGS
15380 xorl %ebx,%ebx
15381-1: ret
15382+1: pax_force_retaddr_bts
15383+ ret
15384 CFI_ENDPROC
15385-END(save_paranoid)
15386+ENDPROC(save_paranoid)
15387 .popsection
15388
15389 /*
15390@@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15391
15392 RESTORE_REST
15393
15394- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15395+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15396 jz retint_restore_args
15397
15398 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15399@@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15400 jmp ret_from_sys_call # go to the SYSRET fastpath
15401
15402 CFI_ENDPROC
15403-END(ret_from_fork)
15404+ENDPROC(ret_from_fork)
15405
15406 /*
15407 * System call entry. Up to 6 arguments in registers are supported.
15408@@ -457,7 +743,7 @@ END(ret_from_fork)
15409 ENTRY(system_call)
15410 CFI_STARTPROC simple
15411 CFI_SIGNAL_FRAME
15412- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15413+ CFI_DEF_CFA rsp,0
15414 CFI_REGISTER rip,rcx
15415 /*CFI_REGISTER rflags,r11*/
15416 SWAPGS_UNSAFE_STACK
15417@@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15418
15419 movq %rsp,PER_CPU_VAR(old_rsp)
15420 movq PER_CPU_VAR(kernel_stack),%rsp
15421+ SAVE_ARGS 8*6,0
15422+ pax_enter_kernel_user
15423 /*
15424 * No need to follow this irqs off/on section - it's straight
15425 * and short:
15426 */
15427 ENABLE_INTERRUPTS(CLBR_NONE)
15428- SAVE_ARGS 8,0
15429 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15430 movq %rcx,RIP-ARGOFFSET(%rsp)
15431 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15432- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15433+ GET_THREAD_INFO(%rcx)
15434+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15435 jnz tracesys
15436 system_call_fastpath:
15437 cmpq $__NR_syscall_max,%rax
15438 ja badsys
15439- movq %r10,%rcx
15440+ movq R10-ARGOFFSET(%rsp),%rcx
15441 call *sys_call_table(,%rax,8) # XXX: rip relative
15442 movq %rax,RAX-ARGOFFSET(%rsp)
15443 /*
15444@@ -498,10 +786,13 @@ sysret_check:
15445 LOCKDEP_SYS_EXIT
15446 DISABLE_INTERRUPTS(CLBR_NONE)
15447 TRACE_IRQS_OFF
15448- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15449+ GET_THREAD_INFO(%rcx)
15450+ movl TI_flags(%rcx),%edx
15451 andl %edi,%edx
15452 jnz sysret_careful
15453 CFI_REMEMBER_STATE
15454+ pax_exit_kernel_user
15455+ pax_erase_kstack
15456 /*
15457 * sysretq will re-enable interrupts:
15458 */
15459@@ -553,14 +844,18 @@ badsys:
15460 * jump back to the normal fast path.
15461 */
15462 auditsys:
15463- movq %r10,%r9 /* 6th arg: 4th syscall arg */
15464+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15465 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15466 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15467 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15468 movq %rax,%rsi /* 2nd arg: syscall number */
15469 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15470 call __audit_syscall_entry
15471+
15472+ pax_erase_kstack
15473+
15474 LOAD_ARGS 0 /* reload call-clobbered registers */
15475+ pax_set_fptr_mask
15476 jmp system_call_fastpath
15477
15478 /*
15479@@ -581,7 +876,7 @@ sysret_audit:
15480 /* Do syscall tracing */
15481 tracesys:
15482 #ifdef CONFIG_AUDITSYSCALL
15483- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15484+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15485 jz auditsys
15486 #endif
15487 SAVE_REST
15488@@ -589,16 +884,20 @@ tracesys:
15489 FIXUP_TOP_OF_STACK %rdi
15490 movq %rsp,%rdi
15491 call syscall_trace_enter
15492+
15493+ pax_erase_kstack
15494+
15495 /*
15496 * Reload arg registers from stack in case ptrace changed them.
15497 * We don't reload %rax because syscall_trace_enter() returned
15498 * the value it wants us to use in the table lookup.
15499 */
15500 LOAD_ARGS ARGOFFSET, 1
15501+ pax_set_fptr_mask
15502 RESTORE_REST
15503 cmpq $__NR_syscall_max,%rax
15504 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15505- movq %r10,%rcx /* fixup for C */
15506+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15507 call *sys_call_table(,%rax,8)
15508 movq %rax,RAX-ARGOFFSET(%rsp)
15509 /* Use IRET because user could have changed frame */
15510@@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15511 andl %edi,%edx
15512 jnz int_careful
15513 andl $~TS_COMPAT,TI_status(%rcx)
15514+ pax_erase_kstack
15515 jmp retint_swapgs
15516
15517 /* Either reschedule or signal or syscall exit tracking needed. */
15518@@ -665,7 +965,7 @@ int_restore_rest:
15519 TRACE_IRQS_OFF
15520 jmp int_with_check
15521 CFI_ENDPROC
15522-END(system_call)
15523+ENDPROC(system_call)
15524
15525 /*
15526 * Certain special system calls that need to save a complete full stack frame.
15527@@ -681,7 +981,7 @@ ENTRY(\label)
15528 call \func
15529 jmp ptregscall_common
15530 CFI_ENDPROC
15531-END(\label)
15532+ENDPROC(\label)
15533 .endm
15534
15535 PTREGSCALL stub_clone, sys_clone, %r8
15536@@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15537 movq_cfi_restore R12+8, r12
15538 movq_cfi_restore RBP+8, rbp
15539 movq_cfi_restore RBX+8, rbx
15540+ pax_force_retaddr
15541 ret $REST_SKIP /* pop extended registers */
15542 CFI_ENDPROC
15543-END(ptregscall_common)
15544+ENDPROC(ptregscall_common)
15545
15546 ENTRY(stub_execve)
15547 CFI_STARTPROC
15548@@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15549 RESTORE_REST
15550 jmp int_ret_from_sys_call
15551 CFI_ENDPROC
15552-END(stub_execve)
15553+ENDPROC(stub_execve)
15554
15555 /*
15556 * sigreturn is special because it needs to restore all registers on return.
15557@@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15558 RESTORE_REST
15559 jmp int_ret_from_sys_call
15560 CFI_ENDPROC
15561-END(stub_rt_sigreturn)
15562+ENDPROC(stub_rt_sigreturn)
15563
15564 /*
15565 * Build the entry stubs and pointer table with some assembler magic.
15566@@ -769,7 +1070,7 @@ vector=vector+1
15567 2: jmp common_interrupt
15568 .endr
15569 CFI_ENDPROC
15570-END(irq_entries_start)
15571+ENDPROC(irq_entries_start)
15572
15573 .previous
15574 END(interrupt)
15575@@ -789,6 +1090,16 @@ END(interrupt)
15576 subq $ORIG_RAX-RBP, %rsp
15577 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15578 SAVE_ARGS_IRQ
15579+#ifdef CONFIG_PAX_MEMORY_UDEREF
15580+ testb $3, CS(%rdi)
15581+ jnz 1f
15582+ pax_enter_kernel
15583+ jmp 2f
15584+1: pax_enter_kernel_user
15585+2:
15586+#else
15587+ pax_enter_kernel
15588+#endif
15589 call \func
15590 .endm
15591
15592@@ -820,7 +1131,7 @@ ret_from_intr:
15593
15594 exit_intr:
15595 GET_THREAD_INFO(%rcx)
15596- testl $3,CS-ARGOFFSET(%rsp)
15597+ testb $3,CS-ARGOFFSET(%rsp)
15598 je retint_kernel
15599
15600 /* Interrupt came from user space */
15601@@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
15602 * The iretq could re-enable interrupts:
15603 */
15604 DISABLE_INTERRUPTS(CLBR_ANY)
15605+ pax_exit_kernel_user
15606 TRACE_IRQS_IRETQ
15607 SWAPGS
15608 jmp restore_args
15609
15610 retint_restore_args: /* return to kernel space */
15611 DISABLE_INTERRUPTS(CLBR_ANY)
15612+ pax_exit_kernel
15613+ pax_force_retaddr RIP-ARGOFFSET
15614 /*
15615 * The iretq could re-enable interrupts:
15616 */
15617@@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
15618 #endif
15619
15620 CFI_ENDPROC
15621-END(common_interrupt)
15622+ENDPROC(common_interrupt)
15623 /*
15624 * End of kprobes section
15625 */
15626@@ -953,7 +1267,7 @@ ENTRY(\sym)
15627 interrupt \do_sym
15628 jmp ret_from_intr
15629 CFI_ENDPROC
15630-END(\sym)
15631+ENDPROC(\sym)
15632 .endm
15633
15634 #ifdef CONFIG_SMP
15635@@ -1026,12 +1340,22 @@ ENTRY(\sym)
15636 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15637 call error_entry
15638 DEFAULT_FRAME 0
15639+#ifdef CONFIG_PAX_MEMORY_UDEREF
15640+ testb $3, CS(%rsp)
15641+ jnz 1f
15642+ pax_enter_kernel
15643+ jmp 2f
15644+1: pax_enter_kernel_user
15645+2:
15646+#else
15647+ pax_enter_kernel
15648+#endif
15649 movq %rsp,%rdi /* pt_regs pointer */
15650 xorl %esi,%esi /* no error code */
15651 call \do_sym
15652 jmp error_exit /* %ebx: no swapgs flag */
15653 CFI_ENDPROC
15654-END(\sym)
15655+ENDPROC(\sym)
15656 .endm
15657
15658 .macro paranoidzeroentry sym do_sym
15659@@ -1043,15 +1367,25 @@ ENTRY(\sym)
15660 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15661 call save_paranoid
15662 TRACE_IRQS_OFF
15663+#ifdef CONFIG_PAX_MEMORY_UDEREF
15664+ testb $3, CS(%rsp)
15665+ jnz 1f
15666+ pax_enter_kernel
15667+ jmp 2f
15668+1: pax_enter_kernel_user
15669+2:
15670+#else
15671+ pax_enter_kernel
15672+#endif
15673 movq %rsp,%rdi /* pt_regs pointer */
15674 xorl %esi,%esi /* no error code */
15675 call \do_sym
15676 jmp paranoid_exit /* %ebx: no swapgs flag */
15677 CFI_ENDPROC
15678-END(\sym)
15679+ENDPROC(\sym)
15680 .endm
15681
15682-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15683+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15684 .macro paranoidzeroentry_ist sym do_sym ist
15685 ENTRY(\sym)
15686 INTR_FRAME
15687@@ -1061,14 +1395,30 @@ ENTRY(\sym)
15688 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15689 call save_paranoid
15690 TRACE_IRQS_OFF
15691+#ifdef CONFIG_PAX_MEMORY_UDEREF
15692+ testb $3, CS(%rsp)
15693+ jnz 1f
15694+ pax_enter_kernel
15695+ jmp 2f
15696+1: pax_enter_kernel_user
15697+2:
15698+#else
15699+ pax_enter_kernel
15700+#endif
15701 movq %rsp,%rdi /* pt_regs pointer */
15702 xorl %esi,%esi /* no error code */
15703+#ifdef CONFIG_SMP
15704+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15705+ lea init_tss(%r12), %r12
15706+#else
15707+ lea init_tss(%rip), %r12
15708+#endif
15709 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15710 call \do_sym
15711 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15712 jmp paranoid_exit /* %ebx: no swapgs flag */
15713 CFI_ENDPROC
15714-END(\sym)
15715+ENDPROC(\sym)
15716 .endm
15717
15718 .macro errorentry sym do_sym
15719@@ -1079,13 +1429,23 @@ ENTRY(\sym)
15720 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15721 call error_entry
15722 DEFAULT_FRAME 0
15723+#ifdef CONFIG_PAX_MEMORY_UDEREF
15724+ testb $3, CS(%rsp)
15725+ jnz 1f
15726+ pax_enter_kernel
15727+ jmp 2f
15728+1: pax_enter_kernel_user
15729+2:
15730+#else
15731+ pax_enter_kernel
15732+#endif
15733 movq %rsp,%rdi /* pt_regs pointer */
15734 movq ORIG_RAX(%rsp),%rsi /* get error code */
15735 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15736 call \do_sym
15737 jmp error_exit /* %ebx: no swapgs flag */
15738 CFI_ENDPROC
15739-END(\sym)
15740+ENDPROC(\sym)
15741 .endm
15742
15743 /* error code is on the stack already */
15744@@ -1098,13 +1458,23 @@ ENTRY(\sym)
15745 call save_paranoid
15746 DEFAULT_FRAME 0
15747 TRACE_IRQS_OFF
15748+#ifdef CONFIG_PAX_MEMORY_UDEREF
15749+ testb $3, CS(%rsp)
15750+ jnz 1f
15751+ pax_enter_kernel
15752+ jmp 2f
15753+1: pax_enter_kernel_user
15754+2:
15755+#else
15756+ pax_enter_kernel
15757+#endif
15758 movq %rsp,%rdi /* pt_regs pointer */
15759 movq ORIG_RAX(%rsp),%rsi /* get error code */
15760 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15761 call \do_sym
15762 jmp paranoid_exit /* %ebx: no swapgs flag */
15763 CFI_ENDPROC
15764-END(\sym)
15765+ENDPROC(\sym)
15766 .endm
15767
15768 zeroentry divide_error do_divide_error
15769@@ -1134,9 +1504,10 @@ gs_change:
15770 2: mfence /* workaround */
15771 SWAPGS
15772 popfq_cfi
15773+ pax_force_retaddr
15774 ret
15775 CFI_ENDPROC
15776-END(native_load_gs_index)
15777+ENDPROC(native_load_gs_index)
15778
15779 .section __ex_table,"a"
15780 .align 8
15781@@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
15782 * Here we are in the child and the registers are set as they were
15783 * at kernel_thread() invocation in the parent.
15784 */
15785+ pax_force_fptr %rsi
15786 call *%rsi
15787 # exit
15788 mov %eax, %edi
15789 call do_exit
15790 ud2 # padding for call trace
15791 CFI_ENDPROC
15792-END(kernel_thread_helper)
15793+ENDPROC(kernel_thread_helper)
15794
15795 /*
15796 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15797@@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
15798 RESTORE_REST
15799 testq %rax,%rax
15800 je int_ret_from_sys_call
15801- RESTORE_ARGS
15802 UNFAKE_STACK_FRAME
15803+ pax_force_retaddr
15804 ret
15805 CFI_ENDPROC
15806-END(kernel_execve)
15807+ENDPROC(kernel_execve)
15808
15809 /* Call softirq on interrupt stack. Interrupts are off. */
15810 ENTRY(call_softirq)
15811@@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
15812 CFI_DEF_CFA_REGISTER rsp
15813 CFI_ADJUST_CFA_OFFSET -8
15814 decl PER_CPU_VAR(irq_count)
15815+ pax_force_retaddr
15816 ret
15817 CFI_ENDPROC
15818-END(call_softirq)
15819+ENDPROC(call_softirq)
15820
15821 #ifdef CONFIG_XEN
15822 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15823@@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15824 decl PER_CPU_VAR(irq_count)
15825 jmp error_exit
15826 CFI_ENDPROC
15827-END(xen_do_hypervisor_callback)
15828+ENDPROC(xen_do_hypervisor_callback)
15829
15830 /*
15831 * Hypervisor uses this for application faults while it executes.
15832@@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
15833 SAVE_ALL
15834 jmp error_exit
15835 CFI_ENDPROC
15836-END(xen_failsafe_callback)
15837+ENDPROC(xen_failsafe_callback)
15838
15839 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
15840 xen_hvm_callback_vector xen_evtchn_do_upcall
15841@@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
15842 TRACE_IRQS_OFF
15843 testl %ebx,%ebx /* swapgs needed? */
15844 jnz paranoid_restore
15845- testl $3,CS(%rsp)
15846+ testb $3,CS(%rsp)
15847 jnz paranoid_userspace
15848+#ifdef CONFIG_PAX_MEMORY_UDEREF
15849+ pax_exit_kernel
15850+ TRACE_IRQS_IRETQ 0
15851+ SWAPGS_UNSAFE_STACK
15852+ RESTORE_ALL 8
15853+ pax_force_retaddr_bts
15854+ jmp irq_return
15855+#endif
15856 paranoid_swapgs:
15857+#ifdef CONFIG_PAX_MEMORY_UDEREF
15858+ pax_exit_kernel_user
15859+#else
15860+ pax_exit_kernel
15861+#endif
15862 TRACE_IRQS_IRETQ 0
15863 SWAPGS_UNSAFE_STACK
15864 RESTORE_ALL 8
15865 jmp irq_return
15866 paranoid_restore:
15867+ pax_exit_kernel
15868 TRACE_IRQS_IRETQ 0
15869 RESTORE_ALL 8
15870+ pax_force_retaddr_bts
15871 jmp irq_return
15872 paranoid_userspace:
15873 GET_THREAD_INFO(%rcx)
15874@@ -1399,7 +1787,7 @@ paranoid_schedule:
15875 TRACE_IRQS_OFF
15876 jmp paranoid_userspace
15877 CFI_ENDPROC
15878-END(paranoid_exit)
15879+ENDPROC(paranoid_exit)
15880
15881 /*
15882 * Exception entry point. This expects an error code/orig_rax on the stack.
15883@@ -1426,12 +1814,13 @@ ENTRY(error_entry)
15884 movq_cfi r14, R14+8
15885 movq_cfi r15, R15+8
15886 xorl %ebx,%ebx
15887- testl $3,CS+8(%rsp)
15888+ testb $3,CS+8(%rsp)
15889 je error_kernelspace
15890 error_swapgs:
15891 SWAPGS
15892 error_sti:
15893 TRACE_IRQS_OFF
15894+ pax_force_retaddr_bts
15895 ret
15896
15897 /*
15898@@ -1458,7 +1847,7 @@ bstep_iret:
15899 movq %rcx,RIP+8(%rsp)
15900 jmp error_swapgs
15901 CFI_ENDPROC
15902-END(error_entry)
15903+ENDPROC(error_entry)
15904
15905
15906 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
15907@@ -1478,7 +1867,7 @@ ENTRY(error_exit)
15908 jnz retint_careful
15909 jmp retint_swapgs
15910 CFI_ENDPROC
15911-END(error_exit)
15912+ENDPROC(error_exit)
15913
15914 /*
15915 * Test if a given stack is an NMI stack or not.
15916@@ -1535,9 +1924,11 @@ ENTRY(nmi)
15917 * If %cs was not the kernel segment, then the NMI triggered in user
15918 * space, which means it is definitely not nested.
15919 */
15920+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
15921+ je 1f
15922 cmpl $__KERNEL_CS, 16(%rsp)
15923 jne first_nmi
15924-
15925+1:
15926 /*
15927 * Check the special variable on the stack to see if NMIs are
15928 * executing.
15929@@ -1659,6 +2050,16 @@ restart_nmi:
15930 */
15931 call save_paranoid
15932 DEFAULT_FRAME 0
15933+#ifdef CONFIG_PAX_MEMORY_UDEREF
15934+ testb $3, CS(%rsp)
15935+ jnz 1f
15936+ pax_enter_kernel
15937+ jmp 2f
15938+1: pax_enter_kernel_user
15939+2:
15940+#else
15941+ pax_enter_kernel
15942+#endif
15943 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
15944 movq %rsp,%rdi
15945 movq $-1,%rsi
15946@@ -1666,14 +2067,25 @@ restart_nmi:
15947 testl %ebx,%ebx /* swapgs needed? */
15948 jnz nmi_restore
15949 nmi_swapgs:
15950+#ifdef CONFIG_PAX_MEMORY_UDEREF
15951+ pax_exit_kernel_user
15952+#else
15953+ pax_exit_kernel
15954+#endif
15955 SWAPGS_UNSAFE_STACK
15956+ RESTORE_ALL 8
15957+ /* Clear the NMI executing stack variable */
15958+ movq $0, 10*8(%rsp)
15959+ jmp irq_return
15960 nmi_restore:
15961+ pax_exit_kernel
15962 RESTORE_ALL 8
15963+ pax_force_retaddr_bts
15964 /* Clear the NMI executing stack variable */
15965 movq $0, 10*8(%rsp)
15966 jmp irq_return
15967 CFI_ENDPROC
15968-END(nmi)
15969+ENDPROC(nmi)
15970
15971 /*
15972 * If an NMI hit an iret because of an exception or breakpoint,
15973@@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
15974 mov $-ENOSYS,%eax
15975 sysret
15976 CFI_ENDPROC
15977-END(ignore_sysret)
15978+ENDPROC(ignore_sysret)
15979
15980 /*
15981 * End of kprobes section
15982diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
15983index c9a281f..ce2f317 100644
15984--- a/arch/x86/kernel/ftrace.c
15985+++ b/arch/x86/kernel/ftrace.c
15986@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15987 static const void *mod_code_newcode; /* holds the text to write to the IP */
15988
15989 static unsigned nmi_wait_count;
15990-static atomic_t nmi_update_count = ATOMIC_INIT(0);
15991+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
15992
15993 int ftrace_arch_read_dyn_info(char *buf, int size)
15994 {
15995@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
15996
15997 r = snprintf(buf, size, "%u %u",
15998 nmi_wait_count,
15999- atomic_read(&nmi_update_count));
16000+ atomic_read_unchecked(&nmi_update_count));
16001 return r;
16002 }
16003
16004@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16005
16006 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16007 smp_rmb();
16008+ pax_open_kernel();
16009 ftrace_mod_code();
16010- atomic_inc(&nmi_update_count);
16011+ pax_close_kernel();
16012+ atomic_inc_unchecked(&nmi_update_count);
16013 }
16014 /* Must have previous changes seen before executions */
16015 smp_mb();
16016@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16017 {
16018 unsigned char replaced[MCOUNT_INSN_SIZE];
16019
16020+ ip = ktla_ktva(ip);
16021+
16022 /*
16023 * Note: Due to modules and __init, code can
16024 * disappear and change, we need to protect against faulting
16025@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16026 unsigned char old[MCOUNT_INSN_SIZE], *new;
16027 int ret;
16028
16029- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16030+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16031 new = ftrace_call_replace(ip, (unsigned long)func);
16032 ret = ftrace_modify_code(ip, old, new);
16033
16034@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16035 {
16036 unsigned char code[MCOUNT_INSN_SIZE];
16037
16038+ ip = ktla_ktva(ip);
16039+
16040 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16041 return -EFAULT;
16042
16043diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16044index 51ff186..9e77418 100644
16045--- a/arch/x86/kernel/head32.c
16046+++ b/arch/x86/kernel/head32.c
16047@@ -19,6 +19,7 @@
16048 #include <asm/io_apic.h>
16049 #include <asm/bios_ebda.h>
16050 #include <asm/tlbflush.h>
16051+#include <asm/boot.h>
16052
16053 static void __init i386_default_early_setup(void)
16054 {
16055@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16056
16057 void __init i386_start_kernel(void)
16058 {
16059- memblock_reserve(__pa_symbol(&_text),
16060- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16061+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16062
16063 #ifdef CONFIG_BLK_DEV_INITRD
16064 /* Reserve INITRD */
16065diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16066index ce0be7c..c41476e 100644
16067--- a/arch/x86/kernel/head_32.S
16068+++ b/arch/x86/kernel/head_32.S
16069@@ -25,6 +25,12 @@
16070 /* Physical address */
16071 #define pa(X) ((X) - __PAGE_OFFSET)
16072
16073+#ifdef CONFIG_PAX_KERNEXEC
16074+#define ta(X) (X)
16075+#else
16076+#define ta(X) ((X) - __PAGE_OFFSET)
16077+#endif
16078+
16079 /*
16080 * References to members of the new_cpu_data structure.
16081 */
16082@@ -54,11 +60,7 @@
16083 * and small than max_low_pfn, otherwise will waste some page table entries
16084 */
16085
16086-#if PTRS_PER_PMD > 1
16087-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16088-#else
16089-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16090-#endif
16091+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16092
16093 /* Number of possible pages in the lowmem region */
16094 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16095@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16096 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16097
16098 /*
16099+ * Real beginning of normal "text" segment
16100+ */
16101+ENTRY(stext)
16102+ENTRY(_stext)
16103+
16104+/*
16105 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16106 * %esi points to the real-mode code as a 32-bit pointer.
16107 * CS and DS must be 4 GB flat segments, but we don't depend on
16108@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16109 * can.
16110 */
16111 __HEAD
16112+
16113+#ifdef CONFIG_PAX_KERNEXEC
16114+ jmp startup_32
16115+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16116+.fill PAGE_SIZE-5,1,0xcc
16117+#endif
16118+
16119 ENTRY(startup_32)
16120 movl pa(stack_start),%ecx
16121
16122@@ -105,6 +120,57 @@ ENTRY(startup_32)
16123 2:
16124 leal -__PAGE_OFFSET(%ecx),%esp
16125
16126+#ifdef CONFIG_SMP
16127+ movl $pa(cpu_gdt_table),%edi
16128+ movl $__per_cpu_load,%eax
16129+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16130+ rorl $16,%eax
16131+ movb %al,__KERNEL_PERCPU + 4(%edi)
16132+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16133+ movl $__per_cpu_end - 1,%eax
16134+ subl $__per_cpu_start,%eax
16135+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16136+#endif
16137+
16138+#ifdef CONFIG_PAX_MEMORY_UDEREF
16139+ movl $NR_CPUS,%ecx
16140+ movl $pa(cpu_gdt_table),%edi
16141+1:
16142+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16143+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16144+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16145+ addl $PAGE_SIZE_asm,%edi
16146+ loop 1b
16147+#endif
16148+
16149+#ifdef CONFIG_PAX_KERNEXEC
16150+ movl $pa(boot_gdt),%edi
16151+ movl $__LOAD_PHYSICAL_ADDR,%eax
16152+ movw %ax,__BOOT_CS + 2(%edi)
16153+ rorl $16,%eax
16154+ movb %al,__BOOT_CS + 4(%edi)
16155+ movb %ah,__BOOT_CS + 7(%edi)
16156+ rorl $16,%eax
16157+
16158+ ljmp $(__BOOT_CS),$1f
16159+1:
16160+
16161+ movl $NR_CPUS,%ecx
16162+ movl $pa(cpu_gdt_table),%edi
16163+ addl $__PAGE_OFFSET,%eax
16164+1:
16165+ movw %ax,__KERNEL_CS + 2(%edi)
16166+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16167+ rorl $16,%eax
16168+ movb %al,__KERNEL_CS + 4(%edi)
16169+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16170+ movb %ah,__KERNEL_CS + 7(%edi)
16171+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16172+ rorl $16,%eax
16173+ addl $PAGE_SIZE_asm,%edi
16174+ loop 1b
16175+#endif
16176+
16177 /*
16178 * Clear BSS first so that there are no surprises...
16179 */
16180@@ -195,8 +261,11 @@ ENTRY(startup_32)
16181 movl %eax, pa(max_pfn_mapped)
16182
16183 /* Do early initialization of the fixmap area */
16184- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16185- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16186+#ifdef CONFIG_COMPAT_VDSO
16187+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16188+#else
16189+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16190+#endif
16191 #else /* Not PAE */
16192
16193 page_pde_offset = (__PAGE_OFFSET >> 20);
16194@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16195 movl %eax, pa(max_pfn_mapped)
16196
16197 /* Do early initialization of the fixmap area */
16198- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16199- movl %eax,pa(initial_page_table+0xffc)
16200+#ifdef CONFIG_COMPAT_VDSO
16201+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16202+#else
16203+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16204+#endif
16205 #endif
16206
16207 #ifdef CONFIG_PARAVIRT
16208@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16209 cmpl $num_subarch_entries, %eax
16210 jae bad_subarch
16211
16212- movl pa(subarch_entries)(,%eax,4), %eax
16213- subl $__PAGE_OFFSET, %eax
16214- jmp *%eax
16215+ jmp *pa(subarch_entries)(,%eax,4)
16216
16217 bad_subarch:
16218 WEAK(lguest_entry)
16219@@ -255,10 +325,10 @@ WEAK(xen_entry)
16220 __INITDATA
16221
16222 subarch_entries:
16223- .long default_entry /* normal x86/PC */
16224- .long lguest_entry /* lguest hypervisor */
16225- .long xen_entry /* Xen hypervisor */
16226- .long default_entry /* Moorestown MID */
16227+ .long ta(default_entry) /* normal x86/PC */
16228+ .long ta(lguest_entry) /* lguest hypervisor */
16229+ .long ta(xen_entry) /* Xen hypervisor */
16230+ .long ta(default_entry) /* Moorestown MID */
16231 num_subarch_entries = (. - subarch_entries) / 4
16232 .previous
16233 #else
16234@@ -312,6 +382,7 @@ default_entry:
16235 orl %edx,%eax
16236 movl %eax,%cr4
16237
16238+#ifdef CONFIG_X86_PAE
16239 testb $X86_CR4_PAE, %al # check if PAE is enabled
16240 jz 6f
16241
16242@@ -340,6 +411,9 @@ default_entry:
16243 /* Make changes effective */
16244 wrmsr
16245
16246+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16247+#endif
16248+
16249 6:
16250
16251 /*
16252@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16253 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16254 movl %eax,%ss # after changing gdt.
16255
16256- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16257+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16258 movl %eax,%ds
16259 movl %eax,%es
16260
16261@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16262 */
16263 cmpb $0,ready
16264 jne 1f
16265- movl $gdt_page,%eax
16266+ movl $cpu_gdt_table,%eax
16267 movl $stack_canary,%ecx
16268+#ifdef CONFIG_SMP
16269+ addl $__per_cpu_load,%ecx
16270+#endif
16271 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16272 shrl $16, %ecx
16273 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16274 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16275 1:
16276-#endif
16277 movl $(__KERNEL_STACK_CANARY),%eax
16278+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16279+ movl $(__USER_DS),%eax
16280+#else
16281+ xorl %eax,%eax
16282+#endif
16283 movl %eax,%gs
16284
16285 xorl %eax,%eax # Clear LDT
16286@@ -558,22 +639,22 @@ early_page_fault:
16287 jmp early_fault
16288
16289 early_fault:
16290- cld
16291 #ifdef CONFIG_PRINTK
16292+ cmpl $1,%ss:early_recursion_flag
16293+ je hlt_loop
16294+ incl %ss:early_recursion_flag
16295+ cld
16296 pusha
16297 movl $(__KERNEL_DS),%eax
16298 movl %eax,%ds
16299 movl %eax,%es
16300- cmpl $2,early_recursion_flag
16301- je hlt_loop
16302- incl early_recursion_flag
16303 movl %cr2,%eax
16304 pushl %eax
16305 pushl %edx /* trapno */
16306 pushl $fault_msg
16307 call printk
16308+; call dump_stack
16309 #endif
16310- call dump_stack
16311 hlt_loop:
16312 hlt
16313 jmp hlt_loop
16314@@ -581,8 +662,11 @@ hlt_loop:
16315 /* This is the default interrupt "handler" :-) */
16316 ALIGN
16317 ignore_int:
16318- cld
16319 #ifdef CONFIG_PRINTK
16320+ cmpl $2,%ss:early_recursion_flag
16321+ je hlt_loop
16322+ incl %ss:early_recursion_flag
16323+ cld
16324 pushl %eax
16325 pushl %ecx
16326 pushl %edx
16327@@ -591,9 +675,6 @@ ignore_int:
16328 movl $(__KERNEL_DS),%eax
16329 movl %eax,%ds
16330 movl %eax,%es
16331- cmpl $2,early_recursion_flag
16332- je hlt_loop
16333- incl early_recursion_flag
16334 pushl 16(%esp)
16335 pushl 24(%esp)
16336 pushl 32(%esp)
16337@@ -622,29 +703,43 @@ ENTRY(initial_code)
16338 /*
16339 * BSS section
16340 */
16341-__PAGE_ALIGNED_BSS
16342- .align PAGE_SIZE
16343 #ifdef CONFIG_X86_PAE
16344+.section .initial_pg_pmd,"a",@progbits
16345 initial_pg_pmd:
16346 .fill 1024*KPMDS,4,0
16347 #else
16348+.section .initial_page_table,"a",@progbits
16349 ENTRY(initial_page_table)
16350 .fill 1024,4,0
16351 #endif
16352+.section .initial_pg_fixmap,"a",@progbits
16353 initial_pg_fixmap:
16354 .fill 1024,4,0
16355+.section .empty_zero_page,"a",@progbits
16356 ENTRY(empty_zero_page)
16357 .fill 4096,1,0
16358+.section .swapper_pg_dir,"a",@progbits
16359 ENTRY(swapper_pg_dir)
16360+#ifdef CONFIG_X86_PAE
16361+ .fill 4,8,0
16362+#else
16363 .fill 1024,4,0
16364+#endif
16365+
16366+/*
16367+ * The IDT has to be page-aligned to simplify the Pentium
16368+ * F0 0F bug workaround.. We have a special link segment
16369+ * for this.
16370+ */
16371+.section .idt,"a",@progbits
16372+ENTRY(idt_table)
16373+ .fill 256,8,0
16374
16375 /*
16376 * This starts the data section.
16377 */
16378 #ifdef CONFIG_X86_PAE
16379-__PAGE_ALIGNED_DATA
16380- /* Page-aligned for the benefit of paravirt? */
16381- .align PAGE_SIZE
16382+.section .initial_page_table,"a",@progbits
16383 ENTRY(initial_page_table)
16384 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16385 # if KPMDS == 3
16386@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16387 # error "Kernel PMDs should be 1, 2 or 3"
16388 # endif
16389 .align PAGE_SIZE /* needs to be page-sized too */
16390+
16391+#ifdef CONFIG_PAX_PER_CPU_PGD
16392+ENTRY(cpu_pgd)
16393+ .rept NR_CPUS
16394+ .fill 4,8,0
16395+ .endr
16396+#endif
16397+
16398 #endif
16399
16400 .data
16401 .balign 4
16402 ENTRY(stack_start)
16403- .long init_thread_union+THREAD_SIZE
16404+ .long init_thread_union+THREAD_SIZE-8
16405
16406+ready: .byte 0
16407+
16408+.section .rodata,"a",@progbits
16409 early_recursion_flag:
16410 .long 0
16411
16412-ready: .byte 0
16413-
16414 int_msg:
16415 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16416
16417@@ -707,7 +811,7 @@ fault_msg:
16418 .word 0 # 32 bit align gdt_desc.address
16419 boot_gdt_descr:
16420 .word __BOOT_DS+7
16421- .long boot_gdt - __PAGE_OFFSET
16422+ .long pa(boot_gdt)
16423
16424 .word 0 # 32-bit align idt_desc.address
16425 idt_descr:
16426@@ -718,7 +822,7 @@ idt_descr:
16427 .word 0 # 32 bit align gdt_desc.address
16428 ENTRY(early_gdt_descr)
16429 .word GDT_ENTRIES*8-1
16430- .long gdt_page /* Overwritten for secondary CPUs */
16431+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
16432
16433 /*
16434 * The boot_gdt must mirror the equivalent in setup.S and is
16435@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16436 .align L1_CACHE_BYTES
16437 ENTRY(boot_gdt)
16438 .fill GDT_ENTRY_BOOT_CS,8,0
16439- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16440- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16441+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16442+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16443+
16444+ .align PAGE_SIZE_asm
16445+ENTRY(cpu_gdt_table)
16446+ .rept NR_CPUS
16447+ .quad 0x0000000000000000 /* NULL descriptor */
16448+ .quad 0x0000000000000000 /* 0x0b reserved */
16449+ .quad 0x0000000000000000 /* 0x13 reserved */
16450+ .quad 0x0000000000000000 /* 0x1b reserved */
16451+
16452+#ifdef CONFIG_PAX_KERNEXEC
16453+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16454+#else
16455+ .quad 0x0000000000000000 /* 0x20 unused */
16456+#endif
16457+
16458+ .quad 0x0000000000000000 /* 0x28 unused */
16459+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16460+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16461+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16462+ .quad 0x0000000000000000 /* 0x4b reserved */
16463+ .quad 0x0000000000000000 /* 0x53 reserved */
16464+ .quad 0x0000000000000000 /* 0x5b reserved */
16465+
16466+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16467+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16468+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16469+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16470+
16471+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16472+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16473+
16474+ /*
16475+ * Segments used for calling PnP BIOS have byte granularity.
16476+ * The code segments and data segments have fixed 64k limits,
16477+ * the transfer segment sizes are set at run time.
16478+ */
16479+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
16480+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
16481+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
16482+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
16483+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
16484+
16485+ /*
16486+ * The APM segments have byte granularity and their bases
16487+ * are set at run time. All have 64k limits.
16488+ */
16489+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16490+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16491+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
16492+
16493+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16494+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16495+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16496+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16497+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16498+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16499+
16500+ /* Be sure this is zeroed to avoid false validations in Xen */
16501+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16502+ .endr
16503diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16504index 40f4eb3..6d24d9d 100644
16505--- a/arch/x86/kernel/head_64.S
16506+++ b/arch/x86/kernel/head_64.S
16507@@ -19,6 +19,8 @@
16508 #include <asm/cache.h>
16509 #include <asm/processor-flags.h>
16510 #include <asm/percpu.h>
16511+#include <asm/cpufeature.h>
16512+#include <asm/alternative-asm.h>
16513
16514 #ifdef CONFIG_PARAVIRT
16515 #include <asm/asm-offsets.h>
16516@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16517 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16518 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16519 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16520+L4_VMALLOC_START = pgd_index(VMALLOC_START)
16521+L3_VMALLOC_START = pud_index(VMALLOC_START)
16522+L4_VMALLOC_END = pgd_index(VMALLOC_END)
16523+L3_VMALLOC_END = pud_index(VMALLOC_END)
16524+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16525+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16526
16527 .text
16528 __HEAD
16529@@ -85,35 +93,23 @@ startup_64:
16530 */
16531 addq %rbp, init_level4_pgt + 0(%rip)
16532 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16533+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16534+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16535+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16536 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16537
16538 addq %rbp, level3_ident_pgt + 0(%rip)
16539+#ifndef CONFIG_XEN
16540+ addq %rbp, level3_ident_pgt + 8(%rip)
16541+#endif
16542
16543- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16544- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16545+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16546+
16547+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16548+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16549
16550 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16551-
16552- /* Add an Identity mapping if I am above 1G */
16553- leaq _text(%rip), %rdi
16554- andq $PMD_PAGE_MASK, %rdi
16555-
16556- movq %rdi, %rax
16557- shrq $PUD_SHIFT, %rax
16558- andq $(PTRS_PER_PUD - 1), %rax
16559- jz ident_complete
16560-
16561- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16562- leaq level3_ident_pgt(%rip), %rbx
16563- movq %rdx, 0(%rbx, %rax, 8)
16564-
16565- movq %rdi, %rax
16566- shrq $PMD_SHIFT, %rax
16567- andq $(PTRS_PER_PMD - 1), %rax
16568- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16569- leaq level2_spare_pgt(%rip), %rbx
16570- movq %rdx, 0(%rbx, %rax, 8)
16571-ident_complete:
16572+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16573
16574 /*
16575 * Fixup the kernel text+data virtual addresses. Note that
16576@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16577 * after the boot processor executes this code.
16578 */
16579
16580- /* Enable PAE mode and PGE */
16581- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16582+ /* Enable PAE mode and PSE/PGE */
16583+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16584 movq %rax, %cr4
16585
16586 /* Setup early boot stage 4 level pagetables. */
16587@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16588 movl $MSR_EFER, %ecx
16589 rdmsr
16590 btsl $_EFER_SCE, %eax /* Enable System Call */
16591- btl $20,%edi /* No Execute supported? */
16592+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16593 jnc 1f
16594 btsl $_EFER_NX, %eax
16595+ leaq init_level4_pgt(%rip), %rdi
16596+#ifndef CONFIG_EFI
16597+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16598+#endif
16599+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16600+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16601+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16602+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16603 1: wrmsr /* Make changes effective */
16604
16605 /* Setup cr0 */
16606@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16607 * jump. In addition we need to ensure %cs is set so we make this
16608 * a far return.
16609 */
16610+ pax_set_fptr_mask
16611 movq initial_code(%rip),%rax
16612 pushq $0 # fake return address to stop unwinder
16613 pushq $__KERNEL_CS # set correct cs
16614@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16615 bad_address:
16616 jmp bad_address
16617
16618- .section ".init.text","ax"
16619+ __INIT
16620 #ifdef CONFIG_EARLY_PRINTK
16621 .globl early_idt_handlers
16622 early_idt_handlers:
16623@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16624 #endif /* EARLY_PRINTK */
16625 1: hlt
16626 jmp 1b
16627+ .previous
16628
16629 #ifdef CONFIG_EARLY_PRINTK
16630+ __INITDATA
16631 early_recursion_flag:
16632 .long 0
16633+ .previous
16634
16635+ .section .rodata,"a",@progbits
16636 early_idt_msg:
16637 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16638 early_idt_ripmsg:
16639 .asciz "RIP %s\n"
16640+ .previous
16641 #endif /* CONFIG_EARLY_PRINTK */
16642- .previous
16643
16644+ .section .rodata,"a",@progbits
16645 #define NEXT_PAGE(name) \
16646 .balign PAGE_SIZE; \
16647 ENTRY(name)
16648@@ -338,7 +348,6 @@ ENTRY(name)
16649 i = i + 1 ; \
16650 .endr
16651
16652- .data
16653 /*
16654 * This default setting generates an ident mapping at address 0x100000
16655 * and a mapping for the kernel that precisely maps virtual address
16656@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16657 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16658 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16659 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16660+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
16661+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16662+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
16663+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16664+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16665+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16666 .org init_level4_pgt + L4_START_KERNEL*8, 0
16667 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16668 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16669
16670+#ifdef CONFIG_PAX_PER_CPU_PGD
16671+NEXT_PAGE(cpu_pgd)
16672+ .rept NR_CPUS
16673+ .fill 512,8,0
16674+ .endr
16675+#endif
16676+
16677 NEXT_PAGE(level3_ident_pgt)
16678 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16679+#ifdef CONFIG_XEN
16680 .fill 511,8,0
16681+#else
16682+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16683+ .fill 510,8,0
16684+#endif
16685+
16686+NEXT_PAGE(level3_vmalloc_start_pgt)
16687+ .fill 512,8,0
16688+
16689+NEXT_PAGE(level3_vmalloc_end_pgt)
16690+ .fill 512,8,0
16691+
16692+NEXT_PAGE(level3_vmemmap_pgt)
16693+ .fill L3_VMEMMAP_START,8,0
16694+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16695
16696 NEXT_PAGE(level3_kernel_pgt)
16697 .fill L3_START_KERNEL,8,0
16698@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16699 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16700 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16701
16702+NEXT_PAGE(level2_vmemmap_pgt)
16703+ .fill 512,8,0
16704+
16705 NEXT_PAGE(level2_fixmap_pgt)
16706- .fill 506,8,0
16707- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16708- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16709- .fill 5,8,0
16710+ .fill 507,8,0
16711+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16712+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16713+ .fill 4,8,0
16714
16715-NEXT_PAGE(level1_fixmap_pgt)
16716+NEXT_PAGE(level1_vsyscall_pgt)
16717 .fill 512,8,0
16718
16719-NEXT_PAGE(level2_ident_pgt)
16720- /* Since I easily can, map the first 1G.
16721+ /* Since I easily can, map the first 2G.
16722 * Don't set NX because code runs from these pages.
16723 */
16724- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16725+NEXT_PAGE(level2_ident_pgt)
16726+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16727
16728 NEXT_PAGE(level2_kernel_pgt)
16729 /*
16730@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
16731 * If you want to increase this then increase MODULES_VADDR
16732 * too.)
16733 */
16734- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16735- KERNEL_IMAGE_SIZE/PMD_SIZE)
16736-
16737-NEXT_PAGE(level2_spare_pgt)
16738- .fill 512, 8, 0
16739+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16740
16741 #undef PMDS
16742 #undef NEXT_PAGE
16743
16744- .data
16745+ .align PAGE_SIZE
16746+ENTRY(cpu_gdt_table)
16747+ .rept NR_CPUS
16748+ .quad 0x0000000000000000 /* NULL descriptor */
16749+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16750+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
16751+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
16752+ .quad 0x00cffb000000ffff /* __USER32_CS */
16753+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16754+ .quad 0x00affb000000ffff /* __USER_CS */
16755+
16756+#ifdef CONFIG_PAX_KERNEXEC
16757+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16758+#else
16759+ .quad 0x0 /* unused */
16760+#endif
16761+
16762+ .quad 0,0 /* TSS */
16763+ .quad 0,0 /* LDT */
16764+ .quad 0,0,0 /* three TLS descriptors */
16765+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
16766+ /* asm/segment.h:GDT_ENTRIES must match this */
16767+
16768+ /* zero the remaining page */
16769+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16770+ .endr
16771+
16772 .align 16
16773 .globl early_gdt_descr
16774 early_gdt_descr:
16775 .word GDT_ENTRIES*8-1
16776 early_gdt_descr_base:
16777- .quad INIT_PER_CPU_VAR(gdt_page)
16778+ .quad cpu_gdt_table
16779
16780 ENTRY(phys_base)
16781 /* This must match the first entry in level2_kernel_pgt */
16782 .quad 0x0000000000000000
16783
16784 #include "../../x86/xen/xen-head.S"
16785-
16786- .section .bss, "aw", @nobits
16787+
16788+ .section .rodata,"a",@progbits
16789 .align L1_CACHE_BYTES
16790 ENTRY(idt_table)
16791- .skip IDT_ENTRIES * 16
16792+ .fill 512,8,0
16793
16794 .align L1_CACHE_BYTES
16795 ENTRY(nmi_idt_table)
16796- .skip IDT_ENTRIES * 16
16797+ .fill 512,8,0
16798
16799 __PAGE_ALIGNED_BSS
16800 .align PAGE_SIZE
16801diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16802index 9c3bd4a..e1d9b35 100644
16803--- a/arch/x86/kernel/i386_ksyms_32.c
16804+++ b/arch/x86/kernel/i386_ksyms_32.c
16805@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16806 EXPORT_SYMBOL(cmpxchg8b_emu);
16807 #endif
16808
16809+EXPORT_SYMBOL_GPL(cpu_gdt_table);
16810+
16811 /* Networking helper routines. */
16812 EXPORT_SYMBOL(csum_partial_copy_generic);
16813+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16814+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16815
16816 EXPORT_SYMBOL(__get_user_1);
16817 EXPORT_SYMBOL(__get_user_2);
16818@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16819
16820 EXPORT_SYMBOL(csum_partial);
16821 EXPORT_SYMBOL(empty_zero_page);
16822+
16823+#ifdef CONFIG_PAX_KERNEXEC
16824+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
16825+#endif
16826diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
16827index 6104852..6114160 100644
16828--- a/arch/x86/kernel/i8259.c
16829+++ b/arch/x86/kernel/i8259.c
16830@@ -210,7 +210,7 @@ spurious_8259A_irq:
16831 "spurious 8259A interrupt: IRQ%d.\n", irq);
16832 spurious_irq_mask |= irqmask;
16833 }
16834- atomic_inc(&irq_err_count);
16835+ atomic_inc_unchecked(&irq_err_count);
16836 /*
16837 * Theoretically we do not have to handle this IRQ,
16838 * but in Linux this does not cause problems and is
16839diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
16840index 43e9ccf..44ccf6f 100644
16841--- a/arch/x86/kernel/init_task.c
16842+++ b/arch/x86/kernel/init_task.c
16843@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16844 * way process stacks are handled. This is done by having a special
16845 * "init_task" linker map entry..
16846 */
16847-union thread_union init_thread_union __init_task_data =
16848- { INIT_THREAD_INFO(init_task) };
16849+union thread_union init_thread_union __init_task_data;
16850
16851 /*
16852 * Initial task structure.
16853@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
16854 * section. Since TSS's are completely CPU-local, we want them
16855 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
16856 */
16857-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
16858-
16859+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
16860+EXPORT_SYMBOL(init_tss);
16861diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
16862index 8c96897..be66bfa 100644
16863--- a/arch/x86/kernel/ioport.c
16864+++ b/arch/x86/kernel/ioport.c
16865@@ -6,6 +6,7 @@
16866 #include <linux/sched.h>
16867 #include <linux/kernel.h>
16868 #include <linux/capability.h>
16869+#include <linux/security.h>
16870 #include <linux/errno.h>
16871 #include <linux/types.h>
16872 #include <linux/ioport.h>
16873@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16874
16875 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
16876 return -EINVAL;
16877+#ifdef CONFIG_GRKERNSEC_IO
16878+ if (turn_on && grsec_disable_privio) {
16879+ gr_handle_ioperm();
16880+ return -EPERM;
16881+ }
16882+#endif
16883 if (turn_on && !capable(CAP_SYS_RAWIO))
16884 return -EPERM;
16885
16886@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16887 * because the ->io_bitmap_max value must match the bitmap
16888 * contents:
16889 */
16890- tss = &per_cpu(init_tss, get_cpu());
16891+ tss = init_tss + get_cpu();
16892
16893 if (turn_on)
16894 bitmap_clear(t->io_bitmap_ptr, from, num);
16895@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
16896 return -EINVAL;
16897 /* Trying to gain more privileges? */
16898 if (level > old) {
16899+#ifdef CONFIG_GRKERNSEC_IO
16900+ if (grsec_disable_privio) {
16901+ gr_handle_iopl();
16902+ return -EPERM;
16903+ }
16904+#endif
16905 if (!capable(CAP_SYS_RAWIO))
16906 return -EPERM;
16907 }
16908diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
16909index 7943e0c..dd32c5c 100644
16910--- a/arch/x86/kernel/irq.c
16911+++ b/arch/x86/kernel/irq.c
16912@@ -18,7 +18,7 @@
16913 #include <asm/mce.h>
16914 #include <asm/hw_irq.h>
16915
16916-atomic_t irq_err_count;
16917+atomic_unchecked_t irq_err_count;
16918
16919 /* Function pointer for generic interrupt vector handling */
16920 void (*x86_platform_ipi_callback)(void) = NULL;
16921@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
16922 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
16923 seq_printf(p, " Machine check polls\n");
16924 #endif
16925- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
16926+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
16927 #if defined(CONFIG_X86_IO_APIC)
16928- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
16929+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
16930 #endif
16931 return 0;
16932 }
16933@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
16934
16935 u64 arch_irq_stat(void)
16936 {
16937- u64 sum = atomic_read(&irq_err_count);
16938+ u64 sum = atomic_read_unchecked(&irq_err_count);
16939
16940 #ifdef CONFIG_X86_IO_APIC
16941- sum += atomic_read(&irq_mis_count);
16942+ sum += atomic_read_unchecked(&irq_mis_count);
16943 #endif
16944 return sum;
16945 }
16946diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
16947index 40fc861..9b8739b 100644
16948--- a/arch/x86/kernel/irq_32.c
16949+++ b/arch/x86/kernel/irq_32.c
16950@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
16951 __asm__ __volatile__("andl %%esp,%0" :
16952 "=r" (sp) : "0" (THREAD_SIZE - 1));
16953
16954- return sp < (sizeof(struct thread_info) + STACK_WARN);
16955+ return sp < STACK_WARN;
16956 }
16957
16958 static void print_stack_overflow(void)
16959@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
16960 * per-CPU IRQ handling contexts (thread information and stack)
16961 */
16962 union irq_ctx {
16963- struct thread_info tinfo;
16964- u32 stack[THREAD_SIZE/sizeof(u32)];
16965+ unsigned long previous_esp;
16966+ u32 stack[THREAD_SIZE/sizeof(u32)];
16967 } __attribute__((aligned(THREAD_SIZE)));
16968
16969 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
16970@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
16971 static inline int
16972 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16973 {
16974- union irq_ctx *curctx, *irqctx;
16975+ union irq_ctx *irqctx;
16976 u32 *isp, arg1, arg2;
16977
16978- curctx = (union irq_ctx *) current_thread_info();
16979 irqctx = __this_cpu_read(hardirq_ctx);
16980
16981 /*
16982@@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16983 * handler) we can't do that and just have to keep using the
16984 * current stack (which is the irq stack already after all)
16985 */
16986- if (unlikely(curctx == irqctx))
16987+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
16988 return 0;
16989
16990 /* build the stack frame on the IRQ stack */
16991- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
16992- irqctx->tinfo.task = curctx->tinfo.task;
16993- irqctx->tinfo.previous_esp = current_stack_pointer;
16994+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
16995+ irqctx->previous_esp = current_stack_pointer;
16996
16997- /*
16998- * Copy the softirq bits in preempt_count so that the
16999- * softirq checks work in the hardirq context.
17000- */
17001- irqctx->tinfo.preempt_count =
17002- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17003- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17004+#ifdef CONFIG_PAX_MEMORY_UDEREF
17005+ __set_fs(MAKE_MM_SEG(0));
17006+#endif
17007
17008 if (unlikely(overflow))
17009 call_on_stack(print_stack_overflow, isp);
17010@@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17011 : "0" (irq), "1" (desc), "2" (isp),
17012 "D" (desc->handle_irq)
17013 : "memory", "cc", "ecx");
17014+
17015+#ifdef CONFIG_PAX_MEMORY_UDEREF
17016+ __set_fs(current_thread_info()->addr_limit);
17017+#endif
17018+
17019 return 1;
17020 }
17021
17022@@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17023 */
17024 void __cpuinit irq_ctx_init(int cpu)
17025 {
17026- union irq_ctx *irqctx;
17027-
17028 if (per_cpu(hardirq_ctx, cpu))
17029 return;
17030
17031- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17032- THREAD_FLAGS,
17033- THREAD_ORDER));
17034- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17035- irqctx->tinfo.cpu = cpu;
17036- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17037- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17038-
17039- per_cpu(hardirq_ctx, cpu) = irqctx;
17040-
17041- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17042- THREAD_FLAGS,
17043- THREAD_ORDER));
17044- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17045- irqctx->tinfo.cpu = cpu;
17046- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17047-
17048- per_cpu(softirq_ctx, cpu) = irqctx;
17049+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17050+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17051
17052 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17053 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17054@@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17055 asmlinkage void do_softirq(void)
17056 {
17057 unsigned long flags;
17058- struct thread_info *curctx;
17059 union irq_ctx *irqctx;
17060 u32 *isp;
17061
17062@@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17063 local_irq_save(flags);
17064
17065 if (local_softirq_pending()) {
17066- curctx = current_thread_info();
17067 irqctx = __this_cpu_read(softirq_ctx);
17068- irqctx->tinfo.task = curctx->task;
17069- irqctx->tinfo.previous_esp = current_stack_pointer;
17070+ irqctx->previous_esp = current_stack_pointer;
17071
17072 /* build the stack frame on the softirq stack */
17073- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17074+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17075+
17076+#ifdef CONFIG_PAX_MEMORY_UDEREF
17077+ __set_fs(MAKE_MM_SEG(0));
17078+#endif
17079
17080 call_on_stack(__do_softirq, isp);
17081+
17082+#ifdef CONFIG_PAX_MEMORY_UDEREF
17083+ __set_fs(current_thread_info()->addr_limit);
17084+#endif
17085+
17086 /*
17087 * Shouldn't happen, we returned above if in_interrupt():
17088 */
17089diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17090index d04d3ec..ea4b374 100644
17091--- a/arch/x86/kernel/irq_64.c
17092+++ b/arch/x86/kernel/irq_64.c
17093@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17094 u64 estack_top, estack_bottom;
17095 u64 curbase = (u64)task_stack_page(current);
17096
17097- if (user_mode_vm(regs))
17098+ if (user_mode(regs))
17099 return;
17100
17101 if (regs->sp >= curbase + sizeof(struct thread_info) +
17102diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17103index faba577..93b9e71 100644
17104--- a/arch/x86/kernel/kgdb.c
17105+++ b/arch/x86/kernel/kgdb.c
17106@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17107 #ifdef CONFIG_X86_32
17108 switch (regno) {
17109 case GDB_SS:
17110- if (!user_mode_vm(regs))
17111+ if (!user_mode(regs))
17112 *(unsigned long *)mem = __KERNEL_DS;
17113 break;
17114 case GDB_SP:
17115- if (!user_mode_vm(regs))
17116+ if (!user_mode(regs))
17117 *(unsigned long *)mem = kernel_stack_pointer(regs);
17118 break;
17119 case GDB_GS:
17120@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17121 case 'k':
17122 /* clear the trace bit */
17123 linux_regs->flags &= ~X86_EFLAGS_TF;
17124- atomic_set(&kgdb_cpu_doing_single_step, -1);
17125+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17126
17127 /* set the trace bit if we're stepping */
17128 if (remcomInBuffer[0] == 's') {
17129 linux_regs->flags |= X86_EFLAGS_TF;
17130- atomic_set(&kgdb_cpu_doing_single_step,
17131+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17132 raw_smp_processor_id());
17133 }
17134
17135@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17136
17137 switch (cmd) {
17138 case DIE_DEBUG:
17139- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17140+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17141 if (user_mode(regs))
17142 return single_step_cont(regs, args);
17143 break;
17144diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17145index 7da647d..56fe348 100644
17146--- a/arch/x86/kernel/kprobes.c
17147+++ b/arch/x86/kernel/kprobes.c
17148@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17149 } __attribute__((packed)) *insn;
17150
17151 insn = (struct __arch_relative_insn *)from;
17152+
17153+ pax_open_kernel();
17154 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17155 insn->op = op;
17156+ pax_close_kernel();
17157 }
17158
17159 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17160@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17161 kprobe_opcode_t opcode;
17162 kprobe_opcode_t *orig_opcodes = opcodes;
17163
17164- if (search_exception_tables((unsigned long)opcodes))
17165+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17166 return 0; /* Page fault may occur on this address. */
17167
17168 retry:
17169@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17170 }
17171 }
17172 insn_get_length(&insn);
17173+ pax_open_kernel();
17174 memcpy(dest, insn.kaddr, insn.length);
17175+ pax_close_kernel();
17176
17177 #ifdef CONFIG_X86_64
17178 if (insn_rip_relative(&insn)) {
17179@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17180 (u8 *) dest;
17181 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17182 disp = (u8 *) dest + insn_offset_displacement(&insn);
17183+ pax_open_kernel();
17184 *(s32 *) disp = (s32) newdisp;
17185+ pax_close_kernel();
17186 }
17187 #endif
17188 return insn.length;
17189@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17190 */
17191 __copy_instruction(p->ainsn.insn, p->addr, 0);
17192
17193- if (can_boost(p->addr))
17194+ if (can_boost(ktla_ktva(p->addr)))
17195 p->ainsn.boostable = 0;
17196 else
17197 p->ainsn.boostable = -1;
17198
17199- p->opcode = *p->addr;
17200+ p->opcode = *(ktla_ktva(p->addr));
17201 }
17202
17203 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17204@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17205 * nor set current_kprobe, because it doesn't use single
17206 * stepping.
17207 */
17208- regs->ip = (unsigned long)p->ainsn.insn;
17209+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17210 preempt_enable_no_resched();
17211 return;
17212 }
17213@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17214 if (p->opcode == BREAKPOINT_INSTRUCTION)
17215 regs->ip = (unsigned long)p->addr;
17216 else
17217- regs->ip = (unsigned long)p->ainsn.insn;
17218+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17219 }
17220
17221 /*
17222@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17223 setup_singlestep(p, regs, kcb, 0);
17224 return 1;
17225 }
17226- } else if (*addr != BREAKPOINT_INSTRUCTION) {
17227+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17228 /*
17229 * The breakpoint instruction was removed right
17230 * after we hit it. Another cpu has removed
17231@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17232 " movq %rax, 152(%rsp)\n"
17233 RESTORE_REGS_STRING
17234 " popfq\n"
17235+#ifdef KERNEXEC_PLUGIN
17236+ " btsq $63,(%rsp)\n"
17237+#endif
17238 #else
17239 " pushf\n"
17240 SAVE_REGS_STRING
17241@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17242 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17243 {
17244 unsigned long *tos = stack_addr(regs);
17245- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17246+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17247 unsigned long orig_ip = (unsigned long)p->addr;
17248 kprobe_opcode_t *insn = p->ainsn.insn;
17249
17250@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17251 struct die_args *args = data;
17252 int ret = NOTIFY_DONE;
17253
17254- if (args->regs && user_mode_vm(args->regs))
17255+ if (args->regs && user_mode(args->regs))
17256 return ret;
17257
17258 switch (val) {
17259@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17260 * Verify if the address gap is in 2GB range, because this uses
17261 * a relative jump.
17262 */
17263- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17264+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17265 if (abs(rel) > 0x7fffffff)
17266 return -ERANGE;
17267
17268@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17269 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17270
17271 /* Set probe function call */
17272- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17273+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17274
17275 /* Set returning jmp instruction at the tail of out-of-line buffer */
17276 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17277- (u8 *)op->kp.addr + op->optinsn.size);
17278+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17279
17280 flush_icache_range((unsigned long) buf,
17281 (unsigned long) buf + TMPL_END_IDX +
17282@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17283 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17284
17285 /* Backup instructions which will be replaced by jump address */
17286- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17287+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17288 RELATIVE_ADDR_SIZE);
17289
17290 insn_buf[0] = RELATIVEJUMP_OPCODE;
17291diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17292index ea69726..604d066 100644
17293--- a/arch/x86/kernel/ldt.c
17294+++ b/arch/x86/kernel/ldt.c
17295@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17296 if (reload) {
17297 #ifdef CONFIG_SMP
17298 preempt_disable();
17299- load_LDT(pc);
17300+ load_LDT_nolock(pc);
17301 if (!cpumask_equal(mm_cpumask(current->mm),
17302 cpumask_of(smp_processor_id())))
17303 smp_call_function(flush_ldt, current->mm, 1);
17304 preempt_enable();
17305 #else
17306- load_LDT(pc);
17307+ load_LDT_nolock(pc);
17308 #endif
17309 }
17310 if (oldsize) {
17311@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17312 return err;
17313
17314 for (i = 0; i < old->size; i++)
17315- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17316+ write_ldt_entry(new->ldt, i, old->ldt + i);
17317 return 0;
17318 }
17319
17320@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17321 retval = copy_ldt(&mm->context, &old_mm->context);
17322 mutex_unlock(&old_mm->context.lock);
17323 }
17324+
17325+ if (tsk == current) {
17326+ mm->context.vdso = 0;
17327+
17328+#ifdef CONFIG_X86_32
17329+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17330+ mm->context.user_cs_base = 0UL;
17331+ mm->context.user_cs_limit = ~0UL;
17332+
17333+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17334+ cpus_clear(mm->context.cpu_user_cs_mask);
17335+#endif
17336+
17337+#endif
17338+#endif
17339+
17340+ }
17341+
17342 return retval;
17343 }
17344
17345@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17346 }
17347 }
17348
17349+#ifdef CONFIG_PAX_SEGMEXEC
17350+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17351+ error = -EINVAL;
17352+ goto out_unlock;
17353+ }
17354+#endif
17355+
17356 fill_ldt(&ldt, &ldt_info);
17357 if (oldmode)
17358 ldt.avl = 0;
17359diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17360index a3fa43b..8966f4c 100644
17361--- a/arch/x86/kernel/machine_kexec_32.c
17362+++ b/arch/x86/kernel/machine_kexec_32.c
17363@@ -27,7 +27,7 @@
17364 #include <asm/cacheflush.h>
17365 #include <asm/debugreg.h>
17366
17367-static void set_idt(void *newidt, __u16 limit)
17368+static void set_idt(struct desc_struct *newidt, __u16 limit)
17369 {
17370 struct desc_ptr curidt;
17371
17372@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17373 }
17374
17375
17376-static void set_gdt(void *newgdt, __u16 limit)
17377+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17378 {
17379 struct desc_ptr curgdt;
17380
17381@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17382 }
17383
17384 control_page = page_address(image->control_code_page);
17385- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17386+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17387
17388 relocate_kernel_ptr = control_page;
17389 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17390diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17391index 3ca42d0..7cff8cc 100644
17392--- a/arch/x86/kernel/microcode_intel.c
17393+++ b/arch/x86/kernel/microcode_intel.c
17394@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17395
17396 static int get_ucode_user(void *to, const void *from, size_t n)
17397 {
17398- return copy_from_user(to, from, n);
17399+ return copy_from_user(to, (const void __force_user *)from, n);
17400 }
17401
17402 static enum ucode_state
17403 request_microcode_user(int cpu, const void __user *buf, size_t size)
17404 {
17405- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17406+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17407 }
17408
17409 static void microcode_fini_cpu(int cpu)
17410diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17411index 925179f..267ac7a 100644
17412--- a/arch/x86/kernel/module.c
17413+++ b/arch/x86/kernel/module.c
17414@@ -36,15 +36,60 @@
17415 #define DEBUGP(fmt...)
17416 #endif
17417
17418-void *module_alloc(unsigned long size)
17419+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17420 {
17421- if (PAGE_ALIGN(size) > MODULES_LEN)
17422+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17423 return NULL;
17424 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17425- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17426+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17427 -1, __builtin_return_address(0));
17428 }
17429
17430+void *module_alloc(unsigned long size)
17431+{
17432+
17433+#ifdef CONFIG_PAX_KERNEXEC
17434+ return __module_alloc(size, PAGE_KERNEL);
17435+#else
17436+ return __module_alloc(size, PAGE_KERNEL_EXEC);
17437+#endif
17438+
17439+}
17440+
17441+#ifdef CONFIG_PAX_KERNEXEC
17442+#ifdef CONFIG_X86_32
17443+void *module_alloc_exec(unsigned long size)
17444+{
17445+ struct vm_struct *area;
17446+
17447+ if (size == 0)
17448+ return NULL;
17449+
17450+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17451+ return area ? area->addr : NULL;
17452+}
17453+EXPORT_SYMBOL(module_alloc_exec);
17454+
17455+void module_free_exec(struct module *mod, void *module_region)
17456+{
17457+ vunmap(module_region);
17458+}
17459+EXPORT_SYMBOL(module_free_exec);
17460+#else
17461+void module_free_exec(struct module *mod, void *module_region)
17462+{
17463+ module_free(mod, module_region);
17464+}
17465+EXPORT_SYMBOL(module_free_exec);
17466+
17467+void *module_alloc_exec(unsigned long size)
17468+{
17469+ return __module_alloc(size, PAGE_KERNEL_RX);
17470+}
17471+EXPORT_SYMBOL(module_alloc_exec);
17472+#endif
17473+#endif
17474+
17475 #ifdef CONFIG_X86_32
17476 int apply_relocate(Elf32_Shdr *sechdrs,
17477 const char *strtab,
17478@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17479 unsigned int i;
17480 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17481 Elf32_Sym *sym;
17482- uint32_t *location;
17483+ uint32_t *plocation, location;
17484
17485 DEBUGP("Applying relocate section %u to %u\n", relsec,
17486 sechdrs[relsec].sh_info);
17487 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17488 /* This is where to make the change */
17489- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17490- + rel[i].r_offset;
17491+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17492+ location = (uint32_t)plocation;
17493+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17494+ plocation = ktla_ktva((void *)plocation);
17495 /* This is the symbol it is referring to. Note that all
17496 undefined symbols have been resolved. */
17497 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17498@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17499 switch (ELF32_R_TYPE(rel[i].r_info)) {
17500 case R_386_32:
17501 /* We add the value into the location given */
17502- *location += sym->st_value;
17503+ pax_open_kernel();
17504+ *plocation += sym->st_value;
17505+ pax_close_kernel();
17506 break;
17507 case R_386_PC32:
17508 /* Add the value, subtract its postition */
17509- *location += sym->st_value - (uint32_t)location;
17510+ pax_open_kernel();
17511+ *plocation += sym->st_value - location;
17512+ pax_close_kernel();
17513 break;
17514 default:
17515 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17516@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17517 case R_X86_64_NONE:
17518 break;
17519 case R_X86_64_64:
17520+ pax_open_kernel();
17521 *(u64 *)loc = val;
17522+ pax_close_kernel();
17523 break;
17524 case R_X86_64_32:
17525+ pax_open_kernel();
17526 *(u32 *)loc = val;
17527+ pax_close_kernel();
17528 if (val != *(u32 *)loc)
17529 goto overflow;
17530 break;
17531 case R_X86_64_32S:
17532+ pax_open_kernel();
17533 *(s32 *)loc = val;
17534+ pax_close_kernel();
17535 if ((s64)val != *(s32 *)loc)
17536 goto overflow;
17537 break;
17538 case R_X86_64_PC32:
17539 val -= (u64)loc;
17540+ pax_open_kernel();
17541 *(u32 *)loc = val;
17542+ pax_close_kernel();
17543+
17544 #if 0
17545 if ((s64)val != *(s32 *)loc)
17546 goto overflow;
17547diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17548index 47acaf3..ec48ab6 100644
17549--- a/arch/x86/kernel/nmi.c
17550+++ b/arch/x86/kernel/nmi.c
17551@@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
17552 dotraplinkage notrace __kprobes void
17553 do_nmi(struct pt_regs *regs, long error_code)
17554 {
17555+
17556+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17557+ if (!user_mode(regs)) {
17558+ unsigned long cs = regs->cs & 0xFFFF;
17559+ unsigned long ip = ktva_ktla(regs->ip);
17560+
17561+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17562+ regs->ip = ip;
17563+ }
17564+#endif
17565+
17566 nmi_nesting_preprocess(regs);
17567
17568 nmi_enter();
17569diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17570index 676b8c7..870ba04 100644
17571--- a/arch/x86/kernel/paravirt-spinlocks.c
17572+++ b/arch/x86/kernel/paravirt-spinlocks.c
17573@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17574 arch_spin_lock(lock);
17575 }
17576
17577-struct pv_lock_ops pv_lock_ops = {
17578+struct pv_lock_ops pv_lock_ops __read_only = {
17579 #ifdef CONFIG_SMP
17580 .spin_is_locked = __ticket_spin_is_locked,
17581 .spin_is_contended = __ticket_spin_is_contended,
17582diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17583index d90272e..6bb013b 100644
17584--- a/arch/x86/kernel/paravirt.c
17585+++ b/arch/x86/kernel/paravirt.c
17586@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17587 {
17588 return x;
17589 }
17590+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17591+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17592+#endif
17593
17594 void __init default_banner(void)
17595 {
17596@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17597 if (opfunc == NULL)
17598 /* If there's no function, patch it with a ud2a (BUG) */
17599 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17600- else if (opfunc == _paravirt_nop)
17601+ else if (opfunc == (void *)_paravirt_nop)
17602 /* If the operation is a nop, then nop the callsite */
17603 ret = paravirt_patch_nop();
17604
17605 /* identity functions just return their single argument */
17606- else if (opfunc == _paravirt_ident_32)
17607+ else if (opfunc == (void *)_paravirt_ident_32)
17608 ret = paravirt_patch_ident_32(insnbuf, len);
17609- else if (opfunc == _paravirt_ident_64)
17610+ else if (opfunc == (void *)_paravirt_ident_64)
17611 ret = paravirt_patch_ident_64(insnbuf, len);
17612+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17613+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17614+ ret = paravirt_patch_ident_64(insnbuf, len);
17615+#endif
17616
17617 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17618 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17619@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17620 if (insn_len > len || start == NULL)
17621 insn_len = len;
17622 else
17623- memcpy(insnbuf, start, insn_len);
17624+ memcpy(insnbuf, ktla_ktva(start), insn_len);
17625
17626 return insn_len;
17627 }
17628@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17629 preempt_enable();
17630 }
17631
17632-struct pv_info pv_info = {
17633+struct pv_info pv_info __read_only = {
17634 .name = "bare hardware",
17635 .paravirt_enabled = 0,
17636 .kernel_rpl = 0,
17637@@ -313,16 +320,16 @@ struct pv_info pv_info = {
17638 #endif
17639 };
17640
17641-struct pv_init_ops pv_init_ops = {
17642+struct pv_init_ops pv_init_ops __read_only = {
17643 .patch = native_patch,
17644 };
17645
17646-struct pv_time_ops pv_time_ops = {
17647+struct pv_time_ops pv_time_ops __read_only = {
17648 .sched_clock = native_sched_clock,
17649 .steal_clock = native_steal_clock,
17650 };
17651
17652-struct pv_irq_ops pv_irq_ops = {
17653+struct pv_irq_ops pv_irq_ops __read_only = {
17654 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17655 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17656 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17657@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17658 #endif
17659 };
17660
17661-struct pv_cpu_ops pv_cpu_ops = {
17662+struct pv_cpu_ops pv_cpu_ops __read_only = {
17663 .cpuid = native_cpuid,
17664 .get_debugreg = native_get_debugreg,
17665 .set_debugreg = native_set_debugreg,
17666@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17667 .end_context_switch = paravirt_nop,
17668 };
17669
17670-struct pv_apic_ops pv_apic_ops = {
17671+struct pv_apic_ops pv_apic_ops __read_only = {
17672 #ifdef CONFIG_X86_LOCAL_APIC
17673 .startup_ipi_hook = paravirt_nop,
17674 #endif
17675 };
17676
17677-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17678+#ifdef CONFIG_X86_32
17679+#ifdef CONFIG_X86_PAE
17680+/* 64-bit pagetable entries */
17681+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17682+#else
17683 /* 32-bit pagetable entries */
17684 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17685+#endif
17686 #else
17687 /* 64-bit pagetable entries */
17688 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17689 #endif
17690
17691-struct pv_mmu_ops pv_mmu_ops = {
17692+struct pv_mmu_ops pv_mmu_ops __read_only = {
17693
17694 .read_cr2 = native_read_cr2,
17695 .write_cr2 = native_write_cr2,
17696@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17697 .make_pud = PTE_IDENT,
17698
17699 .set_pgd = native_set_pgd,
17700+ .set_pgd_batched = native_set_pgd_batched,
17701 #endif
17702 #endif /* PAGETABLE_LEVELS >= 3 */
17703
17704@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17705 },
17706
17707 .set_fixmap = native_set_fixmap,
17708+
17709+#ifdef CONFIG_PAX_KERNEXEC
17710+ .pax_open_kernel = native_pax_open_kernel,
17711+ .pax_close_kernel = native_pax_close_kernel,
17712+#endif
17713+
17714 };
17715
17716 EXPORT_SYMBOL_GPL(pv_time_ops);
17717diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17718index 35ccf75..7a15747 100644
17719--- a/arch/x86/kernel/pci-iommu_table.c
17720+++ b/arch/x86/kernel/pci-iommu_table.c
17721@@ -2,7 +2,7 @@
17722 #include <asm/iommu_table.h>
17723 #include <linux/string.h>
17724 #include <linux/kallsyms.h>
17725-
17726+#include <linux/sched.h>
17727
17728 #define DEBUG 1
17729
17730diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
17731index 15763af..da59ada 100644
17732--- a/arch/x86/kernel/process.c
17733+++ b/arch/x86/kernel/process.c
17734@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
17735
17736 void free_thread_info(struct thread_info *ti)
17737 {
17738- free_thread_xstate(ti->task);
17739 free_pages((unsigned long)ti, THREAD_ORDER);
17740 }
17741
17742+static struct kmem_cache *task_struct_cachep;
17743+
17744 void arch_task_cache_init(void)
17745 {
17746- task_xstate_cachep =
17747- kmem_cache_create("task_xstate", xstate_size,
17748+ /* create a slab on which task_structs can be allocated */
17749+ task_struct_cachep =
17750+ kmem_cache_create("task_struct", sizeof(struct task_struct),
17751+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
17752+
17753+ task_xstate_cachep =
17754+ kmem_cache_create("task_xstate", xstate_size,
17755 __alignof__(union thread_xstate),
17756- SLAB_PANIC | SLAB_NOTRACK, NULL);
17757+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
17758+}
17759+
17760+struct task_struct *alloc_task_struct_node(int node)
17761+{
17762+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
17763+}
17764+
17765+void free_task_struct(struct task_struct *task)
17766+{
17767+ free_thread_xstate(task);
17768+ kmem_cache_free(task_struct_cachep, task);
17769 }
17770
17771 /*
17772@@ -70,7 +87,7 @@ void exit_thread(void)
17773 unsigned long *bp = t->io_bitmap_ptr;
17774
17775 if (bp) {
17776- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
17777+ struct tss_struct *tss = init_tss + get_cpu();
17778
17779 t->io_bitmap_ptr = NULL;
17780 clear_thread_flag(TIF_IO_BITMAP);
17781@@ -106,7 +123,7 @@ void show_regs_common(void)
17782
17783 printk(KERN_CONT "\n");
17784 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
17785- current->pid, current->comm, print_tainted(),
17786+ task_pid_nr(current), current->comm, print_tainted(),
17787 init_utsname()->release,
17788 (int)strcspn(init_utsname()->version, " "),
17789 init_utsname()->version);
17790@@ -120,6 +137,9 @@ void flush_thread(void)
17791 {
17792 struct task_struct *tsk = current;
17793
17794+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17795+ loadsegment(gs, 0);
17796+#endif
17797 flush_ptrace_hw_breakpoint(tsk);
17798 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
17799 /*
17800@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
17801 regs.di = (unsigned long) arg;
17802
17803 #ifdef CONFIG_X86_32
17804- regs.ds = __USER_DS;
17805- regs.es = __USER_DS;
17806+ regs.ds = __KERNEL_DS;
17807+ regs.es = __KERNEL_DS;
17808 regs.fs = __KERNEL_PERCPU;
17809- regs.gs = __KERNEL_STACK_CANARY;
17810+ savesegment(gs, regs.gs);
17811 #else
17812 regs.ss = __KERNEL_DS;
17813 #endif
17814@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
17815
17816 return ret;
17817 }
17818-void stop_this_cpu(void *dummy)
17819+__noreturn void stop_this_cpu(void *dummy)
17820 {
17821 local_irq_disable();
17822 /*
17823@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
17824 }
17825 early_param("idle", idle_setup);
17826
17827-unsigned long arch_align_stack(unsigned long sp)
17828+#ifdef CONFIG_PAX_RANDKSTACK
17829+void pax_randomize_kstack(struct pt_regs *regs)
17830 {
17831- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
17832- sp -= get_random_int() % 8192;
17833- return sp & ~0xf;
17834-}
17835+ struct thread_struct *thread = &current->thread;
17836+ unsigned long time;
17837
17838-unsigned long arch_randomize_brk(struct mm_struct *mm)
17839-{
17840- unsigned long range_end = mm->brk + 0x02000000;
17841- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
17842-}
17843+ if (!randomize_va_space)
17844+ return;
17845+
17846+ if (v8086_mode(regs))
17847+ return;
17848
17849+ rdtscl(time);
17850+
17851+ /* P4 seems to return a 0 LSB, ignore it */
17852+#ifdef CONFIG_MPENTIUM4
17853+ time &= 0x3EUL;
17854+ time <<= 2;
17855+#elif defined(CONFIG_X86_64)
17856+ time &= 0xFUL;
17857+ time <<= 4;
17858+#else
17859+ time &= 0x1FUL;
17860+ time <<= 3;
17861+#endif
17862+
17863+ thread->sp0 ^= time;
17864+ load_sp0(init_tss + smp_processor_id(), thread);
17865+
17866+#ifdef CONFIG_X86_64
17867+ percpu_write(kernel_stack, thread->sp0);
17868+#endif
17869+}
17870+#endif
17871diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
17872index c08d1ff..6ae1c81 100644
17873--- a/arch/x86/kernel/process_32.c
17874+++ b/arch/x86/kernel/process_32.c
17875@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
17876 unsigned long thread_saved_pc(struct task_struct *tsk)
17877 {
17878 return ((unsigned long *)tsk->thread.sp)[3];
17879+//XXX return tsk->thread.eip;
17880 }
17881
17882 #ifndef CONFIG_SMP
17883@@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
17884 unsigned long sp;
17885 unsigned short ss, gs;
17886
17887- if (user_mode_vm(regs)) {
17888+ if (user_mode(regs)) {
17889 sp = regs->sp;
17890 ss = regs->ss & 0xffff;
17891- gs = get_user_gs(regs);
17892 } else {
17893 sp = kernel_stack_pointer(regs);
17894 savesegment(ss, ss);
17895- savesegment(gs, gs);
17896 }
17897+ gs = get_user_gs(regs);
17898
17899 show_regs_common();
17900
17901@@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17902 struct task_struct *tsk;
17903 int err;
17904
17905- childregs = task_pt_regs(p);
17906+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
17907 *childregs = *regs;
17908 childregs->ax = 0;
17909 childregs->sp = sp;
17910
17911 p->thread.sp = (unsigned long) childregs;
17912 p->thread.sp0 = (unsigned long) (childregs+1);
17913+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17914
17915 p->thread.ip = (unsigned long) ret_from_fork;
17916
17917@@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17918 struct thread_struct *prev = &prev_p->thread,
17919 *next = &next_p->thread;
17920 int cpu = smp_processor_id();
17921- struct tss_struct *tss = &per_cpu(init_tss, cpu);
17922+ struct tss_struct *tss = init_tss + cpu;
17923 fpu_switch_t fpu;
17924
17925 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
17926@@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17927 */
17928 lazy_save_gs(prev->gs);
17929
17930+#ifdef CONFIG_PAX_MEMORY_UDEREF
17931+ __set_fs(task_thread_info(next_p)->addr_limit);
17932+#endif
17933+
17934 /*
17935 * Load the per-thread Thread-Local Storage descriptor.
17936 */
17937@@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17938 */
17939 arch_end_context_switch(next_p);
17940
17941+ percpu_write(current_task, next_p);
17942+ percpu_write(current_tinfo, &next_p->tinfo);
17943+
17944 /*
17945 * Restore %gs if needed (which is common)
17946 */
17947@@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17948
17949 switch_fpu_finish(next_p, fpu);
17950
17951- percpu_write(current_task, next_p);
17952-
17953 return prev_p;
17954 }
17955
17956@@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
17957 } while (count++ < 16);
17958 return 0;
17959 }
17960-
17961diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
17962index cfa5c90..4facd28 100644
17963--- a/arch/x86/kernel/process_64.c
17964+++ b/arch/x86/kernel/process_64.c
17965@@ -89,7 +89,7 @@ static void __exit_idle(void)
17966 void exit_idle(void)
17967 {
17968 /* idle loop has pid 0 */
17969- if (current->pid)
17970+ if (task_pid_nr(current))
17971 return;
17972 __exit_idle();
17973 }
17974@@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17975 struct pt_regs *childregs;
17976 struct task_struct *me = current;
17977
17978- childregs = ((struct pt_regs *)
17979- (THREAD_SIZE + task_stack_page(p))) - 1;
17980+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
17981 *childregs = *regs;
17982
17983 childregs->ax = 0;
17984@@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17985 p->thread.sp = (unsigned long) childregs;
17986 p->thread.sp0 = (unsigned long) (childregs+1);
17987 p->thread.usersp = me->thread.usersp;
17988+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17989
17990 set_tsk_thread_flag(p, TIF_FORK);
17991
17992@@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17993 struct thread_struct *prev = &prev_p->thread;
17994 struct thread_struct *next = &next_p->thread;
17995 int cpu = smp_processor_id();
17996- struct tss_struct *tss = &per_cpu(init_tss, cpu);
17997+ struct tss_struct *tss = init_tss + cpu;
17998 unsigned fsindex, gsindex;
17999 fpu_switch_t fpu;
18000
18001@@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18002 prev->usersp = percpu_read(old_rsp);
18003 percpu_write(old_rsp, next->usersp);
18004 percpu_write(current_task, next_p);
18005+ percpu_write(current_tinfo, &next_p->tinfo);
18006
18007- percpu_write(kernel_stack,
18008- (unsigned long)task_stack_page(next_p) +
18009- THREAD_SIZE - KERNEL_STACK_OFFSET);
18010+ percpu_write(kernel_stack, next->sp0);
18011
18012 /*
18013 * Now maybe reload the debug registers and handle I/O bitmaps
18014@@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18015 if (!p || p == current || p->state == TASK_RUNNING)
18016 return 0;
18017 stack = (unsigned long)task_stack_page(p);
18018- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18019+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18020 return 0;
18021 fp = *(u64 *)(p->thread.sp);
18022 do {
18023- if (fp < (unsigned long)stack ||
18024- fp >= (unsigned long)stack+THREAD_SIZE)
18025+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18026 return 0;
18027 ip = *(u64 *)(fp+8);
18028 if (!in_sched_functions(ip))
18029diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18030index 5026738..9e6d6dc 100644
18031--- a/arch/x86/kernel/ptrace.c
18032+++ b/arch/x86/kernel/ptrace.c
18033@@ -823,7 +823,7 @@ long arch_ptrace(struct task_struct *child, long request,
18034 unsigned long addr, unsigned long data)
18035 {
18036 int ret;
18037- unsigned long __user *datap = (unsigned long __user *)data;
18038+ unsigned long __user *datap = (__force unsigned long __user *)data;
18039
18040 switch (request) {
18041 /* read the word at location addr in the USER area. */
18042@@ -908,14 +908,14 @@ long arch_ptrace(struct task_struct *child, long request,
18043 if ((int) addr < 0)
18044 return -EIO;
18045 ret = do_get_thread_area(child, addr,
18046- (struct user_desc __user *)data);
18047+ (__force struct user_desc __user *) data);
18048 break;
18049
18050 case PTRACE_SET_THREAD_AREA:
18051 if ((int) addr < 0)
18052 return -EIO;
18053 ret = do_set_thread_area(child, addr,
18054- (struct user_desc __user *)data, 0);
18055+ (__force struct user_desc __user *) data, 0);
18056 break;
18057 #endif
18058
18059@@ -1332,7 +1332,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18060 memset(info, 0, sizeof(*info));
18061 info->si_signo = SIGTRAP;
18062 info->si_code = si_code;
18063- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18064+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18065 }
18066
18067 void user_single_step_siginfo(struct task_struct *tsk,
18068diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18069index 42eb330..139955c 100644
18070--- a/arch/x86/kernel/pvclock.c
18071+++ b/arch/x86/kernel/pvclock.c
18072@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18073 return pv_tsc_khz;
18074 }
18075
18076-static atomic64_t last_value = ATOMIC64_INIT(0);
18077+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18078
18079 void pvclock_resume(void)
18080 {
18081- atomic64_set(&last_value, 0);
18082+ atomic64_set_unchecked(&last_value, 0);
18083 }
18084
18085 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18086@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18087 * updating at the same time, and one of them could be slightly behind,
18088 * making the assumption that last_value always go forward fail to hold.
18089 */
18090- last = atomic64_read(&last_value);
18091+ last = atomic64_read_unchecked(&last_value);
18092 do {
18093 if (ret < last)
18094 return last;
18095- last = atomic64_cmpxchg(&last_value, last, ret);
18096+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18097 } while (unlikely(last != ret));
18098
18099 return ret;
18100diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18101index d840e69..98e9581 100644
18102--- a/arch/x86/kernel/reboot.c
18103+++ b/arch/x86/kernel/reboot.c
18104@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18105 EXPORT_SYMBOL(pm_power_off);
18106
18107 static const struct desc_ptr no_idt = {};
18108-static int reboot_mode;
18109+static unsigned short reboot_mode;
18110 enum reboot_type reboot_type = BOOT_ACPI;
18111 int reboot_force;
18112
18113@@ -335,13 +335,17 @@ core_initcall(reboot_init);
18114 extern const unsigned char machine_real_restart_asm[];
18115 extern const u64 machine_real_restart_gdt[3];
18116
18117-void machine_real_restart(unsigned int type)
18118+__noreturn void machine_real_restart(unsigned int type)
18119 {
18120 void *restart_va;
18121 unsigned long restart_pa;
18122- void (*restart_lowmem)(unsigned int);
18123+ void (* __noreturn restart_lowmem)(unsigned int);
18124 u64 *lowmem_gdt;
18125
18126+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18127+ struct desc_struct *gdt;
18128+#endif
18129+
18130 local_irq_disable();
18131
18132 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18133@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18134 boot)". This seems like a fairly standard thing that gets set by
18135 REBOOT.COM programs, and the previous reset routine did this
18136 too. */
18137- *((unsigned short *)0x472) = reboot_mode;
18138+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18139
18140 /* Patch the GDT in the low memory trampoline */
18141 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18142
18143 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18144 restart_pa = virt_to_phys(restart_va);
18145- restart_lowmem = (void (*)(unsigned int))restart_pa;
18146+ restart_lowmem = (void *)restart_pa;
18147
18148 /* GDT[0]: GDT self-pointer */
18149 lowmem_gdt[0] =
18150@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18151 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18152
18153 /* Jump to the identity-mapped low memory code */
18154+
18155+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18156+ gdt = get_cpu_gdt_table(smp_processor_id());
18157+ pax_open_kernel();
18158+#ifdef CONFIG_PAX_MEMORY_UDEREF
18159+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18160+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18161+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18162+#endif
18163+#ifdef CONFIG_PAX_KERNEXEC
18164+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18165+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18166+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18167+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18168+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18169+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18170+#endif
18171+ pax_close_kernel();
18172+#endif
18173+
18174+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18175+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18176+ unreachable();
18177+#else
18178 restart_lowmem(type);
18179+#endif
18180+
18181 }
18182 #ifdef CONFIG_APM_MODULE
18183 EXPORT_SYMBOL(machine_real_restart);
18184@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18185 * try to force a triple fault and then cycle between hitting the keyboard
18186 * controller and doing that
18187 */
18188-static void native_machine_emergency_restart(void)
18189+__noreturn static void native_machine_emergency_restart(void)
18190 {
18191 int i;
18192 int attempt = 0;
18193@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18194 #endif
18195 }
18196
18197-static void __machine_emergency_restart(int emergency)
18198+static __noreturn void __machine_emergency_restart(int emergency)
18199 {
18200 reboot_emergency = emergency;
18201 machine_ops.emergency_restart();
18202 }
18203
18204-static void native_machine_restart(char *__unused)
18205+static __noreturn void native_machine_restart(char *__unused)
18206 {
18207 printk("machine restart\n");
18208
18209@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18210 __machine_emergency_restart(0);
18211 }
18212
18213-static void native_machine_halt(void)
18214+static __noreturn void native_machine_halt(void)
18215 {
18216 /* stop other cpus and apics */
18217 machine_shutdown();
18218@@ -706,7 +736,7 @@ static void native_machine_halt(void)
18219 stop_this_cpu(NULL);
18220 }
18221
18222-static void native_machine_power_off(void)
18223+__noreturn static void native_machine_power_off(void)
18224 {
18225 if (pm_power_off) {
18226 if (!reboot_force)
18227@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18228 }
18229 /* a fallback in case there is no PM info available */
18230 tboot_shutdown(TB_SHUTDOWN_HALT);
18231+ unreachable();
18232 }
18233
18234 struct machine_ops machine_ops = {
18235diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18236index 7a6f3b3..bed145d7 100644
18237--- a/arch/x86/kernel/relocate_kernel_64.S
18238+++ b/arch/x86/kernel/relocate_kernel_64.S
18239@@ -11,6 +11,7 @@
18240 #include <asm/kexec.h>
18241 #include <asm/processor-flags.h>
18242 #include <asm/pgtable_types.h>
18243+#include <asm/alternative-asm.h>
18244
18245 /*
18246 * Must be relocatable PIC code callable as a C function
18247@@ -160,13 +161,14 @@ identity_mapped:
18248 xorq %rbp, %rbp
18249 xorq %r8, %r8
18250 xorq %r9, %r9
18251- xorq %r10, %r9
18252+ xorq %r10, %r10
18253 xorq %r11, %r11
18254 xorq %r12, %r12
18255 xorq %r13, %r13
18256 xorq %r14, %r14
18257 xorq %r15, %r15
18258
18259+ pax_force_retaddr 0, 1
18260 ret
18261
18262 1:
18263diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18264index d7d5099..28555d0 100644
18265--- a/arch/x86/kernel/setup.c
18266+++ b/arch/x86/kernel/setup.c
18267@@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18268
18269 switch (data->type) {
18270 case SETUP_E820_EXT:
18271- parse_e820_ext(data);
18272+ parse_e820_ext((struct setup_data __force_kernel *)data);
18273 break;
18274 case SETUP_DTB:
18275 add_dtb(pa_data);
18276@@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18277 * area (640->1Mb) as ram even though it is not.
18278 * take them out.
18279 */
18280- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18281+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18282 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18283 }
18284
18285@@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18286
18287 if (!boot_params.hdr.root_flags)
18288 root_mountflags &= ~MS_RDONLY;
18289- init_mm.start_code = (unsigned long) _text;
18290- init_mm.end_code = (unsigned long) _etext;
18291+ init_mm.start_code = ktla_ktva((unsigned long) _text);
18292+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
18293 init_mm.end_data = (unsigned long) _edata;
18294 init_mm.brk = _brk_end;
18295
18296- code_resource.start = virt_to_phys(_text);
18297- code_resource.end = virt_to_phys(_etext)-1;
18298- data_resource.start = virt_to_phys(_etext);
18299+ code_resource.start = virt_to_phys(ktla_ktva(_text));
18300+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18301+ data_resource.start = virt_to_phys(_sdata);
18302 data_resource.end = virt_to_phys(_edata)-1;
18303 bss_resource.start = virt_to_phys(&__bss_start);
18304 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18305diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18306index 71f4727..16dc9f7 100644
18307--- a/arch/x86/kernel/setup_percpu.c
18308+++ b/arch/x86/kernel/setup_percpu.c
18309@@ -21,19 +21,17 @@
18310 #include <asm/cpu.h>
18311 #include <asm/stackprotector.h>
18312
18313-DEFINE_PER_CPU(int, cpu_number);
18314+#ifdef CONFIG_SMP
18315+DEFINE_PER_CPU(unsigned int, cpu_number);
18316 EXPORT_PER_CPU_SYMBOL(cpu_number);
18317+#endif
18318
18319-#ifdef CONFIG_X86_64
18320 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18321-#else
18322-#define BOOT_PERCPU_OFFSET 0
18323-#endif
18324
18325 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18326 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18327
18328-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18329+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18330 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18331 };
18332 EXPORT_SYMBOL(__per_cpu_offset);
18333@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
18334 {
18335 #ifdef CONFIG_X86_32
18336 struct desc_struct gdt;
18337+ unsigned long base = per_cpu_offset(cpu);
18338
18339- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18340- 0x2 | DESCTYPE_S, 0x8);
18341- gdt.s = 1;
18342+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18343+ 0x83 | DESCTYPE_S, 0xC);
18344 write_gdt_entry(get_cpu_gdt_table(cpu),
18345 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18346 #endif
18347@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
18348 /* alrighty, percpu areas up and running */
18349 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18350 for_each_possible_cpu(cpu) {
18351+#ifdef CONFIG_CC_STACKPROTECTOR
18352+#ifdef CONFIG_X86_32
18353+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
18354+#endif
18355+#endif
18356 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18357 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18358 per_cpu(cpu_number, cpu) = cpu;
18359@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
18360 */
18361 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18362 #endif
18363+#ifdef CONFIG_CC_STACKPROTECTOR
18364+#ifdef CONFIG_X86_32
18365+ if (!cpu)
18366+ per_cpu(stack_canary.canary, cpu) = canary;
18367+#endif
18368+#endif
18369 /*
18370 * Up to this point, the boot CPU has been using .init.data
18371 * area. Reload any changed state for the boot CPU.
18372diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18373index 46a01bd..2e88e6d 100644
18374--- a/arch/x86/kernel/signal.c
18375+++ b/arch/x86/kernel/signal.c
18376@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18377 * Align the stack pointer according to the i386 ABI,
18378 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18379 */
18380- sp = ((sp + 4) & -16ul) - 4;
18381+ sp = ((sp - 12) & -16ul) - 4;
18382 #else /* !CONFIG_X86_32 */
18383 sp = round_down(sp, 16) - 8;
18384 #endif
18385@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18386 * Return an always-bogus address instead so we will die with SIGSEGV.
18387 */
18388 if (onsigstack && !likely(on_sig_stack(sp)))
18389- return (void __user *)-1L;
18390+ return (__force void __user *)-1L;
18391
18392 /* save i387 state */
18393 if (used_math() && save_i387_xstate(*fpstate) < 0)
18394- return (void __user *)-1L;
18395+ return (__force void __user *)-1L;
18396
18397 return (void __user *)sp;
18398 }
18399@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18400 }
18401
18402 if (current->mm->context.vdso)
18403- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18404+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18405 else
18406- restorer = &frame->retcode;
18407+ restorer = (void __user *)&frame->retcode;
18408 if (ka->sa.sa_flags & SA_RESTORER)
18409 restorer = ka->sa.sa_restorer;
18410
18411@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18412 * reasons and because gdb uses it as a signature to notice
18413 * signal handler stack frames.
18414 */
18415- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18416+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18417
18418 if (err)
18419 return -EFAULT;
18420@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18421 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18422
18423 /* Set up to return from userspace. */
18424- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18425+ if (current->mm->context.vdso)
18426+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18427+ else
18428+ restorer = (void __user *)&frame->retcode;
18429 if (ka->sa.sa_flags & SA_RESTORER)
18430 restorer = ka->sa.sa_restorer;
18431 put_user_ex(restorer, &frame->pretcode);
18432@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18433 * reasons and because gdb uses it as a signature to notice
18434 * signal handler stack frames.
18435 */
18436- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18437+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18438 } put_user_catch(err);
18439
18440 if (err)
18441@@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
18442 * X86_32: vm86 regs switched out by assembly code before reaching
18443 * here, so testing against kernel CS suffices.
18444 */
18445- if (!user_mode(regs))
18446+ if (!user_mode_novm(regs))
18447 return;
18448
18449 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18450diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18451index 66d250c..f1b10bd 100644
18452--- a/arch/x86/kernel/smpboot.c
18453+++ b/arch/x86/kernel/smpboot.c
18454@@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18455 set_idle_for_cpu(cpu, c_idle.idle);
18456 do_rest:
18457 per_cpu(current_task, cpu) = c_idle.idle;
18458+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18459 #ifdef CONFIG_X86_32
18460 /* Stack for startup_32 can be just as for start_secondary onwards */
18461 irq_ctx_init(cpu);
18462 #else
18463 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18464 initial_gs = per_cpu_offset(cpu);
18465- per_cpu(kernel_stack, cpu) =
18466- (unsigned long)task_stack_page(c_idle.idle) -
18467- KERNEL_STACK_OFFSET + THREAD_SIZE;
18468+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18469 #endif
18470+
18471+ pax_open_kernel();
18472 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18473+ pax_close_kernel();
18474+
18475 initial_code = (unsigned long)start_secondary;
18476 stack_start = c_idle.idle->thread.sp;
18477
18478@@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18479
18480 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18481
18482+#ifdef CONFIG_PAX_PER_CPU_PGD
18483+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18484+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18485+ KERNEL_PGD_PTRS);
18486+#endif
18487+
18488 err = do_boot_cpu(apicid, cpu);
18489 if (err) {
18490 pr_debug("do_boot_cpu failed %d\n", err);
18491diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18492index c346d11..d43b163 100644
18493--- a/arch/x86/kernel/step.c
18494+++ b/arch/x86/kernel/step.c
18495@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18496 struct desc_struct *desc;
18497 unsigned long base;
18498
18499- seg &= ~7UL;
18500+ seg >>= 3;
18501
18502 mutex_lock(&child->mm->context.lock);
18503- if (unlikely((seg >> 3) >= child->mm->context.size))
18504+ if (unlikely(seg >= child->mm->context.size))
18505 addr = -1L; /* bogus selector, access would fault */
18506 else {
18507 desc = child->mm->context.ldt + seg;
18508@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18509 addr += base;
18510 }
18511 mutex_unlock(&child->mm->context.lock);
18512- }
18513+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18514+ addr = ktla_ktva(addr);
18515
18516 return addr;
18517 }
18518@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18519 unsigned char opcode[15];
18520 unsigned long addr = convert_ip_to_linear(child, regs);
18521
18522+ if (addr == -EINVAL)
18523+ return 0;
18524+
18525 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18526 for (i = 0; i < copied; i++) {
18527 switch (opcode[i]) {
18528diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18529index 0b0cb5f..db6b9ed 100644
18530--- a/arch/x86/kernel/sys_i386_32.c
18531+++ b/arch/x86/kernel/sys_i386_32.c
18532@@ -24,17 +24,224 @@
18533
18534 #include <asm/syscalls.h>
18535
18536-/*
18537- * Do a system call from kernel instead of calling sys_execve so we
18538- * end up with proper pt_regs.
18539- */
18540-int kernel_execve(const char *filename,
18541- const char *const argv[],
18542- const char *const envp[])
18543+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18544 {
18545- long __res;
18546- asm volatile ("int $0x80"
18547- : "=a" (__res)
18548- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18549- return __res;
18550+ unsigned long pax_task_size = TASK_SIZE;
18551+
18552+#ifdef CONFIG_PAX_SEGMEXEC
18553+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18554+ pax_task_size = SEGMEXEC_TASK_SIZE;
18555+#endif
18556+
18557+ if (len > pax_task_size || addr > pax_task_size - len)
18558+ return -EINVAL;
18559+
18560+ return 0;
18561+}
18562+
18563+unsigned long
18564+arch_get_unmapped_area(struct file *filp, unsigned long addr,
18565+ unsigned long len, unsigned long pgoff, unsigned long flags)
18566+{
18567+ struct mm_struct *mm = current->mm;
18568+ struct vm_area_struct *vma;
18569+ unsigned long start_addr, pax_task_size = TASK_SIZE;
18570+
18571+#ifdef CONFIG_PAX_SEGMEXEC
18572+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
18573+ pax_task_size = SEGMEXEC_TASK_SIZE;
18574+#endif
18575+
18576+ pax_task_size -= PAGE_SIZE;
18577+
18578+ if (len > pax_task_size)
18579+ return -ENOMEM;
18580+
18581+ if (flags & MAP_FIXED)
18582+ return addr;
18583+
18584+#ifdef CONFIG_PAX_RANDMMAP
18585+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18586+#endif
18587+
18588+ if (addr) {
18589+ addr = PAGE_ALIGN(addr);
18590+ if (pax_task_size - len >= addr) {
18591+ vma = find_vma(mm, addr);
18592+ if (check_heap_stack_gap(vma, addr, len))
18593+ return addr;
18594+ }
18595+ }
18596+ if (len > mm->cached_hole_size) {
18597+ start_addr = addr = mm->free_area_cache;
18598+ } else {
18599+ start_addr = addr = mm->mmap_base;
18600+ mm->cached_hole_size = 0;
18601+ }
18602+
18603+#ifdef CONFIG_PAX_PAGEEXEC
18604+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18605+ start_addr = 0x00110000UL;
18606+
18607+#ifdef CONFIG_PAX_RANDMMAP
18608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
18609+ start_addr += mm->delta_mmap & 0x03FFF000UL;
18610+#endif
18611+
18612+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18613+ start_addr = addr = mm->mmap_base;
18614+ else
18615+ addr = start_addr;
18616+ }
18617+#endif
18618+
18619+full_search:
18620+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18621+ /* At this point: (!vma || addr < vma->vm_end). */
18622+ if (pax_task_size - len < addr) {
18623+ /*
18624+ * Start a new search - just in case we missed
18625+ * some holes.
18626+ */
18627+ if (start_addr != mm->mmap_base) {
18628+ start_addr = addr = mm->mmap_base;
18629+ mm->cached_hole_size = 0;
18630+ goto full_search;
18631+ }
18632+ return -ENOMEM;
18633+ }
18634+ if (check_heap_stack_gap(vma, addr, len))
18635+ break;
18636+ if (addr + mm->cached_hole_size < vma->vm_start)
18637+ mm->cached_hole_size = vma->vm_start - addr;
18638+ addr = vma->vm_end;
18639+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
18640+ start_addr = addr = mm->mmap_base;
18641+ mm->cached_hole_size = 0;
18642+ goto full_search;
18643+ }
18644+ }
18645+
18646+ /*
18647+ * Remember the place where we stopped the search:
18648+ */
18649+ mm->free_area_cache = addr + len;
18650+ return addr;
18651+}
18652+
18653+unsigned long
18654+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18655+ const unsigned long len, const unsigned long pgoff,
18656+ const unsigned long flags)
18657+{
18658+ struct vm_area_struct *vma;
18659+ struct mm_struct *mm = current->mm;
18660+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18661+
18662+#ifdef CONFIG_PAX_SEGMEXEC
18663+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
18664+ pax_task_size = SEGMEXEC_TASK_SIZE;
18665+#endif
18666+
18667+ pax_task_size -= PAGE_SIZE;
18668+
18669+ /* requested length too big for entire address space */
18670+ if (len > pax_task_size)
18671+ return -ENOMEM;
18672+
18673+ if (flags & MAP_FIXED)
18674+ return addr;
18675+
18676+#ifdef CONFIG_PAX_PAGEEXEC
18677+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18678+ goto bottomup;
18679+#endif
18680+
18681+#ifdef CONFIG_PAX_RANDMMAP
18682+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18683+#endif
18684+
18685+ /* requesting a specific address */
18686+ if (addr) {
18687+ addr = PAGE_ALIGN(addr);
18688+ if (pax_task_size - len >= addr) {
18689+ vma = find_vma(mm, addr);
18690+ if (check_heap_stack_gap(vma, addr, len))
18691+ return addr;
18692+ }
18693+ }
18694+
18695+ /* check if free_area_cache is useful for us */
18696+ if (len <= mm->cached_hole_size) {
18697+ mm->cached_hole_size = 0;
18698+ mm->free_area_cache = mm->mmap_base;
18699+ }
18700+
18701+ /* either no address requested or can't fit in requested address hole */
18702+ addr = mm->free_area_cache;
18703+
18704+ /* make sure it can fit in the remaining address space */
18705+ if (addr > len) {
18706+ vma = find_vma(mm, addr-len);
18707+ if (check_heap_stack_gap(vma, addr - len, len))
18708+ /* remember the address as a hint for next time */
18709+ return (mm->free_area_cache = addr-len);
18710+ }
18711+
18712+ if (mm->mmap_base < len)
18713+ goto bottomup;
18714+
18715+ addr = mm->mmap_base-len;
18716+
18717+ do {
18718+ /*
18719+ * Lookup failure means no vma is above this address,
18720+ * else if new region fits below vma->vm_start,
18721+ * return with success:
18722+ */
18723+ vma = find_vma(mm, addr);
18724+ if (check_heap_stack_gap(vma, addr, len))
18725+ /* remember the address as a hint for next time */
18726+ return (mm->free_area_cache = addr);
18727+
18728+ /* remember the largest hole we saw so far */
18729+ if (addr + mm->cached_hole_size < vma->vm_start)
18730+ mm->cached_hole_size = vma->vm_start - addr;
18731+
18732+ /* try just below the current vma->vm_start */
18733+ addr = skip_heap_stack_gap(vma, len);
18734+ } while (!IS_ERR_VALUE(addr));
18735+
18736+bottomup:
18737+ /*
18738+ * A failed mmap() very likely causes application failure,
18739+ * so fall back to the bottom-up function here. This scenario
18740+ * can happen with large stack limits and large mmap()
18741+ * allocations.
18742+ */
18743+
18744+#ifdef CONFIG_PAX_SEGMEXEC
18745+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
18746+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18747+ else
18748+#endif
18749+
18750+ mm->mmap_base = TASK_UNMAPPED_BASE;
18751+
18752+#ifdef CONFIG_PAX_RANDMMAP
18753+ if (mm->pax_flags & MF_PAX_RANDMMAP)
18754+ mm->mmap_base += mm->delta_mmap;
18755+#endif
18756+
18757+ mm->free_area_cache = mm->mmap_base;
18758+ mm->cached_hole_size = ~0UL;
18759+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18760+ /*
18761+ * Restore the topdown base:
18762+ */
18763+ mm->mmap_base = base;
18764+ mm->free_area_cache = base;
18765+ mm->cached_hole_size = ~0UL;
18766+
18767+ return addr;
18768 }
18769diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
18770index 0514890..3dbebce 100644
18771--- a/arch/x86/kernel/sys_x86_64.c
18772+++ b/arch/x86/kernel/sys_x86_64.c
18773@@ -95,8 +95,8 @@ out:
18774 return error;
18775 }
18776
18777-static void find_start_end(unsigned long flags, unsigned long *begin,
18778- unsigned long *end)
18779+static void find_start_end(struct mm_struct *mm, unsigned long flags,
18780+ unsigned long *begin, unsigned long *end)
18781 {
18782 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
18783 unsigned long new_begin;
18784@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
18785 *begin = new_begin;
18786 }
18787 } else {
18788- *begin = TASK_UNMAPPED_BASE;
18789+ *begin = mm->mmap_base;
18790 *end = TASK_SIZE;
18791 }
18792 }
18793@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
18794 if (flags & MAP_FIXED)
18795 return addr;
18796
18797- find_start_end(flags, &begin, &end);
18798+ find_start_end(mm, flags, &begin, &end);
18799
18800 if (len > end)
18801 return -ENOMEM;
18802
18803+#ifdef CONFIG_PAX_RANDMMAP
18804+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18805+#endif
18806+
18807 if (addr) {
18808 addr = PAGE_ALIGN(addr);
18809 vma = find_vma(mm, addr);
18810- if (end - len >= addr &&
18811- (!vma || addr + len <= vma->vm_start))
18812+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
18813 return addr;
18814 }
18815 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
18816@@ -172,7 +175,7 @@ full_search:
18817 }
18818 return -ENOMEM;
18819 }
18820- if (!vma || addr + len <= vma->vm_start) {
18821+ if (check_heap_stack_gap(vma, addr, len)) {
18822 /*
18823 * Remember the place where we stopped the search:
18824 */
18825@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18826 {
18827 struct vm_area_struct *vma;
18828 struct mm_struct *mm = current->mm;
18829- unsigned long addr = addr0;
18830+ unsigned long base = mm->mmap_base, addr = addr0;
18831
18832 /* requested length too big for entire address space */
18833 if (len > TASK_SIZE)
18834@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18835 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
18836 goto bottomup;
18837
18838+#ifdef CONFIG_PAX_RANDMMAP
18839+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18840+#endif
18841+
18842 /* requesting a specific address */
18843 if (addr) {
18844 addr = PAGE_ALIGN(addr);
18845- vma = find_vma(mm, addr);
18846- if (TASK_SIZE - len >= addr &&
18847- (!vma || addr + len <= vma->vm_start))
18848- return addr;
18849+ if (TASK_SIZE - len >= addr) {
18850+ vma = find_vma(mm, addr);
18851+ if (check_heap_stack_gap(vma, addr, len))
18852+ return addr;
18853+ }
18854 }
18855
18856 /* check if free_area_cache is useful for us */
18857@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18858 ALIGN_TOPDOWN);
18859
18860 vma = find_vma(mm, tmp_addr);
18861- if (!vma || tmp_addr + len <= vma->vm_start)
18862+ if (check_heap_stack_gap(vma, tmp_addr, len))
18863 /* remember the address as a hint for next time */
18864 return mm->free_area_cache = tmp_addr;
18865 }
18866@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18867 * return with success:
18868 */
18869 vma = find_vma(mm, addr);
18870- if (!vma || addr+len <= vma->vm_start)
18871+ if (check_heap_stack_gap(vma, addr, len))
18872 /* remember the address as a hint for next time */
18873 return mm->free_area_cache = addr;
18874
18875@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18876 mm->cached_hole_size = vma->vm_start - addr;
18877
18878 /* try just below the current vma->vm_start */
18879- addr = vma->vm_start-len;
18880- } while (len < vma->vm_start);
18881+ addr = skip_heap_stack_gap(vma, len);
18882+ } while (!IS_ERR_VALUE(addr));
18883
18884 bottomup:
18885 /*
18886@@ -270,13 +278,21 @@ bottomup:
18887 * can happen with large stack limits and large mmap()
18888 * allocations.
18889 */
18890+ mm->mmap_base = TASK_UNMAPPED_BASE;
18891+
18892+#ifdef CONFIG_PAX_RANDMMAP
18893+ if (mm->pax_flags & MF_PAX_RANDMMAP)
18894+ mm->mmap_base += mm->delta_mmap;
18895+#endif
18896+
18897+ mm->free_area_cache = mm->mmap_base;
18898 mm->cached_hole_size = ~0UL;
18899- mm->free_area_cache = TASK_UNMAPPED_BASE;
18900 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18901 /*
18902 * Restore the topdown base:
18903 */
18904- mm->free_area_cache = mm->mmap_base;
18905+ mm->mmap_base = base;
18906+ mm->free_area_cache = base;
18907 mm->cached_hole_size = ~0UL;
18908
18909 return addr;
18910diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
18911index e2410e2..4fe3fbc 100644
18912--- a/arch/x86/kernel/tboot.c
18913+++ b/arch/x86/kernel/tboot.c
18914@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
18915
18916 void tboot_shutdown(u32 shutdown_type)
18917 {
18918- void (*shutdown)(void);
18919+ void (* __noreturn shutdown)(void);
18920
18921 if (!tboot_enabled())
18922 return;
18923@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
18924
18925 switch_to_tboot_pt();
18926
18927- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
18928+ shutdown = (void *)tboot->shutdown_entry;
18929 shutdown();
18930
18931 /* should not reach here */
18932@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
18933 tboot_shutdown(acpi_shutdown_map[sleep_state]);
18934 }
18935
18936-static atomic_t ap_wfs_count;
18937+static atomic_unchecked_t ap_wfs_count;
18938
18939 static int tboot_wait_for_aps(int num_aps)
18940 {
18941@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
18942 {
18943 switch (action) {
18944 case CPU_DYING:
18945- atomic_inc(&ap_wfs_count);
18946+ atomic_inc_unchecked(&ap_wfs_count);
18947 if (num_online_cpus() == 1)
18948- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
18949+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
18950 return NOTIFY_BAD;
18951 break;
18952 }
18953@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
18954
18955 tboot_create_trampoline();
18956
18957- atomic_set(&ap_wfs_count, 0);
18958+ atomic_set_unchecked(&ap_wfs_count, 0);
18959 register_hotcpu_notifier(&tboot_cpu_notifier);
18960 return 0;
18961 }
18962diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
18963index dd5fbf4..b7f2232 100644
18964--- a/arch/x86/kernel/time.c
18965+++ b/arch/x86/kernel/time.c
18966@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
18967 {
18968 unsigned long pc = instruction_pointer(regs);
18969
18970- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
18971+ if (!user_mode(regs) && in_lock_functions(pc)) {
18972 #ifdef CONFIG_FRAME_POINTER
18973- return *(unsigned long *)(regs->bp + sizeof(long));
18974+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
18975 #else
18976 unsigned long *sp =
18977 (unsigned long *)kernel_stack_pointer(regs);
18978@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
18979 * or above a saved flags. Eflags has bits 22-31 zero,
18980 * kernel addresses don't.
18981 */
18982+
18983+#ifdef CONFIG_PAX_KERNEXEC
18984+ return ktla_ktva(sp[0]);
18985+#else
18986 if (sp[0] >> 22)
18987 return sp[0];
18988 if (sp[1] >> 22)
18989 return sp[1];
18990 #endif
18991+
18992+#endif
18993 }
18994 return pc;
18995 }
18996diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
18997index 6bb7b85..dd853e1 100644
18998--- a/arch/x86/kernel/tls.c
18999+++ b/arch/x86/kernel/tls.c
19000@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19001 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19002 return -EINVAL;
19003
19004+#ifdef CONFIG_PAX_SEGMEXEC
19005+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19006+ return -EINVAL;
19007+#endif
19008+
19009 set_tls_desc(p, idx, &info, 1);
19010
19011 return 0;
19012diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19013index 451c0a7..e57f551 100644
19014--- a/arch/x86/kernel/trampoline_32.S
19015+++ b/arch/x86/kernel/trampoline_32.S
19016@@ -32,6 +32,12 @@
19017 #include <asm/segment.h>
19018 #include <asm/page_types.h>
19019
19020+#ifdef CONFIG_PAX_KERNEXEC
19021+#define ta(X) (X)
19022+#else
19023+#define ta(X) ((X) - __PAGE_OFFSET)
19024+#endif
19025+
19026 #ifdef CONFIG_SMP
19027
19028 .section ".x86_trampoline","a"
19029@@ -62,7 +68,7 @@ r_base = .
19030 inc %ax # protected mode (PE) bit
19031 lmsw %ax # into protected mode
19032 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19033- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19034+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19035
19036 # These need to be in the same 64K segment as the above;
19037 # hence we don't use the boot_gdt_descr defined in head.S
19038diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19039index 09ff517..df19fbff 100644
19040--- a/arch/x86/kernel/trampoline_64.S
19041+++ b/arch/x86/kernel/trampoline_64.S
19042@@ -90,7 +90,7 @@ startup_32:
19043 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19044 movl %eax, %ds
19045
19046- movl $X86_CR4_PAE, %eax
19047+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19048 movl %eax, %cr4 # Enable PAE mode
19049
19050 # Setup trampoline 4 level pagetables
19051@@ -138,7 +138,7 @@ tidt:
19052 # so the kernel can live anywhere
19053 .balign 4
19054 tgdt:
19055- .short tgdt_end - tgdt # gdt limit
19056+ .short tgdt_end - tgdt - 1 # gdt limit
19057 .long tgdt - r_base
19058 .short 0
19059 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19060diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19061index 4bbe04d..41d0943 100644
19062--- a/arch/x86/kernel/traps.c
19063+++ b/arch/x86/kernel/traps.c
19064@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19065
19066 /* Do we ignore FPU interrupts ? */
19067 char ignore_fpu_irq;
19068-
19069-/*
19070- * The IDT has to be page-aligned to simplify the Pentium
19071- * F0 0F bug workaround.
19072- */
19073-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19074 #endif
19075
19076 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19077@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19078 }
19079
19080 static void __kprobes
19081-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19082+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19083 long error_code, siginfo_t *info)
19084 {
19085 struct task_struct *tsk = current;
19086
19087 #ifdef CONFIG_X86_32
19088- if (regs->flags & X86_VM_MASK) {
19089+ if (v8086_mode(regs)) {
19090 /*
19091 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19092 * On nmi (interrupt 2), do_trap should not be called.
19093@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19094 }
19095 #endif
19096
19097- if (!user_mode(regs))
19098+ if (!user_mode_novm(regs))
19099 goto kernel_trap;
19100
19101 #ifdef CONFIG_X86_32
19102@@ -148,7 +142,7 @@ trap_signal:
19103 printk_ratelimit()) {
19104 printk(KERN_INFO
19105 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19106- tsk->comm, tsk->pid, str,
19107+ tsk->comm, task_pid_nr(tsk), str,
19108 regs->ip, regs->sp, error_code);
19109 print_vma_addr(" in ", regs->ip);
19110 printk("\n");
19111@@ -165,8 +159,20 @@ kernel_trap:
19112 if (!fixup_exception(regs)) {
19113 tsk->thread.error_code = error_code;
19114 tsk->thread.trap_no = trapnr;
19115+
19116+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19117+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19118+ str = "PAX: suspicious stack segment fault";
19119+#endif
19120+
19121 die(str, regs, error_code);
19122 }
19123+
19124+#ifdef CONFIG_PAX_REFCOUNT
19125+ if (trapnr == 4)
19126+ pax_report_refcount_overflow(regs);
19127+#endif
19128+
19129 return;
19130
19131 #ifdef CONFIG_X86_32
19132@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19133 conditional_sti(regs);
19134
19135 #ifdef CONFIG_X86_32
19136- if (regs->flags & X86_VM_MASK)
19137+ if (v8086_mode(regs))
19138 goto gp_in_vm86;
19139 #endif
19140
19141 tsk = current;
19142- if (!user_mode(regs))
19143+ if (!user_mode_novm(regs))
19144 goto gp_in_kernel;
19145
19146+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19147+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19148+ struct mm_struct *mm = tsk->mm;
19149+ unsigned long limit;
19150+
19151+ down_write(&mm->mmap_sem);
19152+ limit = mm->context.user_cs_limit;
19153+ if (limit < TASK_SIZE) {
19154+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19155+ up_write(&mm->mmap_sem);
19156+ return;
19157+ }
19158+ up_write(&mm->mmap_sem);
19159+ }
19160+#endif
19161+
19162 tsk->thread.error_code = error_code;
19163 tsk->thread.trap_no = 13;
19164
19165@@ -295,6 +317,13 @@ gp_in_kernel:
19166 if (notify_die(DIE_GPF, "general protection fault", regs,
19167 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19168 return;
19169+
19170+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19171+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19172+ die("PAX: suspicious general protection fault", regs, error_code);
19173+ else
19174+#endif
19175+
19176 die("general protection fault", regs, error_code);
19177 }
19178
19179@@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19180 /* It's safe to allow irq's after DR6 has been saved */
19181 preempt_conditional_sti(regs);
19182
19183- if (regs->flags & X86_VM_MASK) {
19184+ if (v8086_mode(regs)) {
19185 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19186 error_code, 1);
19187 preempt_conditional_cli(regs);
19188@@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19189 * We already checked v86 mode above, so we can check for kernel mode
19190 * by just checking the CPL of CS.
19191 */
19192- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19193+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19194 tsk->thread.debugreg6 &= ~DR_STEP;
19195 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19196 regs->flags &= ~X86_EFLAGS_TF;
19197@@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19198 return;
19199 conditional_sti(regs);
19200
19201- if (!user_mode_vm(regs))
19202+ if (!user_mode(regs))
19203 {
19204 if (!fixup_exception(regs)) {
19205 task->thread.error_code = error_code;
19206diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19207index b9242ba..50c5edd 100644
19208--- a/arch/x86/kernel/verify_cpu.S
19209+++ b/arch/x86/kernel/verify_cpu.S
19210@@ -20,6 +20,7 @@
19211 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19212 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19213 * arch/x86/kernel/head_32.S: processor startup
19214+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19215 *
19216 * verify_cpu, returns the status of longmode and SSE in register %eax.
19217 * 0: Success 1: Failure
19218diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19219index b466cab..a0df083 100644
19220--- a/arch/x86/kernel/vm86_32.c
19221+++ b/arch/x86/kernel/vm86_32.c
19222@@ -41,6 +41,7 @@
19223 #include <linux/ptrace.h>
19224 #include <linux/audit.h>
19225 #include <linux/stddef.h>
19226+#include <linux/grsecurity.h>
19227
19228 #include <asm/uaccess.h>
19229 #include <asm/io.h>
19230@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19231 do_exit(SIGSEGV);
19232 }
19233
19234- tss = &per_cpu(init_tss, get_cpu());
19235+ tss = init_tss + get_cpu();
19236 current->thread.sp0 = current->thread.saved_sp0;
19237 current->thread.sysenter_cs = __KERNEL_CS;
19238 load_sp0(tss, &current->thread);
19239@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19240 struct task_struct *tsk;
19241 int tmp, ret = -EPERM;
19242
19243+#ifdef CONFIG_GRKERNSEC_VM86
19244+ if (!capable(CAP_SYS_RAWIO)) {
19245+ gr_handle_vm86();
19246+ goto out;
19247+ }
19248+#endif
19249+
19250 tsk = current;
19251 if (tsk->thread.saved_sp0)
19252 goto out;
19253@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19254 int tmp, ret;
19255 struct vm86plus_struct __user *v86;
19256
19257+#ifdef CONFIG_GRKERNSEC_VM86
19258+ if (!capable(CAP_SYS_RAWIO)) {
19259+ gr_handle_vm86();
19260+ ret = -EPERM;
19261+ goto out;
19262+ }
19263+#endif
19264+
19265 tsk = current;
19266 switch (cmd) {
19267 case VM86_REQUEST_IRQ:
19268@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19269 tsk->thread.saved_fs = info->regs32->fs;
19270 tsk->thread.saved_gs = get_user_gs(info->regs32);
19271
19272- tss = &per_cpu(init_tss, get_cpu());
19273+ tss = init_tss + get_cpu();
19274 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19275 if (cpu_has_sep)
19276 tsk->thread.sysenter_cs = 0;
19277@@ -531,7 +547,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19278 goto cannot_handle;
19279 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19280 goto cannot_handle;
19281- intr_ptr = (unsigned long __user *) (i << 2);
19282+ intr_ptr = (__force unsigned long __user *) (i << 2);
19283 if (get_user(segoffs, intr_ptr))
19284 goto cannot_handle;
19285 if ((segoffs >> 16) == BIOSSEG)
19286diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19287index 0f703f1..9e15f64 100644
19288--- a/arch/x86/kernel/vmlinux.lds.S
19289+++ b/arch/x86/kernel/vmlinux.lds.S
19290@@ -26,6 +26,13 @@
19291 #include <asm/page_types.h>
19292 #include <asm/cache.h>
19293 #include <asm/boot.h>
19294+#include <asm/segment.h>
19295+
19296+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19297+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19298+#else
19299+#define __KERNEL_TEXT_OFFSET 0
19300+#endif
19301
19302 #undef i386 /* in case the preprocessor is a 32bit one */
19303
19304@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19305
19306 PHDRS {
19307 text PT_LOAD FLAGS(5); /* R_E */
19308+#ifdef CONFIG_X86_32
19309+ module PT_LOAD FLAGS(5); /* R_E */
19310+#endif
19311+#ifdef CONFIG_XEN
19312+ rodata PT_LOAD FLAGS(5); /* R_E */
19313+#else
19314+ rodata PT_LOAD FLAGS(4); /* R__ */
19315+#endif
19316 data PT_LOAD FLAGS(6); /* RW_ */
19317-#ifdef CONFIG_X86_64
19318+ init.begin PT_LOAD FLAGS(6); /* RW_ */
19319 #ifdef CONFIG_SMP
19320 percpu PT_LOAD FLAGS(6); /* RW_ */
19321 #endif
19322+ text.init PT_LOAD FLAGS(5); /* R_E */
19323+ text.exit PT_LOAD FLAGS(5); /* R_E */
19324 init PT_LOAD FLAGS(7); /* RWE */
19325-#endif
19326 note PT_NOTE FLAGS(0); /* ___ */
19327 }
19328
19329 SECTIONS
19330 {
19331 #ifdef CONFIG_X86_32
19332- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19333- phys_startup_32 = startup_32 - LOAD_OFFSET;
19334+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19335 #else
19336- . = __START_KERNEL;
19337- phys_startup_64 = startup_64 - LOAD_OFFSET;
19338+ . = __START_KERNEL;
19339 #endif
19340
19341 /* Text and read-only data */
19342- .text : AT(ADDR(.text) - LOAD_OFFSET) {
19343- _text = .;
19344+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19345 /* bootstrapping code */
19346+#ifdef CONFIG_X86_32
19347+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19348+#else
19349+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19350+#endif
19351+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19352+ _text = .;
19353 HEAD_TEXT
19354 #ifdef CONFIG_X86_32
19355 . = ALIGN(PAGE_SIZE);
19356@@ -108,13 +128,47 @@ SECTIONS
19357 IRQENTRY_TEXT
19358 *(.fixup)
19359 *(.gnu.warning)
19360- /* End of text section */
19361- _etext = .;
19362 } :text = 0x9090
19363
19364- NOTES :text :note
19365+ . += __KERNEL_TEXT_OFFSET;
19366
19367- EXCEPTION_TABLE(16) :text = 0x9090
19368+#ifdef CONFIG_X86_32
19369+ . = ALIGN(PAGE_SIZE);
19370+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19371+
19372+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19373+ MODULES_EXEC_VADDR = .;
19374+ BYTE(0)
19375+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19376+ . = ALIGN(HPAGE_SIZE);
19377+ MODULES_EXEC_END = . - 1;
19378+#endif
19379+
19380+ } :module
19381+#endif
19382+
19383+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19384+ /* End of text section */
19385+ _etext = . - __KERNEL_TEXT_OFFSET;
19386+ }
19387+
19388+#ifdef CONFIG_X86_32
19389+ . = ALIGN(PAGE_SIZE);
19390+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19391+ *(.idt)
19392+ . = ALIGN(PAGE_SIZE);
19393+ *(.empty_zero_page)
19394+ *(.initial_pg_fixmap)
19395+ *(.initial_pg_pmd)
19396+ *(.initial_page_table)
19397+ *(.swapper_pg_dir)
19398+ } :rodata
19399+#endif
19400+
19401+ . = ALIGN(PAGE_SIZE);
19402+ NOTES :rodata :note
19403+
19404+ EXCEPTION_TABLE(16) :rodata
19405
19406 #if defined(CONFIG_DEBUG_RODATA)
19407 /* .text should occupy whole number of pages */
19408@@ -126,16 +180,20 @@ SECTIONS
19409
19410 /* Data */
19411 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19412+
19413+#ifdef CONFIG_PAX_KERNEXEC
19414+ . = ALIGN(HPAGE_SIZE);
19415+#else
19416+ . = ALIGN(PAGE_SIZE);
19417+#endif
19418+
19419 /* Start of data section */
19420 _sdata = .;
19421
19422 /* init_task */
19423 INIT_TASK_DATA(THREAD_SIZE)
19424
19425-#ifdef CONFIG_X86_32
19426- /* 32 bit has nosave before _edata */
19427 NOSAVE_DATA
19428-#endif
19429
19430 PAGE_ALIGNED_DATA(PAGE_SIZE)
19431
19432@@ -176,12 +234,19 @@ SECTIONS
19433 #endif /* CONFIG_X86_64 */
19434
19435 /* Init code and data - will be freed after init */
19436- . = ALIGN(PAGE_SIZE);
19437 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19438+ BYTE(0)
19439+
19440+#ifdef CONFIG_PAX_KERNEXEC
19441+ . = ALIGN(HPAGE_SIZE);
19442+#else
19443+ . = ALIGN(PAGE_SIZE);
19444+#endif
19445+
19446 __init_begin = .; /* paired with __init_end */
19447- }
19448+ } :init.begin
19449
19450-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19451+#ifdef CONFIG_SMP
19452 /*
19453 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19454 * output PHDR, so the next output section - .init.text - should
19455@@ -190,12 +255,27 @@ SECTIONS
19456 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19457 #endif
19458
19459- INIT_TEXT_SECTION(PAGE_SIZE)
19460-#ifdef CONFIG_X86_64
19461- :init
19462-#endif
19463+ . = ALIGN(PAGE_SIZE);
19464+ init_begin = .;
19465+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19466+ VMLINUX_SYMBOL(_sinittext) = .;
19467+ INIT_TEXT
19468+ VMLINUX_SYMBOL(_einittext) = .;
19469+ . = ALIGN(PAGE_SIZE);
19470+ } :text.init
19471
19472- INIT_DATA_SECTION(16)
19473+ /*
19474+ * .exit.text is discard at runtime, not link time, to deal with
19475+ * references from .altinstructions and .eh_frame
19476+ */
19477+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19478+ EXIT_TEXT
19479+ . = ALIGN(16);
19480+ } :text.exit
19481+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19482+
19483+ . = ALIGN(PAGE_SIZE);
19484+ INIT_DATA_SECTION(16) :init
19485
19486 /*
19487 * Code and data for a variety of lowlevel trampolines, to be
19488@@ -269,19 +349,12 @@ SECTIONS
19489 }
19490
19491 . = ALIGN(8);
19492- /*
19493- * .exit.text is discard at runtime, not link time, to deal with
19494- * references from .altinstructions and .eh_frame
19495- */
19496- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19497- EXIT_TEXT
19498- }
19499
19500 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19501 EXIT_DATA
19502 }
19503
19504-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19505+#ifndef CONFIG_SMP
19506 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19507 #endif
19508
19509@@ -300,16 +373,10 @@ SECTIONS
19510 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19511 __smp_locks = .;
19512 *(.smp_locks)
19513- . = ALIGN(PAGE_SIZE);
19514 __smp_locks_end = .;
19515+ . = ALIGN(PAGE_SIZE);
19516 }
19517
19518-#ifdef CONFIG_X86_64
19519- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19520- NOSAVE_DATA
19521- }
19522-#endif
19523-
19524 /* BSS */
19525 . = ALIGN(PAGE_SIZE);
19526 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19527@@ -325,6 +392,7 @@ SECTIONS
19528 __brk_base = .;
19529 . += 64 * 1024; /* 64k alignment slop space */
19530 *(.brk_reservation) /* areas brk users have reserved */
19531+ . = ALIGN(HPAGE_SIZE);
19532 __brk_limit = .;
19533 }
19534
19535@@ -351,13 +419,12 @@ SECTIONS
19536 * for the boot processor.
19537 */
19538 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19539-INIT_PER_CPU(gdt_page);
19540 INIT_PER_CPU(irq_stack_union);
19541
19542 /*
19543 * Build-time check on the image size:
19544 */
19545-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19546+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19547 "kernel image bigger than KERNEL_IMAGE_SIZE");
19548
19549 #ifdef CONFIG_SMP
19550diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19551index b07ba93..a212969 100644
19552--- a/arch/x86/kernel/vsyscall_64.c
19553+++ b/arch/x86/kernel/vsyscall_64.c
19554@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19555 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19556 };
19557
19558-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
19559+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19560
19561 static int __init vsyscall_setup(char *str)
19562 {
19563 if (str) {
19564 if (!strcmp("emulate", str))
19565 vsyscall_mode = EMULATE;
19566- else if (!strcmp("native", str))
19567- vsyscall_mode = NATIVE;
19568 else if (!strcmp("none", str))
19569 vsyscall_mode = NONE;
19570 else
19571@@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19572
19573 tsk = current;
19574 if (seccomp_mode(&tsk->seccomp))
19575- do_exit(SIGKILL);
19576+ do_group_exit(SIGKILL);
19577
19578 /*
19579 * With a real vsyscall, page faults cause SIGSEGV. We want to
19580@@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19581 return true;
19582
19583 sigsegv:
19584- force_sig(SIGSEGV, current);
19585- return true;
19586+ do_group_exit(SIGKILL);
19587 }
19588
19589 /*
19590@@ -333,10 +330,7 @@ void __init map_vsyscall(void)
19591 extern char __vvar_page;
19592 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19593
19594- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19595- vsyscall_mode == NATIVE
19596- ? PAGE_KERNEL_VSYSCALL
19597- : PAGE_KERNEL_VVAR);
19598+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19599 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19600 (unsigned long)VSYSCALL_START);
19601
19602diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19603index 9796c2f..f686fbf 100644
19604--- a/arch/x86/kernel/x8664_ksyms_64.c
19605+++ b/arch/x86/kernel/x8664_ksyms_64.c
19606@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19607 EXPORT_SYMBOL(copy_user_generic_string);
19608 EXPORT_SYMBOL(copy_user_generic_unrolled);
19609 EXPORT_SYMBOL(__copy_user_nocache);
19610-EXPORT_SYMBOL(_copy_from_user);
19611-EXPORT_SYMBOL(_copy_to_user);
19612
19613 EXPORT_SYMBOL(copy_page);
19614 EXPORT_SYMBOL(clear_page);
19615diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19616index 7110911..e8cdee5 100644
19617--- a/arch/x86/kernel/xsave.c
19618+++ b/arch/x86/kernel/xsave.c
19619@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19620 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19621 return -EINVAL;
19622
19623- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19624+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19625 fx_sw_user->extended_size -
19626 FP_XSTATE_MAGIC2_SIZE));
19627 if (err)
19628@@ -266,7 +266,7 @@ fx_only:
19629 * the other extended state.
19630 */
19631 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19632- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19633+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19634 }
19635
19636 /*
19637@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19638 if (use_xsave())
19639 err = restore_user_xstate(buf);
19640 else
19641- err = fxrstor_checking((__force struct i387_fxsave_struct *)
19642+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19643 buf);
19644 if (unlikely(err)) {
19645 /*
19646diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19647index 89b02bf..0f6511d 100644
19648--- a/arch/x86/kvm/cpuid.c
19649+++ b/arch/x86/kvm/cpuid.c
19650@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19651 struct kvm_cpuid2 *cpuid,
19652 struct kvm_cpuid_entry2 __user *entries)
19653 {
19654- int r;
19655+ int r, i;
19656
19657 r = -E2BIG;
19658 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19659 goto out;
19660 r = -EFAULT;
19661- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19662- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19663+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19664 goto out;
19665+ for (i = 0; i < cpuid->nent; ++i) {
19666+ struct kvm_cpuid_entry2 cpuid_entry;
19667+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19668+ goto out;
19669+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
19670+ }
19671 vcpu->arch.cpuid_nent = cpuid->nent;
19672 kvm_apic_set_version(vcpu);
19673 kvm_x86_ops->cpuid_update(vcpu);
19674@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19675 struct kvm_cpuid2 *cpuid,
19676 struct kvm_cpuid_entry2 __user *entries)
19677 {
19678- int r;
19679+ int r, i;
19680
19681 r = -E2BIG;
19682 if (cpuid->nent < vcpu->arch.cpuid_nent)
19683 goto out;
19684 r = -EFAULT;
19685- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19686- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19687+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19688 goto out;
19689+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19690+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19691+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19692+ goto out;
19693+ }
19694 return 0;
19695
19696 out:
19697diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19698index 0982507..7f6d72f 100644
19699--- a/arch/x86/kvm/emulate.c
19700+++ b/arch/x86/kvm/emulate.c
19701@@ -250,6 +250,7 @@ struct gprefix {
19702
19703 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
19704 do { \
19705+ unsigned long _tmp; \
19706 __asm__ __volatile__ ( \
19707 _PRE_EFLAGS("0", "4", "2") \
19708 _op _suffix " %"_x"3,%1; " \
19709@@ -264,8 +265,6 @@ struct gprefix {
19710 /* Raw emulation: instruction has two explicit operands. */
19711 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
19712 do { \
19713- unsigned long _tmp; \
19714- \
19715 switch ((ctxt)->dst.bytes) { \
19716 case 2: \
19717 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
19718@@ -281,7 +280,6 @@ struct gprefix {
19719
19720 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19721 do { \
19722- unsigned long _tmp; \
19723 switch ((ctxt)->dst.bytes) { \
19724 case 1: \
19725 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
19726diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19727index cfdc6e0..ab92e84 100644
19728--- a/arch/x86/kvm/lapic.c
19729+++ b/arch/x86/kvm/lapic.c
19730@@ -54,7 +54,7 @@
19731 #define APIC_BUS_CYCLE_NS 1
19732
19733 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19734-#define apic_debug(fmt, arg...)
19735+#define apic_debug(fmt, arg...) do {} while (0)
19736
19737 #define APIC_LVT_NUM 6
19738 /* 14 is the version for Xeon and Pentium 8.4.8*/
19739diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
19740index 1561028..0ed7f14 100644
19741--- a/arch/x86/kvm/paging_tmpl.h
19742+++ b/arch/x86/kvm/paging_tmpl.h
19743@@ -197,7 +197,7 @@ retry_walk:
19744 if (unlikely(kvm_is_error_hva(host_addr)))
19745 goto error;
19746
19747- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
19748+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
19749 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
19750 goto error;
19751
19752diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
19753index e385214..f8df033 100644
19754--- a/arch/x86/kvm/svm.c
19755+++ b/arch/x86/kvm/svm.c
19756@@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
19757 int cpu = raw_smp_processor_id();
19758
19759 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19760+
19761+ pax_open_kernel();
19762 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
19763+ pax_close_kernel();
19764+
19765 load_TR_desc();
19766 }
19767
19768@@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
19769 #endif
19770 #endif
19771
19772+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19773+ __set_fs(current_thread_info()->addr_limit);
19774+#endif
19775+
19776 reload_tss(vcpu);
19777
19778 local_irq_disable();
19779diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
19780index 3b4c8d8..f457b63 100644
19781--- a/arch/x86/kvm/vmx.c
19782+++ b/arch/x86/kvm/vmx.c
19783@@ -1306,7 +1306,11 @@ static void reload_tss(void)
19784 struct desc_struct *descs;
19785
19786 descs = (void *)gdt->address;
19787+
19788+ pax_open_kernel();
19789 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19790+ pax_close_kernel();
19791+
19792 load_TR_desc();
19793 }
19794
19795@@ -2631,8 +2635,11 @@ static __init int hardware_setup(void)
19796 if (!cpu_has_vmx_flexpriority())
19797 flexpriority_enabled = 0;
19798
19799- if (!cpu_has_vmx_tpr_shadow())
19800- kvm_x86_ops->update_cr8_intercept = NULL;
19801+ if (!cpu_has_vmx_tpr_shadow()) {
19802+ pax_open_kernel();
19803+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19804+ pax_close_kernel();
19805+ }
19806
19807 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19808 kvm_disable_largepages();
19809@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
19810 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
19811
19812 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
19813- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
19814+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
19815
19816 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
19817 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
19818@@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19819 "jmp .Lkvm_vmx_return \n\t"
19820 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19821 ".Lkvm_vmx_return: "
19822+
19823+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19824+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19825+ ".Lkvm_vmx_return2: "
19826+#endif
19827+
19828 /* Save guest registers, load host registers, keep flags */
19829 "mov %0, %c[wordsize](%%"R"sp) \n\t"
19830 "pop %0 \n\t"
19831@@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19832 #endif
19833 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
19834 [wordsize]"i"(sizeof(ulong))
19835+
19836+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19837+ ,[cs]"i"(__KERNEL_CS)
19838+#endif
19839+
19840 : "cc", "memory"
19841 , R"ax", R"bx", R"di", R"si"
19842 #ifdef CONFIG_X86_64
19843@@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19844 }
19845 }
19846
19847- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19848+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19849+
19850+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19851+ loadsegment(fs, __KERNEL_PERCPU);
19852+#endif
19853+
19854+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19855+ __set_fs(current_thread_info()->addr_limit);
19856+#endif
19857+
19858 vmx->loaded_vmcs->launched = 1;
19859
19860 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
19861diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19862index 9cbfc06..7ddc9fa 100644
19863--- a/arch/x86/kvm/x86.c
19864+++ b/arch/x86/kvm/x86.c
19865@@ -1311,8 +1311,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
19866 {
19867 struct kvm *kvm = vcpu->kvm;
19868 int lm = is_long_mode(vcpu);
19869- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19870- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19871+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19872+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19873 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
19874 : kvm->arch.xen_hvm_config.blob_size_32;
19875 u32 page_num = data & ~PAGE_MASK;
19876@@ -2145,6 +2145,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
19877 if (n < msr_list.nmsrs)
19878 goto out;
19879 r = -EFAULT;
19880+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
19881+ goto out;
19882 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
19883 num_msrs_to_save * sizeof(u32)))
19884 goto out;
19885@@ -2266,7 +2268,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
19886 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19887 struct kvm_interrupt *irq)
19888 {
19889- if (irq->irq < 0 || irq->irq >= 256)
19890+ if (irq->irq >= 256)
19891 return -EINVAL;
19892 if (irqchip_in_kernel(vcpu->kvm))
19893 return -ENXIO;
19894@@ -4780,7 +4782,7 @@ static void kvm_set_mmio_spte_mask(void)
19895 kvm_mmu_set_mmio_spte_mask(mask);
19896 }
19897
19898-int kvm_arch_init(void *opaque)
19899+int kvm_arch_init(const void *opaque)
19900 {
19901 int r;
19902 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19903diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
19904index 642d880..44e0f3f 100644
19905--- a/arch/x86/lguest/boot.c
19906+++ b/arch/x86/lguest/boot.c
19907@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
19908 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19909 * Launcher to reboot us.
19910 */
19911-static void lguest_restart(char *reason)
19912+static __noreturn void lguest_restart(char *reason)
19913 {
19914 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
19915+ BUG();
19916 }
19917
19918 /*G:050
19919diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
19920index 042f682..c92afb6 100644
19921--- a/arch/x86/lib/atomic64_32.c
19922+++ b/arch/x86/lib/atomic64_32.c
19923@@ -8,18 +8,30 @@
19924
19925 long long atomic64_read_cx8(long long, const atomic64_t *v);
19926 EXPORT_SYMBOL(atomic64_read_cx8);
19927+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19928+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
19929 long long atomic64_set_cx8(long long, const atomic64_t *v);
19930 EXPORT_SYMBOL(atomic64_set_cx8);
19931+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19932+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
19933 long long atomic64_xchg_cx8(long long, unsigned high);
19934 EXPORT_SYMBOL(atomic64_xchg_cx8);
19935 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
19936 EXPORT_SYMBOL(atomic64_add_return_cx8);
19937+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19938+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
19939 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
19940 EXPORT_SYMBOL(atomic64_sub_return_cx8);
19941+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19942+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
19943 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
19944 EXPORT_SYMBOL(atomic64_inc_return_cx8);
19945+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19946+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
19947 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
19948 EXPORT_SYMBOL(atomic64_dec_return_cx8);
19949+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19950+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
19951 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
19952 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
19953 int atomic64_inc_not_zero_cx8(atomic64_t *v);
19954@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
19955 #ifndef CONFIG_X86_CMPXCHG64
19956 long long atomic64_read_386(long long, const atomic64_t *v);
19957 EXPORT_SYMBOL(atomic64_read_386);
19958+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
19959+EXPORT_SYMBOL(atomic64_read_unchecked_386);
19960 long long atomic64_set_386(long long, const atomic64_t *v);
19961 EXPORT_SYMBOL(atomic64_set_386);
19962+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
19963+EXPORT_SYMBOL(atomic64_set_unchecked_386);
19964 long long atomic64_xchg_386(long long, unsigned high);
19965 EXPORT_SYMBOL(atomic64_xchg_386);
19966 long long atomic64_add_return_386(long long a, atomic64_t *v);
19967 EXPORT_SYMBOL(atomic64_add_return_386);
19968+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19969+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
19970 long long atomic64_sub_return_386(long long a, atomic64_t *v);
19971 EXPORT_SYMBOL(atomic64_sub_return_386);
19972+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19973+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
19974 long long atomic64_inc_return_386(long long a, atomic64_t *v);
19975 EXPORT_SYMBOL(atomic64_inc_return_386);
19976+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19977+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
19978 long long atomic64_dec_return_386(long long a, atomic64_t *v);
19979 EXPORT_SYMBOL(atomic64_dec_return_386);
19980+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19981+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
19982 long long atomic64_add_386(long long a, atomic64_t *v);
19983 EXPORT_SYMBOL(atomic64_add_386);
19984+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
19985+EXPORT_SYMBOL(atomic64_add_unchecked_386);
19986 long long atomic64_sub_386(long long a, atomic64_t *v);
19987 EXPORT_SYMBOL(atomic64_sub_386);
19988+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
19989+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
19990 long long atomic64_inc_386(long long a, atomic64_t *v);
19991 EXPORT_SYMBOL(atomic64_inc_386);
19992+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
19993+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
19994 long long atomic64_dec_386(long long a, atomic64_t *v);
19995 EXPORT_SYMBOL(atomic64_dec_386);
19996+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
19997+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
19998 long long atomic64_dec_if_positive_386(atomic64_t *v);
19999 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20000 int atomic64_inc_not_zero_386(atomic64_t *v);
20001diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20002index e8e7e0d..56fd1b0 100644
20003--- a/arch/x86/lib/atomic64_386_32.S
20004+++ b/arch/x86/lib/atomic64_386_32.S
20005@@ -48,6 +48,10 @@ BEGIN(read)
20006 movl (v), %eax
20007 movl 4(v), %edx
20008 RET_ENDP
20009+BEGIN(read_unchecked)
20010+ movl (v), %eax
20011+ movl 4(v), %edx
20012+RET_ENDP
20013 #undef v
20014
20015 #define v %esi
20016@@ -55,6 +59,10 @@ BEGIN(set)
20017 movl %ebx, (v)
20018 movl %ecx, 4(v)
20019 RET_ENDP
20020+BEGIN(set_unchecked)
20021+ movl %ebx, (v)
20022+ movl %ecx, 4(v)
20023+RET_ENDP
20024 #undef v
20025
20026 #define v %esi
20027@@ -70,6 +78,20 @@ RET_ENDP
20028 BEGIN(add)
20029 addl %eax, (v)
20030 adcl %edx, 4(v)
20031+
20032+#ifdef CONFIG_PAX_REFCOUNT
20033+ jno 0f
20034+ subl %eax, (v)
20035+ sbbl %edx, 4(v)
20036+ int $4
20037+0:
20038+ _ASM_EXTABLE(0b, 0b)
20039+#endif
20040+
20041+RET_ENDP
20042+BEGIN(add_unchecked)
20043+ addl %eax, (v)
20044+ adcl %edx, 4(v)
20045 RET_ENDP
20046 #undef v
20047
20048@@ -77,6 +99,24 @@ RET_ENDP
20049 BEGIN(add_return)
20050 addl (v), %eax
20051 adcl 4(v), %edx
20052+
20053+#ifdef CONFIG_PAX_REFCOUNT
20054+ into
20055+1234:
20056+ _ASM_EXTABLE(1234b, 2f)
20057+#endif
20058+
20059+ movl %eax, (v)
20060+ movl %edx, 4(v)
20061+
20062+#ifdef CONFIG_PAX_REFCOUNT
20063+2:
20064+#endif
20065+
20066+RET_ENDP
20067+BEGIN(add_return_unchecked)
20068+ addl (v), %eax
20069+ adcl 4(v), %edx
20070 movl %eax, (v)
20071 movl %edx, 4(v)
20072 RET_ENDP
20073@@ -86,6 +126,20 @@ RET_ENDP
20074 BEGIN(sub)
20075 subl %eax, (v)
20076 sbbl %edx, 4(v)
20077+
20078+#ifdef CONFIG_PAX_REFCOUNT
20079+ jno 0f
20080+ addl %eax, (v)
20081+ adcl %edx, 4(v)
20082+ int $4
20083+0:
20084+ _ASM_EXTABLE(0b, 0b)
20085+#endif
20086+
20087+RET_ENDP
20088+BEGIN(sub_unchecked)
20089+ subl %eax, (v)
20090+ sbbl %edx, 4(v)
20091 RET_ENDP
20092 #undef v
20093
20094@@ -96,6 +150,27 @@ BEGIN(sub_return)
20095 sbbl $0, %edx
20096 addl (v), %eax
20097 adcl 4(v), %edx
20098+
20099+#ifdef CONFIG_PAX_REFCOUNT
20100+ into
20101+1234:
20102+ _ASM_EXTABLE(1234b, 2f)
20103+#endif
20104+
20105+ movl %eax, (v)
20106+ movl %edx, 4(v)
20107+
20108+#ifdef CONFIG_PAX_REFCOUNT
20109+2:
20110+#endif
20111+
20112+RET_ENDP
20113+BEGIN(sub_return_unchecked)
20114+ negl %edx
20115+ negl %eax
20116+ sbbl $0, %edx
20117+ addl (v), %eax
20118+ adcl 4(v), %edx
20119 movl %eax, (v)
20120 movl %edx, 4(v)
20121 RET_ENDP
20122@@ -105,6 +180,20 @@ RET_ENDP
20123 BEGIN(inc)
20124 addl $1, (v)
20125 adcl $0, 4(v)
20126+
20127+#ifdef CONFIG_PAX_REFCOUNT
20128+ jno 0f
20129+ subl $1, (v)
20130+ sbbl $0, 4(v)
20131+ int $4
20132+0:
20133+ _ASM_EXTABLE(0b, 0b)
20134+#endif
20135+
20136+RET_ENDP
20137+BEGIN(inc_unchecked)
20138+ addl $1, (v)
20139+ adcl $0, 4(v)
20140 RET_ENDP
20141 #undef v
20142
20143@@ -114,6 +203,26 @@ BEGIN(inc_return)
20144 movl 4(v), %edx
20145 addl $1, %eax
20146 adcl $0, %edx
20147+
20148+#ifdef CONFIG_PAX_REFCOUNT
20149+ into
20150+1234:
20151+ _ASM_EXTABLE(1234b, 2f)
20152+#endif
20153+
20154+ movl %eax, (v)
20155+ movl %edx, 4(v)
20156+
20157+#ifdef CONFIG_PAX_REFCOUNT
20158+2:
20159+#endif
20160+
20161+RET_ENDP
20162+BEGIN(inc_return_unchecked)
20163+ movl (v), %eax
20164+ movl 4(v), %edx
20165+ addl $1, %eax
20166+ adcl $0, %edx
20167 movl %eax, (v)
20168 movl %edx, 4(v)
20169 RET_ENDP
20170@@ -123,6 +232,20 @@ RET_ENDP
20171 BEGIN(dec)
20172 subl $1, (v)
20173 sbbl $0, 4(v)
20174+
20175+#ifdef CONFIG_PAX_REFCOUNT
20176+ jno 0f
20177+ addl $1, (v)
20178+ adcl $0, 4(v)
20179+ int $4
20180+0:
20181+ _ASM_EXTABLE(0b, 0b)
20182+#endif
20183+
20184+RET_ENDP
20185+BEGIN(dec_unchecked)
20186+ subl $1, (v)
20187+ sbbl $0, 4(v)
20188 RET_ENDP
20189 #undef v
20190
20191@@ -132,6 +255,26 @@ BEGIN(dec_return)
20192 movl 4(v), %edx
20193 subl $1, %eax
20194 sbbl $0, %edx
20195+
20196+#ifdef CONFIG_PAX_REFCOUNT
20197+ into
20198+1234:
20199+ _ASM_EXTABLE(1234b, 2f)
20200+#endif
20201+
20202+ movl %eax, (v)
20203+ movl %edx, 4(v)
20204+
20205+#ifdef CONFIG_PAX_REFCOUNT
20206+2:
20207+#endif
20208+
20209+RET_ENDP
20210+BEGIN(dec_return_unchecked)
20211+ movl (v), %eax
20212+ movl 4(v), %edx
20213+ subl $1, %eax
20214+ sbbl $0, %edx
20215 movl %eax, (v)
20216 movl %edx, 4(v)
20217 RET_ENDP
20218@@ -143,6 +286,13 @@ BEGIN(add_unless)
20219 adcl %edx, %edi
20220 addl (v), %eax
20221 adcl 4(v), %edx
20222+
20223+#ifdef CONFIG_PAX_REFCOUNT
20224+ into
20225+1234:
20226+ _ASM_EXTABLE(1234b, 2f)
20227+#endif
20228+
20229 cmpl %eax, %esi
20230 je 3f
20231 1:
20232@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20233 1:
20234 addl $1, %eax
20235 adcl $0, %edx
20236+
20237+#ifdef CONFIG_PAX_REFCOUNT
20238+ into
20239+1234:
20240+ _ASM_EXTABLE(1234b, 2f)
20241+#endif
20242+
20243 movl %eax, (v)
20244 movl %edx, 4(v)
20245 movl $1, %eax
20246@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20247 movl 4(v), %edx
20248 subl $1, %eax
20249 sbbl $0, %edx
20250+
20251+#ifdef CONFIG_PAX_REFCOUNT
20252+ into
20253+1234:
20254+ _ASM_EXTABLE(1234b, 1f)
20255+#endif
20256+
20257 js 1f
20258 movl %eax, (v)
20259 movl %edx, 4(v)
20260diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20261index 391a083..d658e9f 100644
20262--- a/arch/x86/lib/atomic64_cx8_32.S
20263+++ b/arch/x86/lib/atomic64_cx8_32.S
20264@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20265 CFI_STARTPROC
20266
20267 read64 %ecx
20268+ pax_force_retaddr
20269 ret
20270 CFI_ENDPROC
20271 ENDPROC(atomic64_read_cx8)
20272
20273+ENTRY(atomic64_read_unchecked_cx8)
20274+ CFI_STARTPROC
20275+
20276+ read64 %ecx
20277+ pax_force_retaddr
20278+ ret
20279+ CFI_ENDPROC
20280+ENDPROC(atomic64_read_unchecked_cx8)
20281+
20282 ENTRY(atomic64_set_cx8)
20283 CFI_STARTPROC
20284
20285@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20286 cmpxchg8b (%esi)
20287 jne 1b
20288
20289+ pax_force_retaddr
20290 ret
20291 CFI_ENDPROC
20292 ENDPROC(atomic64_set_cx8)
20293
20294+ENTRY(atomic64_set_unchecked_cx8)
20295+ CFI_STARTPROC
20296+
20297+1:
20298+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20299+ * are atomic on 586 and newer */
20300+ cmpxchg8b (%esi)
20301+ jne 1b
20302+
20303+ pax_force_retaddr
20304+ ret
20305+ CFI_ENDPROC
20306+ENDPROC(atomic64_set_unchecked_cx8)
20307+
20308 ENTRY(atomic64_xchg_cx8)
20309 CFI_STARTPROC
20310
20311@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20312 cmpxchg8b (%esi)
20313 jne 1b
20314
20315+ pax_force_retaddr
20316 ret
20317 CFI_ENDPROC
20318 ENDPROC(atomic64_xchg_cx8)
20319
20320-.macro addsub_return func ins insc
20321-ENTRY(atomic64_\func\()_return_cx8)
20322+.macro addsub_return func ins insc unchecked=""
20323+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20324 CFI_STARTPROC
20325 SAVE ebp
20326 SAVE ebx
20327@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20328 movl %edx, %ecx
20329 \ins\()l %esi, %ebx
20330 \insc\()l %edi, %ecx
20331+
20332+.ifb \unchecked
20333+#ifdef CONFIG_PAX_REFCOUNT
20334+ into
20335+2:
20336+ _ASM_EXTABLE(2b, 3f)
20337+#endif
20338+.endif
20339+
20340 LOCK_PREFIX
20341 cmpxchg8b (%ebp)
20342 jne 1b
20343-
20344-10:
20345 movl %ebx, %eax
20346 movl %ecx, %edx
20347+
20348+.ifb \unchecked
20349+#ifdef CONFIG_PAX_REFCOUNT
20350+3:
20351+#endif
20352+.endif
20353+
20354 RESTORE edi
20355 RESTORE esi
20356 RESTORE ebx
20357 RESTORE ebp
20358+ pax_force_retaddr
20359 ret
20360 CFI_ENDPROC
20361-ENDPROC(atomic64_\func\()_return_cx8)
20362+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20363 .endm
20364
20365 addsub_return add add adc
20366 addsub_return sub sub sbb
20367+addsub_return add add adc _unchecked
20368+addsub_return sub sub sbb _unchecked
20369
20370-.macro incdec_return func ins insc
20371-ENTRY(atomic64_\func\()_return_cx8)
20372+.macro incdec_return func ins insc unchecked
20373+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20374 CFI_STARTPROC
20375 SAVE ebx
20376
20377@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20378 movl %edx, %ecx
20379 \ins\()l $1, %ebx
20380 \insc\()l $0, %ecx
20381+
20382+.ifb \unchecked
20383+#ifdef CONFIG_PAX_REFCOUNT
20384+ into
20385+2:
20386+ _ASM_EXTABLE(2b, 3f)
20387+#endif
20388+.endif
20389+
20390 LOCK_PREFIX
20391 cmpxchg8b (%esi)
20392 jne 1b
20393
20394-10:
20395 movl %ebx, %eax
20396 movl %ecx, %edx
20397+
20398+.ifb \unchecked
20399+#ifdef CONFIG_PAX_REFCOUNT
20400+3:
20401+#endif
20402+.endif
20403+
20404 RESTORE ebx
20405+ pax_force_retaddr
20406 ret
20407 CFI_ENDPROC
20408-ENDPROC(atomic64_\func\()_return_cx8)
20409+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20410 .endm
20411
20412 incdec_return inc add adc
20413 incdec_return dec sub sbb
20414+incdec_return inc add adc _unchecked
20415+incdec_return dec sub sbb _unchecked
20416
20417 ENTRY(atomic64_dec_if_positive_cx8)
20418 CFI_STARTPROC
20419@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20420 movl %edx, %ecx
20421 subl $1, %ebx
20422 sbb $0, %ecx
20423+
20424+#ifdef CONFIG_PAX_REFCOUNT
20425+ into
20426+1234:
20427+ _ASM_EXTABLE(1234b, 2f)
20428+#endif
20429+
20430 js 2f
20431 LOCK_PREFIX
20432 cmpxchg8b (%esi)
20433@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20434 movl %ebx, %eax
20435 movl %ecx, %edx
20436 RESTORE ebx
20437+ pax_force_retaddr
20438 ret
20439 CFI_ENDPROC
20440 ENDPROC(atomic64_dec_if_positive_cx8)
20441@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20442 movl %edx, %ecx
20443 addl %esi, %ebx
20444 adcl %edi, %ecx
20445+
20446+#ifdef CONFIG_PAX_REFCOUNT
20447+ into
20448+1234:
20449+ _ASM_EXTABLE(1234b, 3f)
20450+#endif
20451+
20452 LOCK_PREFIX
20453 cmpxchg8b (%ebp)
20454 jne 1b
20455@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20456 CFI_ADJUST_CFA_OFFSET -8
20457 RESTORE ebx
20458 RESTORE ebp
20459+ pax_force_retaddr
20460 ret
20461 4:
20462 cmpl %edx, 4(%esp)
20463@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20464 movl %edx, %ecx
20465 addl $1, %ebx
20466 adcl $0, %ecx
20467+
20468+#ifdef CONFIG_PAX_REFCOUNT
20469+ into
20470+1234:
20471+ _ASM_EXTABLE(1234b, 3f)
20472+#endif
20473+
20474 LOCK_PREFIX
20475 cmpxchg8b (%esi)
20476 jne 1b
20477@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20478 movl $1, %eax
20479 3:
20480 RESTORE ebx
20481+ pax_force_retaddr
20482 ret
20483 4:
20484 testl %edx, %edx
20485diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20486index 78d16a5..fbcf666 100644
20487--- a/arch/x86/lib/checksum_32.S
20488+++ b/arch/x86/lib/checksum_32.S
20489@@ -28,7 +28,8 @@
20490 #include <linux/linkage.h>
20491 #include <asm/dwarf2.h>
20492 #include <asm/errno.h>
20493-
20494+#include <asm/segment.h>
20495+
20496 /*
20497 * computes a partial checksum, e.g. for TCP/UDP fragments
20498 */
20499@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20500
20501 #define ARGBASE 16
20502 #define FP 12
20503-
20504-ENTRY(csum_partial_copy_generic)
20505+
20506+ENTRY(csum_partial_copy_generic_to_user)
20507 CFI_STARTPROC
20508+
20509+#ifdef CONFIG_PAX_MEMORY_UDEREF
20510+ pushl_cfi %gs
20511+ popl_cfi %es
20512+ jmp csum_partial_copy_generic
20513+#endif
20514+
20515+ENTRY(csum_partial_copy_generic_from_user)
20516+
20517+#ifdef CONFIG_PAX_MEMORY_UDEREF
20518+ pushl_cfi %gs
20519+ popl_cfi %ds
20520+#endif
20521+
20522+ENTRY(csum_partial_copy_generic)
20523 subl $4,%esp
20524 CFI_ADJUST_CFA_OFFSET 4
20525 pushl_cfi %edi
20526@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20527 jmp 4f
20528 SRC(1: movw (%esi), %bx )
20529 addl $2, %esi
20530-DST( movw %bx, (%edi) )
20531+DST( movw %bx, %es:(%edi) )
20532 addl $2, %edi
20533 addw %bx, %ax
20534 adcl $0, %eax
20535@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20536 SRC(1: movl (%esi), %ebx )
20537 SRC( movl 4(%esi), %edx )
20538 adcl %ebx, %eax
20539-DST( movl %ebx, (%edi) )
20540+DST( movl %ebx, %es:(%edi) )
20541 adcl %edx, %eax
20542-DST( movl %edx, 4(%edi) )
20543+DST( movl %edx, %es:4(%edi) )
20544
20545 SRC( movl 8(%esi), %ebx )
20546 SRC( movl 12(%esi), %edx )
20547 adcl %ebx, %eax
20548-DST( movl %ebx, 8(%edi) )
20549+DST( movl %ebx, %es:8(%edi) )
20550 adcl %edx, %eax
20551-DST( movl %edx, 12(%edi) )
20552+DST( movl %edx, %es:12(%edi) )
20553
20554 SRC( movl 16(%esi), %ebx )
20555 SRC( movl 20(%esi), %edx )
20556 adcl %ebx, %eax
20557-DST( movl %ebx, 16(%edi) )
20558+DST( movl %ebx, %es:16(%edi) )
20559 adcl %edx, %eax
20560-DST( movl %edx, 20(%edi) )
20561+DST( movl %edx, %es:20(%edi) )
20562
20563 SRC( movl 24(%esi), %ebx )
20564 SRC( movl 28(%esi), %edx )
20565 adcl %ebx, %eax
20566-DST( movl %ebx, 24(%edi) )
20567+DST( movl %ebx, %es:24(%edi) )
20568 adcl %edx, %eax
20569-DST( movl %edx, 28(%edi) )
20570+DST( movl %edx, %es:28(%edi) )
20571
20572 lea 32(%esi), %esi
20573 lea 32(%edi), %edi
20574@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20575 shrl $2, %edx # This clears CF
20576 SRC(3: movl (%esi), %ebx )
20577 adcl %ebx, %eax
20578-DST( movl %ebx, (%edi) )
20579+DST( movl %ebx, %es:(%edi) )
20580 lea 4(%esi), %esi
20581 lea 4(%edi), %edi
20582 dec %edx
20583@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
20584 jb 5f
20585 SRC( movw (%esi), %cx )
20586 leal 2(%esi), %esi
20587-DST( movw %cx, (%edi) )
20588+DST( movw %cx, %es:(%edi) )
20589 leal 2(%edi), %edi
20590 je 6f
20591 shll $16,%ecx
20592 SRC(5: movb (%esi), %cl )
20593-DST( movb %cl, (%edi) )
20594+DST( movb %cl, %es:(%edi) )
20595 6: addl %ecx, %eax
20596 adcl $0, %eax
20597 7:
20598@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
20599
20600 6001:
20601 movl ARGBASE+20(%esp), %ebx # src_err_ptr
20602- movl $-EFAULT, (%ebx)
20603+ movl $-EFAULT, %ss:(%ebx)
20604
20605 # zero the complete destination - computing the rest
20606 # is too much work
20607@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
20608
20609 6002:
20610 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20611- movl $-EFAULT,(%ebx)
20612+ movl $-EFAULT,%ss:(%ebx)
20613 jmp 5000b
20614
20615 .previous
20616
20617+ pushl_cfi %ss
20618+ popl_cfi %ds
20619+ pushl_cfi %ss
20620+ popl_cfi %es
20621 popl_cfi %ebx
20622 CFI_RESTORE ebx
20623 popl_cfi %esi
20624@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
20625 popl_cfi %ecx # equivalent to addl $4,%esp
20626 ret
20627 CFI_ENDPROC
20628-ENDPROC(csum_partial_copy_generic)
20629+ENDPROC(csum_partial_copy_generic_to_user)
20630
20631 #else
20632
20633 /* Version for PentiumII/PPro */
20634
20635 #define ROUND1(x) \
20636+ nop; nop; nop; \
20637 SRC(movl x(%esi), %ebx ) ; \
20638 addl %ebx, %eax ; \
20639- DST(movl %ebx, x(%edi) ) ;
20640+ DST(movl %ebx, %es:x(%edi)) ;
20641
20642 #define ROUND(x) \
20643+ nop; nop; nop; \
20644 SRC(movl x(%esi), %ebx ) ; \
20645 adcl %ebx, %eax ; \
20646- DST(movl %ebx, x(%edi) ) ;
20647+ DST(movl %ebx, %es:x(%edi)) ;
20648
20649 #define ARGBASE 12
20650-
20651-ENTRY(csum_partial_copy_generic)
20652+
20653+ENTRY(csum_partial_copy_generic_to_user)
20654 CFI_STARTPROC
20655+
20656+#ifdef CONFIG_PAX_MEMORY_UDEREF
20657+ pushl_cfi %gs
20658+ popl_cfi %es
20659+ jmp csum_partial_copy_generic
20660+#endif
20661+
20662+ENTRY(csum_partial_copy_generic_from_user)
20663+
20664+#ifdef CONFIG_PAX_MEMORY_UDEREF
20665+ pushl_cfi %gs
20666+ popl_cfi %ds
20667+#endif
20668+
20669+ENTRY(csum_partial_copy_generic)
20670 pushl_cfi %ebx
20671 CFI_REL_OFFSET ebx, 0
20672 pushl_cfi %edi
20673@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
20674 subl %ebx, %edi
20675 lea -1(%esi),%edx
20676 andl $-32,%edx
20677- lea 3f(%ebx,%ebx), %ebx
20678+ lea 3f(%ebx,%ebx,2), %ebx
20679 testl %esi, %esi
20680 jmp *%ebx
20681 1: addl $64,%esi
20682@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
20683 jb 5f
20684 SRC( movw (%esi), %dx )
20685 leal 2(%esi), %esi
20686-DST( movw %dx, (%edi) )
20687+DST( movw %dx, %es:(%edi) )
20688 leal 2(%edi), %edi
20689 je 6f
20690 shll $16,%edx
20691 5:
20692 SRC( movb (%esi), %dl )
20693-DST( movb %dl, (%edi) )
20694+DST( movb %dl, %es:(%edi) )
20695 6: addl %edx, %eax
20696 adcl $0, %eax
20697 7:
20698 .section .fixup, "ax"
20699 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
20700- movl $-EFAULT, (%ebx)
20701+ movl $-EFAULT, %ss:(%ebx)
20702 # zero the complete destination (computing the rest is too much work)
20703 movl ARGBASE+8(%esp),%edi # dst
20704 movl ARGBASE+12(%esp),%ecx # len
20705@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
20706 rep; stosb
20707 jmp 7b
20708 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20709- movl $-EFAULT, (%ebx)
20710+ movl $-EFAULT, %ss:(%ebx)
20711 jmp 7b
20712 .previous
20713
20714+#ifdef CONFIG_PAX_MEMORY_UDEREF
20715+ pushl_cfi %ss
20716+ popl_cfi %ds
20717+ pushl_cfi %ss
20718+ popl_cfi %es
20719+#endif
20720+
20721 popl_cfi %esi
20722 CFI_RESTORE esi
20723 popl_cfi %edi
20724@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
20725 CFI_RESTORE ebx
20726 ret
20727 CFI_ENDPROC
20728-ENDPROC(csum_partial_copy_generic)
20729+ENDPROC(csum_partial_copy_generic_to_user)
20730
20731 #undef ROUND
20732 #undef ROUND1
20733diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
20734index f2145cf..cea889d 100644
20735--- a/arch/x86/lib/clear_page_64.S
20736+++ b/arch/x86/lib/clear_page_64.S
20737@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
20738 movl $4096/8,%ecx
20739 xorl %eax,%eax
20740 rep stosq
20741+ pax_force_retaddr
20742 ret
20743 CFI_ENDPROC
20744 ENDPROC(clear_page_c)
20745@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
20746 movl $4096,%ecx
20747 xorl %eax,%eax
20748 rep stosb
20749+ pax_force_retaddr
20750 ret
20751 CFI_ENDPROC
20752 ENDPROC(clear_page_c_e)
20753@@ -43,6 +45,7 @@ ENTRY(clear_page)
20754 leaq 64(%rdi),%rdi
20755 jnz .Lloop
20756 nop
20757+ pax_force_retaddr
20758 ret
20759 CFI_ENDPROC
20760 .Lclear_page_end:
20761@@ -58,7 +61,7 @@ ENDPROC(clear_page)
20762
20763 #include <asm/cpufeature.h>
20764
20765- .section .altinstr_replacement,"ax"
20766+ .section .altinstr_replacement,"a"
20767 1: .byte 0xeb /* jmp <disp8> */
20768 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
20769 2: .byte 0xeb /* jmp <disp8> */
20770diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
20771index 1e572c5..2a162cd 100644
20772--- a/arch/x86/lib/cmpxchg16b_emu.S
20773+++ b/arch/x86/lib/cmpxchg16b_emu.S
20774@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
20775
20776 popf
20777 mov $1, %al
20778+ pax_force_retaddr
20779 ret
20780
20781 not_same:
20782 popf
20783 xor %al,%al
20784+ pax_force_retaddr
20785 ret
20786
20787 CFI_ENDPROC
20788diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
20789index 01c805b..dccb07f 100644
20790--- a/arch/x86/lib/copy_page_64.S
20791+++ b/arch/x86/lib/copy_page_64.S
20792@@ -9,6 +9,7 @@ copy_page_c:
20793 CFI_STARTPROC
20794 movl $4096/8,%ecx
20795 rep movsq
20796+ pax_force_retaddr
20797 ret
20798 CFI_ENDPROC
20799 ENDPROC(copy_page_c)
20800@@ -39,7 +40,7 @@ ENTRY(copy_page)
20801 movq 16 (%rsi), %rdx
20802 movq 24 (%rsi), %r8
20803 movq 32 (%rsi), %r9
20804- movq 40 (%rsi), %r10
20805+ movq 40 (%rsi), %r13
20806 movq 48 (%rsi), %r11
20807 movq 56 (%rsi), %r12
20808
20809@@ -50,7 +51,7 @@ ENTRY(copy_page)
20810 movq %rdx, 16 (%rdi)
20811 movq %r8, 24 (%rdi)
20812 movq %r9, 32 (%rdi)
20813- movq %r10, 40 (%rdi)
20814+ movq %r13, 40 (%rdi)
20815 movq %r11, 48 (%rdi)
20816 movq %r12, 56 (%rdi)
20817
20818@@ -69,7 +70,7 @@ ENTRY(copy_page)
20819 movq 16 (%rsi), %rdx
20820 movq 24 (%rsi), %r8
20821 movq 32 (%rsi), %r9
20822- movq 40 (%rsi), %r10
20823+ movq 40 (%rsi), %r13
20824 movq 48 (%rsi), %r11
20825 movq 56 (%rsi), %r12
20826
20827@@ -78,7 +79,7 @@ ENTRY(copy_page)
20828 movq %rdx, 16 (%rdi)
20829 movq %r8, 24 (%rdi)
20830 movq %r9, 32 (%rdi)
20831- movq %r10, 40 (%rdi)
20832+ movq %r13, 40 (%rdi)
20833 movq %r11, 48 (%rdi)
20834 movq %r12, 56 (%rdi)
20835
20836@@ -95,6 +96,7 @@ ENTRY(copy_page)
20837 CFI_RESTORE r13
20838 addq $3*8,%rsp
20839 CFI_ADJUST_CFA_OFFSET -3*8
20840+ pax_force_retaddr
20841 ret
20842 .Lcopy_page_end:
20843 CFI_ENDPROC
20844@@ -105,7 +107,7 @@ ENDPROC(copy_page)
20845
20846 #include <asm/cpufeature.h>
20847
20848- .section .altinstr_replacement,"ax"
20849+ .section .altinstr_replacement,"a"
20850 1: .byte 0xeb /* jmp <disp8> */
20851 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
20852 2:
20853diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
20854index 0248402..821c786 100644
20855--- a/arch/x86/lib/copy_user_64.S
20856+++ b/arch/x86/lib/copy_user_64.S
20857@@ -16,6 +16,7 @@
20858 #include <asm/thread_info.h>
20859 #include <asm/cpufeature.h>
20860 #include <asm/alternative-asm.h>
20861+#include <asm/pgtable.h>
20862
20863 /*
20864 * By placing feature2 after feature1 in altinstructions section, we logically
20865@@ -29,7 +30,7 @@
20866 .byte 0xe9 /* 32bit jump */
20867 .long \orig-1f /* by default jump to orig */
20868 1:
20869- .section .altinstr_replacement,"ax"
20870+ .section .altinstr_replacement,"a"
20871 2: .byte 0xe9 /* near jump with 32bit immediate */
20872 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
20873 3: .byte 0xe9 /* near jump with 32bit immediate */
20874@@ -71,47 +72,20 @@
20875 #endif
20876 .endm
20877
20878-/* Standard copy_to_user with segment limit checking */
20879-ENTRY(_copy_to_user)
20880- CFI_STARTPROC
20881- GET_THREAD_INFO(%rax)
20882- movq %rdi,%rcx
20883- addq %rdx,%rcx
20884- jc bad_to_user
20885- cmpq TI_addr_limit(%rax),%rcx
20886- ja bad_to_user
20887- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20888- copy_user_generic_unrolled,copy_user_generic_string, \
20889- copy_user_enhanced_fast_string
20890- CFI_ENDPROC
20891-ENDPROC(_copy_to_user)
20892-
20893-/* Standard copy_from_user with segment limit checking */
20894-ENTRY(_copy_from_user)
20895- CFI_STARTPROC
20896- GET_THREAD_INFO(%rax)
20897- movq %rsi,%rcx
20898- addq %rdx,%rcx
20899- jc bad_from_user
20900- cmpq TI_addr_limit(%rax),%rcx
20901- ja bad_from_user
20902- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20903- copy_user_generic_unrolled,copy_user_generic_string, \
20904- copy_user_enhanced_fast_string
20905- CFI_ENDPROC
20906-ENDPROC(_copy_from_user)
20907-
20908 .section .fixup,"ax"
20909 /* must zero dest */
20910 ENTRY(bad_from_user)
20911 bad_from_user:
20912 CFI_STARTPROC
20913+ testl %edx,%edx
20914+ js bad_to_user
20915 movl %edx,%ecx
20916 xorl %eax,%eax
20917 rep
20918 stosb
20919 bad_to_user:
20920 movl %edx,%eax
20921+ pax_force_retaddr
20922 ret
20923 CFI_ENDPROC
20924 ENDPROC(bad_from_user)
20925@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
20926 jz 17f
20927 1: movq (%rsi),%r8
20928 2: movq 1*8(%rsi),%r9
20929-3: movq 2*8(%rsi),%r10
20930+3: movq 2*8(%rsi),%rax
20931 4: movq 3*8(%rsi),%r11
20932 5: movq %r8,(%rdi)
20933 6: movq %r9,1*8(%rdi)
20934-7: movq %r10,2*8(%rdi)
20935+7: movq %rax,2*8(%rdi)
20936 8: movq %r11,3*8(%rdi)
20937 9: movq 4*8(%rsi),%r8
20938 10: movq 5*8(%rsi),%r9
20939-11: movq 6*8(%rsi),%r10
20940+11: movq 6*8(%rsi),%rax
20941 12: movq 7*8(%rsi),%r11
20942 13: movq %r8,4*8(%rdi)
20943 14: movq %r9,5*8(%rdi)
20944-15: movq %r10,6*8(%rdi)
20945+15: movq %rax,6*8(%rdi)
20946 16: movq %r11,7*8(%rdi)
20947 leaq 64(%rsi),%rsi
20948 leaq 64(%rdi),%rdi
20949@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20950 decl %ecx
20951 jnz 21b
20952 23: xor %eax,%eax
20953+ pax_force_retaddr
20954 ret
20955
20956 .section .fixup,"ax"
20957@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
20958 3: rep
20959 movsb
20960 4: xorl %eax,%eax
20961+ pax_force_retaddr
20962 ret
20963
20964 .section .fixup,"ax"
20965@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
20966 1: rep
20967 movsb
20968 2: xorl %eax,%eax
20969+ pax_force_retaddr
20970 ret
20971
20972 .section .fixup,"ax"
20973diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
20974index cb0c112..e3a6895 100644
20975--- a/arch/x86/lib/copy_user_nocache_64.S
20976+++ b/arch/x86/lib/copy_user_nocache_64.S
20977@@ -8,12 +8,14 @@
20978
20979 #include <linux/linkage.h>
20980 #include <asm/dwarf2.h>
20981+#include <asm/alternative-asm.h>
20982
20983 #define FIX_ALIGNMENT 1
20984
20985 #include <asm/current.h>
20986 #include <asm/asm-offsets.h>
20987 #include <asm/thread_info.h>
20988+#include <asm/pgtable.h>
20989
20990 .macro ALIGN_DESTINATION
20991 #ifdef FIX_ALIGNMENT
20992@@ -50,6 +52,15 @@
20993 */
20994 ENTRY(__copy_user_nocache)
20995 CFI_STARTPROC
20996+
20997+#ifdef CONFIG_PAX_MEMORY_UDEREF
20998+ mov $PAX_USER_SHADOW_BASE,%rcx
20999+ cmp %rcx,%rsi
21000+ jae 1f
21001+ add %rcx,%rsi
21002+1:
21003+#endif
21004+
21005 cmpl $8,%edx
21006 jb 20f /* less then 8 bytes, go to byte copy loop */
21007 ALIGN_DESTINATION
21008@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21009 jz 17f
21010 1: movq (%rsi),%r8
21011 2: movq 1*8(%rsi),%r9
21012-3: movq 2*8(%rsi),%r10
21013+3: movq 2*8(%rsi),%rax
21014 4: movq 3*8(%rsi),%r11
21015 5: movnti %r8,(%rdi)
21016 6: movnti %r9,1*8(%rdi)
21017-7: movnti %r10,2*8(%rdi)
21018+7: movnti %rax,2*8(%rdi)
21019 8: movnti %r11,3*8(%rdi)
21020 9: movq 4*8(%rsi),%r8
21021 10: movq 5*8(%rsi),%r9
21022-11: movq 6*8(%rsi),%r10
21023+11: movq 6*8(%rsi),%rax
21024 12: movq 7*8(%rsi),%r11
21025 13: movnti %r8,4*8(%rdi)
21026 14: movnti %r9,5*8(%rdi)
21027-15: movnti %r10,6*8(%rdi)
21028+15: movnti %rax,6*8(%rdi)
21029 16: movnti %r11,7*8(%rdi)
21030 leaq 64(%rsi),%rsi
21031 leaq 64(%rdi),%rdi
21032@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21033 jnz 21b
21034 23: xorl %eax,%eax
21035 sfence
21036+ pax_force_retaddr
21037 ret
21038
21039 .section .fixup,"ax"
21040diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21041index fb903b7..c92b7f7 100644
21042--- a/arch/x86/lib/csum-copy_64.S
21043+++ b/arch/x86/lib/csum-copy_64.S
21044@@ -8,6 +8,7 @@
21045 #include <linux/linkage.h>
21046 #include <asm/dwarf2.h>
21047 #include <asm/errno.h>
21048+#include <asm/alternative-asm.h>
21049
21050 /*
21051 * Checksum copy with exception handling.
21052@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21053 CFI_RESTORE rbp
21054 addq $7*8, %rsp
21055 CFI_ADJUST_CFA_OFFSET -7*8
21056+ pax_force_retaddr 0, 1
21057 ret
21058 CFI_RESTORE_STATE
21059
21060diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21061index 459b58a..9570bc7 100644
21062--- a/arch/x86/lib/csum-wrappers_64.c
21063+++ b/arch/x86/lib/csum-wrappers_64.c
21064@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21065 len -= 2;
21066 }
21067 }
21068- isum = csum_partial_copy_generic((__force const void *)src,
21069+
21070+#ifdef CONFIG_PAX_MEMORY_UDEREF
21071+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21072+ src += PAX_USER_SHADOW_BASE;
21073+#endif
21074+
21075+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21076 dst, len, isum, errp, NULL);
21077 if (unlikely(*errp))
21078 goto out_err;
21079@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21080 }
21081
21082 *errp = 0;
21083- return csum_partial_copy_generic(src, (void __force *)dst,
21084+
21085+#ifdef CONFIG_PAX_MEMORY_UDEREF
21086+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21087+ dst += PAX_USER_SHADOW_BASE;
21088+#endif
21089+
21090+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21091 len, isum, NULL, errp);
21092 }
21093 EXPORT_SYMBOL(csum_partial_copy_to_user);
21094diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21095index 51f1504..ddac4c1 100644
21096--- a/arch/x86/lib/getuser.S
21097+++ b/arch/x86/lib/getuser.S
21098@@ -33,15 +33,38 @@
21099 #include <asm/asm-offsets.h>
21100 #include <asm/thread_info.h>
21101 #include <asm/asm.h>
21102+#include <asm/segment.h>
21103+#include <asm/pgtable.h>
21104+#include <asm/alternative-asm.h>
21105+
21106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21107+#define __copyuser_seg gs;
21108+#else
21109+#define __copyuser_seg
21110+#endif
21111
21112 .text
21113 ENTRY(__get_user_1)
21114 CFI_STARTPROC
21115+
21116+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21117 GET_THREAD_INFO(%_ASM_DX)
21118 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21119 jae bad_get_user
21120-1: movzb (%_ASM_AX),%edx
21121+
21122+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21123+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21124+ cmp %_ASM_DX,%_ASM_AX
21125+ jae 1234f
21126+ add %_ASM_DX,%_ASM_AX
21127+1234:
21128+#endif
21129+
21130+#endif
21131+
21132+1: __copyuser_seg movzb (%_ASM_AX),%edx
21133 xor %eax,%eax
21134+ pax_force_retaddr
21135 ret
21136 CFI_ENDPROC
21137 ENDPROC(__get_user_1)
21138@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21139 ENTRY(__get_user_2)
21140 CFI_STARTPROC
21141 add $1,%_ASM_AX
21142+
21143+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21144 jc bad_get_user
21145 GET_THREAD_INFO(%_ASM_DX)
21146 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21147 jae bad_get_user
21148-2: movzwl -1(%_ASM_AX),%edx
21149+
21150+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21151+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21152+ cmp %_ASM_DX,%_ASM_AX
21153+ jae 1234f
21154+ add %_ASM_DX,%_ASM_AX
21155+1234:
21156+#endif
21157+
21158+#endif
21159+
21160+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21161 xor %eax,%eax
21162+ pax_force_retaddr
21163 ret
21164 CFI_ENDPROC
21165 ENDPROC(__get_user_2)
21166@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21167 ENTRY(__get_user_4)
21168 CFI_STARTPROC
21169 add $3,%_ASM_AX
21170+
21171+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21172 jc bad_get_user
21173 GET_THREAD_INFO(%_ASM_DX)
21174 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21175 jae bad_get_user
21176-3: mov -3(%_ASM_AX),%edx
21177+
21178+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21179+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21180+ cmp %_ASM_DX,%_ASM_AX
21181+ jae 1234f
21182+ add %_ASM_DX,%_ASM_AX
21183+1234:
21184+#endif
21185+
21186+#endif
21187+
21188+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21189 xor %eax,%eax
21190+ pax_force_retaddr
21191 ret
21192 CFI_ENDPROC
21193 ENDPROC(__get_user_4)
21194@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21195 GET_THREAD_INFO(%_ASM_DX)
21196 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21197 jae bad_get_user
21198+
21199+#ifdef CONFIG_PAX_MEMORY_UDEREF
21200+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21201+ cmp %_ASM_DX,%_ASM_AX
21202+ jae 1234f
21203+ add %_ASM_DX,%_ASM_AX
21204+1234:
21205+#endif
21206+
21207 4: movq -7(%_ASM_AX),%_ASM_DX
21208 xor %eax,%eax
21209+ pax_force_retaddr
21210 ret
21211 CFI_ENDPROC
21212 ENDPROC(__get_user_8)
21213@@ -91,6 +152,7 @@ bad_get_user:
21214 CFI_STARTPROC
21215 xor %edx,%edx
21216 mov $(-EFAULT),%_ASM_AX
21217+ pax_force_retaddr
21218 ret
21219 CFI_ENDPROC
21220 END(bad_get_user)
21221diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21222index 5a1f9f3..ba9f577 100644
21223--- a/arch/x86/lib/insn.c
21224+++ b/arch/x86/lib/insn.c
21225@@ -21,6 +21,11 @@
21226 #include <linux/string.h>
21227 #include <asm/inat.h>
21228 #include <asm/insn.h>
21229+#ifdef __KERNEL__
21230+#include <asm/pgtable_types.h>
21231+#else
21232+#define ktla_ktva(addr) addr
21233+#endif
21234
21235 /* Verify next sizeof(t) bytes can be on the same instruction */
21236 #define validate_next(t, insn, n) \
21237@@ -49,8 +54,8 @@
21238 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21239 {
21240 memset(insn, 0, sizeof(*insn));
21241- insn->kaddr = kaddr;
21242- insn->next_byte = kaddr;
21243+ insn->kaddr = ktla_ktva(kaddr);
21244+ insn->next_byte = ktla_ktva(kaddr);
21245 insn->x86_64 = x86_64 ? 1 : 0;
21246 insn->opnd_bytes = 4;
21247 if (x86_64)
21248diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21249index 05a95e7..326f2fa 100644
21250--- a/arch/x86/lib/iomap_copy_64.S
21251+++ b/arch/x86/lib/iomap_copy_64.S
21252@@ -17,6 +17,7 @@
21253
21254 #include <linux/linkage.h>
21255 #include <asm/dwarf2.h>
21256+#include <asm/alternative-asm.h>
21257
21258 /*
21259 * override generic version in lib/iomap_copy.c
21260@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21261 CFI_STARTPROC
21262 movl %edx,%ecx
21263 rep movsd
21264+ pax_force_retaddr
21265 ret
21266 CFI_ENDPROC
21267 ENDPROC(__iowrite32_copy)
21268diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21269index efbf2a0..8893637 100644
21270--- a/arch/x86/lib/memcpy_64.S
21271+++ b/arch/x86/lib/memcpy_64.S
21272@@ -34,6 +34,7 @@
21273 rep movsq
21274 movl %edx, %ecx
21275 rep movsb
21276+ pax_force_retaddr
21277 ret
21278 .Lmemcpy_e:
21279 .previous
21280@@ -51,6 +52,7 @@
21281
21282 movl %edx, %ecx
21283 rep movsb
21284+ pax_force_retaddr
21285 ret
21286 .Lmemcpy_e_e:
21287 .previous
21288@@ -81,13 +83,13 @@ ENTRY(memcpy)
21289 */
21290 movq 0*8(%rsi), %r8
21291 movq 1*8(%rsi), %r9
21292- movq 2*8(%rsi), %r10
21293+ movq 2*8(%rsi), %rcx
21294 movq 3*8(%rsi), %r11
21295 leaq 4*8(%rsi), %rsi
21296
21297 movq %r8, 0*8(%rdi)
21298 movq %r9, 1*8(%rdi)
21299- movq %r10, 2*8(%rdi)
21300+ movq %rcx, 2*8(%rdi)
21301 movq %r11, 3*8(%rdi)
21302 leaq 4*8(%rdi), %rdi
21303 jae .Lcopy_forward_loop
21304@@ -110,12 +112,12 @@ ENTRY(memcpy)
21305 subq $0x20, %rdx
21306 movq -1*8(%rsi), %r8
21307 movq -2*8(%rsi), %r9
21308- movq -3*8(%rsi), %r10
21309+ movq -3*8(%rsi), %rcx
21310 movq -4*8(%rsi), %r11
21311 leaq -4*8(%rsi), %rsi
21312 movq %r8, -1*8(%rdi)
21313 movq %r9, -2*8(%rdi)
21314- movq %r10, -3*8(%rdi)
21315+ movq %rcx, -3*8(%rdi)
21316 movq %r11, -4*8(%rdi)
21317 leaq -4*8(%rdi), %rdi
21318 jae .Lcopy_backward_loop
21319@@ -135,12 +137,13 @@ ENTRY(memcpy)
21320 */
21321 movq 0*8(%rsi), %r8
21322 movq 1*8(%rsi), %r9
21323- movq -2*8(%rsi, %rdx), %r10
21324+ movq -2*8(%rsi, %rdx), %rcx
21325 movq -1*8(%rsi, %rdx), %r11
21326 movq %r8, 0*8(%rdi)
21327 movq %r9, 1*8(%rdi)
21328- movq %r10, -2*8(%rdi, %rdx)
21329+ movq %rcx, -2*8(%rdi, %rdx)
21330 movq %r11, -1*8(%rdi, %rdx)
21331+ pax_force_retaddr
21332 retq
21333 .p2align 4
21334 .Lless_16bytes:
21335@@ -153,6 +156,7 @@ ENTRY(memcpy)
21336 movq -1*8(%rsi, %rdx), %r9
21337 movq %r8, 0*8(%rdi)
21338 movq %r9, -1*8(%rdi, %rdx)
21339+ pax_force_retaddr
21340 retq
21341 .p2align 4
21342 .Lless_8bytes:
21343@@ -166,6 +170,7 @@ ENTRY(memcpy)
21344 movl -4(%rsi, %rdx), %r8d
21345 movl %ecx, (%rdi)
21346 movl %r8d, -4(%rdi, %rdx)
21347+ pax_force_retaddr
21348 retq
21349 .p2align 4
21350 .Lless_3bytes:
21351@@ -183,6 +188,7 @@ ENTRY(memcpy)
21352 jnz .Lloop_1
21353
21354 .Lend:
21355+ pax_force_retaddr
21356 retq
21357 CFI_ENDPROC
21358 ENDPROC(memcpy)
21359diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21360index ee16461..c39c199 100644
21361--- a/arch/x86/lib/memmove_64.S
21362+++ b/arch/x86/lib/memmove_64.S
21363@@ -61,13 +61,13 @@ ENTRY(memmove)
21364 5:
21365 sub $0x20, %rdx
21366 movq 0*8(%rsi), %r11
21367- movq 1*8(%rsi), %r10
21368+ movq 1*8(%rsi), %rcx
21369 movq 2*8(%rsi), %r9
21370 movq 3*8(%rsi), %r8
21371 leaq 4*8(%rsi), %rsi
21372
21373 movq %r11, 0*8(%rdi)
21374- movq %r10, 1*8(%rdi)
21375+ movq %rcx, 1*8(%rdi)
21376 movq %r9, 2*8(%rdi)
21377 movq %r8, 3*8(%rdi)
21378 leaq 4*8(%rdi), %rdi
21379@@ -81,10 +81,10 @@ ENTRY(memmove)
21380 4:
21381 movq %rdx, %rcx
21382 movq -8(%rsi, %rdx), %r11
21383- lea -8(%rdi, %rdx), %r10
21384+ lea -8(%rdi, %rdx), %r9
21385 shrq $3, %rcx
21386 rep movsq
21387- movq %r11, (%r10)
21388+ movq %r11, (%r9)
21389 jmp 13f
21390 .Lmemmove_end_forward:
21391
21392@@ -95,14 +95,14 @@ ENTRY(memmove)
21393 7:
21394 movq %rdx, %rcx
21395 movq (%rsi), %r11
21396- movq %rdi, %r10
21397+ movq %rdi, %r9
21398 leaq -8(%rsi, %rdx), %rsi
21399 leaq -8(%rdi, %rdx), %rdi
21400 shrq $3, %rcx
21401 std
21402 rep movsq
21403 cld
21404- movq %r11, (%r10)
21405+ movq %r11, (%r9)
21406 jmp 13f
21407
21408 /*
21409@@ -127,13 +127,13 @@ ENTRY(memmove)
21410 8:
21411 subq $0x20, %rdx
21412 movq -1*8(%rsi), %r11
21413- movq -2*8(%rsi), %r10
21414+ movq -2*8(%rsi), %rcx
21415 movq -3*8(%rsi), %r9
21416 movq -4*8(%rsi), %r8
21417 leaq -4*8(%rsi), %rsi
21418
21419 movq %r11, -1*8(%rdi)
21420- movq %r10, -2*8(%rdi)
21421+ movq %rcx, -2*8(%rdi)
21422 movq %r9, -3*8(%rdi)
21423 movq %r8, -4*8(%rdi)
21424 leaq -4*8(%rdi), %rdi
21425@@ -151,11 +151,11 @@ ENTRY(memmove)
21426 * Move data from 16 bytes to 31 bytes.
21427 */
21428 movq 0*8(%rsi), %r11
21429- movq 1*8(%rsi), %r10
21430+ movq 1*8(%rsi), %rcx
21431 movq -2*8(%rsi, %rdx), %r9
21432 movq -1*8(%rsi, %rdx), %r8
21433 movq %r11, 0*8(%rdi)
21434- movq %r10, 1*8(%rdi)
21435+ movq %rcx, 1*8(%rdi)
21436 movq %r9, -2*8(%rdi, %rdx)
21437 movq %r8, -1*8(%rdi, %rdx)
21438 jmp 13f
21439@@ -167,9 +167,9 @@ ENTRY(memmove)
21440 * Move data from 8 bytes to 15 bytes.
21441 */
21442 movq 0*8(%rsi), %r11
21443- movq -1*8(%rsi, %rdx), %r10
21444+ movq -1*8(%rsi, %rdx), %r9
21445 movq %r11, 0*8(%rdi)
21446- movq %r10, -1*8(%rdi, %rdx)
21447+ movq %r9, -1*8(%rdi, %rdx)
21448 jmp 13f
21449 10:
21450 cmpq $4, %rdx
21451@@ -178,9 +178,9 @@ ENTRY(memmove)
21452 * Move data from 4 bytes to 7 bytes.
21453 */
21454 movl (%rsi), %r11d
21455- movl -4(%rsi, %rdx), %r10d
21456+ movl -4(%rsi, %rdx), %r9d
21457 movl %r11d, (%rdi)
21458- movl %r10d, -4(%rdi, %rdx)
21459+ movl %r9d, -4(%rdi, %rdx)
21460 jmp 13f
21461 11:
21462 cmp $2, %rdx
21463@@ -189,9 +189,9 @@ ENTRY(memmove)
21464 * Move data from 2 bytes to 3 bytes.
21465 */
21466 movw (%rsi), %r11w
21467- movw -2(%rsi, %rdx), %r10w
21468+ movw -2(%rsi, %rdx), %r9w
21469 movw %r11w, (%rdi)
21470- movw %r10w, -2(%rdi, %rdx)
21471+ movw %r9w, -2(%rdi, %rdx)
21472 jmp 13f
21473 12:
21474 cmp $1, %rdx
21475@@ -202,6 +202,7 @@ ENTRY(memmove)
21476 movb (%rsi), %r11b
21477 movb %r11b, (%rdi)
21478 13:
21479+ pax_force_retaddr
21480 retq
21481 CFI_ENDPROC
21482
21483@@ -210,6 +211,7 @@ ENTRY(memmove)
21484 /* Forward moving data. */
21485 movq %rdx, %rcx
21486 rep movsb
21487+ pax_force_retaddr
21488 retq
21489 .Lmemmove_end_forward_efs:
21490 .previous
21491diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21492index 79bd454..dff325a 100644
21493--- a/arch/x86/lib/memset_64.S
21494+++ b/arch/x86/lib/memset_64.S
21495@@ -31,6 +31,7 @@
21496 movl %r8d,%ecx
21497 rep stosb
21498 movq %r9,%rax
21499+ pax_force_retaddr
21500 ret
21501 .Lmemset_e:
21502 .previous
21503@@ -53,6 +54,7 @@
21504 movl %edx,%ecx
21505 rep stosb
21506 movq %r9,%rax
21507+ pax_force_retaddr
21508 ret
21509 .Lmemset_e_e:
21510 .previous
21511@@ -60,13 +62,13 @@
21512 ENTRY(memset)
21513 ENTRY(__memset)
21514 CFI_STARTPROC
21515- movq %rdi,%r10
21516 movq %rdx,%r11
21517
21518 /* expand byte value */
21519 movzbl %sil,%ecx
21520 movabs $0x0101010101010101,%rax
21521 mul %rcx /* with rax, clobbers rdx */
21522+ movq %rdi,%rdx
21523
21524 /* align dst */
21525 movl %edi,%r9d
21526@@ -120,7 +122,8 @@ ENTRY(__memset)
21527 jnz .Lloop_1
21528
21529 .Lende:
21530- movq %r10,%rax
21531+ movq %rdx,%rax
21532+ pax_force_retaddr
21533 ret
21534
21535 CFI_RESTORE_STATE
21536diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21537index c9f2d9b..e7fd2c0 100644
21538--- a/arch/x86/lib/mmx_32.c
21539+++ b/arch/x86/lib/mmx_32.c
21540@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21541 {
21542 void *p;
21543 int i;
21544+ unsigned long cr0;
21545
21546 if (unlikely(in_interrupt()))
21547 return __memcpy(to, from, len);
21548@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21549 kernel_fpu_begin();
21550
21551 __asm__ __volatile__ (
21552- "1: prefetch (%0)\n" /* This set is 28 bytes */
21553- " prefetch 64(%0)\n"
21554- " prefetch 128(%0)\n"
21555- " prefetch 192(%0)\n"
21556- " prefetch 256(%0)\n"
21557+ "1: prefetch (%1)\n" /* This set is 28 bytes */
21558+ " prefetch 64(%1)\n"
21559+ " prefetch 128(%1)\n"
21560+ " prefetch 192(%1)\n"
21561+ " prefetch 256(%1)\n"
21562 "2: \n"
21563 ".section .fixup, \"ax\"\n"
21564- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21565+ "3: \n"
21566+
21567+#ifdef CONFIG_PAX_KERNEXEC
21568+ " movl %%cr0, %0\n"
21569+ " movl %0, %%eax\n"
21570+ " andl $0xFFFEFFFF, %%eax\n"
21571+ " movl %%eax, %%cr0\n"
21572+#endif
21573+
21574+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21575+
21576+#ifdef CONFIG_PAX_KERNEXEC
21577+ " movl %0, %%cr0\n"
21578+#endif
21579+
21580 " jmp 2b\n"
21581 ".previous\n"
21582 _ASM_EXTABLE(1b, 3b)
21583- : : "r" (from));
21584+ : "=&r" (cr0) : "r" (from) : "ax");
21585
21586 for ( ; i > 5; i--) {
21587 __asm__ __volatile__ (
21588- "1: prefetch 320(%0)\n"
21589- "2: movq (%0), %%mm0\n"
21590- " movq 8(%0), %%mm1\n"
21591- " movq 16(%0), %%mm2\n"
21592- " movq 24(%0), %%mm3\n"
21593- " movq %%mm0, (%1)\n"
21594- " movq %%mm1, 8(%1)\n"
21595- " movq %%mm2, 16(%1)\n"
21596- " movq %%mm3, 24(%1)\n"
21597- " movq 32(%0), %%mm0\n"
21598- " movq 40(%0), %%mm1\n"
21599- " movq 48(%0), %%mm2\n"
21600- " movq 56(%0), %%mm3\n"
21601- " movq %%mm0, 32(%1)\n"
21602- " movq %%mm1, 40(%1)\n"
21603- " movq %%mm2, 48(%1)\n"
21604- " movq %%mm3, 56(%1)\n"
21605+ "1: prefetch 320(%1)\n"
21606+ "2: movq (%1), %%mm0\n"
21607+ " movq 8(%1), %%mm1\n"
21608+ " movq 16(%1), %%mm2\n"
21609+ " movq 24(%1), %%mm3\n"
21610+ " movq %%mm0, (%2)\n"
21611+ " movq %%mm1, 8(%2)\n"
21612+ " movq %%mm2, 16(%2)\n"
21613+ " movq %%mm3, 24(%2)\n"
21614+ " movq 32(%1), %%mm0\n"
21615+ " movq 40(%1), %%mm1\n"
21616+ " movq 48(%1), %%mm2\n"
21617+ " movq 56(%1), %%mm3\n"
21618+ " movq %%mm0, 32(%2)\n"
21619+ " movq %%mm1, 40(%2)\n"
21620+ " movq %%mm2, 48(%2)\n"
21621+ " movq %%mm3, 56(%2)\n"
21622 ".section .fixup, \"ax\"\n"
21623- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21624+ "3:\n"
21625+
21626+#ifdef CONFIG_PAX_KERNEXEC
21627+ " movl %%cr0, %0\n"
21628+ " movl %0, %%eax\n"
21629+ " andl $0xFFFEFFFF, %%eax\n"
21630+ " movl %%eax, %%cr0\n"
21631+#endif
21632+
21633+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21634+
21635+#ifdef CONFIG_PAX_KERNEXEC
21636+ " movl %0, %%cr0\n"
21637+#endif
21638+
21639 " jmp 2b\n"
21640 ".previous\n"
21641 _ASM_EXTABLE(1b, 3b)
21642- : : "r" (from), "r" (to) : "memory");
21643+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21644
21645 from += 64;
21646 to += 64;
21647@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
21648 static void fast_copy_page(void *to, void *from)
21649 {
21650 int i;
21651+ unsigned long cr0;
21652
21653 kernel_fpu_begin();
21654
21655@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
21656 * but that is for later. -AV
21657 */
21658 __asm__ __volatile__(
21659- "1: prefetch (%0)\n"
21660- " prefetch 64(%0)\n"
21661- " prefetch 128(%0)\n"
21662- " prefetch 192(%0)\n"
21663- " prefetch 256(%0)\n"
21664+ "1: prefetch (%1)\n"
21665+ " prefetch 64(%1)\n"
21666+ " prefetch 128(%1)\n"
21667+ " prefetch 192(%1)\n"
21668+ " prefetch 256(%1)\n"
21669 "2: \n"
21670 ".section .fixup, \"ax\"\n"
21671- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21672+ "3: \n"
21673+
21674+#ifdef CONFIG_PAX_KERNEXEC
21675+ " movl %%cr0, %0\n"
21676+ " movl %0, %%eax\n"
21677+ " andl $0xFFFEFFFF, %%eax\n"
21678+ " movl %%eax, %%cr0\n"
21679+#endif
21680+
21681+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21682+
21683+#ifdef CONFIG_PAX_KERNEXEC
21684+ " movl %0, %%cr0\n"
21685+#endif
21686+
21687 " jmp 2b\n"
21688 ".previous\n"
21689- _ASM_EXTABLE(1b, 3b) : : "r" (from));
21690+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21691
21692 for (i = 0; i < (4096-320)/64; i++) {
21693 __asm__ __volatile__ (
21694- "1: prefetch 320(%0)\n"
21695- "2: movq (%0), %%mm0\n"
21696- " movntq %%mm0, (%1)\n"
21697- " movq 8(%0), %%mm1\n"
21698- " movntq %%mm1, 8(%1)\n"
21699- " movq 16(%0), %%mm2\n"
21700- " movntq %%mm2, 16(%1)\n"
21701- " movq 24(%0), %%mm3\n"
21702- " movntq %%mm3, 24(%1)\n"
21703- " movq 32(%0), %%mm4\n"
21704- " movntq %%mm4, 32(%1)\n"
21705- " movq 40(%0), %%mm5\n"
21706- " movntq %%mm5, 40(%1)\n"
21707- " movq 48(%0), %%mm6\n"
21708- " movntq %%mm6, 48(%1)\n"
21709- " movq 56(%0), %%mm7\n"
21710- " movntq %%mm7, 56(%1)\n"
21711+ "1: prefetch 320(%1)\n"
21712+ "2: movq (%1), %%mm0\n"
21713+ " movntq %%mm0, (%2)\n"
21714+ " movq 8(%1), %%mm1\n"
21715+ " movntq %%mm1, 8(%2)\n"
21716+ " movq 16(%1), %%mm2\n"
21717+ " movntq %%mm2, 16(%2)\n"
21718+ " movq 24(%1), %%mm3\n"
21719+ " movntq %%mm3, 24(%2)\n"
21720+ " movq 32(%1), %%mm4\n"
21721+ " movntq %%mm4, 32(%2)\n"
21722+ " movq 40(%1), %%mm5\n"
21723+ " movntq %%mm5, 40(%2)\n"
21724+ " movq 48(%1), %%mm6\n"
21725+ " movntq %%mm6, 48(%2)\n"
21726+ " movq 56(%1), %%mm7\n"
21727+ " movntq %%mm7, 56(%2)\n"
21728 ".section .fixup, \"ax\"\n"
21729- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21730+ "3:\n"
21731+
21732+#ifdef CONFIG_PAX_KERNEXEC
21733+ " movl %%cr0, %0\n"
21734+ " movl %0, %%eax\n"
21735+ " andl $0xFFFEFFFF, %%eax\n"
21736+ " movl %%eax, %%cr0\n"
21737+#endif
21738+
21739+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21740+
21741+#ifdef CONFIG_PAX_KERNEXEC
21742+ " movl %0, %%cr0\n"
21743+#endif
21744+
21745 " jmp 2b\n"
21746 ".previous\n"
21747- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
21748+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21749
21750 from += 64;
21751 to += 64;
21752@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
21753 static void fast_copy_page(void *to, void *from)
21754 {
21755 int i;
21756+ unsigned long cr0;
21757
21758 kernel_fpu_begin();
21759
21760 __asm__ __volatile__ (
21761- "1: prefetch (%0)\n"
21762- " prefetch 64(%0)\n"
21763- " prefetch 128(%0)\n"
21764- " prefetch 192(%0)\n"
21765- " prefetch 256(%0)\n"
21766+ "1: prefetch (%1)\n"
21767+ " prefetch 64(%1)\n"
21768+ " prefetch 128(%1)\n"
21769+ " prefetch 192(%1)\n"
21770+ " prefetch 256(%1)\n"
21771 "2: \n"
21772 ".section .fixup, \"ax\"\n"
21773- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21774+ "3: \n"
21775+
21776+#ifdef CONFIG_PAX_KERNEXEC
21777+ " movl %%cr0, %0\n"
21778+ " movl %0, %%eax\n"
21779+ " andl $0xFFFEFFFF, %%eax\n"
21780+ " movl %%eax, %%cr0\n"
21781+#endif
21782+
21783+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21784+
21785+#ifdef CONFIG_PAX_KERNEXEC
21786+ " movl %0, %%cr0\n"
21787+#endif
21788+
21789 " jmp 2b\n"
21790 ".previous\n"
21791- _ASM_EXTABLE(1b, 3b) : : "r" (from));
21792+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21793
21794 for (i = 0; i < 4096/64; i++) {
21795 __asm__ __volatile__ (
21796- "1: prefetch 320(%0)\n"
21797- "2: movq (%0), %%mm0\n"
21798- " movq 8(%0), %%mm1\n"
21799- " movq 16(%0), %%mm2\n"
21800- " movq 24(%0), %%mm3\n"
21801- " movq %%mm0, (%1)\n"
21802- " movq %%mm1, 8(%1)\n"
21803- " movq %%mm2, 16(%1)\n"
21804- " movq %%mm3, 24(%1)\n"
21805- " movq 32(%0), %%mm0\n"
21806- " movq 40(%0), %%mm1\n"
21807- " movq 48(%0), %%mm2\n"
21808- " movq 56(%0), %%mm3\n"
21809- " movq %%mm0, 32(%1)\n"
21810- " movq %%mm1, 40(%1)\n"
21811- " movq %%mm2, 48(%1)\n"
21812- " movq %%mm3, 56(%1)\n"
21813+ "1: prefetch 320(%1)\n"
21814+ "2: movq (%1), %%mm0\n"
21815+ " movq 8(%1), %%mm1\n"
21816+ " movq 16(%1), %%mm2\n"
21817+ " movq 24(%1), %%mm3\n"
21818+ " movq %%mm0, (%2)\n"
21819+ " movq %%mm1, 8(%2)\n"
21820+ " movq %%mm2, 16(%2)\n"
21821+ " movq %%mm3, 24(%2)\n"
21822+ " movq 32(%1), %%mm0\n"
21823+ " movq 40(%1), %%mm1\n"
21824+ " movq 48(%1), %%mm2\n"
21825+ " movq 56(%1), %%mm3\n"
21826+ " movq %%mm0, 32(%2)\n"
21827+ " movq %%mm1, 40(%2)\n"
21828+ " movq %%mm2, 48(%2)\n"
21829+ " movq %%mm3, 56(%2)\n"
21830 ".section .fixup, \"ax\"\n"
21831- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21832+ "3:\n"
21833+
21834+#ifdef CONFIG_PAX_KERNEXEC
21835+ " movl %%cr0, %0\n"
21836+ " movl %0, %%eax\n"
21837+ " andl $0xFFFEFFFF, %%eax\n"
21838+ " movl %%eax, %%cr0\n"
21839+#endif
21840+
21841+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21842+
21843+#ifdef CONFIG_PAX_KERNEXEC
21844+ " movl %0, %%cr0\n"
21845+#endif
21846+
21847 " jmp 2b\n"
21848 ".previous\n"
21849 _ASM_EXTABLE(1b, 3b)
21850- : : "r" (from), "r" (to) : "memory");
21851+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21852
21853 from += 64;
21854 to += 64;
21855diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
21856index 69fa106..adda88b 100644
21857--- a/arch/x86/lib/msr-reg.S
21858+++ b/arch/x86/lib/msr-reg.S
21859@@ -3,6 +3,7 @@
21860 #include <asm/dwarf2.h>
21861 #include <asm/asm.h>
21862 #include <asm/msr.h>
21863+#include <asm/alternative-asm.h>
21864
21865 #ifdef CONFIG_X86_64
21866 /*
21867@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
21868 CFI_STARTPROC
21869 pushq_cfi %rbx
21870 pushq_cfi %rbp
21871- movq %rdi, %r10 /* Save pointer */
21872+ movq %rdi, %r9 /* Save pointer */
21873 xorl %r11d, %r11d /* Return value */
21874 movl (%rdi), %eax
21875 movl 4(%rdi), %ecx
21876@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
21877 movl 28(%rdi), %edi
21878 CFI_REMEMBER_STATE
21879 1: \op
21880-2: movl %eax, (%r10)
21881+2: movl %eax, (%r9)
21882 movl %r11d, %eax /* Return value */
21883- movl %ecx, 4(%r10)
21884- movl %edx, 8(%r10)
21885- movl %ebx, 12(%r10)
21886- movl %ebp, 20(%r10)
21887- movl %esi, 24(%r10)
21888- movl %edi, 28(%r10)
21889+ movl %ecx, 4(%r9)
21890+ movl %edx, 8(%r9)
21891+ movl %ebx, 12(%r9)
21892+ movl %ebp, 20(%r9)
21893+ movl %esi, 24(%r9)
21894+ movl %edi, 28(%r9)
21895 popq_cfi %rbp
21896 popq_cfi %rbx
21897+ pax_force_retaddr
21898 ret
21899 3:
21900 CFI_RESTORE_STATE
21901diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
21902index 36b0d15..d381858 100644
21903--- a/arch/x86/lib/putuser.S
21904+++ b/arch/x86/lib/putuser.S
21905@@ -15,7 +15,9 @@
21906 #include <asm/thread_info.h>
21907 #include <asm/errno.h>
21908 #include <asm/asm.h>
21909-
21910+#include <asm/segment.h>
21911+#include <asm/pgtable.h>
21912+#include <asm/alternative-asm.h>
21913
21914 /*
21915 * __put_user_X
21916@@ -29,52 +31,119 @@
21917 * as they get called from within inline assembly.
21918 */
21919
21920-#define ENTER CFI_STARTPROC ; \
21921- GET_THREAD_INFO(%_ASM_BX)
21922-#define EXIT ret ; \
21923+#define ENTER CFI_STARTPROC
21924+#define EXIT pax_force_retaddr; ret ; \
21925 CFI_ENDPROC
21926
21927+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21928+#define _DEST %_ASM_CX,%_ASM_BX
21929+#else
21930+#define _DEST %_ASM_CX
21931+#endif
21932+
21933+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21934+#define __copyuser_seg gs;
21935+#else
21936+#define __copyuser_seg
21937+#endif
21938+
21939 .text
21940 ENTRY(__put_user_1)
21941 ENTER
21942+
21943+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21944+ GET_THREAD_INFO(%_ASM_BX)
21945 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
21946 jae bad_put_user
21947-1: movb %al,(%_ASM_CX)
21948+
21949+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21950+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21951+ cmp %_ASM_BX,%_ASM_CX
21952+ jb 1234f
21953+ xor %ebx,%ebx
21954+1234:
21955+#endif
21956+
21957+#endif
21958+
21959+1: __copyuser_seg movb %al,(_DEST)
21960 xor %eax,%eax
21961 EXIT
21962 ENDPROC(__put_user_1)
21963
21964 ENTRY(__put_user_2)
21965 ENTER
21966+
21967+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21968+ GET_THREAD_INFO(%_ASM_BX)
21969 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
21970 sub $1,%_ASM_BX
21971 cmp %_ASM_BX,%_ASM_CX
21972 jae bad_put_user
21973-2: movw %ax,(%_ASM_CX)
21974+
21975+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21976+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21977+ cmp %_ASM_BX,%_ASM_CX
21978+ jb 1234f
21979+ xor %ebx,%ebx
21980+1234:
21981+#endif
21982+
21983+#endif
21984+
21985+2: __copyuser_seg movw %ax,(_DEST)
21986 xor %eax,%eax
21987 EXIT
21988 ENDPROC(__put_user_2)
21989
21990 ENTRY(__put_user_4)
21991 ENTER
21992+
21993+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21994+ GET_THREAD_INFO(%_ASM_BX)
21995 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
21996 sub $3,%_ASM_BX
21997 cmp %_ASM_BX,%_ASM_CX
21998 jae bad_put_user
21999-3: movl %eax,(%_ASM_CX)
22000+
22001+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22002+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22003+ cmp %_ASM_BX,%_ASM_CX
22004+ jb 1234f
22005+ xor %ebx,%ebx
22006+1234:
22007+#endif
22008+
22009+#endif
22010+
22011+3: __copyuser_seg movl %eax,(_DEST)
22012 xor %eax,%eax
22013 EXIT
22014 ENDPROC(__put_user_4)
22015
22016 ENTRY(__put_user_8)
22017 ENTER
22018+
22019+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22020+ GET_THREAD_INFO(%_ASM_BX)
22021 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22022 sub $7,%_ASM_BX
22023 cmp %_ASM_BX,%_ASM_CX
22024 jae bad_put_user
22025-4: mov %_ASM_AX,(%_ASM_CX)
22026+
22027+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22028+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22029+ cmp %_ASM_BX,%_ASM_CX
22030+ jb 1234f
22031+ xor %ebx,%ebx
22032+1234:
22033+#endif
22034+
22035+#endif
22036+
22037+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22038 #ifdef CONFIG_X86_32
22039-5: movl %edx,4(%_ASM_CX)
22040+5: __copyuser_seg movl %edx,4(_DEST)
22041 #endif
22042 xor %eax,%eax
22043 EXIT
22044diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22045index 1cad221..de671ee 100644
22046--- a/arch/x86/lib/rwlock.S
22047+++ b/arch/x86/lib/rwlock.S
22048@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22049 FRAME
22050 0: LOCK_PREFIX
22051 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22052+
22053+#ifdef CONFIG_PAX_REFCOUNT
22054+ jno 1234f
22055+ LOCK_PREFIX
22056+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22057+ int $4
22058+1234:
22059+ _ASM_EXTABLE(1234b, 1234b)
22060+#endif
22061+
22062 1: rep; nop
22063 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22064 jne 1b
22065 LOCK_PREFIX
22066 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22067+
22068+#ifdef CONFIG_PAX_REFCOUNT
22069+ jno 1234f
22070+ LOCK_PREFIX
22071+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22072+ int $4
22073+1234:
22074+ _ASM_EXTABLE(1234b, 1234b)
22075+#endif
22076+
22077 jnz 0b
22078 ENDFRAME
22079+ pax_force_retaddr
22080 ret
22081 CFI_ENDPROC
22082 END(__write_lock_failed)
22083@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22084 FRAME
22085 0: LOCK_PREFIX
22086 READ_LOCK_SIZE(inc) (%__lock_ptr)
22087+
22088+#ifdef CONFIG_PAX_REFCOUNT
22089+ jno 1234f
22090+ LOCK_PREFIX
22091+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22092+ int $4
22093+1234:
22094+ _ASM_EXTABLE(1234b, 1234b)
22095+#endif
22096+
22097 1: rep; nop
22098 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22099 js 1b
22100 LOCK_PREFIX
22101 READ_LOCK_SIZE(dec) (%__lock_ptr)
22102+
22103+#ifdef CONFIG_PAX_REFCOUNT
22104+ jno 1234f
22105+ LOCK_PREFIX
22106+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22107+ int $4
22108+1234:
22109+ _ASM_EXTABLE(1234b, 1234b)
22110+#endif
22111+
22112 js 0b
22113 ENDFRAME
22114+ pax_force_retaddr
22115 ret
22116 CFI_ENDPROC
22117 END(__read_lock_failed)
22118diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22119index 5dff5f0..cadebf4 100644
22120--- a/arch/x86/lib/rwsem.S
22121+++ b/arch/x86/lib/rwsem.S
22122@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22123 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22124 CFI_RESTORE __ASM_REG(dx)
22125 restore_common_regs
22126+ pax_force_retaddr
22127 ret
22128 CFI_ENDPROC
22129 ENDPROC(call_rwsem_down_read_failed)
22130@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22131 movq %rax,%rdi
22132 call rwsem_down_write_failed
22133 restore_common_regs
22134+ pax_force_retaddr
22135 ret
22136 CFI_ENDPROC
22137 ENDPROC(call_rwsem_down_write_failed)
22138@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22139 movq %rax,%rdi
22140 call rwsem_wake
22141 restore_common_regs
22142-1: ret
22143+1: pax_force_retaddr
22144+ ret
22145 CFI_ENDPROC
22146 ENDPROC(call_rwsem_wake)
22147
22148@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22149 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22150 CFI_RESTORE __ASM_REG(dx)
22151 restore_common_regs
22152+ pax_force_retaddr
22153 ret
22154 CFI_ENDPROC
22155 ENDPROC(call_rwsem_downgrade_wake)
22156diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22157index a63efd6..ccecad8 100644
22158--- a/arch/x86/lib/thunk_64.S
22159+++ b/arch/x86/lib/thunk_64.S
22160@@ -8,6 +8,7 @@
22161 #include <linux/linkage.h>
22162 #include <asm/dwarf2.h>
22163 #include <asm/calling.h>
22164+#include <asm/alternative-asm.h>
22165
22166 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22167 .macro THUNK name, func, put_ret_addr_in_rdi=0
22168@@ -41,5 +42,6 @@
22169 SAVE_ARGS
22170 restore:
22171 RESTORE_ARGS
22172+ pax_force_retaddr
22173 ret
22174 CFI_ENDPROC
22175diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22176index e218d5d..35679b4 100644
22177--- a/arch/x86/lib/usercopy_32.c
22178+++ b/arch/x86/lib/usercopy_32.c
22179@@ -43,7 +43,7 @@ do { \
22180 __asm__ __volatile__( \
22181 " testl %1,%1\n" \
22182 " jz 2f\n" \
22183- "0: lodsb\n" \
22184+ "0: "__copyuser_seg"lodsb\n" \
22185 " stosb\n" \
22186 " testb %%al,%%al\n" \
22187 " jz 1f\n" \
22188@@ -128,10 +128,12 @@ do { \
22189 int __d0; \
22190 might_fault(); \
22191 __asm__ __volatile__( \
22192+ __COPYUSER_SET_ES \
22193 "0: rep; stosl\n" \
22194 " movl %2,%0\n" \
22195 "1: rep; stosb\n" \
22196 "2:\n" \
22197+ __COPYUSER_RESTORE_ES \
22198 ".section .fixup,\"ax\"\n" \
22199 "3: lea 0(%2,%0,4),%0\n" \
22200 " jmp 2b\n" \
22201@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22202 might_fault();
22203
22204 __asm__ __volatile__(
22205+ __COPYUSER_SET_ES
22206 " testl %0, %0\n"
22207 " jz 3f\n"
22208 " andl %0,%%ecx\n"
22209@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22210 " subl %%ecx,%0\n"
22211 " addl %0,%%eax\n"
22212 "1:\n"
22213+ __COPYUSER_RESTORE_ES
22214 ".section .fixup,\"ax\"\n"
22215 "2: xorl %%eax,%%eax\n"
22216 " jmp 1b\n"
22217@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22218
22219 #ifdef CONFIG_X86_INTEL_USERCOPY
22220 static unsigned long
22221-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22222+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22223 {
22224 int d0, d1;
22225 __asm__ __volatile__(
22226@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22227 " .align 2,0x90\n"
22228 "3: movl 0(%4), %%eax\n"
22229 "4: movl 4(%4), %%edx\n"
22230- "5: movl %%eax, 0(%3)\n"
22231- "6: movl %%edx, 4(%3)\n"
22232+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22233+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22234 "7: movl 8(%4), %%eax\n"
22235 "8: movl 12(%4),%%edx\n"
22236- "9: movl %%eax, 8(%3)\n"
22237- "10: movl %%edx, 12(%3)\n"
22238+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22239+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22240 "11: movl 16(%4), %%eax\n"
22241 "12: movl 20(%4), %%edx\n"
22242- "13: movl %%eax, 16(%3)\n"
22243- "14: movl %%edx, 20(%3)\n"
22244+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22245+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22246 "15: movl 24(%4), %%eax\n"
22247 "16: movl 28(%4), %%edx\n"
22248- "17: movl %%eax, 24(%3)\n"
22249- "18: movl %%edx, 28(%3)\n"
22250+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22251+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22252 "19: movl 32(%4), %%eax\n"
22253 "20: movl 36(%4), %%edx\n"
22254- "21: movl %%eax, 32(%3)\n"
22255- "22: movl %%edx, 36(%3)\n"
22256+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22257+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22258 "23: movl 40(%4), %%eax\n"
22259 "24: movl 44(%4), %%edx\n"
22260- "25: movl %%eax, 40(%3)\n"
22261- "26: movl %%edx, 44(%3)\n"
22262+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22263+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22264 "27: movl 48(%4), %%eax\n"
22265 "28: movl 52(%4), %%edx\n"
22266- "29: movl %%eax, 48(%3)\n"
22267- "30: movl %%edx, 52(%3)\n"
22268+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22269+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22270 "31: movl 56(%4), %%eax\n"
22271 "32: movl 60(%4), %%edx\n"
22272- "33: movl %%eax, 56(%3)\n"
22273- "34: movl %%edx, 60(%3)\n"
22274+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22275+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22276 " addl $-64, %0\n"
22277 " addl $64, %4\n"
22278 " addl $64, %3\n"
22279@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22280 " shrl $2, %0\n"
22281 " andl $3, %%eax\n"
22282 " cld\n"
22283+ __COPYUSER_SET_ES
22284 "99: rep; movsl\n"
22285 "36: movl %%eax, %0\n"
22286 "37: rep; movsb\n"
22287 "100:\n"
22288+ __COPYUSER_RESTORE_ES
22289+ ".section .fixup,\"ax\"\n"
22290+ "101: lea 0(%%eax,%0,4),%0\n"
22291+ " jmp 100b\n"
22292+ ".previous\n"
22293+ ".section __ex_table,\"a\"\n"
22294+ " .align 4\n"
22295+ " .long 1b,100b\n"
22296+ " .long 2b,100b\n"
22297+ " .long 3b,100b\n"
22298+ " .long 4b,100b\n"
22299+ " .long 5b,100b\n"
22300+ " .long 6b,100b\n"
22301+ " .long 7b,100b\n"
22302+ " .long 8b,100b\n"
22303+ " .long 9b,100b\n"
22304+ " .long 10b,100b\n"
22305+ " .long 11b,100b\n"
22306+ " .long 12b,100b\n"
22307+ " .long 13b,100b\n"
22308+ " .long 14b,100b\n"
22309+ " .long 15b,100b\n"
22310+ " .long 16b,100b\n"
22311+ " .long 17b,100b\n"
22312+ " .long 18b,100b\n"
22313+ " .long 19b,100b\n"
22314+ " .long 20b,100b\n"
22315+ " .long 21b,100b\n"
22316+ " .long 22b,100b\n"
22317+ " .long 23b,100b\n"
22318+ " .long 24b,100b\n"
22319+ " .long 25b,100b\n"
22320+ " .long 26b,100b\n"
22321+ " .long 27b,100b\n"
22322+ " .long 28b,100b\n"
22323+ " .long 29b,100b\n"
22324+ " .long 30b,100b\n"
22325+ " .long 31b,100b\n"
22326+ " .long 32b,100b\n"
22327+ " .long 33b,100b\n"
22328+ " .long 34b,100b\n"
22329+ " .long 35b,100b\n"
22330+ " .long 36b,100b\n"
22331+ " .long 37b,100b\n"
22332+ " .long 99b,101b\n"
22333+ ".previous"
22334+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
22335+ : "1"(to), "2"(from), "0"(size)
22336+ : "eax", "edx", "memory");
22337+ return size;
22338+}
22339+
22340+static unsigned long
22341+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22342+{
22343+ int d0, d1;
22344+ __asm__ __volatile__(
22345+ " .align 2,0x90\n"
22346+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22347+ " cmpl $67, %0\n"
22348+ " jbe 3f\n"
22349+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22350+ " .align 2,0x90\n"
22351+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22352+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22353+ "5: movl %%eax, 0(%3)\n"
22354+ "6: movl %%edx, 4(%3)\n"
22355+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22356+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22357+ "9: movl %%eax, 8(%3)\n"
22358+ "10: movl %%edx, 12(%3)\n"
22359+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22360+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22361+ "13: movl %%eax, 16(%3)\n"
22362+ "14: movl %%edx, 20(%3)\n"
22363+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22364+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22365+ "17: movl %%eax, 24(%3)\n"
22366+ "18: movl %%edx, 28(%3)\n"
22367+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22368+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22369+ "21: movl %%eax, 32(%3)\n"
22370+ "22: movl %%edx, 36(%3)\n"
22371+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22372+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22373+ "25: movl %%eax, 40(%3)\n"
22374+ "26: movl %%edx, 44(%3)\n"
22375+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22376+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22377+ "29: movl %%eax, 48(%3)\n"
22378+ "30: movl %%edx, 52(%3)\n"
22379+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22380+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22381+ "33: movl %%eax, 56(%3)\n"
22382+ "34: movl %%edx, 60(%3)\n"
22383+ " addl $-64, %0\n"
22384+ " addl $64, %4\n"
22385+ " addl $64, %3\n"
22386+ " cmpl $63, %0\n"
22387+ " ja 1b\n"
22388+ "35: movl %0, %%eax\n"
22389+ " shrl $2, %0\n"
22390+ " andl $3, %%eax\n"
22391+ " cld\n"
22392+ "99: rep; "__copyuser_seg" movsl\n"
22393+ "36: movl %%eax, %0\n"
22394+ "37: rep; "__copyuser_seg" movsb\n"
22395+ "100:\n"
22396 ".section .fixup,\"ax\"\n"
22397 "101: lea 0(%%eax,%0,4),%0\n"
22398 " jmp 100b\n"
22399@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22400 int d0, d1;
22401 __asm__ __volatile__(
22402 " .align 2,0x90\n"
22403- "0: movl 32(%4), %%eax\n"
22404+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22405 " cmpl $67, %0\n"
22406 " jbe 2f\n"
22407- "1: movl 64(%4), %%eax\n"
22408+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22409 " .align 2,0x90\n"
22410- "2: movl 0(%4), %%eax\n"
22411- "21: movl 4(%4), %%edx\n"
22412+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22413+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22414 " movl %%eax, 0(%3)\n"
22415 " movl %%edx, 4(%3)\n"
22416- "3: movl 8(%4), %%eax\n"
22417- "31: movl 12(%4),%%edx\n"
22418+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22419+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22420 " movl %%eax, 8(%3)\n"
22421 " movl %%edx, 12(%3)\n"
22422- "4: movl 16(%4), %%eax\n"
22423- "41: movl 20(%4), %%edx\n"
22424+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22425+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22426 " movl %%eax, 16(%3)\n"
22427 " movl %%edx, 20(%3)\n"
22428- "10: movl 24(%4), %%eax\n"
22429- "51: movl 28(%4), %%edx\n"
22430+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22431+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22432 " movl %%eax, 24(%3)\n"
22433 " movl %%edx, 28(%3)\n"
22434- "11: movl 32(%4), %%eax\n"
22435- "61: movl 36(%4), %%edx\n"
22436+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22437+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22438 " movl %%eax, 32(%3)\n"
22439 " movl %%edx, 36(%3)\n"
22440- "12: movl 40(%4), %%eax\n"
22441- "71: movl 44(%4), %%edx\n"
22442+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22443+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22444 " movl %%eax, 40(%3)\n"
22445 " movl %%edx, 44(%3)\n"
22446- "13: movl 48(%4), %%eax\n"
22447- "81: movl 52(%4), %%edx\n"
22448+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22449+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22450 " movl %%eax, 48(%3)\n"
22451 " movl %%edx, 52(%3)\n"
22452- "14: movl 56(%4), %%eax\n"
22453- "91: movl 60(%4), %%edx\n"
22454+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22455+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22456 " movl %%eax, 56(%3)\n"
22457 " movl %%edx, 60(%3)\n"
22458 " addl $-64, %0\n"
22459@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22460 " shrl $2, %0\n"
22461 " andl $3, %%eax\n"
22462 " cld\n"
22463- "6: rep; movsl\n"
22464+ "6: rep; "__copyuser_seg" movsl\n"
22465 " movl %%eax,%0\n"
22466- "7: rep; movsb\n"
22467+ "7: rep; "__copyuser_seg" movsb\n"
22468 "8:\n"
22469 ".section .fixup,\"ax\"\n"
22470 "9: lea 0(%%eax,%0,4),%0\n"
22471@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22472
22473 __asm__ __volatile__(
22474 " .align 2,0x90\n"
22475- "0: movl 32(%4), %%eax\n"
22476+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22477 " cmpl $67, %0\n"
22478 " jbe 2f\n"
22479- "1: movl 64(%4), %%eax\n"
22480+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22481 " .align 2,0x90\n"
22482- "2: movl 0(%4), %%eax\n"
22483- "21: movl 4(%4), %%edx\n"
22484+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22485+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22486 " movnti %%eax, 0(%3)\n"
22487 " movnti %%edx, 4(%3)\n"
22488- "3: movl 8(%4), %%eax\n"
22489- "31: movl 12(%4),%%edx\n"
22490+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22491+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22492 " movnti %%eax, 8(%3)\n"
22493 " movnti %%edx, 12(%3)\n"
22494- "4: movl 16(%4), %%eax\n"
22495- "41: movl 20(%4), %%edx\n"
22496+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22497+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22498 " movnti %%eax, 16(%3)\n"
22499 " movnti %%edx, 20(%3)\n"
22500- "10: movl 24(%4), %%eax\n"
22501- "51: movl 28(%4), %%edx\n"
22502+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22503+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22504 " movnti %%eax, 24(%3)\n"
22505 " movnti %%edx, 28(%3)\n"
22506- "11: movl 32(%4), %%eax\n"
22507- "61: movl 36(%4), %%edx\n"
22508+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22509+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22510 " movnti %%eax, 32(%3)\n"
22511 " movnti %%edx, 36(%3)\n"
22512- "12: movl 40(%4), %%eax\n"
22513- "71: movl 44(%4), %%edx\n"
22514+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22515+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22516 " movnti %%eax, 40(%3)\n"
22517 " movnti %%edx, 44(%3)\n"
22518- "13: movl 48(%4), %%eax\n"
22519- "81: movl 52(%4), %%edx\n"
22520+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22521+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22522 " movnti %%eax, 48(%3)\n"
22523 " movnti %%edx, 52(%3)\n"
22524- "14: movl 56(%4), %%eax\n"
22525- "91: movl 60(%4), %%edx\n"
22526+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22527+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22528 " movnti %%eax, 56(%3)\n"
22529 " movnti %%edx, 60(%3)\n"
22530 " addl $-64, %0\n"
22531@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22532 " shrl $2, %0\n"
22533 " andl $3, %%eax\n"
22534 " cld\n"
22535- "6: rep; movsl\n"
22536+ "6: rep; "__copyuser_seg" movsl\n"
22537 " movl %%eax,%0\n"
22538- "7: rep; movsb\n"
22539+ "7: rep; "__copyuser_seg" movsb\n"
22540 "8:\n"
22541 ".section .fixup,\"ax\"\n"
22542 "9: lea 0(%%eax,%0,4),%0\n"
22543@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
22544
22545 __asm__ __volatile__(
22546 " .align 2,0x90\n"
22547- "0: movl 32(%4), %%eax\n"
22548+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22549 " cmpl $67, %0\n"
22550 " jbe 2f\n"
22551- "1: movl 64(%4), %%eax\n"
22552+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22553 " .align 2,0x90\n"
22554- "2: movl 0(%4), %%eax\n"
22555- "21: movl 4(%4), %%edx\n"
22556+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22557+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22558 " movnti %%eax, 0(%3)\n"
22559 " movnti %%edx, 4(%3)\n"
22560- "3: movl 8(%4), %%eax\n"
22561- "31: movl 12(%4),%%edx\n"
22562+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22563+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22564 " movnti %%eax, 8(%3)\n"
22565 " movnti %%edx, 12(%3)\n"
22566- "4: movl 16(%4), %%eax\n"
22567- "41: movl 20(%4), %%edx\n"
22568+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22569+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22570 " movnti %%eax, 16(%3)\n"
22571 " movnti %%edx, 20(%3)\n"
22572- "10: movl 24(%4), %%eax\n"
22573- "51: movl 28(%4), %%edx\n"
22574+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22575+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22576 " movnti %%eax, 24(%3)\n"
22577 " movnti %%edx, 28(%3)\n"
22578- "11: movl 32(%4), %%eax\n"
22579- "61: movl 36(%4), %%edx\n"
22580+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22581+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22582 " movnti %%eax, 32(%3)\n"
22583 " movnti %%edx, 36(%3)\n"
22584- "12: movl 40(%4), %%eax\n"
22585- "71: movl 44(%4), %%edx\n"
22586+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22587+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22588 " movnti %%eax, 40(%3)\n"
22589 " movnti %%edx, 44(%3)\n"
22590- "13: movl 48(%4), %%eax\n"
22591- "81: movl 52(%4), %%edx\n"
22592+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22593+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22594 " movnti %%eax, 48(%3)\n"
22595 " movnti %%edx, 52(%3)\n"
22596- "14: movl 56(%4), %%eax\n"
22597- "91: movl 60(%4), %%edx\n"
22598+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22599+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22600 " movnti %%eax, 56(%3)\n"
22601 " movnti %%edx, 60(%3)\n"
22602 " addl $-64, %0\n"
22603@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
22604 " shrl $2, %0\n"
22605 " andl $3, %%eax\n"
22606 " cld\n"
22607- "6: rep; movsl\n"
22608+ "6: rep; "__copyuser_seg" movsl\n"
22609 " movl %%eax,%0\n"
22610- "7: rep; movsb\n"
22611+ "7: rep; "__copyuser_seg" movsb\n"
22612 "8:\n"
22613 ".section .fixup,\"ax\"\n"
22614 "9: lea 0(%%eax,%0,4),%0\n"
22615@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
22616 */
22617 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
22618 unsigned long size);
22619-unsigned long __copy_user_intel(void __user *to, const void *from,
22620+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
22621+ unsigned long size);
22622+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
22623 unsigned long size);
22624 unsigned long __copy_user_zeroing_intel_nocache(void *to,
22625 const void __user *from, unsigned long size);
22626 #endif /* CONFIG_X86_INTEL_USERCOPY */
22627
22628 /* Generic arbitrary sized copy. */
22629-#define __copy_user(to, from, size) \
22630+#define __copy_user(to, from, size, prefix, set, restore) \
22631 do { \
22632 int __d0, __d1, __d2; \
22633 __asm__ __volatile__( \
22634+ set \
22635 " cmp $7,%0\n" \
22636 " jbe 1f\n" \
22637 " movl %1,%0\n" \
22638 " negl %0\n" \
22639 " andl $7,%0\n" \
22640 " subl %0,%3\n" \
22641- "4: rep; movsb\n" \
22642+ "4: rep; "prefix"movsb\n" \
22643 " movl %3,%0\n" \
22644 " shrl $2,%0\n" \
22645 " andl $3,%3\n" \
22646 " .align 2,0x90\n" \
22647- "0: rep; movsl\n" \
22648+ "0: rep; "prefix"movsl\n" \
22649 " movl %3,%0\n" \
22650- "1: rep; movsb\n" \
22651+ "1: rep; "prefix"movsb\n" \
22652 "2:\n" \
22653+ restore \
22654 ".section .fixup,\"ax\"\n" \
22655 "5: addl %3,%0\n" \
22656 " jmp 2b\n" \
22657@@ -682,14 +799,14 @@ do { \
22658 " negl %0\n" \
22659 " andl $7,%0\n" \
22660 " subl %0,%3\n" \
22661- "4: rep; movsb\n" \
22662+ "4: rep; "__copyuser_seg"movsb\n" \
22663 " movl %3,%0\n" \
22664 " shrl $2,%0\n" \
22665 " andl $3,%3\n" \
22666 " .align 2,0x90\n" \
22667- "0: rep; movsl\n" \
22668+ "0: rep; "__copyuser_seg"movsl\n" \
22669 " movl %3,%0\n" \
22670- "1: rep; movsb\n" \
22671+ "1: rep; "__copyuser_seg"movsb\n" \
22672 "2:\n" \
22673 ".section .fixup,\"ax\"\n" \
22674 "5: addl %3,%0\n" \
22675@@ -775,9 +892,9 @@ survive:
22676 }
22677 #endif
22678 if (movsl_is_ok(to, from, n))
22679- __copy_user(to, from, n);
22680+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
22681 else
22682- n = __copy_user_intel(to, from, n);
22683+ n = __generic_copy_to_user_intel(to, from, n);
22684 return n;
22685 }
22686 EXPORT_SYMBOL(__copy_to_user_ll);
22687@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
22688 unsigned long n)
22689 {
22690 if (movsl_is_ok(to, from, n))
22691- __copy_user(to, from, n);
22692+ __copy_user(to, from, n, __copyuser_seg, "", "");
22693 else
22694- n = __copy_user_intel((void __user *)to,
22695- (const void *)from, n);
22696+ n = __generic_copy_from_user_intel(to, from, n);
22697 return n;
22698 }
22699 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
22700@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
22701 if (n > 64 && cpu_has_xmm2)
22702 n = __copy_user_intel_nocache(to, from, n);
22703 else
22704- __copy_user(to, from, n);
22705+ __copy_user(to, from, n, __copyuser_seg, "", "");
22706 #else
22707- __copy_user(to, from, n);
22708+ __copy_user(to, from, n, __copyuser_seg, "", "");
22709 #endif
22710 return n;
22711 }
22712 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
22713
22714-/**
22715- * copy_to_user: - Copy a block of data into user space.
22716- * @to: Destination address, in user space.
22717- * @from: Source address, in kernel space.
22718- * @n: Number of bytes to copy.
22719- *
22720- * Context: User context only. This function may sleep.
22721- *
22722- * Copy data from kernel space to user space.
22723- *
22724- * Returns number of bytes that could not be copied.
22725- * On success, this will be zero.
22726- */
22727-unsigned long
22728-copy_to_user(void __user *to, const void *from, unsigned long n)
22729-{
22730- if (access_ok(VERIFY_WRITE, to, n))
22731- n = __copy_to_user(to, from, n);
22732- return n;
22733-}
22734-EXPORT_SYMBOL(copy_to_user);
22735-
22736-/**
22737- * copy_from_user: - Copy a block of data from user space.
22738- * @to: Destination address, in kernel space.
22739- * @from: Source address, in user space.
22740- * @n: Number of bytes to copy.
22741- *
22742- * Context: User context only. This function may sleep.
22743- *
22744- * Copy data from user space to kernel space.
22745- *
22746- * Returns number of bytes that could not be copied.
22747- * On success, this will be zero.
22748- *
22749- * If some data could not be copied, this function will pad the copied
22750- * data to the requested size using zero bytes.
22751- */
22752-unsigned long
22753-_copy_from_user(void *to, const void __user *from, unsigned long n)
22754-{
22755- if (access_ok(VERIFY_READ, from, n))
22756- n = __copy_from_user(to, from, n);
22757- else
22758- memset(to, 0, n);
22759- return n;
22760-}
22761-EXPORT_SYMBOL(_copy_from_user);
22762-
22763 void copy_from_user_overflow(void)
22764 {
22765 WARN(1, "Buffer overflow detected!\n");
22766 }
22767 EXPORT_SYMBOL(copy_from_user_overflow);
22768+
22769+void copy_to_user_overflow(void)
22770+{
22771+ WARN(1, "Buffer overflow detected!\n");
22772+}
22773+EXPORT_SYMBOL(copy_to_user_overflow);
22774+
22775+#ifdef CONFIG_PAX_MEMORY_UDEREF
22776+void __set_fs(mm_segment_t x)
22777+{
22778+ switch (x.seg) {
22779+ case 0:
22780+ loadsegment(gs, 0);
22781+ break;
22782+ case TASK_SIZE_MAX:
22783+ loadsegment(gs, __USER_DS);
22784+ break;
22785+ case -1UL:
22786+ loadsegment(gs, __KERNEL_DS);
22787+ break;
22788+ default:
22789+ BUG();
22790+ }
22791+ return;
22792+}
22793+EXPORT_SYMBOL(__set_fs);
22794+
22795+void set_fs(mm_segment_t x)
22796+{
22797+ current_thread_info()->addr_limit = x;
22798+ __set_fs(x);
22799+}
22800+EXPORT_SYMBOL(set_fs);
22801+#endif
22802diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
22803index b7c2849..8633ad8 100644
22804--- a/arch/x86/lib/usercopy_64.c
22805+++ b/arch/x86/lib/usercopy_64.c
22806@@ -42,6 +42,12 @@ long
22807 __strncpy_from_user(char *dst, const char __user *src, long count)
22808 {
22809 long res;
22810+
22811+#ifdef CONFIG_PAX_MEMORY_UDEREF
22812+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22813+ src += PAX_USER_SHADOW_BASE;
22814+#endif
22815+
22816 __do_strncpy_from_user(dst, src, count, res);
22817 return res;
22818 }
22819@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
22820 {
22821 long __d0;
22822 might_fault();
22823+
22824+#ifdef CONFIG_PAX_MEMORY_UDEREF
22825+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
22826+ addr += PAX_USER_SHADOW_BASE;
22827+#endif
22828+
22829 /* no memory constraint because it doesn't change any memory gcc knows
22830 about */
22831 asm volatile(
22832@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
22833 }
22834 EXPORT_SYMBOL(strlen_user);
22835
22836-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
22837+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
22838 {
22839- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22840- return copy_user_generic((__force void *)to, (__force void *)from, len);
22841- }
22842- return len;
22843+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22844+
22845+#ifdef CONFIG_PAX_MEMORY_UDEREF
22846+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
22847+ to += PAX_USER_SHADOW_BASE;
22848+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
22849+ from += PAX_USER_SHADOW_BASE;
22850+#endif
22851+
22852+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
22853+ }
22854+ return len;
22855 }
22856 EXPORT_SYMBOL(copy_in_user);
22857
22858@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
22859 * it is not necessary to optimize tail handling.
22860 */
22861 unsigned long
22862-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
22863+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
22864 {
22865 char c;
22866 unsigned zero_len;
22867diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
22868index 1fb85db..8b3540b 100644
22869--- a/arch/x86/mm/extable.c
22870+++ b/arch/x86/mm/extable.c
22871@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
22872 const struct exception_table_entry *fixup;
22873
22874 #ifdef CONFIG_PNPBIOS
22875- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
22876+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
22877 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
22878 extern u32 pnp_bios_is_utter_crap;
22879 pnp_bios_is_utter_crap = 1;
22880diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
22881index f0b4caf..d92fd42 100644
22882--- a/arch/x86/mm/fault.c
22883+++ b/arch/x86/mm/fault.c
22884@@ -13,11 +13,18 @@
22885 #include <linux/perf_event.h> /* perf_sw_event */
22886 #include <linux/hugetlb.h> /* hstate_index_to_shift */
22887 #include <linux/prefetch.h> /* prefetchw */
22888+#include <linux/unistd.h>
22889+#include <linux/compiler.h>
22890
22891 #include <asm/traps.h> /* dotraplinkage, ... */
22892 #include <asm/pgalloc.h> /* pgd_*(), ... */
22893 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
22894 #include <asm/fixmap.h> /* VSYSCALL_START */
22895+#include <asm/tlbflush.h>
22896+
22897+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22898+#include <asm/stacktrace.h>
22899+#endif
22900
22901 /*
22902 * Page fault error code bits:
22903@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
22904 int ret = 0;
22905
22906 /* kprobe_running() needs smp_processor_id() */
22907- if (kprobes_built_in() && !user_mode_vm(regs)) {
22908+ if (kprobes_built_in() && !user_mode(regs)) {
22909 preempt_disable();
22910 if (kprobe_running() && kprobe_fault_handler(regs, 14))
22911 ret = 1;
22912@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
22913 return !instr_lo || (instr_lo>>1) == 1;
22914 case 0x00:
22915 /* Prefetch instruction is 0x0F0D or 0x0F18 */
22916- if (probe_kernel_address(instr, opcode))
22917+ if (user_mode(regs)) {
22918+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22919+ return 0;
22920+ } else if (probe_kernel_address(instr, opcode))
22921 return 0;
22922
22923 *prefetch = (instr_lo == 0xF) &&
22924@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
22925 while (instr < max_instr) {
22926 unsigned char opcode;
22927
22928- if (probe_kernel_address(instr, opcode))
22929+ if (user_mode(regs)) {
22930+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22931+ break;
22932+ } else if (probe_kernel_address(instr, opcode))
22933 break;
22934
22935 instr++;
22936@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
22937 force_sig_info(si_signo, &info, tsk);
22938 }
22939
22940+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22941+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
22942+#endif
22943+
22944+#ifdef CONFIG_PAX_EMUTRAMP
22945+static int pax_handle_fetch_fault(struct pt_regs *regs);
22946+#endif
22947+
22948+#ifdef CONFIG_PAX_PAGEEXEC
22949+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
22950+{
22951+ pgd_t *pgd;
22952+ pud_t *pud;
22953+ pmd_t *pmd;
22954+
22955+ pgd = pgd_offset(mm, address);
22956+ if (!pgd_present(*pgd))
22957+ return NULL;
22958+ pud = pud_offset(pgd, address);
22959+ if (!pud_present(*pud))
22960+ return NULL;
22961+ pmd = pmd_offset(pud, address);
22962+ if (!pmd_present(*pmd))
22963+ return NULL;
22964+ return pmd;
22965+}
22966+#endif
22967+
22968 DEFINE_SPINLOCK(pgd_lock);
22969 LIST_HEAD(pgd_list);
22970
22971@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
22972 for (address = VMALLOC_START & PMD_MASK;
22973 address >= TASK_SIZE && address < FIXADDR_TOP;
22974 address += PMD_SIZE) {
22975+
22976+#ifdef CONFIG_PAX_PER_CPU_PGD
22977+ unsigned long cpu;
22978+#else
22979 struct page *page;
22980+#endif
22981
22982 spin_lock(&pgd_lock);
22983+
22984+#ifdef CONFIG_PAX_PER_CPU_PGD
22985+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
22986+ pgd_t *pgd = get_cpu_pgd(cpu);
22987+ pmd_t *ret;
22988+#else
22989 list_for_each_entry(page, &pgd_list, lru) {
22990+ pgd_t *pgd = page_address(page);
22991 spinlock_t *pgt_lock;
22992 pmd_t *ret;
22993
22994@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
22995 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22996
22997 spin_lock(pgt_lock);
22998- ret = vmalloc_sync_one(page_address(page), address);
22999+#endif
23000+
23001+ ret = vmalloc_sync_one(pgd, address);
23002+
23003+#ifndef CONFIG_PAX_PER_CPU_PGD
23004 spin_unlock(pgt_lock);
23005+#endif
23006
23007 if (!ret)
23008 break;
23009@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23010 * an interrupt in the middle of a task switch..
23011 */
23012 pgd_paddr = read_cr3();
23013+
23014+#ifdef CONFIG_PAX_PER_CPU_PGD
23015+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23016+#endif
23017+
23018 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23019 if (!pmd_k)
23020 return -1;
23021@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23022 * happen within a race in page table update. In the later
23023 * case just flush:
23024 */
23025+
23026+#ifdef CONFIG_PAX_PER_CPU_PGD
23027+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23028+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23029+#else
23030 pgd = pgd_offset(current->active_mm, address);
23031+#endif
23032+
23033 pgd_ref = pgd_offset_k(address);
23034 if (pgd_none(*pgd_ref))
23035 return -1;
23036@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23037 static int is_errata100(struct pt_regs *regs, unsigned long address)
23038 {
23039 #ifdef CONFIG_X86_64
23040- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23041+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23042 return 1;
23043 #endif
23044 return 0;
23045@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23046 }
23047
23048 static const char nx_warning[] = KERN_CRIT
23049-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23050+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23051
23052 static void
23053 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23054@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23055 if (!oops_may_print())
23056 return;
23057
23058- if (error_code & PF_INSTR) {
23059+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23060 unsigned int level;
23061
23062 pte_t *pte = lookup_address(address, &level);
23063
23064 if (pte && pte_present(*pte) && !pte_exec(*pte))
23065- printk(nx_warning, current_uid());
23066+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23067 }
23068
23069+#ifdef CONFIG_PAX_KERNEXEC
23070+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23071+ if (current->signal->curr_ip)
23072+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23073+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23074+ else
23075+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23076+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23077+ }
23078+#endif
23079+
23080 printk(KERN_ALERT "BUG: unable to handle kernel ");
23081 if (address < PAGE_SIZE)
23082 printk(KERN_CONT "NULL pointer dereference");
23083@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23084 }
23085 #endif
23086
23087+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23088+ if (pax_is_fetch_fault(regs, error_code, address)) {
23089+
23090+#ifdef CONFIG_PAX_EMUTRAMP
23091+ switch (pax_handle_fetch_fault(regs)) {
23092+ case 2:
23093+ return;
23094+ }
23095+#endif
23096+
23097+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23098+ do_group_exit(SIGKILL);
23099+ }
23100+#endif
23101+
23102 if (unlikely(show_unhandled_signals))
23103 show_signal_msg(regs, error_code, address, tsk);
23104
23105@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23106 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23107 printk(KERN_ERR
23108 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23109- tsk->comm, tsk->pid, address);
23110+ tsk->comm, task_pid_nr(tsk), address);
23111 code = BUS_MCEERR_AR;
23112 }
23113 #endif
23114@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23115 return 1;
23116 }
23117
23118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23119+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23120+{
23121+ pte_t *pte;
23122+ pmd_t *pmd;
23123+ spinlock_t *ptl;
23124+ unsigned char pte_mask;
23125+
23126+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23127+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23128+ return 0;
23129+
23130+ /* PaX: it's our fault, let's handle it if we can */
23131+
23132+ /* PaX: take a look at read faults before acquiring any locks */
23133+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23134+ /* instruction fetch attempt from a protected page in user mode */
23135+ up_read(&mm->mmap_sem);
23136+
23137+#ifdef CONFIG_PAX_EMUTRAMP
23138+ switch (pax_handle_fetch_fault(regs)) {
23139+ case 2:
23140+ return 1;
23141+ }
23142+#endif
23143+
23144+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23145+ do_group_exit(SIGKILL);
23146+ }
23147+
23148+ pmd = pax_get_pmd(mm, address);
23149+ if (unlikely(!pmd))
23150+ return 0;
23151+
23152+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23153+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23154+ pte_unmap_unlock(pte, ptl);
23155+ return 0;
23156+ }
23157+
23158+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23159+ /* write attempt to a protected page in user mode */
23160+ pte_unmap_unlock(pte, ptl);
23161+ return 0;
23162+ }
23163+
23164+#ifdef CONFIG_SMP
23165+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23166+#else
23167+ if (likely(address > get_limit(regs->cs)))
23168+#endif
23169+ {
23170+ set_pte(pte, pte_mkread(*pte));
23171+ __flush_tlb_one(address);
23172+ pte_unmap_unlock(pte, ptl);
23173+ up_read(&mm->mmap_sem);
23174+ return 1;
23175+ }
23176+
23177+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23178+
23179+ /*
23180+ * PaX: fill DTLB with user rights and retry
23181+ */
23182+ __asm__ __volatile__ (
23183+ "orb %2,(%1)\n"
23184+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23185+/*
23186+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23187+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23188+ * page fault when examined during a TLB load attempt. this is true not only
23189+ * for PTEs holding a non-present entry but also present entries that will
23190+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23191+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23192+ * for our target pages since their PTEs are simply not in the TLBs at all.
23193+
23194+ * the best thing in omitting it is that we gain around 15-20% speed in the
23195+ * fast path of the page fault handler and can get rid of tracing since we
23196+ * can no longer flush unintended entries.
23197+ */
23198+ "invlpg (%0)\n"
23199+#endif
23200+ __copyuser_seg"testb $0,(%0)\n"
23201+ "xorb %3,(%1)\n"
23202+ :
23203+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23204+ : "memory", "cc");
23205+ pte_unmap_unlock(pte, ptl);
23206+ up_read(&mm->mmap_sem);
23207+ return 1;
23208+}
23209+#endif
23210+
23211 /*
23212 * Handle a spurious fault caused by a stale TLB entry.
23213 *
23214@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23215 static inline int
23216 access_error(unsigned long error_code, struct vm_area_struct *vma)
23217 {
23218+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23219+ return 1;
23220+
23221 if (error_code & PF_WRITE) {
23222 /* write, present and write, not present: */
23223 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23224@@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23225 {
23226 struct vm_area_struct *vma;
23227 struct task_struct *tsk;
23228- unsigned long address;
23229 struct mm_struct *mm;
23230 int fault;
23231 int write = error_code & PF_WRITE;
23232 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23233 (write ? FAULT_FLAG_WRITE : 0);
23234
23235- tsk = current;
23236- mm = tsk->mm;
23237-
23238 /* Get the faulting address: */
23239- address = read_cr2();
23240+ unsigned long address = read_cr2();
23241+
23242+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23243+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23244+ if (!search_exception_tables(regs->ip)) {
23245+ bad_area_nosemaphore(regs, error_code, address);
23246+ return;
23247+ }
23248+ if (address < PAX_USER_SHADOW_BASE) {
23249+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23250+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23251+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23252+ } else
23253+ address -= PAX_USER_SHADOW_BASE;
23254+ }
23255+#endif
23256+
23257+ tsk = current;
23258+ mm = tsk->mm;
23259
23260 /*
23261 * Detect and handle instructions that would cause a page fault for
23262@@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23263 * User-mode registers count as a user access even for any
23264 * potential system fault or CPU buglet:
23265 */
23266- if (user_mode_vm(regs)) {
23267+ if (user_mode(regs)) {
23268 local_irq_enable();
23269 error_code |= PF_USER;
23270 } else {
23271@@ -1132,6 +1338,11 @@ retry:
23272 might_sleep();
23273 }
23274
23275+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23276+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23277+ return;
23278+#endif
23279+
23280 vma = find_vma(mm, address);
23281 if (unlikely(!vma)) {
23282 bad_area(regs, error_code, address);
23283@@ -1143,18 +1354,24 @@ retry:
23284 bad_area(regs, error_code, address);
23285 return;
23286 }
23287- if (error_code & PF_USER) {
23288- /*
23289- * Accessing the stack below %sp is always a bug.
23290- * The large cushion allows instructions like enter
23291- * and pusha to work. ("enter $65535, $31" pushes
23292- * 32 pointers and then decrements %sp by 65535.)
23293- */
23294- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23295- bad_area(regs, error_code, address);
23296- return;
23297- }
23298+ /*
23299+ * Accessing the stack below %sp is always a bug.
23300+ * The large cushion allows instructions like enter
23301+ * and pusha to work. ("enter $65535, $31" pushes
23302+ * 32 pointers and then decrements %sp by 65535.)
23303+ */
23304+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23305+ bad_area(regs, error_code, address);
23306+ return;
23307 }
23308+
23309+#ifdef CONFIG_PAX_SEGMEXEC
23310+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23311+ bad_area(regs, error_code, address);
23312+ return;
23313+ }
23314+#endif
23315+
23316 if (unlikely(expand_stack(vma, address))) {
23317 bad_area(regs, error_code, address);
23318 return;
23319@@ -1209,3 +1426,292 @@ good_area:
23320
23321 up_read(&mm->mmap_sem);
23322 }
23323+
23324+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23325+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23326+{
23327+ struct mm_struct *mm = current->mm;
23328+ unsigned long ip = regs->ip;
23329+
23330+ if (v8086_mode(regs))
23331+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23332+
23333+#ifdef CONFIG_PAX_PAGEEXEC
23334+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23335+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23336+ return true;
23337+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23338+ return true;
23339+ return false;
23340+ }
23341+#endif
23342+
23343+#ifdef CONFIG_PAX_SEGMEXEC
23344+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23345+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23346+ return true;
23347+ return false;
23348+ }
23349+#endif
23350+
23351+ return false;
23352+}
23353+#endif
23354+
23355+#ifdef CONFIG_PAX_EMUTRAMP
23356+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23357+{
23358+ int err;
23359+
23360+ do { /* PaX: libffi trampoline emulation */
23361+ unsigned char mov, jmp;
23362+ unsigned int addr1, addr2;
23363+
23364+#ifdef CONFIG_X86_64
23365+ if ((regs->ip + 9) >> 32)
23366+ break;
23367+#endif
23368+
23369+ err = get_user(mov, (unsigned char __user *)regs->ip);
23370+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23371+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23372+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23373+
23374+ if (err)
23375+ break;
23376+
23377+ if (mov == 0xB8 && jmp == 0xE9) {
23378+ regs->ax = addr1;
23379+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23380+ return 2;
23381+ }
23382+ } while (0);
23383+
23384+ do { /* PaX: gcc trampoline emulation #1 */
23385+ unsigned char mov1, mov2;
23386+ unsigned short jmp;
23387+ unsigned int addr1, addr2;
23388+
23389+#ifdef CONFIG_X86_64
23390+ if ((regs->ip + 11) >> 32)
23391+ break;
23392+#endif
23393+
23394+ err = get_user(mov1, (unsigned char __user *)regs->ip);
23395+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23396+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23397+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23398+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23399+
23400+ if (err)
23401+ break;
23402+
23403+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23404+ regs->cx = addr1;
23405+ regs->ax = addr2;
23406+ regs->ip = addr2;
23407+ return 2;
23408+ }
23409+ } while (0);
23410+
23411+ do { /* PaX: gcc trampoline emulation #2 */
23412+ unsigned char mov, jmp;
23413+ unsigned int addr1, addr2;
23414+
23415+#ifdef CONFIG_X86_64
23416+ if ((regs->ip + 9) >> 32)
23417+ break;
23418+#endif
23419+
23420+ err = get_user(mov, (unsigned char __user *)regs->ip);
23421+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23422+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23423+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23424+
23425+ if (err)
23426+ break;
23427+
23428+ if (mov == 0xB9 && jmp == 0xE9) {
23429+ regs->cx = addr1;
23430+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23431+ return 2;
23432+ }
23433+ } while (0);
23434+
23435+ return 1; /* PaX in action */
23436+}
23437+
23438+#ifdef CONFIG_X86_64
23439+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23440+{
23441+ int err;
23442+
23443+ do { /* PaX: libffi trampoline emulation */
23444+ unsigned short mov1, mov2, jmp1;
23445+ unsigned char stcclc, jmp2;
23446+ unsigned long addr1, addr2;
23447+
23448+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23449+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23450+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23451+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23452+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23453+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23454+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23455+
23456+ if (err)
23457+ break;
23458+
23459+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23460+ regs->r11 = addr1;
23461+ regs->r10 = addr2;
23462+ if (stcclc == 0xF8)
23463+ regs->flags &= ~X86_EFLAGS_CF;
23464+ else
23465+ regs->flags |= X86_EFLAGS_CF;
23466+ regs->ip = addr1;
23467+ return 2;
23468+ }
23469+ } while (0);
23470+
23471+ do { /* PaX: gcc trampoline emulation #1 */
23472+ unsigned short mov1, mov2, jmp1;
23473+ unsigned char jmp2;
23474+ unsigned int addr1;
23475+ unsigned long addr2;
23476+
23477+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23478+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23479+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23480+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23481+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23482+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23483+
23484+ if (err)
23485+ break;
23486+
23487+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23488+ regs->r11 = addr1;
23489+ regs->r10 = addr2;
23490+ regs->ip = addr1;
23491+ return 2;
23492+ }
23493+ } while (0);
23494+
23495+ do { /* PaX: gcc trampoline emulation #2 */
23496+ unsigned short mov1, mov2, jmp1;
23497+ unsigned char jmp2;
23498+ unsigned long addr1, addr2;
23499+
23500+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23501+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23502+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23503+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23504+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23505+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23506+
23507+ if (err)
23508+ break;
23509+
23510+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23511+ regs->r11 = addr1;
23512+ regs->r10 = addr2;
23513+ regs->ip = addr1;
23514+ return 2;
23515+ }
23516+ } while (0);
23517+
23518+ return 1; /* PaX in action */
23519+}
23520+#endif
23521+
23522+/*
23523+ * PaX: decide what to do with offenders (regs->ip = fault address)
23524+ *
23525+ * returns 1 when task should be killed
23526+ * 2 when gcc trampoline was detected
23527+ */
23528+static int pax_handle_fetch_fault(struct pt_regs *regs)
23529+{
23530+ if (v8086_mode(regs))
23531+ return 1;
23532+
23533+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23534+ return 1;
23535+
23536+#ifdef CONFIG_X86_32
23537+ return pax_handle_fetch_fault_32(regs);
23538+#else
23539+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23540+ return pax_handle_fetch_fault_32(regs);
23541+ else
23542+ return pax_handle_fetch_fault_64(regs);
23543+#endif
23544+}
23545+#endif
23546+
23547+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23548+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23549+{
23550+ long i;
23551+
23552+ printk(KERN_ERR "PAX: bytes at PC: ");
23553+ for (i = 0; i < 20; i++) {
23554+ unsigned char c;
23555+ if (get_user(c, (unsigned char __force_user *)pc+i))
23556+ printk(KERN_CONT "?? ");
23557+ else
23558+ printk(KERN_CONT "%02x ", c);
23559+ }
23560+ printk("\n");
23561+
23562+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
23563+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
23564+ unsigned long c;
23565+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
23566+#ifdef CONFIG_X86_32
23567+ printk(KERN_CONT "???????? ");
23568+#else
23569+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
23570+ printk(KERN_CONT "???????? ???????? ");
23571+ else
23572+ printk(KERN_CONT "???????????????? ");
23573+#endif
23574+ } else {
23575+#ifdef CONFIG_X86_64
23576+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
23577+ printk(KERN_CONT "%08x ", (unsigned int)c);
23578+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
23579+ } else
23580+#endif
23581+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
23582+ }
23583+ }
23584+ printk("\n");
23585+}
23586+#endif
23587+
23588+/**
23589+ * probe_kernel_write(): safely attempt to write to a location
23590+ * @dst: address to write to
23591+ * @src: pointer to the data that shall be written
23592+ * @size: size of the data chunk
23593+ *
23594+ * Safely write to address @dst from the buffer at @src. If a kernel fault
23595+ * happens, handle that and return -EFAULT.
23596+ */
23597+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
23598+{
23599+ long ret;
23600+ mm_segment_t old_fs = get_fs();
23601+
23602+ set_fs(KERNEL_DS);
23603+ pagefault_disable();
23604+ pax_open_kernel();
23605+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
23606+ pax_close_kernel();
23607+ pagefault_enable();
23608+ set_fs(old_fs);
23609+
23610+ return ret ? -EFAULT : 0;
23611+}
23612diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
23613index dd74e46..7d26398 100644
23614--- a/arch/x86/mm/gup.c
23615+++ b/arch/x86/mm/gup.c
23616@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
23617 addr = start;
23618 len = (unsigned long) nr_pages << PAGE_SHIFT;
23619 end = start + len;
23620- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23621+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23622 (void __user *)start, len)))
23623 return 0;
23624
23625diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
23626index f4f29b1..5cac4fb 100644
23627--- a/arch/x86/mm/highmem_32.c
23628+++ b/arch/x86/mm/highmem_32.c
23629@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
23630 idx = type + KM_TYPE_NR*smp_processor_id();
23631 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23632 BUG_ON(!pte_none(*(kmap_pte-idx)));
23633+
23634+ pax_open_kernel();
23635 set_pte(kmap_pte-idx, mk_pte(page, prot));
23636+ pax_close_kernel();
23637+
23638 arch_flush_lazy_mmu_mode();
23639
23640 return (void *)vaddr;
23641diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
23642index 8ecbb4b..29efd37 100644
23643--- a/arch/x86/mm/hugetlbpage.c
23644+++ b/arch/x86/mm/hugetlbpage.c
23645@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
23646 struct hstate *h = hstate_file(file);
23647 struct mm_struct *mm = current->mm;
23648 struct vm_area_struct *vma;
23649- unsigned long start_addr;
23650+ unsigned long start_addr, pax_task_size = TASK_SIZE;
23651+
23652+#ifdef CONFIG_PAX_SEGMEXEC
23653+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23654+ pax_task_size = SEGMEXEC_TASK_SIZE;
23655+#endif
23656+
23657+ pax_task_size -= PAGE_SIZE;
23658
23659 if (len > mm->cached_hole_size) {
23660- start_addr = mm->free_area_cache;
23661+ start_addr = mm->free_area_cache;
23662 } else {
23663- start_addr = TASK_UNMAPPED_BASE;
23664- mm->cached_hole_size = 0;
23665+ start_addr = mm->mmap_base;
23666+ mm->cached_hole_size = 0;
23667 }
23668
23669 full_search:
23670@@ -280,26 +287,27 @@ full_search:
23671
23672 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23673 /* At this point: (!vma || addr < vma->vm_end). */
23674- if (TASK_SIZE - len < addr) {
23675+ if (pax_task_size - len < addr) {
23676 /*
23677 * Start a new search - just in case we missed
23678 * some holes.
23679 */
23680- if (start_addr != TASK_UNMAPPED_BASE) {
23681- start_addr = TASK_UNMAPPED_BASE;
23682+ if (start_addr != mm->mmap_base) {
23683+ start_addr = mm->mmap_base;
23684 mm->cached_hole_size = 0;
23685 goto full_search;
23686 }
23687 return -ENOMEM;
23688 }
23689- if (!vma || addr + len <= vma->vm_start) {
23690- mm->free_area_cache = addr + len;
23691- return addr;
23692- }
23693+ if (check_heap_stack_gap(vma, addr, len))
23694+ break;
23695 if (addr + mm->cached_hole_size < vma->vm_start)
23696 mm->cached_hole_size = vma->vm_start - addr;
23697 addr = ALIGN(vma->vm_end, huge_page_size(h));
23698 }
23699+
23700+ mm->free_area_cache = addr + len;
23701+ return addr;
23702 }
23703
23704 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23705@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23706 {
23707 struct hstate *h = hstate_file(file);
23708 struct mm_struct *mm = current->mm;
23709- struct vm_area_struct *vma, *prev_vma;
23710- unsigned long base = mm->mmap_base, addr = addr0;
23711+ struct vm_area_struct *vma;
23712+ unsigned long base = mm->mmap_base, addr;
23713 unsigned long largest_hole = mm->cached_hole_size;
23714- int first_time = 1;
23715
23716 /* don't allow allocations above current base */
23717 if (mm->free_area_cache > base)
23718@@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23719 largest_hole = 0;
23720 mm->free_area_cache = base;
23721 }
23722-try_again:
23723+
23724 /* make sure it can fit in the remaining address space */
23725 if (mm->free_area_cache < len)
23726 goto fail;
23727
23728 /* either no address requested or can't fit in requested address hole */
23729- addr = (mm->free_area_cache - len) & huge_page_mask(h);
23730+ addr = (mm->free_area_cache - len);
23731 do {
23732+ addr &= huge_page_mask(h);
23733+ vma = find_vma(mm, addr);
23734 /*
23735 * Lookup failure means no vma is above this address,
23736 * i.e. return with success:
23737- */
23738- vma = find_vma(mm, addr);
23739- if (!vma)
23740- return addr;
23741-
23742- /*
23743 * new region fits between prev_vma->vm_end and
23744 * vma->vm_start, use it:
23745 */
23746- prev_vma = vma->vm_prev;
23747- if (addr + len <= vma->vm_start &&
23748- (!prev_vma || (addr >= prev_vma->vm_end))) {
23749+ if (check_heap_stack_gap(vma, addr, len)) {
23750 /* remember the address as a hint for next time */
23751- mm->cached_hole_size = largest_hole;
23752- return (mm->free_area_cache = addr);
23753- } else {
23754- /* pull free_area_cache down to the first hole */
23755- if (mm->free_area_cache == vma->vm_end) {
23756- mm->free_area_cache = vma->vm_start;
23757- mm->cached_hole_size = largest_hole;
23758- }
23759+ mm->cached_hole_size = largest_hole;
23760+ return (mm->free_area_cache = addr);
23761+ }
23762+ /* pull free_area_cache down to the first hole */
23763+ if (mm->free_area_cache == vma->vm_end) {
23764+ mm->free_area_cache = vma->vm_start;
23765+ mm->cached_hole_size = largest_hole;
23766 }
23767
23768 /* remember the largest hole we saw so far */
23769 if (addr + largest_hole < vma->vm_start)
23770- largest_hole = vma->vm_start - addr;
23771+ largest_hole = vma->vm_start - addr;
23772
23773 /* try just below the current vma->vm_start */
23774- addr = (vma->vm_start - len) & huge_page_mask(h);
23775- } while (len <= vma->vm_start);
23776+ addr = skip_heap_stack_gap(vma, len);
23777+ } while (!IS_ERR_VALUE(addr));
23778
23779 fail:
23780 /*
23781- * if hint left us with no space for the requested
23782- * mapping then try again:
23783- */
23784- if (first_time) {
23785- mm->free_area_cache = base;
23786- largest_hole = 0;
23787- first_time = 0;
23788- goto try_again;
23789- }
23790- /*
23791 * A failed mmap() very likely causes application failure,
23792 * so fall back to the bottom-up function here. This scenario
23793 * can happen with large stack limits and large mmap()
23794 * allocations.
23795 */
23796- mm->free_area_cache = TASK_UNMAPPED_BASE;
23797+
23798+#ifdef CONFIG_PAX_SEGMEXEC
23799+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23800+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23801+ else
23802+#endif
23803+
23804+ mm->mmap_base = TASK_UNMAPPED_BASE;
23805+
23806+#ifdef CONFIG_PAX_RANDMMAP
23807+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23808+ mm->mmap_base += mm->delta_mmap;
23809+#endif
23810+
23811+ mm->free_area_cache = mm->mmap_base;
23812 mm->cached_hole_size = ~0UL;
23813 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
23814 len, pgoff, flags);
23815@@ -388,6 +392,7 @@ fail:
23816 /*
23817 * Restore the topdown base:
23818 */
23819+ mm->mmap_base = base;
23820 mm->free_area_cache = base;
23821 mm->cached_hole_size = ~0UL;
23822
23823@@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23824 struct hstate *h = hstate_file(file);
23825 struct mm_struct *mm = current->mm;
23826 struct vm_area_struct *vma;
23827+ unsigned long pax_task_size = TASK_SIZE;
23828
23829 if (len & ~huge_page_mask(h))
23830 return -EINVAL;
23831- if (len > TASK_SIZE)
23832+
23833+#ifdef CONFIG_PAX_SEGMEXEC
23834+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23835+ pax_task_size = SEGMEXEC_TASK_SIZE;
23836+#endif
23837+
23838+ pax_task_size -= PAGE_SIZE;
23839+
23840+ if (len > pax_task_size)
23841 return -ENOMEM;
23842
23843 if (flags & MAP_FIXED) {
23844@@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23845 if (addr) {
23846 addr = ALIGN(addr, huge_page_size(h));
23847 vma = find_vma(mm, addr);
23848- if (TASK_SIZE - len >= addr &&
23849- (!vma || addr + len <= vma->vm_start))
23850+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
23851 return addr;
23852 }
23853 if (mm->get_unmapped_area == arch_get_unmapped_area)
23854diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
23855index 6cabf65..77e9c1c 100644
23856--- a/arch/x86/mm/init.c
23857+++ b/arch/x86/mm/init.c
23858@@ -17,6 +17,7 @@
23859 #include <asm/tlb.h>
23860 #include <asm/proto.h>
23861 #include <asm/dma.h> /* for MAX_DMA_PFN */
23862+#include <asm/desc.h>
23863
23864 unsigned long __initdata pgt_buf_start;
23865 unsigned long __meminitdata pgt_buf_end;
23866@@ -33,7 +34,7 @@ int direct_gbpages
23867 static void __init find_early_table_space(unsigned long end, int use_pse,
23868 int use_gbpages)
23869 {
23870- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
23871+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
23872 phys_addr_t base;
23873
23874 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
23875@@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
23876 */
23877 int devmem_is_allowed(unsigned long pagenr)
23878 {
23879+#ifdef CONFIG_GRKERNSEC_KMEM
23880+ /* allow BDA */
23881+ if (!pagenr)
23882+ return 1;
23883+ /* allow EBDA */
23884+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23885+ return 1;
23886+#else
23887+ if (!pagenr)
23888+ return 1;
23889+#ifdef CONFIG_VM86
23890+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
23891+ return 1;
23892+#endif
23893+#endif
23894+
23895+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23896+ return 1;
23897+#ifdef CONFIG_GRKERNSEC_KMEM
23898+ /* throw out everything else below 1MB */
23899 if (pagenr <= 256)
23900- return 1;
23901+ return 0;
23902+#endif
23903 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23904 return 0;
23905 if (!page_is_ram(pagenr))
23906@@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
23907
23908 void free_initmem(void)
23909 {
23910+
23911+#ifdef CONFIG_PAX_KERNEXEC
23912+#ifdef CONFIG_X86_32
23913+ /* PaX: limit KERNEL_CS to actual size */
23914+ unsigned long addr, limit;
23915+ struct desc_struct d;
23916+ int cpu;
23917+
23918+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23919+ limit = (limit - 1UL) >> PAGE_SHIFT;
23920+
23921+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23922+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23923+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23924+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23925+ }
23926+
23927+ /* PaX: make KERNEL_CS read-only */
23928+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23929+ if (!paravirt_enabled())
23930+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23931+/*
23932+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23933+ pgd = pgd_offset_k(addr);
23934+ pud = pud_offset(pgd, addr);
23935+ pmd = pmd_offset(pud, addr);
23936+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23937+ }
23938+*/
23939+#ifdef CONFIG_X86_PAE
23940+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23941+/*
23942+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23943+ pgd = pgd_offset_k(addr);
23944+ pud = pud_offset(pgd, addr);
23945+ pmd = pmd_offset(pud, addr);
23946+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23947+ }
23948+*/
23949+#endif
23950+
23951+#ifdef CONFIG_MODULES
23952+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23953+#endif
23954+
23955+#else
23956+ pgd_t *pgd;
23957+ pud_t *pud;
23958+ pmd_t *pmd;
23959+ unsigned long addr, end;
23960+
23961+ /* PaX: make kernel code/rodata read-only, rest non-executable */
23962+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
23963+ pgd = pgd_offset_k(addr);
23964+ pud = pud_offset(pgd, addr);
23965+ pmd = pmd_offset(pud, addr);
23966+ if (!pmd_present(*pmd))
23967+ continue;
23968+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
23969+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23970+ else
23971+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23972+ }
23973+
23974+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
23975+ end = addr + KERNEL_IMAGE_SIZE;
23976+ for (; addr < end; addr += PMD_SIZE) {
23977+ pgd = pgd_offset_k(addr);
23978+ pud = pud_offset(pgd, addr);
23979+ pmd = pmd_offset(pud, addr);
23980+ if (!pmd_present(*pmd))
23981+ continue;
23982+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
23983+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23984+ }
23985+#endif
23986+
23987+ flush_tlb_all();
23988+#endif
23989+
23990 free_init_pages("unused kernel memory",
23991 (unsigned long)(&__init_begin),
23992 (unsigned long)(&__init_end));
23993diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
23994index 8663f6c..829ae76 100644
23995--- a/arch/x86/mm/init_32.c
23996+++ b/arch/x86/mm/init_32.c
23997@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
23998 }
23999
24000 /*
24001- * Creates a middle page table and puts a pointer to it in the
24002- * given global directory entry. This only returns the gd entry
24003- * in non-PAE compilation mode, since the middle layer is folded.
24004- */
24005-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24006-{
24007- pud_t *pud;
24008- pmd_t *pmd_table;
24009-
24010-#ifdef CONFIG_X86_PAE
24011- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24012- if (after_bootmem)
24013- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24014- else
24015- pmd_table = (pmd_t *)alloc_low_page();
24016- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24017- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24018- pud = pud_offset(pgd, 0);
24019- BUG_ON(pmd_table != pmd_offset(pud, 0));
24020-
24021- return pmd_table;
24022- }
24023-#endif
24024- pud = pud_offset(pgd, 0);
24025- pmd_table = pmd_offset(pud, 0);
24026-
24027- return pmd_table;
24028-}
24029-
24030-/*
24031 * Create a page table and place a pointer to it in a middle page
24032 * directory entry:
24033 */
24034@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24035 page_table = (pte_t *)alloc_low_page();
24036
24037 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24038+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24039+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24040+#else
24041 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24042+#endif
24043 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24044 }
24045
24046 return pte_offset_kernel(pmd, 0);
24047 }
24048
24049+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24050+{
24051+ pud_t *pud;
24052+ pmd_t *pmd_table;
24053+
24054+ pud = pud_offset(pgd, 0);
24055+ pmd_table = pmd_offset(pud, 0);
24056+
24057+ return pmd_table;
24058+}
24059+
24060 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24061 {
24062 int pgd_idx = pgd_index(vaddr);
24063@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24064 int pgd_idx, pmd_idx;
24065 unsigned long vaddr;
24066 pgd_t *pgd;
24067+ pud_t *pud;
24068 pmd_t *pmd;
24069 pte_t *pte = NULL;
24070
24071@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24072 pgd = pgd_base + pgd_idx;
24073
24074 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24075- pmd = one_md_table_init(pgd);
24076- pmd = pmd + pmd_index(vaddr);
24077+ pud = pud_offset(pgd, vaddr);
24078+ pmd = pmd_offset(pud, vaddr);
24079+
24080+#ifdef CONFIG_X86_PAE
24081+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24082+#endif
24083+
24084 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24085 pmd++, pmd_idx++) {
24086 pte = page_table_kmap_check(one_page_table_init(pmd),
24087@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24088 }
24089 }
24090
24091-static inline int is_kernel_text(unsigned long addr)
24092+static inline int is_kernel_text(unsigned long start, unsigned long end)
24093 {
24094- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24095- return 1;
24096- return 0;
24097+ if ((start > ktla_ktva((unsigned long)_etext) ||
24098+ end <= ktla_ktva((unsigned long)_stext)) &&
24099+ (start > ktla_ktva((unsigned long)_einittext) ||
24100+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24101+
24102+#ifdef CONFIG_ACPI_SLEEP
24103+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24104+#endif
24105+
24106+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24107+ return 0;
24108+ return 1;
24109 }
24110
24111 /*
24112@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24113 unsigned long last_map_addr = end;
24114 unsigned long start_pfn, end_pfn;
24115 pgd_t *pgd_base = swapper_pg_dir;
24116- int pgd_idx, pmd_idx, pte_ofs;
24117+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24118 unsigned long pfn;
24119 pgd_t *pgd;
24120+ pud_t *pud;
24121 pmd_t *pmd;
24122 pte_t *pte;
24123 unsigned pages_2m, pages_4k;
24124@@ -281,8 +282,13 @@ repeat:
24125 pfn = start_pfn;
24126 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24127 pgd = pgd_base + pgd_idx;
24128- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24129- pmd = one_md_table_init(pgd);
24130+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24131+ pud = pud_offset(pgd, 0);
24132+ pmd = pmd_offset(pud, 0);
24133+
24134+#ifdef CONFIG_X86_PAE
24135+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24136+#endif
24137
24138 if (pfn >= end_pfn)
24139 continue;
24140@@ -294,14 +300,13 @@ repeat:
24141 #endif
24142 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24143 pmd++, pmd_idx++) {
24144- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24145+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24146
24147 /*
24148 * Map with big pages if possible, otherwise
24149 * create normal page tables:
24150 */
24151 if (use_pse) {
24152- unsigned int addr2;
24153 pgprot_t prot = PAGE_KERNEL_LARGE;
24154 /*
24155 * first pass will use the same initial
24156@@ -311,11 +316,7 @@ repeat:
24157 __pgprot(PTE_IDENT_ATTR |
24158 _PAGE_PSE);
24159
24160- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24161- PAGE_OFFSET + PAGE_SIZE-1;
24162-
24163- if (is_kernel_text(addr) ||
24164- is_kernel_text(addr2))
24165+ if (is_kernel_text(address, address + PMD_SIZE))
24166 prot = PAGE_KERNEL_LARGE_EXEC;
24167
24168 pages_2m++;
24169@@ -332,7 +333,7 @@ repeat:
24170 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24171 pte += pte_ofs;
24172 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24173- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24174+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24175 pgprot_t prot = PAGE_KERNEL;
24176 /*
24177 * first pass will use the same initial
24178@@ -340,7 +341,7 @@ repeat:
24179 */
24180 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24181
24182- if (is_kernel_text(addr))
24183+ if (is_kernel_text(address, address + PAGE_SIZE))
24184 prot = PAGE_KERNEL_EXEC;
24185
24186 pages_4k++;
24187@@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24188
24189 pud = pud_offset(pgd, va);
24190 pmd = pmd_offset(pud, va);
24191- if (!pmd_present(*pmd))
24192+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24193 break;
24194
24195 pte = pte_offset_kernel(pmd, va);
24196@@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24197
24198 static void __init pagetable_init(void)
24199 {
24200- pgd_t *pgd_base = swapper_pg_dir;
24201-
24202- permanent_kmaps_init(pgd_base);
24203+ permanent_kmaps_init(swapper_pg_dir);
24204 }
24205
24206-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24207+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24208 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24209
24210 /* user-defined highmem size */
24211@@ -735,6 +734,12 @@ void __init mem_init(void)
24212
24213 pci_iommu_alloc();
24214
24215+#ifdef CONFIG_PAX_PER_CPU_PGD
24216+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24217+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24218+ KERNEL_PGD_PTRS);
24219+#endif
24220+
24221 #ifdef CONFIG_FLATMEM
24222 BUG_ON(!mem_map);
24223 #endif
24224@@ -761,7 +766,7 @@ void __init mem_init(void)
24225 reservedpages++;
24226
24227 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24228- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24229+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24230 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24231
24232 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24233@@ -802,10 +807,10 @@ void __init mem_init(void)
24234 ((unsigned long)&__init_end -
24235 (unsigned long)&__init_begin) >> 10,
24236
24237- (unsigned long)&_etext, (unsigned long)&_edata,
24238- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24239+ (unsigned long)&_sdata, (unsigned long)&_edata,
24240+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24241
24242- (unsigned long)&_text, (unsigned long)&_etext,
24243+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24244 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24245
24246 /*
24247@@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24248 if (!kernel_set_to_readonly)
24249 return;
24250
24251+ start = ktla_ktva(start);
24252 pr_debug("Set kernel text: %lx - %lx for read write\n",
24253 start, start+size);
24254
24255@@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24256 if (!kernel_set_to_readonly)
24257 return;
24258
24259+ start = ktla_ktva(start);
24260 pr_debug("Set kernel text: %lx - %lx for read only\n",
24261 start, start+size);
24262
24263@@ -925,6 +932,7 @@ void mark_rodata_ro(void)
24264 unsigned long start = PFN_ALIGN(_text);
24265 unsigned long size = PFN_ALIGN(_etext) - start;
24266
24267+ start = ktla_ktva(start);
24268 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24269 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24270 size >> 10);
24271diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24272index 436a030..b8596b9 100644
24273--- a/arch/x86/mm/init_64.c
24274+++ b/arch/x86/mm/init_64.c
24275@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24276 * around without checking the pgd every time.
24277 */
24278
24279-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24280+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24281 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24282
24283 int force_personality32;
24284@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24285
24286 for (address = start; address <= end; address += PGDIR_SIZE) {
24287 const pgd_t *pgd_ref = pgd_offset_k(address);
24288+
24289+#ifdef CONFIG_PAX_PER_CPU_PGD
24290+ unsigned long cpu;
24291+#else
24292 struct page *page;
24293+#endif
24294
24295 if (pgd_none(*pgd_ref))
24296 continue;
24297
24298 spin_lock(&pgd_lock);
24299+
24300+#ifdef CONFIG_PAX_PER_CPU_PGD
24301+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24302+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
24303+#else
24304 list_for_each_entry(page, &pgd_list, lru) {
24305 pgd_t *pgd;
24306 spinlock_t *pgt_lock;
24307@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24308 /* the pgt_lock only for Xen */
24309 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24310 spin_lock(pgt_lock);
24311+#endif
24312
24313 if (pgd_none(*pgd))
24314 set_pgd(pgd, *pgd_ref);
24315@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24316 BUG_ON(pgd_page_vaddr(*pgd)
24317 != pgd_page_vaddr(*pgd_ref));
24318
24319+#ifndef CONFIG_PAX_PER_CPU_PGD
24320 spin_unlock(pgt_lock);
24321+#endif
24322+
24323 }
24324 spin_unlock(&pgd_lock);
24325 }
24326@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24327 pmd = fill_pmd(pud, vaddr);
24328 pte = fill_pte(pmd, vaddr);
24329
24330+ pax_open_kernel();
24331 set_pte(pte, new_pte);
24332+ pax_close_kernel();
24333
24334 /*
24335 * It's enough to flush this one mapping.
24336@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24337 pgd = pgd_offset_k((unsigned long)__va(phys));
24338 if (pgd_none(*pgd)) {
24339 pud = (pud_t *) spp_getpage();
24340- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24341- _PAGE_USER));
24342+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24343 }
24344 pud = pud_offset(pgd, (unsigned long)__va(phys));
24345 if (pud_none(*pud)) {
24346 pmd = (pmd_t *) spp_getpage();
24347- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24348- _PAGE_USER));
24349+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24350 }
24351 pmd = pmd_offset(pud, phys);
24352 BUG_ON(!pmd_none(*pmd));
24353@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24354 if (pfn >= pgt_buf_top)
24355 panic("alloc_low_page: ran out of memory");
24356
24357- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24358+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24359 clear_page(adr);
24360 *phys = pfn * PAGE_SIZE;
24361 return adr;
24362@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24363
24364 phys = __pa(virt);
24365 left = phys & (PAGE_SIZE - 1);
24366- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24367+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24368 adr = (void *)(((unsigned long)adr) | left);
24369
24370 return adr;
24371@@ -684,6 +698,12 @@ void __init mem_init(void)
24372
24373 pci_iommu_alloc();
24374
24375+#ifdef CONFIG_PAX_PER_CPU_PGD
24376+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24377+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24378+ KERNEL_PGD_PTRS);
24379+#endif
24380+
24381 /* clear_bss() already clear the empty_zero_page */
24382
24383 reservedpages = 0;
24384@@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
24385 static struct vm_area_struct gate_vma = {
24386 .vm_start = VSYSCALL_START,
24387 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24388- .vm_page_prot = PAGE_READONLY_EXEC,
24389- .vm_flags = VM_READ | VM_EXEC
24390+ .vm_page_prot = PAGE_READONLY,
24391+ .vm_flags = VM_READ
24392 };
24393
24394 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24395@@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
24396
24397 const char *arch_vma_name(struct vm_area_struct *vma)
24398 {
24399- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24400+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24401 return "[vdso]";
24402 if (vma == &gate_vma)
24403 return "[vsyscall]";
24404diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24405index 7b179b4..6bd1777 100644
24406--- a/arch/x86/mm/iomap_32.c
24407+++ b/arch/x86/mm/iomap_32.c
24408@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24409 type = kmap_atomic_idx_push();
24410 idx = type + KM_TYPE_NR * smp_processor_id();
24411 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24412+
24413+ pax_open_kernel();
24414 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24415+ pax_close_kernel();
24416+
24417 arch_flush_lazy_mmu_mode();
24418
24419 return (void *)vaddr;
24420diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24421index be1ef57..55f0160 100644
24422--- a/arch/x86/mm/ioremap.c
24423+++ b/arch/x86/mm/ioremap.c
24424@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24425 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24426 int is_ram = page_is_ram(pfn);
24427
24428- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24429+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24430 return NULL;
24431 WARN_ON_ONCE(is_ram);
24432 }
24433@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24434
24435 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24436 if (page_is_ram(start >> PAGE_SHIFT))
24437+#ifdef CONFIG_HIGHMEM
24438+ if ((start >> PAGE_SHIFT) < max_low_pfn)
24439+#endif
24440 return __va(phys);
24441
24442 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24443@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24444 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24445
24446 static __initdata int after_paging_init;
24447-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24448+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24449
24450 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24451 {
24452@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24453 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24454
24455 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24456- memset(bm_pte, 0, sizeof(bm_pte));
24457- pmd_populate_kernel(&init_mm, pmd, bm_pte);
24458+ pmd_populate_user(&init_mm, pmd, bm_pte);
24459
24460 /*
24461 * The boot-ioremap range spans multiple pmds, for which
24462diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24463index d87dd6d..bf3fa66 100644
24464--- a/arch/x86/mm/kmemcheck/kmemcheck.c
24465+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24466@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24467 * memory (e.g. tracked pages)? For now, we need this to avoid
24468 * invoking kmemcheck for PnP BIOS calls.
24469 */
24470- if (regs->flags & X86_VM_MASK)
24471+ if (v8086_mode(regs))
24472 return false;
24473- if (regs->cs != __KERNEL_CS)
24474+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24475 return false;
24476
24477 pte = kmemcheck_pte_lookup(address);
24478diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24479index 845df68..1d8d29f 100644
24480--- a/arch/x86/mm/mmap.c
24481+++ b/arch/x86/mm/mmap.c
24482@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24483 * Leave an at least ~128 MB hole with possible stack randomization.
24484 */
24485 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24486-#define MAX_GAP (TASK_SIZE/6*5)
24487+#define MAX_GAP (pax_task_size/6*5)
24488
24489 static int mmap_is_legacy(void)
24490 {
24491@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24492 return rnd << PAGE_SHIFT;
24493 }
24494
24495-static unsigned long mmap_base(void)
24496+static unsigned long mmap_base(struct mm_struct *mm)
24497 {
24498 unsigned long gap = rlimit(RLIMIT_STACK);
24499+ unsigned long pax_task_size = TASK_SIZE;
24500+
24501+#ifdef CONFIG_PAX_SEGMEXEC
24502+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24503+ pax_task_size = SEGMEXEC_TASK_SIZE;
24504+#endif
24505
24506 if (gap < MIN_GAP)
24507 gap = MIN_GAP;
24508 else if (gap > MAX_GAP)
24509 gap = MAX_GAP;
24510
24511- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24512+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24513 }
24514
24515 /*
24516 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24517 * does, but not when emulating X86_32
24518 */
24519-static unsigned long mmap_legacy_base(void)
24520+static unsigned long mmap_legacy_base(struct mm_struct *mm)
24521 {
24522- if (mmap_is_ia32())
24523+ if (mmap_is_ia32()) {
24524+
24525+#ifdef CONFIG_PAX_SEGMEXEC
24526+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24527+ return SEGMEXEC_TASK_UNMAPPED_BASE;
24528+ else
24529+#endif
24530+
24531 return TASK_UNMAPPED_BASE;
24532- else
24533+ } else
24534 return TASK_UNMAPPED_BASE + mmap_rnd();
24535 }
24536
24537@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24538 void arch_pick_mmap_layout(struct mm_struct *mm)
24539 {
24540 if (mmap_is_legacy()) {
24541- mm->mmap_base = mmap_legacy_base();
24542+ mm->mmap_base = mmap_legacy_base(mm);
24543+
24544+#ifdef CONFIG_PAX_RANDMMAP
24545+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24546+ mm->mmap_base += mm->delta_mmap;
24547+#endif
24548+
24549 mm->get_unmapped_area = arch_get_unmapped_area;
24550 mm->unmap_area = arch_unmap_area;
24551 } else {
24552- mm->mmap_base = mmap_base();
24553+ mm->mmap_base = mmap_base(mm);
24554+
24555+#ifdef CONFIG_PAX_RANDMMAP
24556+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24557+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
24558+#endif
24559+
24560 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
24561 mm->unmap_area = arch_unmap_area_topdown;
24562 }
24563diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
24564index dc0b727..dc9d71a 100644
24565--- a/arch/x86/mm/mmio-mod.c
24566+++ b/arch/x86/mm/mmio-mod.c
24567@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
24568 break;
24569 default:
24570 {
24571- unsigned char *ip = (unsigned char *)instptr;
24572+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
24573 my_trace->opcode = MMIO_UNKNOWN_OP;
24574 my_trace->width = 0;
24575 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
24576@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
24577 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24578 void __iomem *addr)
24579 {
24580- static atomic_t next_id;
24581+ static atomic_unchecked_t next_id;
24582 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
24583 /* These are page-unaligned. */
24584 struct mmiotrace_map map = {
24585@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24586 .private = trace
24587 },
24588 .phys = offset,
24589- .id = atomic_inc_return(&next_id)
24590+ .id = atomic_inc_return_unchecked(&next_id)
24591 };
24592 map.map_id = trace->id;
24593
24594diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
24595index b008656..773eac2 100644
24596--- a/arch/x86/mm/pageattr-test.c
24597+++ b/arch/x86/mm/pageattr-test.c
24598@@ -36,7 +36,7 @@ enum {
24599
24600 static int pte_testbit(pte_t pte)
24601 {
24602- return pte_flags(pte) & _PAGE_UNUSED1;
24603+ return pte_flags(pte) & _PAGE_CPA_TEST;
24604 }
24605
24606 struct split_state {
24607diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
24608index e1ebde3..b1e1db38 100644
24609--- a/arch/x86/mm/pageattr.c
24610+++ b/arch/x86/mm/pageattr.c
24611@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24612 */
24613 #ifdef CONFIG_PCI_BIOS
24614 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
24615- pgprot_val(forbidden) |= _PAGE_NX;
24616+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24617 #endif
24618
24619 /*
24620@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24621 * Does not cover __inittext since that is gone later on. On
24622 * 64bit we do not enforce !NX on the low mapping
24623 */
24624- if (within(address, (unsigned long)_text, (unsigned long)_etext))
24625- pgprot_val(forbidden) |= _PAGE_NX;
24626+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
24627+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24628
24629+#ifdef CONFIG_DEBUG_RODATA
24630 /*
24631 * The .rodata section needs to be read-only. Using the pfn
24632 * catches all aliases.
24633@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24634 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
24635 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
24636 pgprot_val(forbidden) |= _PAGE_RW;
24637+#endif
24638
24639 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
24640 /*
24641@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24642 }
24643 #endif
24644
24645+#ifdef CONFIG_PAX_KERNEXEC
24646+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
24647+ pgprot_val(forbidden) |= _PAGE_RW;
24648+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24649+ }
24650+#endif
24651+
24652 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
24653
24654 return prot;
24655@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
24656 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
24657 {
24658 /* change init_mm */
24659+ pax_open_kernel();
24660 set_pte_atomic(kpte, pte);
24661+
24662 #ifdef CONFIG_X86_32
24663 if (!SHARED_KERNEL_PMD) {
24664+
24665+#ifdef CONFIG_PAX_PER_CPU_PGD
24666+ unsigned long cpu;
24667+#else
24668 struct page *page;
24669+#endif
24670
24671+#ifdef CONFIG_PAX_PER_CPU_PGD
24672+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24673+ pgd_t *pgd = get_cpu_pgd(cpu);
24674+#else
24675 list_for_each_entry(page, &pgd_list, lru) {
24676- pgd_t *pgd;
24677+ pgd_t *pgd = (pgd_t *)page_address(page);
24678+#endif
24679+
24680 pud_t *pud;
24681 pmd_t *pmd;
24682
24683- pgd = (pgd_t *)page_address(page) + pgd_index(address);
24684+ pgd += pgd_index(address);
24685 pud = pud_offset(pgd, address);
24686 pmd = pmd_offset(pud, address);
24687 set_pte_atomic((pte_t *)pmd, pte);
24688 }
24689 }
24690 #endif
24691+ pax_close_kernel();
24692 }
24693
24694 static int
24695diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
24696index f6ff57b..481690f 100644
24697--- a/arch/x86/mm/pat.c
24698+++ b/arch/x86/mm/pat.c
24699@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
24700
24701 if (!entry) {
24702 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
24703- current->comm, current->pid, start, end);
24704+ current->comm, task_pid_nr(current), start, end);
24705 return -EINVAL;
24706 }
24707
24708@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24709 while (cursor < to) {
24710 if (!devmem_is_allowed(pfn)) {
24711 printk(KERN_INFO
24712- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24713- current->comm, from, to);
24714+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
24715+ current->comm, from, to, cursor);
24716 return 0;
24717 }
24718 cursor += PAGE_SIZE;
24719@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
24720 printk(KERN_INFO
24721 "%s:%d ioremap_change_attr failed %s "
24722 "for %Lx-%Lx\n",
24723- current->comm, current->pid,
24724+ current->comm, task_pid_nr(current),
24725 cattr_name(flags),
24726 base, (unsigned long long)(base + size));
24727 return -EINVAL;
24728@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24729 if (want_flags != flags) {
24730 printk(KERN_WARNING
24731 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
24732- current->comm, current->pid,
24733+ current->comm, task_pid_nr(current),
24734 cattr_name(want_flags),
24735 (unsigned long long)paddr,
24736 (unsigned long long)(paddr + size),
24737@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24738 free_memtype(paddr, paddr + size);
24739 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
24740 " for %Lx-%Lx, got %s\n",
24741- current->comm, current->pid,
24742+ current->comm, task_pid_nr(current),
24743 cattr_name(want_flags),
24744 (unsigned long long)paddr,
24745 (unsigned long long)(paddr + size),
24746diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
24747index 9f0614d..92ae64a 100644
24748--- a/arch/x86/mm/pf_in.c
24749+++ b/arch/x86/mm/pf_in.c
24750@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
24751 int i;
24752 enum reason_type rv = OTHERS;
24753
24754- p = (unsigned char *)ins_addr;
24755+ p = (unsigned char *)ktla_ktva(ins_addr);
24756 p += skip_prefix(p, &prf);
24757 p += get_opcode(p, &opcode);
24758
24759@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
24760 struct prefix_bits prf;
24761 int i;
24762
24763- p = (unsigned char *)ins_addr;
24764+ p = (unsigned char *)ktla_ktva(ins_addr);
24765 p += skip_prefix(p, &prf);
24766 p += get_opcode(p, &opcode);
24767
24768@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
24769 struct prefix_bits prf;
24770 int i;
24771
24772- p = (unsigned char *)ins_addr;
24773+ p = (unsigned char *)ktla_ktva(ins_addr);
24774 p += skip_prefix(p, &prf);
24775 p += get_opcode(p, &opcode);
24776
24777@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
24778 struct prefix_bits prf;
24779 int i;
24780
24781- p = (unsigned char *)ins_addr;
24782+ p = (unsigned char *)ktla_ktva(ins_addr);
24783 p += skip_prefix(p, &prf);
24784 p += get_opcode(p, &opcode);
24785 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
24786@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
24787 struct prefix_bits prf;
24788 int i;
24789
24790- p = (unsigned char *)ins_addr;
24791+ p = (unsigned char *)ktla_ktva(ins_addr);
24792 p += skip_prefix(p, &prf);
24793 p += get_opcode(p, &opcode);
24794 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
24795diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
24796index 8573b83..c3b1a30 100644
24797--- a/arch/x86/mm/pgtable.c
24798+++ b/arch/x86/mm/pgtable.c
24799@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
24800 list_del(&page->lru);
24801 }
24802
24803-#define UNSHARED_PTRS_PER_PGD \
24804- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24805+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24806+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
24807
24808+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24809+{
24810+ while (count--)
24811+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
24812+}
24813+#endif
24814
24815+#ifdef CONFIG_PAX_PER_CPU_PGD
24816+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24817+{
24818+ while (count--)
24819+
24820+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24821+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
24822+#else
24823+ *dst++ = *src++;
24824+#endif
24825+
24826+}
24827+#endif
24828+
24829+#ifdef CONFIG_X86_64
24830+#define pxd_t pud_t
24831+#define pyd_t pgd_t
24832+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
24833+#define pxd_free(mm, pud) pud_free((mm), (pud))
24834+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
24835+#define pyd_offset(mm, address) pgd_offset((mm), (address))
24836+#define PYD_SIZE PGDIR_SIZE
24837+#else
24838+#define pxd_t pmd_t
24839+#define pyd_t pud_t
24840+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
24841+#define pxd_free(mm, pud) pmd_free((mm), (pud))
24842+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
24843+#define pyd_offset(mm, address) pud_offset((mm), (address))
24844+#define PYD_SIZE PUD_SIZE
24845+#endif
24846+
24847+#ifdef CONFIG_PAX_PER_CPU_PGD
24848+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
24849+static inline void pgd_dtor(pgd_t *pgd) {}
24850+#else
24851 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
24852 {
24853 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
24854@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
24855 pgd_list_del(pgd);
24856 spin_unlock(&pgd_lock);
24857 }
24858+#endif
24859
24860 /*
24861 * List of all pgd's needed for non-PAE so it can invalidate entries
24862@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
24863 * -- wli
24864 */
24865
24866-#ifdef CONFIG_X86_PAE
24867+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24868 /*
24869 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
24870 * updating the top-level pagetable entries to guarantee the
24871@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
24872 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
24873 * and initialize the kernel pmds here.
24874 */
24875-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
24876+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24877
24878 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24879 {
24880@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24881 */
24882 flush_tlb_mm(mm);
24883 }
24884+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
24885+#define PREALLOCATED_PXDS USER_PGD_PTRS
24886 #else /* !CONFIG_X86_PAE */
24887
24888 /* No need to prepopulate any pagetable entries in non-PAE modes. */
24889-#define PREALLOCATED_PMDS 0
24890+#define PREALLOCATED_PXDS 0
24891
24892 #endif /* CONFIG_X86_PAE */
24893
24894-static void free_pmds(pmd_t *pmds[])
24895+static void free_pxds(pxd_t *pxds[])
24896 {
24897 int i;
24898
24899- for(i = 0; i < PREALLOCATED_PMDS; i++)
24900- if (pmds[i])
24901- free_page((unsigned long)pmds[i]);
24902+ for(i = 0; i < PREALLOCATED_PXDS; i++)
24903+ if (pxds[i])
24904+ free_page((unsigned long)pxds[i]);
24905 }
24906
24907-static int preallocate_pmds(pmd_t *pmds[])
24908+static int preallocate_pxds(pxd_t *pxds[])
24909 {
24910 int i;
24911 bool failed = false;
24912
24913- for(i = 0; i < PREALLOCATED_PMDS; i++) {
24914- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
24915- if (pmd == NULL)
24916+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
24917+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
24918+ if (pxd == NULL)
24919 failed = true;
24920- pmds[i] = pmd;
24921+ pxds[i] = pxd;
24922 }
24923
24924 if (failed) {
24925- free_pmds(pmds);
24926+ free_pxds(pxds);
24927 return -ENOMEM;
24928 }
24929
24930@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
24931 * preallocate which never got a corresponding vma will need to be
24932 * freed manually.
24933 */
24934-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
24935+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
24936 {
24937 int i;
24938
24939- for(i = 0; i < PREALLOCATED_PMDS; i++) {
24940+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
24941 pgd_t pgd = pgdp[i];
24942
24943 if (pgd_val(pgd) != 0) {
24944- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
24945+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
24946
24947- pgdp[i] = native_make_pgd(0);
24948+ set_pgd(pgdp + i, native_make_pgd(0));
24949
24950- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
24951- pmd_free(mm, pmd);
24952+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
24953+ pxd_free(mm, pxd);
24954 }
24955 }
24956 }
24957
24958-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
24959+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
24960 {
24961- pud_t *pud;
24962+ pyd_t *pyd;
24963 unsigned long addr;
24964 int i;
24965
24966- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
24967+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
24968 return;
24969
24970- pud = pud_offset(pgd, 0);
24971+#ifdef CONFIG_X86_64
24972+ pyd = pyd_offset(mm, 0L);
24973+#else
24974+ pyd = pyd_offset(pgd, 0L);
24975+#endif
24976
24977- for (addr = i = 0; i < PREALLOCATED_PMDS;
24978- i++, pud++, addr += PUD_SIZE) {
24979- pmd_t *pmd = pmds[i];
24980+ for (addr = i = 0; i < PREALLOCATED_PXDS;
24981+ i++, pyd++, addr += PYD_SIZE) {
24982+ pxd_t *pxd = pxds[i];
24983
24984 if (i >= KERNEL_PGD_BOUNDARY)
24985- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
24986- sizeof(pmd_t) * PTRS_PER_PMD);
24987+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
24988+ sizeof(pxd_t) * PTRS_PER_PMD);
24989
24990- pud_populate(mm, pud, pmd);
24991+ pyd_populate(mm, pyd, pxd);
24992 }
24993 }
24994
24995 pgd_t *pgd_alloc(struct mm_struct *mm)
24996 {
24997 pgd_t *pgd;
24998- pmd_t *pmds[PREALLOCATED_PMDS];
24999+ pxd_t *pxds[PREALLOCATED_PXDS];
25000
25001 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25002
25003@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25004
25005 mm->pgd = pgd;
25006
25007- if (preallocate_pmds(pmds) != 0)
25008+ if (preallocate_pxds(pxds) != 0)
25009 goto out_free_pgd;
25010
25011 if (paravirt_pgd_alloc(mm) != 0)
25012- goto out_free_pmds;
25013+ goto out_free_pxds;
25014
25015 /*
25016 * Make sure that pre-populating the pmds is atomic with
25017@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25018 spin_lock(&pgd_lock);
25019
25020 pgd_ctor(mm, pgd);
25021- pgd_prepopulate_pmd(mm, pgd, pmds);
25022+ pgd_prepopulate_pxd(mm, pgd, pxds);
25023
25024 spin_unlock(&pgd_lock);
25025
25026 return pgd;
25027
25028-out_free_pmds:
25029- free_pmds(pmds);
25030+out_free_pxds:
25031+ free_pxds(pxds);
25032 out_free_pgd:
25033 free_page((unsigned long)pgd);
25034 out:
25035@@ -295,7 +344,7 @@ out:
25036
25037 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25038 {
25039- pgd_mop_up_pmds(mm, pgd);
25040+ pgd_mop_up_pxds(mm, pgd);
25041 pgd_dtor(pgd);
25042 paravirt_pgd_free(mm, pgd);
25043 free_page((unsigned long)pgd);
25044diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25045index cac7184..09a39fa 100644
25046--- a/arch/x86/mm/pgtable_32.c
25047+++ b/arch/x86/mm/pgtable_32.c
25048@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25049 return;
25050 }
25051 pte = pte_offset_kernel(pmd, vaddr);
25052+
25053+ pax_open_kernel();
25054 if (pte_val(pteval))
25055 set_pte_at(&init_mm, vaddr, pte, pteval);
25056 else
25057 pte_clear(&init_mm, vaddr, pte);
25058+ pax_close_kernel();
25059
25060 /*
25061 * It's enough to flush this one mapping.
25062diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25063index 410531d..0f16030 100644
25064--- a/arch/x86/mm/setup_nx.c
25065+++ b/arch/x86/mm/setup_nx.c
25066@@ -5,8 +5,10 @@
25067 #include <asm/pgtable.h>
25068 #include <asm/proto.h>
25069
25070+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25071 static int disable_nx __cpuinitdata;
25072
25073+#ifndef CONFIG_PAX_PAGEEXEC
25074 /*
25075 * noexec = on|off
25076 *
25077@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25078 return 0;
25079 }
25080 early_param("noexec", noexec_setup);
25081+#endif
25082+
25083+#endif
25084
25085 void __cpuinit x86_configure_nx(void)
25086 {
25087+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25088 if (cpu_has_nx && !disable_nx)
25089 __supported_pte_mask |= _PAGE_NX;
25090 else
25091+#endif
25092 __supported_pte_mask &= ~_PAGE_NX;
25093 }
25094
25095diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25096index d6c0418..06a0ad5 100644
25097--- a/arch/x86/mm/tlb.c
25098+++ b/arch/x86/mm/tlb.c
25099@@ -65,7 +65,11 @@ void leave_mm(int cpu)
25100 BUG();
25101 cpumask_clear_cpu(cpu,
25102 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25103+
25104+#ifndef CONFIG_PAX_PER_CPU_PGD
25105 load_cr3(swapper_pg_dir);
25106+#endif
25107+
25108 }
25109 EXPORT_SYMBOL_GPL(leave_mm);
25110
25111diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25112index 6687022..ceabcfa 100644
25113--- a/arch/x86/net/bpf_jit.S
25114+++ b/arch/x86/net/bpf_jit.S
25115@@ -9,6 +9,7 @@
25116 */
25117 #include <linux/linkage.h>
25118 #include <asm/dwarf2.h>
25119+#include <asm/alternative-asm.h>
25120
25121 /*
25122 * Calling convention :
25123@@ -35,6 +36,7 @@ sk_load_word:
25124 jle bpf_slow_path_word
25125 mov (SKBDATA,%rsi),%eax
25126 bswap %eax /* ntohl() */
25127+ pax_force_retaddr
25128 ret
25129
25130
25131@@ -53,6 +55,7 @@ sk_load_half:
25132 jle bpf_slow_path_half
25133 movzwl (SKBDATA,%rsi),%eax
25134 rol $8,%ax # ntohs()
25135+ pax_force_retaddr
25136 ret
25137
25138 sk_load_byte_ind:
25139@@ -66,6 +69,7 @@ sk_load_byte:
25140 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25141 jle bpf_slow_path_byte
25142 movzbl (SKBDATA,%rsi),%eax
25143+ pax_force_retaddr
25144 ret
25145
25146 /**
25147@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25148 movzbl (SKBDATA,%rsi),%ebx
25149 and $15,%bl
25150 shl $2,%bl
25151+ pax_force_retaddr
25152 ret
25153 CFI_ENDPROC
25154 ENDPROC(sk_load_byte_msh)
25155@@ -91,6 +96,7 @@ bpf_error:
25156 xor %eax,%eax
25157 mov -8(%rbp),%rbx
25158 leaveq
25159+ pax_force_retaddr
25160 ret
25161
25162 /* rsi contains offset and can be scratched */
25163@@ -113,6 +119,7 @@ bpf_slow_path_word:
25164 js bpf_error
25165 mov -12(%rbp),%eax
25166 bswap %eax
25167+ pax_force_retaddr
25168 ret
25169
25170 bpf_slow_path_half:
25171@@ -121,12 +128,14 @@ bpf_slow_path_half:
25172 mov -12(%rbp),%ax
25173 rol $8,%ax
25174 movzwl %ax,%eax
25175+ pax_force_retaddr
25176 ret
25177
25178 bpf_slow_path_byte:
25179 bpf_slow_path_common(1)
25180 js bpf_error
25181 movzbl -12(%rbp),%eax
25182+ pax_force_retaddr
25183 ret
25184
25185 bpf_slow_path_byte_msh:
25186@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25187 and $15,%al
25188 shl $2,%al
25189 xchg %eax,%ebx
25190+ pax_force_retaddr
25191 ret
25192diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25193index 7c1b765..8c072c6 100644
25194--- a/arch/x86/net/bpf_jit_comp.c
25195+++ b/arch/x86/net/bpf_jit_comp.c
25196@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25197 set_fs(old_fs);
25198 }
25199
25200+struct bpf_jit_work {
25201+ struct work_struct work;
25202+ void *image;
25203+};
25204
25205 void bpf_jit_compile(struct sk_filter *fp)
25206 {
25207@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25208 if (addrs == NULL)
25209 return;
25210
25211+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25212+ if (!fp->work)
25213+ goto out;
25214+
25215 /* Before first pass, make a rough estimation of addrs[]
25216 * each bpf instruction is translated to less than 64 bytes
25217 */
25218@@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25219 func = sk_load_word;
25220 common_load: seen |= SEEN_DATAREF;
25221 if ((int)K < 0)
25222- goto out;
25223+ goto error;
25224 t_offset = func - (image + addrs[i]);
25225 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25226 EMIT1_off32(0xe8, t_offset); /* call */
25227@@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25228 break;
25229 default:
25230 /* hmm, too complex filter, give up with jit compiler */
25231- goto out;
25232+ goto error;
25233 }
25234 ilen = prog - temp;
25235 if (image) {
25236 if (unlikely(proglen + ilen > oldproglen)) {
25237 pr_err("bpb_jit_compile fatal error\n");
25238- kfree(addrs);
25239- module_free(NULL, image);
25240- return;
25241+ module_free_exec(NULL, image);
25242+ goto error;
25243 }
25244+ pax_open_kernel();
25245 memcpy(image + proglen, temp, ilen);
25246+ pax_close_kernel();
25247 }
25248 proglen += ilen;
25249 addrs[i] = proglen;
25250@@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25251 break;
25252 }
25253 if (proglen == oldproglen) {
25254- image = module_alloc(max_t(unsigned int,
25255- proglen,
25256- sizeof(struct work_struct)));
25257+ image = module_alloc_exec(proglen);
25258 if (!image)
25259- goto out;
25260+ goto error;
25261 }
25262 oldproglen = proglen;
25263 }
25264@@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25265 bpf_flush_icache(image, image + proglen);
25266
25267 fp->bpf_func = (void *)image;
25268- }
25269+ } else
25270+error:
25271+ kfree(fp->work);
25272+
25273 out:
25274 kfree(addrs);
25275 return;
25276@@ -645,18 +655,20 @@ out:
25277
25278 static void jit_free_defer(struct work_struct *arg)
25279 {
25280- module_free(NULL, arg);
25281+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25282+ kfree(arg);
25283 }
25284
25285 /* run from softirq, we must use a work_struct to call
25286- * module_free() from process context
25287+ * module_free_exec() from process context
25288 */
25289 void bpf_jit_free(struct sk_filter *fp)
25290 {
25291 if (fp->bpf_func != sk_run_filter) {
25292- struct work_struct *work = (struct work_struct *)fp->bpf_func;
25293+ struct work_struct *work = &fp->work->work;
25294
25295 INIT_WORK(work, jit_free_defer);
25296+ fp->work->image = fp->bpf_func;
25297 schedule_work(work);
25298 }
25299 }
25300diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25301index bff89df..377758a 100644
25302--- a/arch/x86/oprofile/backtrace.c
25303+++ b/arch/x86/oprofile/backtrace.c
25304@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25305 struct stack_frame_ia32 *fp;
25306 unsigned long bytes;
25307
25308- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25309+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25310 if (bytes != sizeof(bufhead))
25311 return NULL;
25312
25313- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25314+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25315
25316 oprofile_add_trace(bufhead[0].return_address);
25317
25318@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25319 struct stack_frame bufhead[2];
25320 unsigned long bytes;
25321
25322- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25323+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25324 if (bytes != sizeof(bufhead))
25325 return NULL;
25326
25327@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25328 {
25329 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25330
25331- if (!user_mode_vm(regs)) {
25332+ if (!user_mode(regs)) {
25333 unsigned long stack = kernel_stack_pointer(regs);
25334 if (depth)
25335 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25336diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25337index cb29191..036766d 100644
25338--- a/arch/x86/pci/mrst.c
25339+++ b/arch/x86/pci/mrst.c
25340@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25341 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25342 pci_mmcfg_late_init();
25343 pcibios_enable_irq = mrst_pci_irq_enable;
25344- pci_root_ops = pci_mrst_ops;
25345+ pax_open_kernel();
25346+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25347+ pax_close_kernel();
25348 /* Continue with standard init */
25349 return 1;
25350 }
25351diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25352index da8fe05..7ee6704 100644
25353--- a/arch/x86/pci/pcbios.c
25354+++ b/arch/x86/pci/pcbios.c
25355@@ -79,50 +79,93 @@ union bios32 {
25356 static struct {
25357 unsigned long address;
25358 unsigned short segment;
25359-} bios32_indirect = { 0, __KERNEL_CS };
25360+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25361
25362 /*
25363 * Returns the entry point for the given service, NULL on error
25364 */
25365
25366-static unsigned long bios32_service(unsigned long service)
25367+static unsigned long __devinit bios32_service(unsigned long service)
25368 {
25369 unsigned char return_code; /* %al */
25370 unsigned long address; /* %ebx */
25371 unsigned long length; /* %ecx */
25372 unsigned long entry; /* %edx */
25373 unsigned long flags;
25374+ struct desc_struct d, *gdt;
25375
25376 local_irq_save(flags);
25377- __asm__("lcall *(%%edi); cld"
25378+
25379+ gdt = get_cpu_gdt_table(smp_processor_id());
25380+
25381+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25382+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25383+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25384+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25385+
25386+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25387 : "=a" (return_code),
25388 "=b" (address),
25389 "=c" (length),
25390 "=d" (entry)
25391 : "0" (service),
25392 "1" (0),
25393- "D" (&bios32_indirect));
25394+ "D" (&bios32_indirect),
25395+ "r"(__PCIBIOS_DS)
25396+ : "memory");
25397+
25398+ pax_open_kernel();
25399+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25400+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25401+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25402+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25403+ pax_close_kernel();
25404+
25405 local_irq_restore(flags);
25406
25407 switch (return_code) {
25408- case 0:
25409- return address + entry;
25410- case 0x80: /* Not present */
25411- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25412- return 0;
25413- default: /* Shouldn't happen */
25414- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25415- service, return_code);
25416+ case 0: {
25417+ int cpu;
25418+ unsigned char flags;
25419+
25420+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25421+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25422+ printk(KERN_WARNING "bios32_service: not valid\n");
25423 return 0;
25424+ }
25425+ address = address + PAGE_OFFSET;
25426+ length += 16UL; /* some BIOSs underreport this... */
25427+ flags = 4;
25428+ if (length >= 64*1024*1024) {
25429+ length >>= PAGE_SHIFT;
25430+ flags |= 8;
25431+ }
25432+
25433+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25434+ gdt = get_cpu_gdt_table(cpu);
25435+ pack_descriptor(&d, address, length, 0x9b, flags);
25436+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25437+ pack_descriptor(&d, address, length, 0x93, flags);
25438+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25439+ }
25440+ return entry;
25441+ }
25442+ case 0x80: /* Not present */
25443+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25444+ return 0;
25445+ default: /* Shouldn't happen */
25446+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25447+ service, return_code);
25448+ return 0;
25449 }
25450 }
25451
25452 static struct {
25453 unsigned long address;
25454 unsigned short segment;
25455-} pci_indirect = { 0, __KERNEL_CS };
25456+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25457
25458-static int pci_bios_present;
25459+static int pci_bios_present __read_only;
25460
25461 static int __devinit check_pcibios(void)
25462 {
25463@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25464 unsigned long flags, pcibios_entry;
25465
25466 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25467- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25468+ pci_indirect.address = pcibios_entry;
25469
25470 local_irq_save(flags);
25471- __asm__(
25472- "lcall *(%%edi); cld\n\t"
25473+ __asm__("movw %w6, %%ds\n\t"
25474+ "lcall *%%ss:(%%edi); cld\n\t"
25475+ "push %%ss\n\t"
25476+ "pop %%ds\n\t"
25477 "jc 1f\n\t"
25478 "xor %%ah, %%ah\n"
25479 "1:"
25480@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25481 "=b" (ebx),
25482 "=c" (ecx)
25483 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25484- "D" (&pci_indirect)
25485+ "D" (&pci_indirect),
25486+ "r" (__PCIBIOS_DS)
25487 : "memory");
25488 local_irq_restore(flags);
25489
25490@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25491
25492 switch (len) {
25493 case 1:
25494- __asm__("lcall *(%%esi); cld\n\t"
25495+ __asm__("movw %w6, %%ds\n\t"
25496+ "lcall *%%ss:(%%esi); cld\n\t"
25497+ "push %%ss\n\t"
25498+ "pop %%ds\n\t"
25499 "jc 1f\n\t"
25500 "xor %%ah, %%ah\n"
25501 "1:"
25502@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25503 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25504 "b" (bx),
25505 "D" ((long)reg),
25506- "S" (&pci_indirect));
25507+ "S" (&pci_indirect),
25508+ "r" (__PCIBIOS_DS));
25509 /*
25510 * Zero-extend the result beyond 8 bits, do not trust the
25511 * BIOS having done it:
25512@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25513 *value &= 0xff;
25514 break;
25515 case 2:
25516- __asm__("lcall *(%%esi); cld\n\t"
25517+ __asm__("movw %w6, %%ds\n\t"
25518+ "lcall *%%ss:(%%esi); cld\n\t"
25519+ "push %%ss\n\t"
25520+ "pop %%ds\n\t"
25521 "jc 1f\n\t"
25522 "xor %%ah, %%ah\n"
25523 "1:"
25524@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25525 : "1" (PCIBIOS_READ_CONFIG_WORD),
25526 "b" (bx),
25527 "D" ((long)reg),
25528- "S" (&pci_indirect));
25529+ "S" (&pci_indirect),
25530+ "r" (__PCIBIOS_DS));
25531 /*
25532 * Zero-extend the result beyond 16 bits, do not trust the
25533 * BIOS having done it:
25534@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25535 *value &= 0xffff;
25536 break;
25537 case 4:
25538- __asm__("lcall *(%%esi); cld\n\t"
25539+ __asm__("movw %w6, %%ds\n\t"
25540+ "lcall *%%ss:(%%esi); cld\n\t"
25541+ "push %%ss\n\t"
25542+ "pop %%ds\n\t"
25543 "jc 1f\n\t"
25544 "xor %%ah, %%ah\n"
25545 "1:"
25546@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25547 : "1" (PCIBIOS_READ_CONFIG_DWORD),
25548 "b" (bx),
25549 "D" ((long)reg),
25550- "S" (&pci_indirect));
25551+ "S" (&pci_indirect),
25552+ "r" (__PCIBIOS_DS));
25553 break;
25554 }
25555
25556@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25557
25558 switch (len) {
25559 case 1:
25560- __asm__("lcall *(%%esi); cld\n\t"
25561+ __asm__("movw %w6, %%ds\n\t"
25562+ "lcall *%%ss:(%%esi); cld\n\t"
25563+ "push %%ss\n\t"
25564+ "pop %%ds\n\t"
25565 "jc 1f\n\t"
25566 "xor %%ah, %%ah\n"
25567 "1:"
25568@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25569 "c" (value),
25570 "b" (bx),
25571 "D" ((long)reg),
25572- "S" (&pci_indirect));
25573+ "S" (&pci_indirect),
25574+ "r" (__PCIBIOS_DS));
25575 break;
25576 case 2:
25577- __asm__("lcall *(%%esi); cld\n\t"
25578+ __asm__("movw %w6, %%ds\n\t"
25579+ "lcall *%%ss:(%%esi); cld\n\t"
25580+ "push %%ss\n\t"
25581+ "pop %%ds\n\t"
25582 "jc 1f\n\t"
25583 "xor %%ah, %%ah\n"
25584 "1:"
25585@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25586 "c" (value),
25587 "b" (bx),
25588 "D" ((long)reg),
25589- "S" (&pci_indirect));
25590+ "S" (&pci_indirect),
25591+ "r" (__PCIBIOS_DS));
25592 break;
25593 case 4:
25594- __asm__("lcall *(%%esi); cld\n\t"
25595+ __asm__("movw %w6, %%ds\n\t"
25596+ "lcall *%%ss:(%%esi); cld\n\t"
25597+ "push %%ss\n\t"
25598+ "pop %%ds\n\t"
25599 "jc 1f\n\t"
25600 "xor %%ah, %%ah\n"
25601 "1:"
25602@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25603 "c" (value),
25604 "b" (bx),
25605 "D" ((long)reg),
25606- "S" (&pci_indirect));
25607+ "S" (&pci_indirect),
25608+ "r" (__PCIBIOS_DS));
25609 break;
25610 }
25611
25612@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25613
25614 DBG("PCI: Fetching IRQ routing table... ");
25615 __asm__("push %%es\n\t"
25616+ "movw %w8, %%ds\n\t"
25617 "push %%ds\n\t"
25618 "pop %%es\n\t"
25619- "lcall *(%%esi); cld\n\t"
25620+ "lcall *%%ss:(%%esi); cld\n\t"
25621 "pop %%es\n\t"
25622+ "push %%ss\n\t"
25623+ "pop %%ds\n"
25624 "jc 1f\n\t"
25625 "xor %%ah, %%ah\n"
25626 "1:"
25627@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25628 "1" (0),
25629 "D" ((long) &opt),
25630 "S" (&pci_indirect),
25631- "m" (opt)
25632+ "m" (opt),
25633+ "r" (__PCIBIOS_DS)
25634 : "memory");
25635 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
25636 if (ret & 0xff00)
25637@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25638 {
25639 int ret;
25640
25641- __asm__("lcall *(%%esi); cld\n\t"
25642+ __asm__("movw %w5, %%ds\n\t"
25643+ "lcall *%%ss:(%%esi); cld\n\t"
25644+ "push %%ss\n\t"
25645+ "pop %%ds\n"
25646 "jc 1f\n\t"
25647 "xor %%ah, %%ah\n"
25648 "1:"
25649@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25650 : "0" (PCIBIOS_SET_PCI_HW_INT),
25651 "b" ((dev->bus->number << 8) | dev->devfn),
25652 "c" ((irq << 8) | (pin + 10)),
25653- "S" (&pci_indirect));
25654+ "S" (&pci_indirect),
25655+ "r" (__PCIBIOS_DS));
25656 return !(ret & 0xff00);
25657 }
25658 EXPORT_SYMBOL(pcibios_set_irq_routing);
25659diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
25660index 40e4469..1ab536e 100644
25661--- a/arch/x86/platform/efi/efi_32.c
25662+++ b/arch/x86/platform/efi/efi_32.c
25663@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
25664 {
25665 struct desc_ptr gdt_descr;
25666
25667+#ifdef CONFIG_PAX_KERNEXEC
25668+ struct desc_struct d;
25669+#endif
25670+
25671 local_irq_save(efi_rt_eflags);
25672
25673 load_cr3(initial_page_table);
25674 __flush_tlb_all();
25675
25676+#ifdef CONFIG_PAX_KERNEXEC
25677+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
25678+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25679+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
25680+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25681+#endif
25682+
25683 gdt_descr.address = __pa(get_cpu_gdt_table(0));
25684 gdt_descr.size = GDT_SIZE - 1;
25685 load_gdt(&gdt_descr);
25686@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
25687 {
25688 struct desc_ptr gdt_descr;
25689
25690+#ifdef CONFIG_PAX_KERNEXEC
25691+ struct desc_struct d;
25692+
25693+ memset(&d, 0, sizeof d);
25694+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25695+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25696+#endif
25697+
25698 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
25699 gdt_descr.size = GDT_SIZE - 1;
25700 load_gdt(&gdt_descr);
25701diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
25702index fbe66e6..c5c0dd2 100644
25703--- a/arch/x86/platform/efi/efi_stub_32.S
25704+++ b/arch/x86/platform/efi/efi_stub_32.S
25705@@ -6,7 +6,9 @@
25706 */
25707
25708 #include <linux/linkage.h>
25709+#include <linux/init.h>
25710 #include <asm/page_types.h>
25711+#include <asm/segment.h>
25712
25713 /*
25714 * efi_call_phys(void *, ...) is a function with variable parameters.
25715@@ -20,7 +22,7 @@
25716 * service functions will comply with gcc calling convention, too.
25717 */
25718
25719-.text
25720+__INIT
25721 ENTRY(efi_call_phys)
25722 /*
25723 * 0. The function can only be called in Linux kernel. So CS has been
25724@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
25725 * The mapping of lower virtual memory has been created in prelog and
25726 * epilog.
25727 */
25728- movl $1f, %edx
25729- subl $__PAGE_OFFSET, %edx
25730- jmp *%edx
25731+ movl $(__KERNEXEC_EFI_DS), %edx
25732+ mov %edx, %ds
25733+ mov %edx, %es
25734+ mov %edx, %ss
25735+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
25736 1:
25737
25738 /*
25739@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
25740 * parameter 2, ..., param n. To make things easy, we save the return
25741 * address of efi_call_phys in a global variable.
25742 */
25743- popl %edx
25744- movl %edx, saved_return_addr
25745- /* get the function pointer into ECX*/
25746- popl %ecx
25747- movl %ecx, efi_rt_function_ptr
25748- movl $2f, %edx
25749- subl $__PAGE_OFFSET, %edx
25750- pushl %edx
25751+ popl (saved_return_addr)
25752+ popl (efi_rt_function_ptr)
25753
25754 /*
25755 * 3. Clear PG bit in %CR0.
25756@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
25757 /*
25758 * 5. Call the physical function.
25759 */
25760- jmp *%ecx
25761+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
25762
25763-2:
25764 /*
25765 * 6. After EFI runtime service returns, control will return to
25766 * following instruction. We'd better readjust stack pointer first.
25767@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
25768 movl %cr0, %edx
25769 orl $0x80000000, %edx
25770 movl %edx, %cr0
25771- jmp 1f
25772-1:
25773+
25774 /*
25775 * 8. Now restore the virtual mode from flat mode by
25776 * adding EIP with PAGE_OFFSET.
25777 */
25778- movl $1f, %edx
25779- jmp *%edx
25780+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
25781 1:
25782+ movl $(__KERNEL_DS), %edx
25783+ mov %edx, %ds
25784+ mov %edx, %es
25785+ mov %edx, %ss
25786
25787 /*
25788 * 9. Balance the stack. And because EAX contain the return value,
25789 * we'd better not clobber it.
25790 */
25791- leal efi_rt_function_ptr, %edx
25792- movl (%edx), %ecx
25793- pushl %ecx
25794+ pushl (efi_rt_function_ptr)
25795
25796 /*
25797- * 10. Push the saved return address onto the stack and return.
25798+ * 10. Return to the saved return address.
25799 */
25800- leal saved_return_addr, %edx
25801- movl (%edx), %ecx
25802- pushl %ecx
25803- ret
25804+ jmpl *(saved_return_addr)
25805 ENDPROC(efi_call_phys)
25806 .previous
25807
25808-.data
25809+__INITDATA
25810 saved_return_addr:
25811 .long 0
25812 efi_rt_function_ptr:
25813diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
25814index 4c07cca..2c8427d 100644
25815--- a/arch/x86/platform/efi/efi_stub_64.S
25816+++ b/arch/x86/platform/efi/efi_stub_64.S
25817@@ -7,6 +7,7 @@
25818 */
25819
25820 #include <linux/linkage.h>
25821+#include <asm/alternative-asm.h>
25822
25823 #define SAVE_XMM \
25824 mov %rsp, %rax; \
25825@@ -40,6 +41,7 @@ ENTRY(efi_call0)
25826 call *%rdi
25827 addq $32, %rsp
25828 RESTORE_XMM
25829+ pax_force_retaddr 0, 1
25830 ret
25831 ENDPROC(efi_call0)
25832
25833@@ -50,6 +52,7 @@ ENTRY(efi_call1)
25834 call *%rdi
25835 addq $32, %rsp
25836 RESTORE_XMM
25837+ pax_force_retaddr 0, 1
25838 ret
25839 ENDPROC(efi_call1)
25840
25841@@ -60,6 +63,7 @@ ENTRY(efi_call2)
25842 call *%rdi
25843 addq $32, %rsp
25844 RESTORE_XMM
25845+ pax_force_retaddr 0, 1
25846 ret
25847 ENDPROC(efi_call2)
25848
25849@@ -71,6 +75,7 @@ ENTRY(efi_call3)
25850 call *%rdi
25851 addq $32, %rsp
25852 RESTORE_XMM
25853+ pax_force_retaddr 0, 1
25854 ret
25855 ENDPROC(efi_call3)
25856
25857@@ -83,6 +88,7 @@ ENTRY(efi_call4)
25858 call *%rdi
25859 addq $32, %rsp
25860 RESTORE_XMM
25861+ pax_force_retaddr 0, 1
25862 ret
25863 ENDPROC(efi_call4)
25864
25865@@ -96,6 +102,7 @@ ENTRY(efi_call5)
25866 call *%rdi
25867 addq $48, %rsp
25868 RESTORE_XMM
25869+ pax_force_retaddr 0, 1
25870 ret
25871 ENDPROC(efi_call5)
25872
25873@@ -112,5 +119,6 @@ ENTRY(efi_call6)
25874 call *%rdi
25875 addq $48, %rsp
25876 RESTORE_XMM
25877+ pax_force_retaddr 0, 1
25878 ret
25879 ENDPROC(efi_call6)
25880diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
25881index 475e2cd..1b8e708 100644
25882--- a/arch/x86/platform/mrst/mrst.c
25883+++ b/arch/x86/platform/mrst/mrst.c
25884@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
25885 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
25886 int sfi_mrtc_num;
25887
25888-static void mrst_power_off(void)
25889+static __noreturn void mrst_power_off(void)
25890 {
25891 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25892 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
25893+ BUG();
25894 }
25895
25896-static void mrst_reboot(void)
25897+static __noreturn void mrst_reboot(void)
25898 {
25899 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25900 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
25901 else
25902 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
25903+ BUG();
25904 }
25905
25906 /* parse all the mtimer info to a static mtimer array */
25907diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
25908index f10c0af..3ec1f95 100644
25909--- a/arch/x86/power/cpu.c
25910+++ b/arch/x86/power/cpu.c
25911@@ -131,7 +131,7 @@ static void do_fpu_end(void)
25912 static void fix_processor_context(void)
25913 {
25914 int cpu = smp_processor_id();
25915- struct tss_struct *t = &per_cpu(init_tss, cpu);
25916+ struct tss_struct *t = init_tss + cpu;
25917
25918 set_tss_desc(cpu, t); /*
25919 * This just modifies memory; should not be
25920@@ -141,7 +141,9 @@ static void fix_processor_context(void)
25921 */
25922
25923 #ifdef CONFIG_X86_64
25924+ pax_open_kernel();
25925 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
25926+ pax_close_kernel();
25927
25928 syscall_init(); /* This sets MSR_*STAR and related */
25929 #endif
25930diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
25931index 5d17950..2253fc9 100644
25932--- a/arch/x86/vdso/Makefile
25933+++ b/arch/x86/vdso/Makefile
25934@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
25935 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
25936 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
25937
25938-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25939+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25940 GCOV_PROFILE := n
25941
25942 #
25943diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
25944index 468d591..8e80a0a 100644
25945--- a/arch/x86/vdso/vdso32-setup.c
25946+++ b/arch/x86/vdso/vdso32-setup.c
25947@@ -25,6 +25,7 @@
25948 #include <asm/tlbflush.h>
25949 #include <asm/vdso.h>
25950 #include <asm/proto.h>
25951+#include <asm/mman.h>
25952
25953 enum {
25954 VDSO_DISABLED = 0,
25955@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
25956 void enable_sep_cpu(void)
25957 {
25958 int cpu = get_cpu();
25959- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25960+ struct tss_struct *tss = init_tss + cpu;
25961
25962 if (!boot_cpu_has(X86_FEATURE_SEP)) {
25963 put_cpu();
25964@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
25965 gate_vma.vm_start = FIXADDR_USER_START;
25966 gate_vma.vm_end = FIXADDR_USER_END;
25967 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
25968- gate_vma.vm_page_prot = __P101;
25969+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
25970 /*
25971 * Make sure the vDSO gets into every core dump.
25972 * Dumping its contents makes post-mortem fully interpretable later
25973@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
25974 if (compat)
25975 addr = VDSO_HIGH_BASE;
25976 else {
25977- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
25978+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
25979 if (IS_ERR_VALUE(addr)) {
25980 ret = addr;
25981 goto up_fail;
25982 }
25983 }
25984
25985- current->mm->context.vdso = (void *)addr;
25986+ current->mm->context.vdso = addr;
25987
25988 if (compat_uses_vma || !compat) {
25989 /*
25990@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
25991 }
25992
25993 current_thread_info()->sysenter_return =
25994- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
25995+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
25996
25997 up_fail:
25998 if (ret)
25999- current->mm->context.vdso = NULL;
26000+ current->mm->context.vdso = 0;
26001
26002 up_write(&mm->mmap_sem);
26003
26004@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26005
26006 const char *arch_vma_name(struct vm_area_struct *vma)
26007 {
26008- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26009+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26010 return "[vdso]";
26011+
26012+#ifdef CONFIG_PAX_SEGMEXEC
26013+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26014+ return "[vdso]";
26015+#endif
26016+
26017 return NULL;
26018 }
26019
26020@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26021 * Check to see if the corresponding task was created in compat vdso
26022 * mode.
26023 */
26024- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26025+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26026 return &gate_vma;
26027 return NULL;
26028 }
26029diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26030index 153407c..611cba9 100644
26031--- a/arch/x86/vdso/vma.c
26032+++ b/arch/x86/vdso/vma.c
26033@@ -16,8 +16,6 @@
26034 #include <asm/vdso.h>
26035 #include <asm/page.h>
26036
26037-unsigned int __read_mostly vdso_enabled = 1;
26038-
26039 extern char vdso_start[], vdso_end[];
26040 extern unsigned short vdso_sync_cpuid;
26041
26042@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26043 * unaligned here as a result of stack start randomization.
26044 */
26045 addr = PAGE_ALIGN(addr);
26046- addr = align_addr(addr, NULL, ALIGN_VDSO);
26047
26048 return addr;
26049 }
26050@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26051 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26052 {
26053 struct mm_struct *mm = current->mm;
26054- unsigned long addr;
26055+ unsigned long addr = 0;
26056 int ret;
26057
26058- if (!vdso_enabled)
26059- return 0;
26060-
26061 down_write(&mm->mmap_sem);
26062+
26063+#ifdef CONFIG_PAX_RANDMMAP
26064+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26065+#endif
26066+
26067 addr = vdso_addr(mm->start_stack, vdso_size);
26068+ addr = align_addr(addr, NULL, ALIGN_VDSO);
26069 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26070 if (IS_ERR_VALUE(addr)) {
26071 ret = addr;
26072 goto up_fail;
26073 }
26074
26075- current->mm->context.vdso = (void *)addr;
26076+ mm->context.vdso = addr;
26077
26078 ret = install_special_mapping(mm, addr, vdso_size,
26079 VM_READ|VM_EXEC|
26080 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26081 VM_ALWAYSDUMP,
26082 vdso_pages);
26083- if (ret) {
26084- current->mm->context.vdso = NULL;
26085- goto up_fail;
26086- }
26087+
26088+ if (ret)
26089+ mm->context.vdso = 0;
26090
26091 up_fail:
26092 up_write(&mm->mmap_sem);
26093 return ret;
26094 }
26095-
26096-static __init int vdso_setup(char *s)
26097-{
26098- vdso_enabled = simple_strtoul(s, NULL, 0);
26099- return 0;
26100-}
26101-__setup("vdso=", vdso_setup);
26102diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26103index 4172af8..2c8ed7f 100644
26104--- a/arch/x86/xen/enlighten.c
26105+++ b/arch/x86/xen/enlighten.c
26106@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26107
26108 struct shared_info xen_dummy_shared_info;
26109
26110-void *xen_initial_gdt;
26111-
26112 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26113 __read_mostly int xen_have_vector_callback;
26114 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26115@@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26116 #endif
26117 };
26118
26119-static void xen_reboot(int reason)
26120+static __noreturn void xen_reboot(int reason)
26121 {
26122 struct sched_shutdown r = { .reason = reason };
26123
26124- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26125- BUG();
26126+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26127+ BUG();
26128 }
26129
26130-static void xen_restart(char *msg)
26131+static __noreturn void xen_restart(char *msg)
26132 {
26133 xen_reboot(SHUTDOWN_reboot);
26134 }
26135
26136-static void xen_emergency_restart(void)
26137+static __noreturn void xen_emergency_restart(void)
26138 {
26139 xen_reboot(SHUTDOWN_reboot);
26140 }
26141
26142-static void xen_machine_halt(void)
26143+static __noreturn void xen_machine_halt(void)
26144 {
26145 xen_reboot(SHUTDOWN_poweroff);
26146 }
26147
26148-static void xen_machine_power_off(void)
26149+static __noreturn void xen_machine_power_off(void)
26150 {
26151 if (pm_power_off)
26152 pm_power_off();
26153@@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
26154 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26155
26156 /* Work out if we support NX */
26157- x86_configure_nx();
26158+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26159+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26160+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26161+ unsigned l, h;
26162+
26163+ __supported_pte_mask |= _PAGE_NX;
26164+ rdmsr(MSR_EFER, l, h);
26165+ l |= EFER_NX;
26166+ wrmsr(MSR_EFER, l, h);
26167+ }
26168+#endif
26169
26170 xen_setup_features();
26171
26172@@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
26173
26174 machine_ops = xen_machine_ops;
26175
26176- /*
26177- * The only reliable way to retain the initial address of the
26178- * percpu gdt_page is to remember it here, so we can go and
26179- * mark it RW later, when the initial percpu area is freed.
26180- */
26181- xen_initial_gdt = &per_cpu(gdt_page, 0);
26182-
26183 xen_smp_init();
26184
26185 #ifdef CONFIG_ACPI_NUMA
26186diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26187index 95c1cf6..4bfa5be 100644
26188--- a/arch/x86/xen/mmu.c
26189+++ b/arch/x86/xen/mmu.c
26190@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26191 convert_pfn_mfn(init_level4_pgt);
26192 convert_pfn_mfn(level3_ident_pgt);
26193 convert_pfn_mfn(level3_kernel_pgt);
26194+ convert_pfn_mfn(level3_vmalloc_start_pgt);
26195+ convert_pfn_mfn(level3_vmalloc_end_pgt);
26196+ convert_pfn_mfn(level3_vmemmap_pgt);
26197
26198 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26199 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26200@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26201 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26202 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26203 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26204+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26205+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26206+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26207 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26208+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26209 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26210 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26211
26212@@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
26213 pv_mmu_ops.set_pud = xen_set_pud;
26214 #if PAGETABLE_LEVELS == 4
26215 pv_mmu_ops.set_pgd = xen_set_pgd;
26216+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26217 #endif
26218
26219 /* This will work as long as patching hasn't happened yet
26220@@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26221 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26222 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26223 .set_pgd = xen_set_pgd_hyper,
26224+ .set_pgd_batched = xen_set_pgd_hyper,
26225
26226 .alloc_pud = xen_alloc_pmd_init,
26227 .release_pud = xen_release_pmd_init,
26228diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26229index 501d4e0..e877605 100644
26230--- a/arch/x86/xen/smp.c
26231+++ b/arch/x86/xen/smp.c
26232@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26233 {
26234 BUG_ON(smp_processor_id() != 0);
26235 native_smp_prepare_boot_cpu();
26236-
26237- /* We've switched to the "real" per-cpu gdt, so make sure the
26238- old memory can be recycled */
26239- make_lowmem_page_readwrite(xen_initial_gdt);
26240-
26241 xen_filter_cpu_maps();
26242 xen_setup_vcpu_info_placement();
26243 }
26244@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26245 gdt = get_cpu_gdt_table(cpu);
26246
26247 ctxt->flags = VGCF_IN_KERNEL;
26248- ctxt->user_regs.ds = __USER_DS;
26249- ctxt->user_regs.es = __USER_DS;
26250+ ctxt->user_regs.ds = __KERNEL_DS;
26251+ ctxt->user_regs.es = __KERNEL_DS;
26252 ctxt->user_regs.ss = __KERNEL_DS;
26253 #ifdef CONFIG_X86_32
26254 ctxt->user_regs.fs = __KERNEL_PERCPU;
26255- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26256+ savesegment(gs, ctxt->user_regs.gs);
26257 #else
26258 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26259 #endif
26260@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26261 int rc;
26262
26263 per_cpu(current_task, cpu) = idle;
26264+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26265 #ifdef CONFIG_X86_32
26266 irq_ctx_init(cpu);
26267 #else
26268 clear_tsk_thread_flag(idle, TIF_FORK);
26269- per_cpu(kernel_stack, cpu) =
26270- (unsigned long)task_stack_page(idle) -
26271- KERNEL_STACK_OFFSET + THREAD_SIZE;
26272+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26273 #endif
26274 xen_setup_runstate_info(cpu);
26275 xen_setup_timer(cpu);
26276diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26277index b040b0e..8cc4fe0 100644
26278--- a/arch/x86/xen/xen-asm_32.S
26279+++ b/arch/x86/xen/xen-asm_32.S
26280@@ -83,14 +83,14 @@ ENTRY(xen_iret)
26281 ESP_OFFSET=4 # bytes pushed onto stack
26282
26283 /*
26284- * Store vcpu_info pointer for easy access. Do it this way to
26285- * avoid having to reload %fs
26286+ * Store vcpu_info pointer for easy access.
26287 */
26288 #ifdef CONFIG_SMP
26289- GET_THREAD_INFO(%eax)
26290- movl TI_cpu(%eax), %eax
26291- movl __per_cpu_offset(,%eax,4), %eax
26292- mov xen_vcpu(%eax), %eax
26293+ push %fs
26294+ mov $(__KERNEL_PERCPU), %eax
26295+ mov %eax, %fs
26296+ mov PER_CPU_VAR(xen_vcpu), %eax
26297+ pop %fs
26298 #else
26299 movl xen_vcpu, %eax
26300 #endif
26301diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26302index aaa7291..3f77960 100644
26303--- a/arch/x86/xen/xen-head.S
26304+++ b/arch/x86/xen/xen-head.S
26305@@ -19,6 +19,17 @@ ENTRY(startup_xen)
26306 #ifdef CONFIG_X86_32
26307 mov %esi,xen_start_info
26308 mov $init_thread_union+THREAD_SIZE,%esp
26309+#ifdef CONFIG_SMP
26310+ movl $cpu_gdt_table,%edi
26311+ movl $__per_cpu_load,%eax
26312+ movw %ax,__KERNEL_PERCPU + 2(%edi)
26313+ rorl $16,%eax
26314+ movb %al,__KERNEL_PERCPU + 4(%edi)
26315+ movb %ah,__KERNEL_PERCPU + 7(%edi)
26316+ movl $__per_cpu_end - 1,%eax
26317+ subl $__per_cpu_start,%eax
26318+ movw %ax,__KERNEL_PERCPU + 0(%edi)
26319+#endif
26320 #else
26321 mov %rsi,xen_start_info
26322 mov $init_thread_union+THREAD_SIZE,%rsp
26323diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26324index b095739..8c17bcd 100644
26325--- a/arch/x86/xen/xen-ops.h
26326+++ b/arch/x86/xen/xen-ops.h
26327@@ -10,8 +10,6 @@
26328 extern const char xen_hypervisor_callback[];
26329 extern const char xen_failsafe_callback[];
26330
26331-extern void *xen_initial_gdt;
26332-
26333 struct trap_info;
26334 void xen_copy_trap_info(struct trap_info *traps);
26335
26336diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26337index 525bd3d..ef888b1 100644
26338--- a/arch/xtensa/variants/dc232b/include/variant/core.h
26339+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26340@@ -119,9 +119,9 @@
26341 ----------------------------------------------------------------------*/
26342
26343 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26344-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26345 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26346 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26347+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26348
26349 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26350 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26351diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26352index 2f33760..835e50a 100644
26353--- a/arch/xtensa/variants/fsf/include/variant/core.h
26354+++ b/arch/xtensa/variants/fsf/include/variant/core.h
26355@@ -11,6 +11,7 @@
26356 #ifndef _XTENSA_CORE_H
26357 #define _XTENSA_CORE_H
26358
26359+#include <linux/const.h>
26360
26361 /****************************************************************************
26362 Parameters Useful for Any Code, USER or PRIVILEGED
26363@@ -112,9 +113,9 @@
26364 ----------------------------------------------------------------------*/
26365
26366 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26367-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26368 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26369 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26370+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26371
26372 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26373 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26374diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26375index af00795..2bb8105 100644
26376--- a/arch/xtensa/variants/s6000/include/variant/core.h
26377+++ b/arch/xtensa/variants/s6000/include/variant/core.h
26378@@ -11,6 +11,7 @@
26379 #ifndef _XTENSA_CORE_CONFIGURATION_H
26380 #define _XTENSA_CORE_CONFIGURATION_H
26381
26382+#include <linux/const.h>
26383
26384 /****************************************************************************
26385 Parameters Useful for Any Code, USER or PRIVILEGED
26386@@ -118,9 +119,9 @@
26387 ----------------------------------------------------------------------*/
26388
26389 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26390-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26391 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26392 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26393+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26394
26395 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26396 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26397diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26398index 58916af..9cb880b 100644
26399--- a/block/blk-iopoll.c
26400+++ b/block/blk-iopoll.c
26401@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26402 }
26403 EXPORT_SYMBOL(blk_iopoll_complete);
26404
26405-static void blk_iopoll_softirq(struct softirq_action *h)
26406+static void blk_iopoll_softirq(void)
26407 {
26408 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26409 int rearm = 0, budget = blk_iopoll_budget;
26410diff --git a/block/blk-map.c b/block/blk-map.c
26411index 623e1cd..ca1e109 100644
26412--- a/block/blk-map.c
26413+++ b/block/blk-map.c
26414@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26415 if (!len || !kbuf)
26416 return -EINVAL;
26417
26418- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26419+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26420 if (do_copy)
26421 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26422 else
26423diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26424index 1366a89..e17f54b 100644
26425--- a/block/blk-softirq.c
26426+++ b/block/blk-softirq.c
26427@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26428 * Softirq action handler - move entries to local list and loop over them
26429 * while passing them to the queue registered handler.
26430 */
26431-static void blk_done_softirq(struct softirq_action *h)
26432+static void blk_done_softirq(void)
26433 {
26434 struct list_head *cpu_list, local_list;
26435
26436diff --git a/block/bsg.c b/block/bsg.c
26437index ff64ae3..593560c 100644
26438--- a/block/bsg.c
26439+++ b/block/bsg.c
26440@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26441 struct sg_io_v4 *hdr, struct bsg_device *bd,
26442 fmode_t has_write_perm)
26443 {
26444+ unsigned char tmpcmd[sizeof(rq->__cmd)];
26445+ unsigned char *cmdptr;
26446+
26447 if (hdr->request_len > BLK_MAX_CDB) {
26448 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26449 if (!rq->cmd)
26450 return -ENOMEM;
26451- }
26452+ cmdptr = rq->cmd;
26453+ } else
26454+ cmdptr = tmpcmd;
26455
26456- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26457+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26458 hdr->request_len))
26459 return -EFAULT;
26460
26461+ if (cmdptr != rq->cmd)
26462+ memcpy(rq->cmd, cmdptr, hdr->request_len);
26463+
26464 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26465 if (blk_verify_command(rq->cmd, has_write_perm))
26466 return -EPERM;
26467diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26468index 7c668c8..db3521c 100644
26469--- a/block/compat_ioctl.c
26470+++ b/block/compat_ioctl.c
26471@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26472 err |= __get_user(f->spec1, &uf->spec1);
26473 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26474 err |= __get_user(name, &uf->name);
26475- f->name = compat_ptr(name);
26476+ f->name = (void __force_kernel *)compat_ptr(name);
26477 if (err) {
26478 err = -EFAULT;
26479 goto out;
26480diff --git a/block/partitions/efi.c b/block/partitions/efi.c
26481index 6296b40..417c00f 100644
26482--- a/block/partitions/efi.c
26483+++ b/block/partitions/efi.c
26484@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
26485 if (!gpt)
26486 return NULL;
26487
26488+ if (!le32_to_cpu(gpt->num_partition_entries))
26489+ return NULL;
26490+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
26491+ if (!pte)
26492+ return NULL;
26493+
26494 count = le32_to_cpu(gpt->num_partition_entries) *
26495 le32_to_cpu(gpt->sizeof_partition_entry);
26496- if (!count)
26497- return NULL;
26498- pte = kzalloc(count, GFP_KERNEL);
26499- if (!pte)
26500- return NULL;
26501-
26502 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
26503 (u8 *) pte,
26504 count) < count) {
26505diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26506index 260fa80..e8f3caf 100644
26507--- a/block/scsi_ioctl.c
26508+++ b/block/scsi_ioctl.c
26509@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26510 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26511 struct sg_io_hdr *hdr, fmode_t mode)
26512 {
26513- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26514+ unsigned char tmpcmd[sizeof(rq->__cmd)];
26515+ unsigned char *cmdptr;
26516+
26517+ if (rq->cmd != rq->__cmd)
26518+ cmdptr = rq->cmd;
26519+ else
26520+ cmdptr = tmpcmd;
26521+
26522+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
26523 return -EFAULT;
26524+
26525+ if (cmdptr != rq->cmd)
26526+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
26527+
26528 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
26529 return -EPERM;
26530
26531@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26532 int err;
26533 unsigned int in_len, out_len, bytes, opcode, cmdlen;
26534 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
26535+ unsigned char tmpcmd[sizeof(rq->__cmd)];
26536+ unsigned char *cmdptr;
26537
26538 if (!sic)
26539 return -EINVAL;
26540@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26541 */
26542 err = -EFAULT;
26543 rq->cmd_len = cmdlen;
26544- if (copy_from_user(rq->cmd, sic->data, cmdlen))
26545+
26546+ if (rq->cmd != rq->__cmd)
26547+ cmdptr = rq->cmd;
26548+ else
26549+ cmdptr = tmpcmd;
26550+
26551+ if (copy_from_user(cmdptr, sic->data, cmdlen))
26552 goto error;
26553
26554+ if (rq->cmd != cmdptr)
26555+ memcpy(rq->cmd, cmdptr, cmdlen);
26556+
26557 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
26558 goto error;
26559
26560diff --git a/crypto/cryptd.c b/crypto/cryptd.c
26561index 671d4d6..5f24030 100644
26562--- a/crypto/cryptd.c
26563+++ b/crypto/cryptd.c
26564@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
26565
26566 struct cryptd_blkcipher_request_ctx {
26567 crypto_completion_t complete;
26568-};
26569+} __no_const;
26570
26571 struct cryptd_hash_ctx {
26572 struct crypto_shash *child;
26573@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
26574
26575 struct cryptd_aead_request_ctx {
26576 crypto_completion_t complete;
26577-};
26578+} __no_const;
26579
26580 static void cryptd_queue_worker(struct work_struct *work);
26581
26582diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
26583index 5d41894..22021e4 100644
26584--- a/drivers/acpi/apei/cper.c
26585+++ b/drivers/acpi/apei/cper.c
26586@@ -38,12 +38,12 @@
26587 */
26588 u64 cper_next_record_id(void)
26589 {
26590- static atomic64_t seq;
26591+ static atomic64_unchecked_t seq;
26592
26593- if (!atomic64_read(&seq))
26594- atomic64_set(&seq, ((u64)get_seconds()) << 32);
26595+ if (!atomic64_read_unchecked(&seq))
26596+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
26597
26598- return atomic64_inc_return(&seq);
26599+ return atomic64_inc_return_unchecked(&seq);
26600 }
26601 EXPORT_SYMBOL_GPL(cper_next_record_id);
26602
26603diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
26604index b258cab..3fb7da7 100644
26605--- a/drivers/acpi/ec_sys.c
26606+++ b/drivers/acpi/ec_sys.c
26607@@ -12,6 +12,7 @@
26608 #include <linux/acpi.h>
26609 #include <linux/debugfs.h>
26610 #include <linux/module.h>
26611+#include <linux/uaccess.h>
26612 #include "internal.h"
26613
26614 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
26615@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26616 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
26617 */
26618 unsigned int size = EC_SPACE_SIZE;
26619- u8 *data = (u8 *) buf;
26620+ u8 data;
26621 loff_t init_off = *off;
26622 int err = 0;
26623
26624@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26625 size = count;
26626
26627 while (size) {
26628- err = ec_read(*off, &data[*off - init_off]);
26629+ err = ec_read(*off, &data);
26630 if (err)
26631 return err;
26632+ if (put_user(data, &buf[*off - init_off]))
26633+ return -EFAULT;
26634 *off += 1;
26635 size--;
26636 }
26637@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26638
26639 unsigned int size = count;
26640 loff_t init_off = *off;
26641- u8 *data = (u8 *) buf;
26642 int err = 0;
26643
26644 if (*off >= EC_SPACE_SIZE)
26645@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26646 }
26647
26648 while (size) {
26649- u8 byte_write = data[*off - init_off];
26650+ u8 byte_write;
26651+ if (get_user(byte_write, &buf[*off - init_off]))
26652+ return -EFAULT;
26653 err = ec_write(*off, byte_write);
26654 if (err)
26655 return err;
26656diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
26657index 251c7b62..000462d 100644
26658--- a/drivers/acpi/proc.c
26659+++ b/drivers/acpi/proc.c
26660@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
26661 size_t count, loff_t * ppos)
26662 {
26663 struct list_head *node, *next;
26664- char strbuf[5];
26665- char str[5] = "";
26666- unsigned int len = count;
26667+ char strbuf[5] = {0};
26668
26669- if (len > 4)
26670- len = 4;
26671- if (len < 0)
26672+ if (count > 4)
26673+ count = 4;
26674+ if (copy_from_user(strbuf, buffer, count))
26675 return -EFAULT;
26676-
26677- if (copy_from_user(strbuf, buffer, len))
26678- return -EFAULT;
26679- strbuf[len] = '\0';
26680- sscanf(strbuf, "%s", str);
26681+ strbuf[count] = '\0';
26682
26683 mutex_lock(&acpi_device_lock);
26684 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
26685@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
26686 if (!dev->wakeup.flags.valid)
26687 continue;
26688
26689- if (!strncmp(dev->pnp.bus_id, str, 4)) {
26690+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
26691 if (device_can_wakeup(&dev->dev)) {
26692 bool enable = !device_may_wakeup(&dev->dev);
26693 device_set_wakeup_enable(&dev->dev, enable);
26694diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
26695index 8ae05ce..7dbbed9 100644
26696--- a/drivers/acpi/processor_driver.c
26697+++ b/drivers/acpi/processor_driver.c
26698@@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
26699 return 0;
26700 #endif
26701
26702- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
26703+ BUG_ON(pr->id >= nr_cpu_ids);
26704
26705 /*
26706 * Buggy BIOS check
26707diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
26708index c06e0ec..a2c06ba 100644
26709--- a/drivers/ata/libata-core.c
26710+++ b/drivers/ata/libata-core.c
26711@@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
26712 struct ata_port *ap;
26713 unsigned int tag;
26714
26715- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26716+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26717 ap = qc->ap;
26718
26719 qc->flags = 0;
26720@@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
26721 struct ata_port *ap;
26722 struct ata_link *link;
26723
26724- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26725+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26726 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
26727 ap = qc->ap;
26728 link = qc->dev->link;
26729@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26730 return;
26731
26732 spin_lock(&lock);
26733+ pax_open_kernel();
26734
26735 for (cur = ops->inherits; cur; cur = cur->inherits) {
26736 void **inherit = (void **)cur;
26737@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26738 if (IS_ERR(*pp))
26739 *pp = NULL;
26740
26741- ops->inherits = NULL;
26742+ *(struct ata_port_operations **)&ops->inherits = NULL;
26743
26744+ pax_close_kernel();
26745 spin_unlock(&lock);
26746 }
26747
26748diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
26749index 048589f..4002b98 100644
26750--- a/drivers/ata/pata_arasan_cf.c
26751+++ b/drivers/ata/pata_arasan_cf.c
26752@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
26753 /* Handle platform specific quirks */
26754 if (pdata->quirk) {
26755 if (pdata->quirk & CF_BROKEN_PIO) {
26756- ap->ops->set_piomode = NULL;
26757+ pax_open_kernel();
26758+ *(void **)&ap->ops->set_piomode = NULL;
26759+ pax_close_kernel();
26760 ap->pio_mask = 0;
26761 }
26762 if (pdata->quirk & CF_BROKEN_MWDMA)
26763diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
26764index f9b983a..887b9d8 100644
26765--- a/drivers/atm/adummy.c
26766+++ b/drivers/atm/adummy.c
26767@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
26768 vcc->pop(vcc, skb);
26769 else
26770 dev_kfree_skb_any(skb);
26771- atomic_inc(&vcc->stats->tx);
26772+ atomic_inc_unchecked(&vcc->stats->tx);
26773
26774 return 0;
26775 }
26776diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
26777index f8f41e0..1f987dd 100644
26778--- a/drivers/atm/ambassador.c
26779+++ b/drivers/atm/ambassador.c
26780@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
26781 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26782
26783 // VC layer stats
26784- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26785+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26786
26787 // free the descriptor
26788 kfree (tx_descr);
26789@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26790 dump_skb ("<<<", vc, skb);
26791
26792 // VC layer stats
26793- atomic_inc(&atm_vcc->stats->rx);
26794+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26795 __net_timestamp(skb);
26796 // end of our responsibility
26797 atm_vcc->push (atm_vcc, skb);
26798@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26799 } else {
26800 PRINTK (KERN_INFO, "dropped over-size frame");
26801 // should we count this?
26802- atomic_inc(&atm_vcc->stats->rx_drop);
26803+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26804 }
26805
26806 } else {
26807@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
26808 }
26809
26810 if (check_area (skb->data, skb->len)) {
26811- atomic_inc(&atm_vcc->stats->tx_err);
26812+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26813 return -ENOMEM; // ?
26814 }
26815
26816diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
26817index b22d71c..d6e1049 100644
26818--- a/drivers/atm/atmtcp.c
26819+++ b/drivers/atm/atmtcp.c
26820@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26821 if (vcc->pop) vcc->pop(vcc,skb);
26822 else dev_kfree_skb(skb);
26823 if (dev_data) return 0;
26824- atomic_inc(&vcc->stats->tx_err);
26825+ atomic_inc_unchecked(&vcc->stats->tx_err);
26826 return -ENOLINK;
26827 }
26828 size = skb->len+sizeof(struct atmtcp_hdr);
26829@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26830 if (!new_skb) {
26831 if (vcc->pop) vcc->pop(vcc,skb);
26832 else dev_kfree_skb(skb);
26833- atomic_inc(&vcc->stats->tx_err);
26834+ atomic_inc_unchecked(&vcc->stats->tx_err);
26835 return -ENOBUFS;
26836 }
26837 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26838@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26839 if (vcc->pop) vcc->pop(vcc,skb);
26840 else dev_kfree_skb(skb);
26841 out_vcc->push(out_vcc,new_skb);
26842- atomic_inc(&vcc->stats->tx);
26843- atomic_inc(&out_vcc->stats->rx);
26844+ atomic_inc_unchecked(&vcc->stats->tx);
26845+ atomic_inc_unchecked(&out_vcc->stats->rx);
26846 return 0;
26847 }
26848
26849@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26850 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26851 read_unlock(&vcc_sklist_lock);
26852 if (!out_vcc) {
26853- atomic_inc(&vcc->stats->tx_err);
26854+ atomic_inc_unchecked(&vcc->stats->tx_err);
26855 goto done;
26856 }
26857 skb_pull(skb,sizeof(struct atmtcp_hdr));
26858@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26859 __net_timestamp(new_skb);
26860 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26861 out_vcc->push(out_vcc,new_skb);
26862- atomic_inc(&vcc->stats->tx);
26863- atomic_inc(&out_vcc->stats->rx);
26864+ atomic_inc_unchecked(&vcc->stats->tx);
26865+ atomic_inc_unchecked(&out_vcc->stats->rx);
26866 done:
26867 if (vcc->pop) vcc->pop(vcc,skb);
26868 else dev_kfree_skb(skb);
26869diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
26870index 956e9ac..133516d 100644
26871--- a/drivers/atm/eni.c
26872+++ b/drivers/atm/eni.c
26873@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26874 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26875 vcc->dev->number);
26876 length = 0;
26877- atomic_inc(&vcc->stats->rx_err);
26878+ atomic_inc_unchecked(&vcc->stats->rx_err);
26879 }
26880 else {
26881 length = ATM_CELL_SIZE-1; /* no HEC */
26882@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26883 size);
26884 }
26885 eff = length = 0;
26886- atomic_inc(&vcc->stats->rx_err);
26887+ atomic_inc_unchecked(&vcc->stats->rx_err);
26888 }
26889 else {
26890 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26891@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26892 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26893 vcc->dev->number,vcc->vci,length,size << 2,descr);
26894 length = eff = 0;
26895- atomic_inc(&vcc->stats->rx_err);
26896+ atomic_inc_unchecked(&vcc->stats->rx_err);
26897 }
26898 }
26899 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26900@@ -771,7 +771,7 @@ rx_dequeued++;
26901 vcc->push(vcc,skb);
26902 pushed++;
26903 }
26904- atomic_inc(&vcc->stats->rx);
26905+ atomic_inc_unchecked(&vcc->stats->rx);
26906 }
26907 wake_up(&eni_dev->rx_wait);
26908 }
26909@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
26910 PCI_DMA_TODEVICE);
26911 if (vcc->pop) vcc->pop(vcc,skb);
26912 else dev_kfree_skb_irq(skb);
26913- atomic_inc(&vcc->stats->tx);
26914+ atomic_inc_unchecked(&vcc->stats->tx);
26915 wake_up(&eni_dev->tx_wait);
26916 dma_complete++;
26917 }
26918@@ -1569,7 +1569,7 @@ tx_complete++;
26919 /*--------------------------------- entries ---------------------------------*/
26920
26921
26922-static const char *media_name[] __devinitdata = {
26923+static const char *media_name[] __devinitconst = {
26924 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
26925 "UTP", "05?", "06?", "07?", /* 4- 7 */
26926 "TAXI","09?", "10?", "11?", /* 8-11 */
26927diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
26928index 5072f8a..fa52520d 100644
26929--- a/drivers/atm/firestream.c
26930+++ b/drivers/atm/firestream.c
26931@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
26932 }
26933 }
26934
26935- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26936+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26937
26938 fs_dprintk (FS_DEBUG_TXMEM, "i");
26939 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26940@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26941 #endif
26942 skb_put (skb, qe->p1 & 0xffff);
26943 ATM_SKB(skb)->vcc = atm_vcc;
26944- atomic_inc(&atm_vcc->stats->rx);
26945+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26946 __net_timestamp(skb);
26947 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26948 atm_vcc->push (atm_vcc, skb);
26949@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26950 kfree (pe);
26951 }
26952 if (atm_vcc)
26953- atomic_inc(&atm_vcc->stats->rx_drop);
26954+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26955 break;
26956 case 0x1f: /* Reassembly abort: no buffers. */
26957 /* Silently increment error counter. */
26958 if (atm_vcc)
26959- atomic_inc(&atm_vcc->stats->rx_drop);
26960+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26961 break;
26962 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26963 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26964diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
26965index 361f5ae..7fc552d 100644
26966--- a/drivers/atm/fore200e.c
26967+++ b/drivers/atm/fore200e.c
26968@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
26969 #endif
26970 /* check error condition */
26971 if (*entry->status & STATUS_ERROR)
26972- atomic_inc(&vcc->stats->tx_err);
26973+ atomic_inc_unchecked(&vcc->stats->tx_err);
26974 else
26975- atomic_inc(&vcc->stats->tx);
26976+ atomic_inc_unchecked(&vcc->stats->tx);
26977 }
26978 }
26979
26980@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26981 if (skb == NULL) {
26982 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26983
26984- atomic_inc(&vcc->stats->rx_drop);
26985+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26986 return -ENOMEM;
26987 }
26988
26989@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26990
26991 dev_kfree_skb_any(skb);
26992
26993- atomic_inc(&vcc->stats->rx_drop);
26994+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26995 return -ENOMEM;
26996 }
26997
26998 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26999
27000 vcc->push(vcc, skb);
27001- atomic_inc(&vcc->stats->rx);
27002+ atomic_inc_unchecked(&vcc->stats->rx);
27003
27004 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27005
27006@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27007 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27008 fore200e->atm_dev->number,
27009 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27010- atomic_inc(&vcc->stats->rx_err);
27011+ atomic_inc_unchecked(&vcc->stats->rx_err);
27012 }
27013 }
27014
27015@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27016 goto retry_here;
27017 }
27018
27019- atomic_inc(&vcc->stats->tx_err);
27020+ atomic_inc_unchecked(&vcc->stats->tx_err);
27021
27022 fore200e->tx_sat++;
27023 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27024diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27025index b182c2f..1c6fa8a 100644
27026--- a/drivers/atm/he.c
27027+++ b/drivers/atm/he.c
27028@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27029
27030 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27031 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27032- atomic_inc(&vcc->stats->rx_drop);
27033+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27034 goto return_host_buffers;
27035 }
27036
27037@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27038 RBRQ_LEN_ERR(he_dev->rbrq_head)
27039 ? "LEN_ERR" : "",
27040 vcc->vpi, vcc->vci);
27041- atomic_inc(&vcc->stats->rx_err);
27042+ atomic_inc_unchecked(&vcc->stats->rx_err);
27043 goto return_host_buffers;
27044 }
27045
27046@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27047 vcc->push(vcc, skb);
27048 spin_lock(&he_dev->global_lock);
27049
27050- atomic_inc(&vcc->stats->rx);
27051+ atomic_inc_unchecked(&vcc->stats->rx);
27052
27053 return_host_buffers:
27054 ++pdus_assembled;
27055@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27056 tpd->vcc->pop(tpd->vcc, tpd->skb);
27057 else
27058 dev_kfree_skb_any(tpd->skb);
27059- atomic_inc(&tpd->vcc->stats->tx_err);
27060+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27061 }
27062 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27063 return;
27064@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27065 vcc->pop(vcc, skb);
27066 else
27067 dev_kfree_skb_any(skb);
27068- atomic_inc(&vcc->stats->tx_err);
27069+ atomic_inc_unchecked(&vcc->stats->tx_err);
27070 return -EINVAL;
27071 }
27072
27073@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27074 vcc->pop(vcc, skb);
27075 else
27076 dev_kfree_skb_any(skb);
27077- atomic_inc(&vcc->stats->tx_err);
27078+ atomic_inc_unchecked(&vcc->stats->tx_err);
27079 return -EINVAL;
27080 }
27081 #endif
27082@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27083 vcc->pop(vcc, skb);
27084 else
27085 dev_kfree_skb_any(skb);
27086- atomic_inc(&vcc->stats->tx_err);
27087+ atomic_inc_unchecked(&vcc->stats->tx_err);
27088 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27089 return -ENOMEM;
27090 }
27091@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27092 vcc->pop(vcc, skb);
27093 else
27094 dev_kfree_skb_any(skb);
27095- atomic_inc(&vcc->stats->tx_err);
27096+ atomic_inc_unchecked(&vcc->stats->tx_err);
27097 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27098 return -ENOMEM;
27099 }
27100@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27101 __enqueue_tpd(he_dev, tpd, cid);
27102 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27103
27104- atomic_inc(&vcc->stats->tx);
27105+ atomic_inc_unchecked(&vcc->stats->tx);
27106
27107 return 0;
27108 }
27109diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27110index b812103..e391a49 100644
27111--- a/drivers/atm/horizon.c
27112+++ b/drivers/atm/horizon.c
27113@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27114 {
27115 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27116 // VC layer stats
27117- atomic_inc(&vcc->stats->rx);
27118+ atomic_inc_unchecked(&vcc->stats->rx);
27119 __net_timestamp(skb);
27120 // end of our responsibility
27121 vcc->push (vcc, skb);
27122@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27123 dev->tx_iovec = NULL;
27124
27125 // VC layer stats
27126- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27127+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27128
27129 // free the skb
27130 hrz_kfree_skb (skb);
27131diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27132index 1c05212..c28e200 100644
27133--- a/drivers/atm/idt77252.c
27134+++ b/drivers/atm/idt77252.c
27135@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27136 else
27137 dev_kfree_skb(skb);
27138
27139- atomic_inc(&vcc->stats->tx);
27140+ atomic_inc_unchecked(&vcc->stats->tx);
27141 }
27142
27143 atomic_dec(&scq->used);
27144@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27145 if ((sb = dev_alloc_skb(64)) == NULL) {
27146 printk("%s: Can't allocate buffers for aal0.\n",
27147 card->name);
27148- atomic_add(i, &vcc->stats->rx_drop);
27149+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
27150 break;
27151 }
27152 if (!atm_charge(vcc, sb->truesize)) {
27153 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27154 card->name);
27155- atomic_add(i - 1, &vcc->stats->rx_drop);
27156+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27157 dev_kfree_skb(sb);
27158 break;
27159 }
27160@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27161 ATM_SKB(sb)->vcc = vcc;
27162 __net_timestamp(sb);
27163 vcc->push(vcc, sb);
27164- atomic_inc(&vcc->stats->rx);
27165+ atomic_inc_unchecked(&vcc->stats->rx);
27166
27167 cell += ATM_CELL_PAYLOAD;
27168 }
27169@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27170 "(CDC: %08x)\n",
27171 card->name, len, rpp->len, readl(SAR_REG_CDC));
27172 recycle_rx_pool_skb(card, rpp);
27173- atomic_inc(&vcc->stats->rx_err);
27174+ atomic_inc_unchecked(&vcc->stats->rx_err);
27175 return;
27176 }
27177 if (stat & SAR_RSQE_CRC) {
27178 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27179 recycle_rx_pool_skb(card, rpp);
27180- atomic_inc(&vcc->stats->rx_err);
27181+ atomic_inc_unchecked(&vcc->stats->rx_err);
27182 return;
27183 }
27184 if (skb_queue_len(&rpp->queue) > 1) {
27185@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27186 RXPRINTK("%s: Can't alloc RX skb.\n",
27187 card->name);
27188 recycle_rx_pool_skb(card, rpp);
27189- atomic_inc(&vcc->stats->rx_err);
27190+ atomic_inc_unchecked(&vcc->stats->rx_err);
27191 return;
27192 }
27193 if (!atm_charge(vcc, skb->truesize)) {
27194@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27195 __net_timestamp(skb);
27196
27197 vcc->push(vcc, skb);
27198- atomic_inc(&vcc->stats->rx);
27199+ atomic_inc_unchecked(&vcc->stats->rx);
27200
27201 return;
27202 }
27203@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27204 __net_timestamp(skb);
27205
27206 vcc->push(vcc, skb);
27207- atomic_inc(&vcc->stats->rx);
27208+ atomic_inc_unchecked(&vcc->stats->rx);
27209
27210 if (skb->truesize > SAR_FB_SIZE_3)
27211 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27212@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27213 if (vcc->qos.aal != ATM_AAL0) {
27214 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27215 card->name, vpi, vci);
27216- atomic_inc(&vcc->stats->rx_drop);
27217+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27218 goto drop;
27219 }
27220
27221 if ((sb = dev_alloc_skb(64)) == NULL) {
27222 printk("%s: Can't allocate buffers for AAL0.\n",
27223 card->name);
27224- atomic_inc(&vcc->stats->rx_err);
27225+ atomic_inc_unchecked(&vcc->stats->rx_err);
27226 goto drop;
27227 }
27228
27229@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27230 ATM_SKB(sb)->vcc = vcc;
27231 __net_timestamp(sb);
27232 vcc->push(vcc, sb);
27233- atomic_inc(&vcc->stats->rx);
27234+ atomic_inc_unchecked(&vcc->stats->rx);
27235
27236 drop:
27237 skb_pull(queue, 64);
27238@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27239
27240 if (vc == NULL) {
27241 printk("%s: NULL connection in send().\n", card->name);
27242- atomic_inc(&vcc->stats->tx_err);
27243+ atomic_inc_unchecked(&vcc->stats->tx_err);
27244 dev_kfree_skb(skb);
27245 return -EINVAL;
27246 }
27247 if (!test_bit(VCF_TX, &vc->flags)) {
27248 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27249- atomic_inc(&vcc->stats->tx_err);
27250+ atomic_inc_unchecked(&vcc->stats->tx_err);
27251 dev_kfree_skb(skb);
27252 return -EINVAL;
27253 }
27254@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27255 break;
27256 default:
27257 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27258- atomic_inc(&vcc->stats->tx_err);
27259+ atomic_inc_unchecked(&vcc->stats->tx_err);
27260 dev_kfree_skb(skb);
27261 return -EINVAL;
27262 }
27263
27264 if (skb_shinfo(skb)->nr_frags != 0) {
27265 printk("%s: No scatter-gather yet.\n", card->name);
27266- atomic_inc(&vcc->stats->tx_err);
27267+ atomic_inc_unchecked(&vcc->stats->tx_err);
27268 dev_kfree_skb(skb);
27269 return -EINVAL;
27270 }
27271@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27272
27273 err = queue_skb(card, vc, skb, oam);
27274 if (err) {
27275- atomic_inc(&vcc->stats->tx_err);
27276+ atomic_inc_unchecked(&vcc->stats->tx_err);
27277 dev_kfree_skb(skb);
27278 return err;
27279 }
27280@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27281 skb = dev_alloc_skb(64);
27282 if (!skb) {
27283 printk("%s: Out of memory in send_oam().\n", card->name);
27284- atomic_inc(&vcc->stats->tx_err);
27285+ atomic_inc_unchecked(&vcc->stats->tx_err);
27286 return -ENOMEM;
27287 }
27288 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27289diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27290index 9e373ba..cf93727 100644
27291--- a/drivers/atm/iphase.c
27292+++ b/drivers/atm/iphase.c
27293@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27294 status = (u_short) (buf_desc_ptr->desc_mode);
27295 if (status & (RX_CER | RX_PTE | RX_OFL))
27296 {
27297- atomic_inc(&vcc->stats->rx_err);
27298+ atomic_inc_unchecked(&vcc->stats->rx_err);
27299 IF_ERR(printk("IA: bad packet, dropping it");)
27300 if (status & RX_CER) {
27301 IF_ERR(printk(" cause: packet CRC error\n");)
27302@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27303 len = dma_addr - buf_addr;
27304 if (len > iadev->rx_buf_sz) {
27305 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27306- atomic_inc(&vcc->stats->rx_err);
27307+ atomic_inc_unchecked(&vcc->stats->rx_err);
27308 goto out_free_desc;
27309 }
27310
27311@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27312 ia_vcc = INPH_IA_VCC(vcc);
27313 if (ia_vcc == NULL)
27314 {
27315- atomic_inc(&vcc->stats->rx_err);
27316+ atomic_inc_unchecked(&vcc->stats->rx_err);
27317 atm_return(vcc, skb->truesize);
27318 dev_kfree_skb_any(skb);
27319 goto INCR_DLE;
27320@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27321 if ((length > iadev->rx_buf_sz) || (length >
27322 (skb->len - sizeof(struct cpcs_trailer))))
27323 {
27324- atomic_inc(&vcc->stats->rx_err);
27325+ atomic_inc_unchecked(&vcc->stats->rx_err);
27326 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27327 length, skb->len);)
27328 atm_return(vcc, skb->truesize);
27329@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27330
27331 IF_RX(printk("rx_dle_intr: skb push");)
27332 vcc->push(vcc,skb);
27333- atomic_inc(&vcc->stats->rx);
27334+ atomic_inc_unchecked(&vcc->stats->rx);
27335 iadev->rx_pkt_cnt++;
27336 }
27337 INCR_DLE:
27338@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27339 {
27340 struct k_sonet_stats *stats;
27341 stats = &PRIV(_ia_dev[board])->sonet_stats;
27342- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27343- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27344- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27345- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27346- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27347- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27348- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27349- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27350- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27351+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27352+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27353+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27354+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27355+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27356+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27357+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27358+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27359+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27360 }
27361 ia_cmds.status = 0;
27362 break;
27363@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27364 if ((desc == 0) || (desc > iadev->num_tx_desc))
27365 {
27366 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27367- atomic_inc(&vcc->stats->tx);
27368+ atomic_inc_unchecked(&vcc->stats->tx);
27369 if (vcc->pop)
27370 vcc->pop(vcc, skb);
27371 else
27372@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27373 ATM_DESC(skb) = vcc->vci;
27374 skb_queue_tail(&iadev->tx_dma_q, skb);
27375
27376- atomic_inc(&vcc->stats->tx);
27377+ atomic_inc_unchecked(&vcc->stats->tx);
27378 iadev->tx_pkt_cnt++;
27379 /* Increment transaction counter */
27380 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27381
27382 #if 0
27383 /* add flow control logic */
27384- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27385+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27386 if (iavcc->vc_desc_cnt > 10) {
27387 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27388 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27389diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27390index f556969..0da15eb 100644
27391--- a/drivers/atm/lanai.c
27392+++ b/drivers/atm/lanai.c
27393@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27394 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27395 lanai_endtx(lanai, lvcc);
27396 lanai_free_skb(lvcc->tx.atmvcc, skb);
27397- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27398+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27399 }
27400
27401 /* Try to fill the buffer - don't call unless there is backlog */
27402@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
27403 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27404 __net_timestamp(skb);
27405 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27406- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27407+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27408 out:
27409 lvcc->rx.buf.ptr = end;
27410 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27411@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27412 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27413 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27414 lanai->stats.service_rxnotaal5++;
27415- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27416+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27417 return 0;
27418 }
27419 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27420@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27421 int bytes;
27422 read_unlock(&vcc_sklist_lock);
27423 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27424- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27425+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27426 lvcc->stats.x.aal5.service_trash++;
27427 bytes = (SERVICE_GET_END(s) * 16) -
27428 (((unsigned long) lvcc->rx.buf.ptr) -
27429@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27430 }
27431 if (s & SERVICE_STREAM) {
27432 read_unlock(&vcc_sklist_lock);
27433- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27434+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27435 lvcc->stats.x.aal5.service_stream++;
27436 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27437 "PDU on VCI %d!\n", lanai->number, vci);
27438@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27439 return 0;
27440 }
27441 DPRINTK("got rx crc error on vci %d\n", vci);
27442- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27443+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27444 lvcc->stats.x.aal5.service_rxcrc++;
27445 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27446 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27447diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
27448index 1c70c45..300718d 100644
27449--- a/drivers/atm/nicstar.c
27450+++ b/drivers/atm/nicstar.c
27451@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27452 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
27453 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
27454 card->index);
27455- atomic_inc(&vcc->stats->tx_err);
27456+ atomic_inc_unchecked(&vcc->stats->tx_err);
27457 dev_kfree_skb_any(skb);
27458 return -EINVAL;
27459 }
27460@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27461 if (!vc->tx) {
27462 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
27463 card->index);
27464- atomic_inc(&vcc->stats->tx_err);
27465+ atomic_inc_unchecked(&vcc->stats->tx_err);
27466 dev_kfree_skb_any(skb);
27467 return -EINVAL;
27468 }
27469@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27470 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
27471 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
27472 card->index);
27473- atomic_inc(&vcc->stats->tx_err);
27474+ atomic_inc_unchecked(&vcc->stats->tx_err);
27475 dev_kfree_skb_any(skb);
27476 return -EINVAL;
27477 }
27478
27479 if (skb_shinfo(skb)->nr_frags != 0) {
27480 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27481- atomic_inc(&vcc->stats->tx_err);
27482+ atomic_inc_unchecked(&vcc->stats->tx_err);
27483 dev_kfree_skb_any(skb);
27484 return -EINVAL;
27485 }
27486@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27487 }
27488
27489 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
27490- atomic_inc(&vcc->stats->tx_err);
27491+ atomic_inc_unchecked(&vcc->stats->tx_err);
27492 dev_kfree_skb_any(skb);
27493 return -EIO;
27494 }
27495- atomic_inc(&vcc->stats->tx);
27496+ atomic_inc_unchecked(&vcc->stats->tx);
27497
27498 return 0;
27499 }
27500@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27501 printk
27502 ("nicstar%d: Can't allocate buffers for aal0.\n",
27503 card->index);
27504- atomic_add(i, &vcc->stats->rx_drop);
27505+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
27506 break;
27507 }
27508 if (!atm_charge(vcc, sb->truesize)) {
27509 RXPRINTK
27510 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
27511 card->index);
27512- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27513+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27514 dev_kfree_skb_any(sb);
27515 break;
27516 }
27517@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27518 ATM_SKB(sb)->vcc = vcc;
27519 __net_timestamp(sb);
27520 vcc->push(vcc, sb);
27521- atomic_inc(&vcc->stats->rx);
27522+ atomic_inc_unchecked(&vcc->stats->rx);
27523 cell += ATM_CELL_PAYLOAD;
27524 }
27525
27526@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27527 if (iovb == NULL) {
27528 printk("nicstar%d: Out of iovec buffers.\n",
27529 card->index);
27530- atomic_inc(&vcc->stats->rx_drop);
27531+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27532 recycle_rx_buf(card, skb);
27533 return;
27534 }
27535@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27536 small or large buffer itself. */
27537 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
27538 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27539- atomic_inc(&vcc->stats->rx_err);
27540+ atomic_inc_unchecked(&vcc->stats->rx_err);
27541 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27542 NS_MAX_IOVECS);
27543 NS_PRV_IOVCNT(iovb) = 0;
27544@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27545 ("nicstar%d: Expected a small buffer, and this is not one.\n",
27546 card->index);
27547 which_list(card, skb);
27548- atomic_inc(&vcc->stats->rx_err);
27549+ atomic_inc_unchecked(&vcc->stats->rx_err);
27550 recycle_rx_buf(card, skb);
27551 vc->rx_iov = NULL;
27552 recycle_iov_buf(card, iovb);
27553@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27554 ("nicstar%d: Expected a large buffer, and this is not one.\n",
27555 card->index);
27556 which_list(card, skb);
27557- atomic_inc(&vcc->stats->rx_err);
27558+ atomic_inc_unchecked(&vcc->stats->rx_err);
27559 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27560 NS_PRV_IOVCNT(iovb));
27561 vc->rx_iov = NULL;
27562@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27563 printk(" - PDU size mismatch.\n");
27564 else
27565 printk(".\n");
27566- atomic_inc(&vcc->stats->rx_err);
27567+ atomic_inc_unchecked(&vcc->stats->rx_err);
27568 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27569 NS_PRV_IOVCNT(iovb));
27570 vc->rx_iov = NULL;
27571@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27572 /* skb points to a small buffer */
27573 if (!atm_charge(vcc, skb->truesize)) {
27574 push_rxbufs(card, skb);
27575- atomic_inc(&vcc->stats->rx_drop);
27576+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27577 } else {
27578 skb_put(skb, len);
27579 dequeue_sm_buf(card, skb);
27580@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27581 ATM_SKB(skb)->vcc = vcc;
27582 __net_timestamp(skb);
27583 vcc->push(vcc, skb);
27584- atomic_inc(&vcc->stats->rx);
27585+ atomic_inc_unchecked(&vcc->stats->rx);
27586 }
27587 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
27588 struct sk_buff *sb;
27589@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27590 if (len <= NS_SMBUFSIZE) {
27591 if (!atm_charge(vcc, sb->truesize)) {
27592 push_rxbufs(card, sb);
27593- atomic_inc(&vcc->stats->rx_drop);
27594+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27595 } else {
27596 skb_put(sb, len);
27597 dequeue_sm_buf(card, sb);
27598@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27599 ATM_SKB(sb)->vcc = vcc;
27600 __net_timestamp(sb);
27601 vcc->push(vcc, sb);
27602- atomic_inc(&vcc->stats->rx);
27603+ atomic_inc_unchecked(&vcc->stats->rx);
27604 }
27605
27606 push_rxbufs(card, skb);
27607@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27608
27609 if (!atm_charge(vcc, skb->truesize)) {
27610 push_rxbufs(card, skb);
27611- atomic_inc(&vcc->stats->rx_drop);
27612+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27613 } else {
27614 dequeue_lg_buf(card, skb);
27615 #ifdef NS_USE_DESTRUCTORS
27616@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27617 ATM_SKB(skb)->vcc = vcc;
27618 __net_timestamp(skb);
27619 vcc->push(vcc, skb);
27620- atomic_inc(&vcc->stats->rx);
27621+ atomic_inc_unchecked(&vcc->stats->rx);
27622 }
27623
27624 push_rxbufs(card, sb);
27625@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27626 printk
27627 ("nicstar%d: Out of huge buffers.\n",
27628 card->index);
27629- atomic_inc(&vcc->stats->rx_drop);
27630+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27631 recycle_iovec_rx_bufs(card,
27632 (struct iovec *)
27633 iovb->data,
27634@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27635 card->hbpool.count++;
27636 } else
27637 dev_kfree_skb_any(hb);
27638- atomic_inc(&vcc->stats->rx_drop);
27639+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27640 } else {
27641 /* Copy the small buffer to the huge buffer */
27642 sb = (struct sk_buff *)iov->iov_base;
27643@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27644 #endif /* NS_USE_DESTRUCTORS */
27645 __net_timestamp(hb);
27646 vcc->push(vcc, hb);
27647- atomic_inc(&vcc->stats->rx);
27648+ atomic_inc_unchecked(&vcc->stats->rx);
27649 }
27650 }
27651
27652diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
27653index e8cd652..bbbd1fc 100644
27654--- a/drivers/atm/solos-pci.c
27655+++ b/drivers/atm/solos-pci.c
27656@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
27657 }
27658 atm_charge(vcc, skb->truesize);
27659 vcc->push(vcc, skb);
27660- atomic_inc(&vcc->stats->rx);
27661+ atomic_inc_unchecked(&vcc->stats->rx);
27662 break;
27663
27664 case PKT_STATUS:
27665@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
27666 vcc = SKB_CB(oldskb)->vcc;
27667
27668 if (vcc) {
27669- atomic_inc(&vcc->stats->tx);
27670+ atomic_inc_unchecked(&vcc->stats->tx);
27671 solos_pop(vcc, oldskb);
27672 } else
27673 dev_kfree_skb_irq(oldskb);
27674diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
27675index 90f1ccc..04c4a1e 100644
27676--- a/drivers/atm/suni.c
27677+++ b/drivers/atm/suni.c
27678@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27679
27680
27681 #define ADD_LIMITED(s,v) \
27682- atomic_add((v),&stats->s); \
27683- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27684+ atomic_add_unchecked((v),&stats->s); \
27685+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27686
27687
27688 static void suni_hz(unsigned long from_timer)
27689diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
27690index 5120a96..e2572bd 100644
27691--- a/drivers/atm/uPD98402.c
27692+++ b/drivers/atm/uPD98402.c
27693@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
27694 struct sonet_stats tmp;
27695 int error = 0;
27696
27697- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27698+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27699 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27700 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27701 if (zero && !error) {
27702@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
27703
27704
27705 #define ADD_LIMITED(s,v) \
27706- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27707- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27708- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27709+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27710+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27711+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27712
27713
27714 static void stat_event(struct atm_dev *dev)
27715@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
27716 if (reason & uPD98402_INT_PFM) stat_event(dev);
27717 if (reason & uPD98402_INT_PCO) {
27718 (void) GET(PCOCR); /* clear interrupt cause */
27719- atomic_add(GET(HECCT),
27720+ atomic_add_unchecked(GET(HECCT),
27721 &PRIV(dev)->sonet_stats.uncorr_hcs);
27722 }
27723 if ((reason & uPD98402_INT_RFO) &&
27724@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
27725 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27726 uPD98402_INT_LOS),PIMR); /* enable them */
27727 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27728- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27729- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27730- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27731+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27732+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27733+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27734 return 0;
27735 }
27736
27737diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
27738index d889f56..17eb71e 100644
27739--- a/drivers/atm/zatm.c
27740+++ b/drivers/atm/zatm.c
27741@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27742 }
27743 if (!size) {
27744 dev_kfree_skb_irq(skb);
27745- if (vcc) atomic_inc(&vcc->stats->rx_err);
27746+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27747 continue;
27748 }
27749 if (!atm_charge(vcc,skb->truesize)) {
27750@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27751 skb->len = size;
27752 ATM_SKB(skb)->vcc = vcc;
27753 vcc->push(vcc,skb);
27754- atomic_inc(&vcc->stats->rx);
27755+ atomic_inc_unchecked(&vcc->stats->rx);
27756 }
27757 zout(pos & 0xffff,MTA(mbx));
27758 #if 0 /* probably a stupid idea */
27759@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
27760 skb_queue_head(&zatm_vcc->backlog,skb);
27761 break;
27762 }
27763- atomic_inc(&vcc->stats->tx);
27764+ atomic_inc_unchecked(&vcc->stats->tx);
27765 wake_up(&zatm_vcc->tx_wait);
27766 }
27767
27768diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
27769index 8493536..31adee0 100644
27770--- a/drivers/base/devtmpfs.c
27771+++ b/drivers/base/devtmpfs.c
27772@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
27773 if (!thread)
27774 return 0;
27775
27776- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
27777+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
27778 if (err)
27779 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
27780 else
27781diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
27782index caf995f..6f76697 100644
27783--- a/drivers/base/power/wakeup.c
27784+++ b/drivers/base/power/wakeup.c
27785@@ -30,14 +30,14 @@ bool events_check_enabled;
27786 * They need to be modified together atomically, so it's better to use one
27787 * atomic variable to hold them both.
27788 */
27789-static atomic_t combined_event_count = ATOMIC_INIT(0);
27790+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
27791
27792 #define IN_PROGRESS_BITS (sizeof(int) * 4)
27793 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
27794
27795 static void split_counters(unsigned int *cnt, unsigned int *inpr)
27796 {
27797- unsigned int comb = atomic_read(&combined_event_count);
27798+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
27799
27800 *cnt = (comb >> IN_PROGRESS_BITS);
27801 *inpr = comb & MAX_IN_PROGRESS;
27802@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
27803 ws->last_time = ktime_get();
27804
27805 /* Increment the counter of events in progress. */
27806- atomic_inc(&combined_event_count);
27807+ atomic_inc_unchecked(&combined_event_count);
27808 }
27809
27810 /**
27811@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
27812 * Increment the counter of registered wakeup events and decrement the
27813 * couter of wakeup events in progress simultaneously.
27814 */
27815- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
27816+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
27817 }
27818
27819 /**
27820diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
27821index b0f553b..77b928b 100644
27822--- a/drivers/block/cciss.c
27823+++ b/drivers/block/cciss.c
27824@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
27825 int err;
27826 u32 cp;
27827
27828+ memset(&arg64, 0, sizeof(arg64));
27829+
27830 err = 0;
27831 err |=
27832 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27833@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
27834 while (!list_empty(&h->reqQ)) {
27835 c = list_entry(h->reqQ.next, CommandList_struct, list);
27836 /* can't do anything if fifo is full */
27837- if ((h->access.fifo_full(h))) {
27838+ if ((h->access->fifo_full(h))) {
27839 dev_warn(&h->pdev->dev, "fifo full\n");
27840 break;
27841 }
27842@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
27843 h->Qdepth--;
27844
27845 /* Tell the controller execute command */
27846- h->access.submit_command(h, c);
27847+ h->access->submit_command(h, c);
27848
27849 /* Put job onto the completed Q */
27850 addQ(&h->cmpQ, c);
27851@@ -3443,17 +3445,17 @@ startio:
27852
27853 static inline unsigned long get_next_completion(ctlr_info_t *h)
27854 {
27855- return h->access.command_completed(h);
27856+ return h->access->command_completed(h);
27857 }
27858
27859 static inline int interrupt_pending(ctlr_info_t *h)
27860 {
27861- return h->access.intr_pending(h);
27862+ return h->access->intr_pending(h);
27863 }
27864
27865 static inline long interrupt_not_for_us(ctlr_info_t *h)
27866 {
27867- return ((h->access.intr_pending(h) == 0) ||
27868+ return ((h->access->intr_pending(h) == 0) ||
27869 (h->interrupts_enabled == 0));
27870 }
27871
27872@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
27873 u32 a;
27874
27875 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
27876- return h->access.command_completed(h);
27877+ return h->access->command_completed(h);
27878
27879 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
27880 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
27881@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
27882 trans_support & CFGTBL_Trans_use_short_tags);
27883
27884 /* Change the access methods to the performant access methods */
27885- h->access = SA5_performant_access;
27886+ h->access = &SA5_performant_access;
27887 h->transMethod = CFGTBL_Trans_Performant;
27888
27889 return;
27890@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
27891 if (prod_index < 0)
27892 return -ENODEV;
27893 h->product_name = products[prod_index].product_name;
27894- h->access = *(products[prod_index].access);
27895+ h->access = products[prod_index].access;
27896
27897 if (cciss_board_disabled(h)) {
27898 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
27899@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
27900 }
27901
27902 /* make sure the board interrupts are off */
27903- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27904+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27905 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
27906 if (rc)
27907 goto clean2;
27908@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
27909 * fake ones to scoop up any residual completions.
27910 */
27911 spin_lock_irqsave(&h->lock, flags);
27912- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27913+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27914 spin_unlock_irqrestore(&h->lock, flags);
27915 free_irq(h->intr[h->intr_mode], h);
27916 rc = cciss_request_irq(h, cciss_msix_discard_completions,
27917@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
27918 dev_info(&h->pdev->dev, "Board READY.\n");
27919 dev_info(&h->pdev->dev,
27920 "Waiting for stale completions to drain.\n");
27921- h->access.set_intr_mask(h, CCISS_INTR_ON);
27922+ h->access->set_intr_mask(h, CCISS_INTR_ON);
27923 msleep(10000);
27924- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27925+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27926
27927 rc = controller_reset_failed(h->cfgtable);
27928 if (rc)
27929@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
27930 cciss_scsi_setup(h);
27931
27932 /* Turn the interrupts on so we can service requests */
27933- h->access.set_intr_mask(h, CCISS_INTR_ON);
27934+ h->access->set_intr_mask(h, CCISS_INTR_ON);
27935
27936 /* Get the firmware version */
27937 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27938@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
27939 kfree(flush_buf);
27940 if (return_code != IO_OK)
27941 dev_warn(&h->pdev->dev, "Error flushing cache\n");
27942- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27943+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27944 free_irq(h->intr[h->intr_mode], h);
27945 }
27946
27947diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
27948index 7fda30e..eb5dfe0 100644
27949--- a/drivers/block/cciss.h
27950+++ b/drivers/block/cciss.h
27951@@ -101,7 +101,7 @@ struct ctlr_info
27952 /* information about each logical volume */
27953 drive_info_struct *drv[CISS_MAX_LUN];
27954
27955- struct access_method access;
27956+ struct access_method *access;
27957
27958 /* queue and queue Info */
27959 struct list_head reqQ;
27960diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
27961index 9125bbe..eede5c8 100644
27962--- a/drivers/block/cpqarray.c
27963+++ b/drivers/block/cpqarray.c
27964@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27965 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27966 goto Enomem4;
27967 }
27968- hba[i]->access.set_intr_mask(hba[i], 0);
27969+ hba[i]->access->set_intr_mask(hba[i], 0);
27970 if (request_irq(hba[i]->intr, do_ida_intr,
27971 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27972 {
27973@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27974 add_timer(&hba[i]->timer);
27975
27976 /* Enable IRQ now that spinlock and rate limit timer are set up */
27977- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27978+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27979
27980 for(j=0; j<NWD; j++) {
27981 struct gendisk *disk = ida_gendisk[i][j];
27982@@ -694,7 +694,7 @@ DBGINFO(
27983 for(i=0; i<NR_PRODUCTS; i++) {
27984 if (board_id == products[i].board_id) {
27985 c->product_name = products[i].product_name;
27986- c->access = *(products[i].access);
27987+ c->access = products[i].access;
27988 break;
27989 }
27990 }
27991@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
27992 hba[ctlr]->intr = intr;
27993 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27994 hba[ctlr]->product_name = products[j].product_name;
27995- hba[ctlr]->access = *(products[j].access);
27996+ hba[ctlr]->access = products[j].access;
27997 hba[ctlr]->ctlr = ctlr;
27998 hba[ctlr]->board_id = board_id;
27999 hba[ctlr]->pci_dev = NULL; /* not PCI */
28000@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28001
28002 while((c = h->reqQ) != NULL) {
28003 /* Can't do anything if we're busy */
28004- if (h->access.fifo_full(h) == 0)
28005+ if (h->access->fifo_full(h) == 0)
28006 return;
28007
28008 /* Get the first entry from the request Q */
28009@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28010 h->Qdepth--;
28011
28012 /* Tell the controller to do our bidding */
28013- h->access.submit_command(h, c);
28014+ h->access->submit_command(h, c);
28015
28016 /* Get onto the completion Q */
28017 addQ(&h->cmpQ, c);
28018@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28019 unsigned long flags;
28020 __u32 a,a1;
28021
28022- istat = h->access.intr_pending(h);
28023+ istat = h->access->intr_pending(h);
28024 /* Is this interrupt for us? */
28025 if (istat == 0)
28026 return IRQ_NONE;
28027@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28028 */
28029 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28030 if (istat & FIFO_NOT_EMPTY) {
28031- while((a = h->access.command_completed(h))) {
28032+ while((a = h->access->command_completed(h))) {
28033 a1 = a; a &= ~3;
28034 if ((c = h->cmpQ) == NULL)
28035 {
28036@@ -1449,11 +1449,11 @@ static int sendcmd(
28037 /*
28038 * Disable interrupt
28039 */
28040- info_p->access.set_intr_mask(info_p, 0);
28041+ info_p->access->set_intr_mask(info_p, 0);
28042 /* Make sure there is room in the command FIFO */
28043 /* Actually it should be completely empty at this time. */
28044 for (i = 200000; i > 0; i--) {
28045- temp = info_p->access.fifo_full(info_p);
28046+ temp = info_p->access->fifo_full(info_p);
28047 if (temp != 0) {
28048 break;
28049 }
28050@@ -1466,7 +1466,7 @@ DBG(
28051 /*
28052 * Send the cmd
28053 */
28054- info_p->access.submit_command(info_p, c);
28055+ info_p->access->submit_command(info_p, c);
28056 complete = pollcomplete(ctlr);
28057
28058 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28059@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28060 * we check the new geometry. Then turn interrupts back on when
28061 * we're done.
28062 */
28063- host->access.set_intr_mask(host, 0);
28064+ host->access->set_intr_mask(host, 0);
28065 getgeometry(ctlr);
28066- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28067+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28068
28069 for(i=0; i<NWD; i++) {
28070 struct gendisk *disk = ida_gendisk[ctlr][i];
28071@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28072 /* Wait (up to 2 seconds) for a command to complete */
28073
28074 for (i = 200000; i > 0; i--) {
28075- done = hba[ctlr]->access.command_completed(hba[ctlr]);
28076+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
28077 if (done == 0) {
28078 udelay(10); /* a short fixed delay */
28079 } else
28080diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28081index be73e9d..7fbf140 100644
28082--- a/drivers/block/cpqarray.h
28083+++ b/drivers/block/cpqarray.h
28084@@ -99,7 +99,7 @@ struct ctlr_info {
28085 drv_info_t drv[NWD];
28086 struct proc_dir_entry *proc;
28087
28088- struct access_method access;
28089+ struct access_method *access;
28090
28091 cmdlist_t *reqQ;
28092 cmdlist_t *cmpQ;
28093diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28094index 8d68056..e67050f 100644
28095--- a/drivers/block/drbd/drbd_int.h
28096+++ b/drivers/block/drbd/drbd_int.h
28097@@ -736,7 +736,7 @@ struct drbd_request;
28098 struct drbd_epoch {
28099 struct list_head list;
28100 unsigned int barrier_nr;
28101- atomic_t epoch_size; /* increased on every request added. */
28102+ atomic_unchecked_t epoch_size; /* increased on every request added. */
28103 atomic_t active; /* increased on every req. added, and dec on every finished. */
28104 unsigned long flags;
28105 };
28106@@ -1108,7 +1108,7 @@ struct drbd_conf {
28107 void *int_dig_in;
28108 void *int_dig_vv;
28109 wait_queue_head_t seq_wait;
28110- atomic_t packet_seq;
28111+ atomic_unchecked_t packet_seq;
28112 unsigned int peer_seq;
28113 spinlock_t peer_seq_lock;
28114 unsigned int minor;
28115@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28116
28117 static inline void drbd_tcp_cork(struct socket *sock)
28118 {
28119- int __user val = 1;
28120+ int val = 1;
28121 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28122- (char __user *)&val, sizeof(val));
28123+ (char __force_user *)&val, sizeof(val));
28124 }
28125
28126 static inline void drbd_tcp_uncork(struct socket *sock)
28127 {
28128- int __user val = 0;
28129+ int val = 0;
28130 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28131- (char __user *)&val, sizeof(val));
28132+ (char __force_user *)&val, sizeof(val));
28133 }
28134
28135 static inline void drbd_tcp_nodelay(struct socket *sock)
28136 {
28137- int __user val = 1;
28138+ int val = 1;
28139 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28140- (char __user *)&val, sizeof(val));
28141+ (char __force_user *)&val, sizeof(val));
28142 }
28143
28144 static inline void drbd_tcp_quickack(struct socket *sock)
28145 {
28146- int __user val = 2;
28147+ int val = 2;
28148 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28149- (char __user *)&val, sizeof(val));
28150+ (char __force_user *)&val, sizeof(val));
28151 }
28152
28153 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28154diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28155index 211fc44..c5116f1 100644
28156--- a/drivers/block/drbd/drbd_main.c
28157+++ b/drivers/block/drbd/drbd_main.c
28158@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28159 p.sector = sector;
28160 p.block_id = block_id;
28161 p.blksize = blksize;
28162- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28163+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28164
28165 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28166 return false;
28167@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28168 p.sector = cpu_to_be64(req->sector);
28169 p.block_id = (unsigned long)req;
28170 p.seq_num = cpu_to_be32(req->seq_num =
28171- atomic_add_return(1, &mdev->packet_seq));
28172+ atomic_add_return_unchecked(1, &mdev->packet_seq));
28173
28174 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28175
28176@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28177 atomic_set(&mdev->unacked_cnt, 0);
28178 atomic_set(&mdev->local_cnt, 0);
28179 atomic_set(&mdev->net_cnt, 0);
28180- atomic_set(&mdev->packet_seq, 0);
28181+ atomic_set_unchecked(&mdev->packet_seq, 0);
28182 atomic_set(&mdev->pp_in_use, 0);
28183 atomic_set(&mdev->pp_in_use_by_net, 0);
28184 atomic_set(&mdev->rs_sect_in, 0);
28185@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28186 mdev->receiver.t_state);
28187
28188 /* no need to lock it, I'm the only thread alive */
28189- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28190- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28191+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28192+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28193 mdev->al_writ_cnt =
28194 mdev->bm_writ_cnt =
28195 mdev->read_cnt =
28196diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28197index af2a250..219c74b 100644
28198--- a/drivers/block/drbd/drbd_nl.c
28199+++ b/drivers/block/drbd/drbd_nl.c
28200@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28201 module_put(THIS_MODULE);
28202 }
28203
28204-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28205+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28206
28207 static unsigned short *
28208 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28209@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28210 cn_reply->id.idx = CN_IDX_DRBD;
28211 cn_reply->id.val = CN_VAL_DRBD;
28212
28213- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28214+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28215 cn_reply->ack = 0; /* not used here. */
28216 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28217 (int)((char *)tl - (char *)reply->tag_list);
28218@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28219 cn_reply->id.idx = CN_IDX_DRBD;
28220 cn_reply->id.val = CN_VAL_DRBD;
28221
28222- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28223+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28224 cn_reply->ack = 0; /* not used here. */
28225 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28226 (int)((char *)tl - (char *)reply->tag_list);
28227@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28228 cn_reply->id.idx = CN_IDX_DRBD;
28229 cn_reply->id.val = CN_VAL_DRBD;
28230
28231- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28232+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28233 cn_reply->ack = 0; // not used here.
28234 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28235 (int)((char*)tl - (char*)reply->tag_list);
28236@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28237 cn_reply->id.idx = CN_IDX_DRBD;
28238 cn_reply->id.val = CN_VAL_DRBD;
28239
28240- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28241+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28242 cn_reply->ack = 0; /* not used here. */
28243 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28244 (int)((char *)tl - (char *)reply->tag_list);
28245diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28246index 43beaca..4a5b1dd 100644
28247--- a/drivers/block/drbd/drbd_receiver.c
28248+++ b/drivers/block/drbd/drbd_receiver.c
28249@@ -894,7 +894,7 @@ retry:
28250 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28251 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28252
28253- atomic_set(&mdev->packet_seq, 0);
28254+ atomic_set_unchecked(&mdev->packet_seq, 0);
28255 mdev->peer_seq = 0;
28256
28257 drbd_thread_start(&mdev->asender);
28258@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28259 do {
28260 next_epoch = NULL;
28261
28262- epoch_size = atomic_read(&epoch->epoch_size);
28263+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28264
28265 switch (ev & ~EV_CLEANUP) {
28266 case EV_PUT:
28267@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28268 rv = FE_DESTROYED;
28269 } else {
28270 epoch->flags = 0;
28271- atomic_set(&epoch->epoch_size, 0);
28272+ atomic_set_unchecked(&epoch->epoch_size, 0);
28273 /* atomic_set(&epoch->active, 0); is already zero */
28274 if (rv == FE_STILL_LIVE)
28275 rv = FE_RECYCLED;
28276@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28277 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28278 drbd_flush(mdev);
28279
28280- if (atomic_read(&mdev->current_epoch->epoch_size)) {
28281+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28282 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28283 if (epoch)
28284 break;
28285 }
28286
28287 epoch = mdev->current_epoch;
28288- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28289+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28290
28291 D_ASSERT(atomic_read(&epoch->active) == 0);
28292 D_ASSERT(epoch->flags == 0);
28293@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28294 }
28295
28296 epoch->flags = 0;
28297- atomic_set(&epoch->epoch_size, 0);
28298+ atomic_set_unchecked(&epoch->epoch_size, 0);
28299 atomic_set(&epoch->active, 0);
28300
28301 spin_lock(&mdev->epoch_lock);
28302- if (atomic_read(&mdev->current_epoch->epoch_size)) {
28303+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28304 list_add(&epoch->list, &mdev->current_epoch->list);
28305 mdev->current_epoch = epoch;
28306 mdev->epochs++;
28307@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28308 spin_unlock(&mdev->peer_seq_lock);
28309
28310 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28311- atomic_inc(&mdev->current_epoch->epoch_size);
28312+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28313 return drbd_drain_block(mdev, data_size);
28314 }
28315
28316@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28317
28318 spin_lock(&mdev->epoch_lock);
28319 e->epoch = mdev->current_epoch;
28320- atomic_inc(&e->epoch->epoch_size);
28321+ atomic_inc_unchecked(&e->epoch->epoch_size);
28322 atomic_inc(&e->epoch->active);
28323 spin_unlock(&mdev->epoch_lock);
28324
28325@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28326 D_ASSERT(list_empty(&mdev->done_ee));
28327
28328 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28329- atomic_set(&mdev->current_epoch->epoch_size, 0);
28330+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28331 D_ASSERT(list_empty(&mdev->current_epoch->list));
28332 }
28333
28334diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28335index cd50435..ba1ffb5 100644
28336--- a/drivers/block/loop.c
28337+++ b/drivers/block/loop.c
28338@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
28339 mm_segment_t old_fs = get_fs();
28340
28341 set_fs(get_ds());
28342- bw = file->f_op->write(file, buf, len, &pos);
28343+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28344 set_fs(old_fs);
28345 if (likely(bw == len))
28346 return 0;
28347diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28348index 4364303..9adf4ee 100644
28349--- a/drivers/char/Kconfig
28350+++ b/drivers/char/Kconfig
28351@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28352
28353 config DEVKMEM
28354 bool "/dev/kmem virtual device support"
28355- default y
28356+ default n
28357+ depends on !GRKERNSEC_KMEM
28358 help
28359 Say Y here if you want to support the /dev/kmem device. The
28360 /dev/kmem device is rarely used, but can be used for certain
28361@@ -596,6 +597,7 @@ config DEVPORT
28362 bool
28363 depends on !M68K
28364 depends on ISA || PCI
28365+ depends on !GRKERNSEC_KMEM
28366 default y
28367
28368 source "drivers/s390/char/Kconfig"
28369diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28370index 2e04433..22afc64 100644
28371--- a/drivers/char/agp/frontend.c
28372+++ b/drivers/char/agp/frontend.c
28373@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28374 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28375 return -EFAULT;
28376
28377- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28378+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28379 return -EFAULT;
28380
28381 client = agp_find_client_by_pid(reserve.pid);
28382diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28383index 095ab90..afad0a4 100644
28384--- a/drivers/char/briq_panel.c
28385+++ b/drivers/char/briq_panel.c
28386@@ -9,6 +9,7 @@
28387 #include <linux/types.h>
28388 #include <linux/errno.h>
28389 #include <linux/tty.h>
28390+#include <linux/mutex.h>
28391 #include <linux/timer.h>
28392 #include <linux/kernel.h>
28393 #include <linux/wait.h>
28394@@ -34,6 +35,7 @@ static int vfd_is_open;
28395 static unsigned char vfd[40];
28396 static int vfd_cursor;
28397 static unsigned char ledpb, led;
28398+static DEFINE_MUTEX(vfd_mutex);
28399
28400 static void update_vfd(void)
28401 {
28402@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28403 if (!vfd_is_open)
28404 return -EBUSY;
28405
28406+ mutex_lock(&vfd_mutex);
28407 for (;;) {
28408 char c;
28409 if (!indx)
28410 break;
28411- if (get_user(c, buf))
28412+ if (get_user(c, buf)) {
28413+ mutex_unlock(&vfd_mutex);
28414 return -EFAULT;
28415+ }
28416 if (esc) {
28417 set_led(c);
28418 esc = 0;
28419@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28420 buf++;
28421 }
28422 update_vfd();
28423+ mutex_unlock(&vfd_mutex);
28424
28425 return len;
28426 }
28427diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
28428index f773a9d..65cd683 100644
28429--- a/drivers/char/genrtc.c
28430+++ b/drivers/char/genrtc.c
28431@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
28432 switch (cmd) {
28433
28434 case RTC_PLL_GET:
28435+ memset(&pll, 0, sizeof(pll));
28436 if (get_rtc_pll(&pll))
28437 return -EINVAL;
28438 else
28439diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
28440index 0833896..cccce52 100644
28441--- a/drivers/char/hpet.c
28442+++ b/drivers/char/hpet.c
28443@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
28444 }
28445
28446 static int
28447-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
28448+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
28449 struct hpet_info *info)
28450 {
28451 struct hpet_timer __iomem *timer;
28452diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
28453index 58c0e63..46c16bf 100644
28454--- a/drivers/char/ipmi/ipmi_msghandler.c
28455+++ b/drivers/char/ipmi/ipmi_msghandler.c
28456@@ -415,7 +415,7 @@ struct ipmi_smi {
28457 struct proc_dir_entry *proc_dir;
28458 char proc_dir_name[10];
28459
28460- atomic_t stats[IPMI_NUM_STATS];
28461+ atomic_unchecked_t stats[IPMI_NUM_STATS];
28462
28463 /*
28464 * run_to_completion duplicate of smb_info, smi_info
28465@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28466
28467
28468 #define ipmi_inc_stat(intf, stat) \
28469- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28470+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28471 #define ipmi_get_stat(intf, stat) \
28472- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28473+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28474
28475 static int is_lan_addr(struct ipmi_addr *addr)
28476 {
28477@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28478 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28479 init_waitqueue_head(&intf->waitq);
28480 for (i = 0; i < IPMI_NUM_STATS; i++)
28481- atomic_set(&intf->stats[i], 0);
28482+ atomic_set_unchecked(&intf->stats[i], 0);
28483
28484 intf->proc_dir = NULL;
28485
28486diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
28487index 50fcf9c..91b5528 100644
28488--- a/drivers/char/ipmi/ipmi_si_intf.c
28489+++ b/drivers/char/ipmi/ipmi_si_intf.c
28490@@ -277,7 +277,7 @@ struct smi_info {
28491 unsigned char slave_addr;
28492
28493 /* Counters and things for the proc filesystem. */
28494- atomic_t stats[SI_NUM_STATS];
28495+ atomic_unchecked_t stats[SI_NUM_STATS];
28496
28497 struct task_struct *thread;
28498
28499@@ -286,9 +286,9 @@ struct smi_info {
28500 };
28501
28502 #define smi_inc_stat(smi, stat) \
28503- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28504+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28505 #define smi_get_stat(smi, stat) \
28506- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28507+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28508
28509 #define SI_MAX_PARMS 4
28510
28511@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
28512 atomic_set(&new_smi->req_events, 0);
28513 new_smi->run_to_completion = 0;
28514 for (i = 0; i < SI_NUM_STATS; i++)
28515- atomic_set(&new_smi->stats[i], 0);
28516+ atomic_set_unchecked(&new_smi->stats[i], 0);
28517
28518 new_smi->interrupt_disabled = 1;
28519 atomic_set(&new_smi->stop_operation, 0);
28520diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
28521index 1aeaaba..e018570 100644
28522--- a/drivers/char/mbcs.c
28523+++ b/drivers/char/mbcs.c
28524@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
28525 return 0;
28526 }
28527
28528-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
28529+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
28530 {
28531 .part_num = MBCS_PART_NUM,
28532 .mfg_num = MBCS_MFG_NUM,
28533diff --git a/drivers/char/mem.c b/drivers/char/mem.c
28534index d6e9d08..4493e89 100644
28535--- a/drivers/char/mem.c
28536+++ b/drivers/char/mem.c
28537@@ -18,6 +18,7 @@
28538 #include <linux/raw.h>
28539 #include <linux/tty.h>
28540 #include <linux/capability.h>
28541+#include <linux/security.h>
28542 #include <linux/ptrace.h>
28543 #include <linux/device.h>
28544 #include <linux/highmem.h>
28545@@ -35,6 +36,10 @@
28546 # include <linux/efi.h>
28547 #endif
28548
28549+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28550+extern const struct file_operations grsec_fops;
28551+#endif
28552+
28553 static inline unsigned long size_inside_page(unsigned long start,
28554 unsigned long size)
28555 {
28556@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28557
28558 while (cursor < to) {
28559 if (!devmem_is_allowed(pfn)) {
28560+#ifdef CONFIG_GRKERNSEC_KMEM
28561+ gr_handle_mem_readwrite(from, to);
28562+#else
28563 printk(KERN_INFO
28564 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28565 current->comm, from, to);
28566+#endif
28567 return 0;
28568 }
28569 cursor += PAGE_SIZE;
28570@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28571 }
28572 return 1;
28573 }
28574+#elif defined(CONFIG_GRKERNSEC_KMEM)
28575+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28576+{
28577+ return 0;
28578+}
28579 #else
28580 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28581 {
28582@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28583
28584 while (count > 0) {
28585 unsigned long remaining;
28586+ char *temp;
28587
28588 sz = size_inside_page(p, count);
28589
28590@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28591 if (!ptr)
28592 return -EFAULT;
28593
28594- remaining = copy_to_user(buf, ptr, sz);
28595+#ifdef CONFIG_PAX_USERCOPY
28596+ temp = kmalloc(sz, GFP_KERNEL);
28597+ if (!temp) {
28598+ unxlate_dev_mem_ptr(p, ptr);
28599+ return -ENOMEM;
28600+ }
28601+ memcpy(temp, ptr, sz);
28602+#else
28603+ temp = ptr;
28604+#endif
28605+
28606+ remaining = copy_to_user(buf, temp, sz);
28607+
28608+#ifdef CONFIG_PAX_USERCOPY
28609+ kfree(temp);
28610+#endif
28611+
28612 unxlate_dev_mem_ptr(p, ptr);
28613 if (remaining)
28614 return -EFAULT;
28615@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28616 size_t count, loff_t *ppos)
28617 {
28618 unsigned long p = *ppos;
28619- ssize_t low_count, read, sz;
28620+ ssize_t low_count, read, sz, err = 0;
28621 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28622- int err = 0;
28623
28624 read = 0;
28625 if (p < (unsigned long) high_memory) {
28626@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28627 }
28628 #endif
28629 while (low_count > 0) {
28630+ char *temp;
28631+
28632 sz = size_inside_page(p, low_count);
28633
28634 /*
28635@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28636 */
28637 kbuf = xlate_dev_kmem_ptr((char *)p);
28638
28639- if (copy_to_user(buf, kbuf, sz))
28640+#ifdef CONFIG_PAX_USERCOPY
28641+ temp = kmalloc(sz, GFP_KERNEL);
28642+ if (!temp)
28643+ return -ENOMEM;
28644+ memcpy(temp, kbuf, sz);
28645+#else
28646+ temp = kbuf;
28647+#endif
28648+
28649+ err = copy_to_user(buf, temp, sz);
28650+
28651+#ifdef CONFIG_PAX_USERCOPY
28652+ kfree(temp);
28653+#endif
28654+
28655+ if (err)
28656 return -EFAULT;
28657 buf += sz;
28658 p += sz;
28659@@ -867,6 +914,9 @@ static const struct memdev {
28660 #ifdef CONFIG_CRASH_DUMP
28661 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28662 #endif
28663+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28664+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28665+#endif
28666 };
28667
28668 static int memory_open(struct inode *inode, struct file *filp)
28669diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
28670index da3cfee..a5a6606 100644
28671--- a/drivers/char/nvram.c
28672+++ b/drivers/char/nvram.c
28673@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
28674
28675 spin_unlock_irq(&rtc_lock);
28676
28677- if (copy_to_user(buf, contents, tmp - contents))
28678+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
28679 return -EFAULT;
28680
28681 *ppos = i;
28682diff --git a/drivers/char/random.c b/drivers/char/random.c
28683index 54ca8b2..d58cb51 100644
28684--- a/drivers/char/random.c
28685+++ b/drivers/char/random.c
28686@@ -261,8 +261,13 @@
28687 /*
28688 * Configuration information
28689 */
28690+#ifdef CONFIG_GRKERNSEC_RANDNET
28691+#define INPUT_POOL_WORDS 512
28692+#define OUTPUT_POOL_WORDS 128
28693+#else
28694 #define INPUT_POOL_WORDS 128
28695 #define OUTPUT_POOL_WORDS 32
28696+#endif
28697 #define SEC_XFER_SIZE 512
28698 #define EXTRACT_SIZE 10
28699
28700@@ -300,10 +305,17 @@ static struct poolinfo {
28701 int poolwords;
28702 int tap1, tap2, tap3, tap4, tap5;
28703 } poolinfo_table[] = {
28704+#ifdef CONFIG_GRKERNSEC_RANDNET
28705+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28706+ { 512, 411, 308, 208, 104, 1 },
28707+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28708+ { 128, 103, 76, 51, 25, 1 },
28709+#else
28710 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28711 { 128, 103, 76, 51, 25, 1 },
28712 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28713 { 32, 26, 20, 14, 7, 1 },
28714+#endif
28715 #if 0
28716 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28717 { 2048, 1638, 1231, 819, 411, 1 },
28718@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
28719
28720 extract_buf(r, tmp);
28721 i = min_t(int, nbytes, EXTRACT_SIZE);
28722- if (copy_to_user(buf, tmp, i)) {
28723+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
28724 ret = -EFAULT;
28725 break;
28726 }
28727@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28728 #include <linux/sysctl.h>
28729
28730 static int min_read_thresh = 8, min_write_thresh;
28731-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28732+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28733 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28734 static char sysctl_bootid[16];
28735
28736diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
28737index 1ee8ce7..b778bef 100644
28738--- a/drivers/char/sonypi.c
28739+++ b/drivers/char/sonypi.c
28740@@ -55,6 +55,7 @@
28741 #include <asm/uaccess.h>
28742 #include <asm/io.h>
28743 #include <asm/system.h>
28744+#include <asm/local.h>
28745
28746 #include <linux/sonypi.h>
28747
28748@@ -491,7 +492,7 @@ static struct sonypi_device {
28749 spinlock_t fifo_lock;
28750 wait_queue_head_t fifo_proc_list;
28751 struct fasync_struct *fifo_async;
28752- int open_count;
28753+ local_t open_count;
28754 int model;
28755 struct input_dev *input_jog_dev;
28756 struct input_dev *input_key_dev;
28757@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
28758 static int sonypi_misc_release(struct inode *inode, struct file *file)
28759 {
28760 mutex_lock(&sonypi_device.lock);
28761- sonypi_device.open_count--;
28762+ local_dec(&sonypi_device.open_count);
28763 mutex_unlock(&sonypi_device.lock);
28764 return 0;
28765 }
28766@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
28767 {
28768 mutex_lock(&sonypi_device.lock);
28769 /* Flush input queue on first open */
28770- if (!sonypi_device.open_count)
28771+ if (!local_read(&sonypi_device.open_count))
28772 kfifo_reset(&sonypi_device.fifo);
28773- sonypi_device.open_count++;
28774+ local_inc(&sonypi_device.open_count);
28775 mutex_unlock(&sonypi_device.lock);
28776
28777 return 0;
28778diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
28779index 32362cf..32a96e9 100644
28780--- a/drivers/char/tpm/tpm.c
28781+++ b/drivers/char/tpm/tpm.c
28782@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
28783 chip->vendor.req_complete_val)
28784 goto out_recv;
28785
28786- if ((status == chip->vendor.req_canceled)) {
28787+ if (status == chip->vendor.req_canceled) {
28788 dev_err(chip->dev, "Operation Canceled\n");
28789 rc = -ECANCELED;
28790 goto out;
28791diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
28792index 0636520..169c1d0 100644
28793--- a/drivers/char/tpm/tpm_bios.c
28794+++ b/drivers/char/tpm/tpm_bios.c
28795@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
28796 event = addr;
28797
28798 if ((event->event_type == 0 && event->event_size == 0) ||
28799- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28800+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28801 return NULL;
28802
28803 return addr;
28804@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
28805 return NULL;
28806
28807 if ((event->event_type == 0 && event->event_size == 0) ||
28808- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28809+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28810 return NULL;
28811
28812 (*pos)++;
28813@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
28814 int i;
28815
28816 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28817- seq_putc(m, data[i]);
28818+ if (!seq_putc(m, data[i]))
28819+ return -EFAULT;
28820
28821 return 0;
28822 }
28823@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
28824 log->bios_event_log_end = log->bios_event_log + len;
28825
28826 virt = acpi_os_map_memory(start, len);
28827+ if (!virt) {
28828+ kfree(log->bios_event_log);
28829+ log->bios_event_log = NULL;
28830+ return -EFAULT;
28831+ }
28832
28833- memcpy(log->bios_event_log, virt, len);
28834+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
28835
28836 acpi_os_unmap_memory(virt, len);
28837 return 0;
28838diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
28839index b58b561..c9088c8 100644
28840--- a/drivers/char/virtio_console.c
28841+++ b/drivers/char/virtio_console.c
28842@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
28843 if (to_user) {
28844 ssize_t ret;
28845
28846- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
28847+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
28848 if (ret)
28849 return -EFAULT;
28850 } else {
28851@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
28852 if (!port_has_data(port) && !port->host_connected)
28853 return 0;
28854
28855- return fill_readbuf(port, ubuf, count, true);
28856+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
28857 }
28858
28859 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
28860diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
28861index c9eee6d..f9d5280 100644
28862--- a/drivers/edac/amd64_edac.c
28863+++ b/drivers/edac/amd64_edac.c
28864@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
28865 * PCI core identifies what devices are on a system during boot, and then
28866 * inquiry this table to see if this driver is for a given device found.
28867 */
28868-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
28869+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
28870 {
28871 .vendor = PCI_VENDOR_ID_AMD,
28872 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
28873diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
28874index e47e73b..348e0bd 100644
28875--- a/drivers/edac/amd76x_edac.c
28876+++ b/drivers/edac/amd76x_edac.c
28877@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
28878 edac_mc_free(mci);
28879 }
28880
28881-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
28882+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
28883 {
28884 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28885 AMD762},
28886diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
28887index 1af531a..3a8ff27 100644
28888--- a/drivers/edac/e752x_edac.c
28889+++ b/drivers/edac/e752x_edac.c
28890@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
28891 edac_mc_free(mci);
28892 }
28893
28894-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
28895+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
28896 {
28897 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28898 E7520},
28899diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
28900index 6ffb6d2..383d8d7 100644
28901--- a/drivers/edac/e7xxx_edac.c
28902+++ b/drivers/edac/e7xxx_edac.c
28903@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
28904 edac_mc_free(mci);
28905 }
28906
28907-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
28908+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
28909 {
28910 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28911 E7205},
28912diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
28913index 97f5064..202b6e6 100644
28914--- a/drivers/edac/edac_pci_sysfs.c
28915+++ b/drivers/edac/edac_pci_sysfs.c
28916@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
28917 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28918 static int edac_pci_poll_msec = 1000; /* one second workq period */
28919
28920-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28921-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28922+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28923+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28924
28925 static struct kobject *edac_pci_top_main_kobj;
28926 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28927@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28928 edac_printk(KERN_CRIT, EDAC_PCI,
28929 "Signaled System Error on %s\n",
28930 pci_name(dev));
28931- atomic_inc(&pci_nonparity_count);
28932+ atomic_inc_unchecked(&pci_nonparity_count);
28933 }
28934
28935 if (status & (PCI_STATUS_PARITY)) {
28936@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28937 "Master Data Parity Error on %s\n",
28938 pci_name(dev));
28939
28940- atomic_inc(&pci_parity_count);
28941+ atomic_inc_unchecked(&pci_parity_count);
28942 }
28943
28944 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28945@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28946 "Detected Parity Error on %s\n",
28947 pci_name(dev));
28948
28949- atomic_inc(&pci_parity_count);
28950+ atomic_inc_unchecked(&pci_parity_count);
28951 }
28952 }
28953
28954@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28955 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28956 "Signaled System Error on %s\n",
28957 pci_name(dev));
28958- atomic_inc(&pci_nonparity_count);
28959+ atomic_inc_unchecked(&pci_nonparity_count);
28960 }
28961
28962 if (status & (PCI_STATUS_PARITY)) {
28963@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28964 "Master Data Parity Error on "
28965 "%s\n", pci_name(dev));
28966
28967- atomic_inc(&pci_parity_count);
28968+ atomic_inc_unchecked(&pci_parity_count);
28969 }
28970
28971 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28972@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28973 "Detected Parity Error on %s\n",
28974 pci_name(dev));
28975
28976- atomic_inc(&pci_parity_count);
28977+ atomic_inc_unchecked(&pci_parity_count);
28978 }
28979 }
28980 }
28981@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
28982 if (!check_pci_errors)
28983 return;
28984
28985- before_count = atomic_read(&pci_parity_count);
28986+ before_count = atomic_read_unchecked(&pci_parity_count);
28987
28988 /* scan all PCI devices looking for a Parity Error on devices and
28989 * bridges.
28990@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
28991 /* Only if operator has selected panic on PCI Error */
28992 if (edac_pci_get_panic_on_pe()) {
28993 /* If the count is different 'after' from 'before' */
28994- if (before_count != atomic_read(&pci_parity_count))
28995+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28996 panic("EDAC: PCI Parity Error");
28997 }
28998 }
28999diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29000index c0510b3..6e2a954 100644
29001--- a/drivers/edac/i3000_edac.c
29002+++ b/drivers/edac/i3000_edac.c
29003@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29004 edac_mc_free(mci);
29005 }
29006
29007-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29008+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29009 {
29010 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29011 I3000},
29012diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29013index 73f55e200..5faaf59 100644
29014--- a/drivers/edac/i3200_edac.c
29015+++ b/drivers/edac/i3200_edac.c
29016@@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29017 edac_mc_free(mci);
29018 }
29019
29020-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29021+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29022 {
29023 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29024 I3200},
29025diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29026index 4dc3ac2..67d05a6 100644
29027--- a/drivers/edac/i5000_edac.c
29028+++ b/drivers/edac/i5000_edac.c
29029@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29030 *
29031 * The "E500P" device is the first device supported.
29032 */
29033-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29034+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29035 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29036 .driver_data = I5000P},
29037
29038diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29039index bcbdeec..9886d16 100644
29040--- a/drivers/edac/i5100_edac.c
29041+++ b/drivers/edac/i5100_edac.c
29042@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29043 edac_mc_free(mci);
29044 }
29045
29046-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29047+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29048 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29049 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29050 { 0, }
29051diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29052index 74d6ec34..baff517 100644
29053--- a/drivers/edac/i5400_edac.c
29054+++ b/drivers/edac/i5400_edac.c
29055@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29056 *
29057 * The "E500P" device is the first device supported.
29058 */
29059-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29060+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29061 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29062 {0,} /* 0 terminated list. */
29063 };
29064diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29065index 6104dba..e7ea8e1 100644
29066--- a/drivers/edac/i7300_edac.c
29067+++ b/drivers/edac/i7300_edac.c
29068@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29069 *
29070 * Has only 8086:360c PCI ID
29071 */
29072-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29073+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29074 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29075 {0,} /* 0 terminated list. */
29076 };
29077diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29078index 8568d9b..42b2fa8 100644
29079--- a/drivers/edac/i7core_edac.c
29080+++ b/drivers/edac/i7core_edac.c
29081@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29082 /*
29083 * pci_device_id table for which devices we are looking for
29084 */
29085-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29086+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29087 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29088 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29089 {0,} /* 0 terminated list. */
29090diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29091index 4329d39..f3022ef 100644
29092--- a/drivers/edac/i82443bxgx_edac.c
29093+++ b/drivers/edac/i82443bxgx_edac.c
29094@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29095
29096 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29097
29098-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29099+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29100 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29101 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29102 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29103diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29104index 931a057..fd28340 100644
29105--- a/drivers/edac/i82860_edac.c
29106+++ b/drivers/edac/i82860_edac.c
29107@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29108 edac_mc_free(mci);
29109 }
29110
29111-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29112+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29113 {
29114 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29115 I82860},
29116diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29117index 33864c6..01edc61 100644
29118--- a/drivers/edac/i82875p_edac.c
29119+++ b/drivers/edac/i82875p_edac.c
29120@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29121 edac_mc_free(mci);
29122 }
29123
29124-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29125+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29126 {
29127 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29128 I82875P},
29129diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29130index 4184e01..dcb2cd3 100644
29131--- a/drivers/edac/i82975x_edac.c
29132+++ b/drivers/edac/i82975x_edac.c
29133@@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29134 edac_mc_free(mci);
29135 }
29136
29137-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29138+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29139 {
29140 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29141 I82975X
29142diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29143index 0106747..0b40417 100644
29144--- a/drivers/edac/mce_amd.h
29145+++ b/drivers/edac/mce_amd.h
29146@@ -83,7 +83,7 @@ struct amd_decoder_ops {
29147 bool (*dc_mce)(u16, u8);
29148 bool (*ic_mce)(u16, u8);
29149 bool (*nb_mce)(u16, u8);
29150-};
29151+} __no_const;
29152
29153 void amd_report_gart_errors(bool);
29154 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29155diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29156index e294e1b..a41b05b 100644
29157--- a/drivers/edac/r82600_edac.c
29158+++ b/drivers/edac/r82600_edac.c
29159@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29160 edac_mc_free(mci);
29161 }
29162
29163-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29164+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29165 {
29166 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29167 },
29168diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29169index 1dc118d..8c68af9 100644
29170--- a/drivers/edac/sb_edac.c
29171+++ b/drivers/edac/sb_edac.c
29172@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29173 /*
29174 * pci_device_id table for which devices we are looking for
29175 */
29176-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29177+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29178 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29179 {0,} /* 0 terminated list. */
29180 };
29181diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29182index b6f47de..c5acf3a 100644
29183--- a/drivers/edac/x38_edac.c
29184+++ b/drivers/edac/x38_edac.c
29185@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29186 edac_mc_free(mci);
29187 }
29188
29189-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29190+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29191 {
29192 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29193 X38},
29194diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29195index 85661b0..c784559a 100644
29196--- a/drivers/firewire/core-card.c
29197+++ b/drivers/firewire/core-card.c
29198@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29199
29200 void fw_core_remove_card(struct fw_card *card)
29201 {
29202- struct fw_card_driver dummy_driver = dummy_driver_template;
29203+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29204
29205 card->driver->update_phy_reg(card, 4,
29206 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29207diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29208index 4799393..37bd3ab 100644
29209--- a/drivers/firewire/core-cdev.c
29210+++ b/drivers/firewire/core-cdev.c
29211@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29212 int ret;
29213
29214 if ((request->channels == 0 && request->bandwidth == 0) ||
29215- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29216- request->bandwidth < 0)
29217+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29218 return -EINVAL;
29219
29220 r = kmalloc(sizeof(*r), GFP_KERNEL);
29221diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29222index 855ab3f..11f4bbd 100644
29223--- a/drivers/firewire/core-transaction.c
29224+++ b/drivers/firewire/core-transaction.c
29225@@ -37,6 +37,7 @@
29226 #include <linux/timer.h>
29227 #include <linux/types.h>
29228 #include <linux/workqueue.h>
29229+#include <linux/sched.h>
29230
29231 #include <asm/byteorder.h>
29232
29233diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29234index b45be57..5fad18b 100644
29235--- a/drivers/firewire/core.h
29236+++ b/drivers/firewire/core.h
29237@@ -101,6 +101,7 @@ struct fw_card_driver {
29238
29239 int (*stop_iso)(struct fw_iso_context *ctx);
29240 };
29241+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29242
29243 void fw_card_initialize(struct fw_card *card,
29244 const struct fw_card_driver *driver, struct device *device);
29245diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29246index 153980b..4b4d046 100644
29247--- a/drivers/firmware/dmi_scan.c
29248+++ b/drivers/firmware/dmi_scan.c
29249@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29250 }
29251 }
29252 else {
29253- /*
29254- * no iounmap() for that ioremap(); it would be a no-op, but
29255- * it's so early in setup that sucker gets confused into doing
29256- * what it shouldn't if we actually call it.
29257- */
29258 p = dmi_ioremap(0xF0000, 0x10000);
29259 if (p == NULL)
29260 goto error;
29261@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29262 if (buf == NULL)
29263 return -1;
29264
29265- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29266+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29267
29268 iounmap(buf);
29269 return 0;
29270diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29271index 82d5c20..44a7177 100644
29272--- a/drivers/gpio/gpio-vr41xx.c
29273+++ b/drivers/gpio/gpio-vr41xx.c
29274@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29275 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29276 maskl, pendl, maskh, pendh);
29277
29278- atomic_inc(&irq_err_count);
29279+ atomic_inc_unchecked(&irq_err_count);
29280
29281 return -EINVAL;
29282 }
29283diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29284index 84a4a80..ce0306e 100644
29285--- a/drivers/gpu/drm/drm_crtc_helper.c
29286+++ b/drivers/gpu/drm/drm_crtc_helper.c
29287@@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29288 struct drm_crtc *tmp;
29289 int crtc_mask = 1;
29290
29291- WARN(!crtc, "checking null crtc?\n");
29292+ BUG_ON(!crtc);
29293
29294 dev = crtc->dev;
29295
29296diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29297index ebf7d3f..d64c436 100644
29298--- a/drivers/gpu/drm/drm_drv.c
29299+++ b/drivers/gpu/drm/drm_drv.c
29300@@ -312,7 +312,7 @@ module_exit(drm_core_exit);
29301 /**
29302 * Copy and IOCTL return string to user space
29303 */
29304-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29305+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29306 {
29307 int len;
29308
29309@@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
29310
29311 dev = file_priv->minor->dev;
29312 atomic_inc(&dev->ioctl_count);
29313- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29314+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29315 ++file_priv->ioctl_count;
29316
29317 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29318diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29319index 6263b01..7987f55 100644
29320--- a/drivers/gpu/drm/drm_fops.c
29321+++ b/drivers/gpu/drm/drm_fops.c
29322@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29323 }
29324
29325 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29326- atomic_set(&dev->counts[i], 0);
29327+ atomic_set_unchecked(&dev->counts[i], 0);
29328
29329 dev->sigdata.lock = NULL;
29330
29331@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29332
29333 retcode = drm_open_helper(inode, filp, dev);
29334 if (!retcode) {
29335- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29336- if (!dev->open_count++)
29337+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29338+ if (local_inc_return(&dev->open_count) == 1)
29339 retcode = drm_setup(dev);
29340 }
29341 if (!retcode) {
29342@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29343
29344 mutex_lock(&drm_global_mutex);
29345
29346- DRM_DEBUG("open_count = %d\n", dev->open_count);
29347+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29348
29349 if (dev->driver->preclose)
29350 dev->driver->preclose(dev, file_priv);
29351@@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
29352 * Begin inline drm_release
29353 */
29354
29355- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29356+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29357 task_pid_nr(current),
29358 (long)old_encode_dev(file_priv->minor->device),
29359- dev->open_count);
29360+ local_read(&dev->open_count));
29361
29362 /* Release any auth tokens that might point to this file_priv,
29363 (do that under the drm_global_mutex) */
29364@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29365 * End inline drm_release
29366 */
29367
29368- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29369- if (!--dev->open_count) {
29370+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29371+ if (local_dec_and_test(&dev->open_count)) {
29372 if (atomic_read(&dev->ioctl_count)) {
29373 DRM_ERROR("Device busy: %d\n",
29374 atomic_read(&dev->ioctl_count));
29375diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29376index c87dc96..326055d 100644
29377--- a/drivers/gpu/drm/drm_global.c
29378+++ b/drivers/gpu/drm/drm_global.c
29379@@ -36,7 +36,7 @@
29380 struct drm_global_item {
29381 struct mutex mutex;
29382 void *object;
29383- int refcount;
29384+ atomic_t refcount;
29385 };
29386
29387 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29388@@ -49,7 +49,7 @@ void drm_global_init(void)
29389 struct drm_global_item *item = &glob[i];
29390 mutex_init(&item->mutex);
29391 item->object = NULL;
29392- item->refcount = 0;
29393+ atomic_set(&item->refcount, 0);
29394 }
29395 }
29396
29397@@ -59,7 +59,7 @@ void drm_global_release(void)
29398 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
29399 struct drm_global_item *item = &glob[i];
29400 BUG_ON(item->object != NULL);
29401- BUG_ON(item->refcount != 0);
29402+ BUG_ON(atomic_read(&item->refcount) != 0);
29403 }
29404 }
29405
29406@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29407 void *object;
29408
29409 mutex_lock(&item->mutex);
29410- if (item->refcount == 0) {
29411+ if (atomic_read(&item->refcount) == 0) {
29412 item->object = kzalloc(ref->size, GFP_KERNEL);
29413 if (unlikely(item->object == NULL)) {
29414 ret = -ENOMEM;
29415@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29416 goto out_err;
29417
29418 }
29419- ++item->refcount;
29420+ atomic_inc(&item->refcount);
29421 ref->object = item->object;
29422 object = item->object;
29423 mutex_unlock(&item->mutex);
29424@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
29425 struct drm_global_item *item = &glob[ref->global_type];
29426
29427 mutex_lock(&item->mutex);
29428- BUG_ON(item->refcount == 0);
29429+ BUG_ON(atomic_read(&item->refcount) == 0);
29430 BUG_ON(ref->object != item->object);
29431- if (--item->refcount == 0) {
29432+ if (atomic_dec_and_test(&item->refcount)) {
29433 ref->release(ref);
29434 item->object = NULL;
29435 }
29436diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
29437index ab1162d..42587b2 100644
29438--- a/drivers/gpu/drm/drm_info.c
29439+++ b/drivers/gpu/drm/drm_info.c
29440@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
29441 struct drm_local_map *map;
29442 struct drm_map_list *r_list;
29443
29444- /* Hardcoded from _DRM_FRAME_BUFFER,
29445- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29446- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29447- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29448+ static const char * const types[] = {
29449+ [_DRM_FRAME_BUFFER] = "FB",
29450+ [_DRM_REGISTERS] = "REG",
29451+ [_DRM_SHM] = "SHM",
29452+ [_DRM_AGP] = "AGP",
29453+ [_DRM_SCATTER_GATHER] = "SG",
29454+ [_DRM_CONSISTENT] = "PCI",
29455+ [_DRM_GEM] = "GEM" };
29456 const char *type;
29457 int i;
29458
29459@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
29460 map = r_list->map;
29461 if (!map)
29462 continue;
29463- if (map->type < 0 || map->type > 5)
29464+ if (map->type >= ARRAY_SIZE(types))
29465 type = "??";
29466 else
29467 type = types[map->type];
29468@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
29469 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29470 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29471 vma->vm_flags & VM_IO ? 'i' : '-',
29472+#ifdef CONFIG_GRKERNSEC_HIDESYM
29473+ 0);
29474+#else
29475 vma->vm_pgoff);
29476+#endif
29477
29478 #if defined(__i386__)
29479 pgprot = pgprot_val(vma->vm_page_prot);
29480diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
29481index 637fcc3..e890b33 100644
29482--- a/drivers/gpu/drm/drm_ioc32.c
29483+++ b/drivers/gpu/drm/drm_ioc32.c
29484@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
29485 request = compat_alloc_user_space(nbytes);
29486 if (!access_ok(VERIFY_WRITE, request, nbytes))
29487 return -EFAULT;
29488- list = (struct drm_buf_desc *) (request + 1);
29489+ list = (struct drm_buf_desc __user *) (request + 1);
29490
29491 if (__put_user(count, &request->count)
29492 || __put_user(list, &request->list))
29493@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
29494 request = compat_alloc_user_space(nbytes);
29495 if (!access_ok(VERIFY_WRITE, request, nbytes))
29496 return -EFAULT;
29497- list = (struct drm_buf_pub *) (request + 1);
29498+ list = (struct drm_buf_pub __user *) (request + 1);
29499
29500 if (__put_user(count, &request->count)
29501 || __put_user(list, &request->list))
29502diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
29503index 956fd38..e52167a 100644
29504--- a/drivers/gpu/drm/drm_ioctl.c
29505+++ b/drivers/gpu/drm/drm_ioctl.c
29506@@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
29507 stats->data[i].value =
29508 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29509 else
29510- stats->data[i].value = atomic_read(&dev->counts[i]);
29511+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29512 stats->data[i].type = dev->types[i];
29513 }
29514
29515diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
29516index c79c713..2048588 100644
29517--- a/drivers/gpu/drm/drm_lock.c
29518+++ b/drivers/gpu/drm/drm_lock.c
29519@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29520 if (drm_lock_take(&master->lock, lock->context)) {
29521 master->lock.file_priv = file_priv;
29522 master->lock.lock_time = jiffies;
29523- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29524+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29525 break; /* Got lock */
29526 }
29527
29528@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29529 return -EINVAL;
29530 }
29531
29532- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29533+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29534
29535 if (drm_lock_free(&master->lock, lock->context)) {
29536 /* FIXME: Should really bail out here. */
29537diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
29538index 7f4b4e1..bf4def2 100644
29539--- a/drivers/gpu/drm/i810/i810_dma.c
29540+++ b/drivers/gpu/drm/i810/i810_dma.c
29541@@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
29542 dma->buflist[vertex->idx],
29543 vertex->discard, vertex->used);
29544
29545- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29546- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29547+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29548+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29549 sarea_priv->last_enqueue = dev_priv->counter - 1;
29550 sarea_priv->last_dispatch = (int)hw_status[5];
29551
29552@@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
29553 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29554 mc->last_render);
29555
29556- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29557- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29558+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29559+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29560 sarea_priv->last_enqueue = dev_priv->counter - 1;
29561 sarea_priv->last_dispatch = (int)hw_status[5];
29562
29563diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
29564index c9339f4..f5e1b9d 100644
29565--- a/drivers/gpu/drm/i810/i810_drv.h
29566+++ b/drivers/gpu/drm/i810/i810_drv.h
29567@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29568 int page_flipping;
29569
29570 wait_queue_head_t irq_queue;
29571- atomic_t irq_received;
29572- atomic_t irq_emitted;
29573+ atomic_unchecked_t irq_received;
29574+ atomic_unchecked_t irq_emitted;
29575
29576 int front_offset;
29577 } drm_i810_private_t;
29578diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
29579index deaa657..e0fd296 100644
29580--- a/drivers/gpu/drm/i915/i915_debugfs.c
29581+++ b/drivers/gpu/drm/i915/i915_debugfs.c
29582@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
29583 I915_READ(GTIMR));
29584 }
29585 seq_printf(m, "Interrupts received: %d\n",
29586- atomic_read(&dev_priv->irq_received));
29587+ atomic_read_unchecked(&dev_priv->irq_received));
29588 for (i = 0; i < I915_NUM_RINGS; i++) {
29589 if (IS_GEN6(dev) || IS_GEN7(dev)) {
29590 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
29591@@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
29592 return ret;
29593
29594 if (opregion->header)
29595- seq_write(m, opregion->header, OPREGION_SIZE);
29596+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
29597
29598 mutex_unlock(&dev->struct_mutex);
29599
29600diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
29601index ddfe3d9..f6e6b21 100644
29602--- a/drivers/gpu/drm/i915/i915_dma.c
29603+++ b/drivers/gpu/drm/i915/i915_dma.c
29604@@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
29605 bool can_switch;
29606
29607 spin_lock(&dev->count_lock);
29608- can_switch = (dev->open_count == 0);
29609+ can_switch = (local_read(&dev->open_count) == 0);
29610 spin_unlock(&dev->count_lock);
29611 return can_switch;
29612 }
29613diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
29614index 9689ca3..294f9c1 100644
29615--- a/drivers/gpu/drm/i915/i915_drv.h
29616+++ b/drivers/gpu/drm/i915/i915_drv.h
29617@@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
29618 /* render clock increase/decrease */
29619 /* display clock increase/decrease */
29620 /* pll clock increase/decrease */
29621-};
29622+} __no_const;
29623
29624 struct intel_device_info {
29625 u8 gen;
29626@@ -320,7 +320,7 @@ typedef struct drm_i915_private {
29627 int current_page;
29628 int page_flipping;
29629
29630- atomic_t irq_received;
29631+ atomic_unchecked_t irq_received;
29632
29633 /* protects the irq masks */
29634 spinlock_t irq_lock;
29635@@ -896,7 +896,7 @@ struct drm_i915_gem_object {
29636 * will be page flipped away on the next vblank. When it
29637 * reaches 0, dev_priv->pending_flip_queue will be woken up.
29638 */
29639- atomic_t pending_flip;
29640+ atomic_unchecked_t pending_flip;
29641 };
29642
29643 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
29644@@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
29645 extern void intel_teardown_gmbus(struct drm_device *dev);
29646 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
29647 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
29648-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29649+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29650 {
29651 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
29652 }
29653diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29654index 65e1f00..a30ef00 100644
29655--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29656+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29657@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
29658 i915_gem_clflush_object(obj);
29659
29660 if (obj->base.pending_write_domain)
29661- cd->flips |= atomic_read(&obj->pending_flip);
29662+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
29663
29664 /* The actual obj->write_domain will be updated with
29665 * pending_write_domain after we emit the accumulated flush for all
29666@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
29667
29668 static int
29669 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
29670- int count)
29671+ unsigned int count)
29672 {
29673- int i;
29674+ unsigned int i;
29675
29676 for (i = 0; i < count; i++) {
29677 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
29678diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
29679index 5bd4361..0241a42 100644
29680--- a/drivers/gpu/drm/i915/i915_irq.c
29681+++ b/drivers/gpu/drm/i915/i915_irq.c
29682@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
29683 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
29684 struct drm_i915_master_private *master_priv;
29685
29686- atomic_inc(&dev_priv->irq_received);
29687+ atomic_inc_unchecked(&dev_priv->irq_received);
29688
29689 /* disable master interrupt before clearing iir */
29690 de_ier = I915_READ(DEIER);
29691@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
29692 struct drm_i915_master_private *master_priv;
29693 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
29694
29695- atomic_inc(&dev_priv->irq_received);
29696+ atomic_inc_unchecked(&dev_priv->irq_received);
29697
29698 if (IS_GEN6(dev))
29699 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
29700@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
29701 int ret = IRQ_NONE, pipe;
29702 bool blc_event = false;
29703
29704- atomic_inc(&dev_priv->irq_received);
29705+ atomic_inc_unchecked(&dev_priv->irq_received);
29706
29707 iir = I915_READ(IIR);
29708
29709@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
29710 {
29711 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29712
29713- atomic_set(&dev_priv->irq_received, 0);
29714+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29715
29716 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29717 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29718@@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
29719 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29720 int pipe;
29721
29722- atomic_set(&dev_priv->irq_received, 0);
29723+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29724
29725 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29726 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29727diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
29728index 397087c..9178d0d 100644
29729--- a/drivers/gpu/drm/i915/intel_display.c
29730+++ b/drivers/gpu/drm/i915/intel_display.c
29731@@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
29732
29733 wait_event(dev_priv->pending_flip_queue,
29734 atomic_read(&dev_priv->mm.wedged) ||
29735- atomic_read(&obj->pending_flip) == 0);
29736+ atomic_read_unchecked(&obj->pending_flip) == 0);
29737
29738 /* Big Hammer, we also need to ensure that any pending
29739 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
29740@@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
29741 obj = to_intel_framebuffer(crtc->fb)->obj;
29742 dev_priv = crtc->dev->dev_private;
29743 wait_event(dev_priv->pending_flip_queue,
29744- atomic_read(&obj->pending_flip) == 0);
29745+ atomic_read_unchecked(&obj->pending_flip) == 0);
29746 }
29747
29748 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
29749@@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
29750
29751 atomic_clear_mask(1 << intel_crtc->plane,
29752 &obj->pending_flip.counter);
29753- if (atomic_read(&obj->pending_flip) == 0)
29754+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
29755 wake_up(&dev_priv->pending_flip_queue);
29756
29757 schedule_work(&work->work);
29758@@ -7461,7 +7461,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29759 /* Block clients from rendering to the new back buffer until
29760 * the flip occurs and the object is no longer visible.
29761 */
29762- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29763+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29764
29765 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
29766 if (ret)
29767@@ -7475,7 +7475,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29768 return 0;
29769
29770 cleanup_pending:
29771- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29772+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29773 drm_gem_object_unreference(&work->old_fb_obj->base);
29774 drm_gem_object_unreference(&obj->base);
29775 mutex_unlock(&dev->struct_mutex);
29776diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
29777index 54558a0..2d97005 100644
29778--- a/drivers/gpu/drm/mga/mga_drv.h
29779+++ b/drivers/gpu/drm/mga/mga_drv.h
29780@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29781 u32 clear_cmd;
29782 u32 maccess;
29783
29784- atomic_t vbl_received; /**< Number of vblanks received. */
29785+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29786 wait_queue_head_t fence_queue;
29787- atomic_t last_fence_retired;
29788+ atomic_unchecked_t last_fence_retired;
29789 u32 next_fence_to_post;
29790
29791 unsigned int fb_cpp;
29792diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
29793index 2581202..f230a8d9 100644
29794--- a/drivers/gpu/drm/mga/mga_irq.c
29795+++ b/drivers/gpu/drm/mga/mga_irq.c
29796@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
29797 if (crtc != 0)
29798 return 0;
29799
29800- return atomic_read(&dev_priv->vbl_received);
29801+ return atomic_read_unchecked(&dev_priv->vbl_received);
29802 }
29803
29804
29805@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29806 /* VBLANK interrupt */
29807 if (status & MGA_VLINEPEN) {
29808 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29809- atomic_inc(&dev_priv->vbl_received);
29810+ atomic_inc_unchecked(&dev_priv->vbl_received);
29811 drm_handle_vblank(dev, 0);
29812 handled = 1;
29813 }
29814@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29815 if ((prim_start & ~0x03) != (prim_end & ~0x03))
29816 MGA_WRITE(MGA_PRIMEND, prim_end);
29817
29818- atomic_inc(&dev_priv->last_fence_retired);
29819+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29820 DRM_WAKEUP(&dev_priv->fence_queue);
29821 handled = 1;
29822 }
29823@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
29824 * using fences.
29825 */
29826 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29827- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29828+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29829 - *sequence) <= (1 << 23)));
29830
29831 *sequence = cur_fence;
29832diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
29833index e5cbead..6c354a3 100644
29834--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
29835+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
29836@@ -199,7 +199,7 @@ struct methods {
29837 const char desc[8];
29838 void (*loadbios)(struct drm_device *, uint8_t *);
29839 const bool rw;
29840-};
29841+} __do_const;
29842
29843 static struct methods shadow_methods[] = {
29844 { "PRAMIN", load_vbios_pramin, true },
29845@@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
29846 struct bit_table {
29847 const char id;
29848 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
29849-};
29850+} __no_const;
29851
29852 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
29853
29854diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
29855index b827098..c31a797 100644
29856--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
29857+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
29858@@ -242,7 +242,7 @@ struct nouveau_channel {
29859 struct list_head pending;
29860 uint32_t sequence;
29861 uint32_t sequence_ack;
29862- atomic_t last_sequence_irq;
29863+ atomic_unchecked_t last_sequence_irq;
29864 struct nouveau_vma vma;
29865 } fence;
29866
29867@@ -323,7 +323,7 @@ struct nouveau_exec_engine {
29868 u32 handle, u16 class);
29869 void (*set_tile_region)(struct drm_device *dev, int i);
29870 void (*tlb_flush)(struct drm_device *, int engine);
29871-};
29872+} __no_const;
29873
29874 struct nouveau_instmem_engine {
29875 void *priv;
29876@@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
29877 struct nouveau_mc_engine {
29878 int (*init)(struct drm_device *dev);
29879 void (*takedown)(struct drm_device *dev);
29880-};
29881+} __no_const;
29882
29883 struct nouveau_timer_engine {
29884 int (*init)(struct drm_device *dev);
29885 void (*takedown)(struct drm_device *dev);
29886 uint64_t (*read)(struct drm_device *dev);
29887-};
29888+} __no_const;
29889
29890 struct nouveau_fb_engine {
29891 int num_tiles;
29892@@ -566,7 +566,7 @@ struct nouveau_vram_engine {
29893 void (*put)(struct drm_device *, struct nouveau_mem **);
29894
29895 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
29896-};
29897+} __no_const;
29898
29899 struct nouveau_engine {
29900 struct nouveau_instmem_engine instmem;
29901@@ -714,7 +714,7 @@ struct drm_nouveau_private {
29902 struct drm_global_reference mem_global_ref;
29903 struct ttm_bo_global_ref bo_global_ref;
29904 struct ttm_bo_device bdev;
29905- atomic_t validate_sequence;
29906+ atomic_unchecked_t validate_sequence;
29907 } ttm;
29908
29909 struct {
29910diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
29911index 2f6daae..c9d7b9e 100644
29912--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
29913+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
29914@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
29915 if (USE_REFCNT(dev))
29916 sequence = nvchan_rd32(chan, 0x48);
29917 else
29918- sequence = atomic_read(&chan->fence.last_sequence_irq);
29919+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
29920
29921 if (chan->fence.sequence_ack == sequence)
29922 goto out;
29923@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
29924 return ret;
29925 }
29926
29927- atomic_set(&chan->fence.last_sequence_irq, 0);
29928+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
29929 return 0;
29930 }
29931
29932diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
29933index 7ce3fde..cb3ea04 100644
29934--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
29935+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
29936@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
29937 int trycnt = 0;
29938 int ret, i;
29939
29940- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
29941+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
29942 retry:
29943 if (++trycnt > 100000) {
29944 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
29945diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
29946index f80c5e0..936baa7 100644
29947--- a/drivers/gpu/drm/nouveau/nouveau_state.c
29948+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
29949@@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
29950 bool can_switch;
29951
29952 spin_lock(&dev->count_lock);
29953- can_switch = (dev->open_count == 0);
29954+ can_switch = (local_read(&dev->open_count) == 0);
29955 spin_unlock(&dev->count_lock);
29956 return can_switch;
29957 }
29958diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
29959index dbdea8e..cd6eeeb 100644
29960--- a/drivers/gpu/drm/nouveau/nv04_graph.c
29961+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
29962@@ -554,7 +554,7 @@ static int
29963 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
29964 u32 class, u32 mthd, u32 data)
29965 {
29966- atomic_set(&chan->fence.last_sequence_irq, data);
29967+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
29968 return 0;
29969 }
29970
29971diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
29972index bcac90b..53bfc76 100644
29973--- a/drivers/gpu/drm/r128/r128_cce.c
29974+++ b/drivers/gpu/drm/r128/r128_cce.c
29975@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
29976
29977 /* GH: Simple idle check.
29978 */
29979- atomic_set(&dev_priv->idle_count, 0);
29980+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29981
29982 /* We don't support anything other than bus-mastering ring mode,
29983 * but the ring can be in either AGP or PCI space for the ring
29984diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
29985index 930c71b..499aded 100644
29986--- a/drivers/gpu/drm/r128/r128_drv.h
29987+++ b/drivers/gpu/drm/r128/r128_drv.h
29988@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29989 int is_pci;
29990 unsigned long cce_buffers_offset;
29991
29992- atomic_t idle_count;
29993+ atomic_unchecked_t idle_count;
29994
29995 int page_flipping;
29996 int current_page;
29997 u32 crtc_offset;
29998 u32 crtc_offset_cntl;
29999
30000- atomic_t vbl_received;
30001+ atomic_unchecked_t vbl_received;
30002
30003 u32 color_fmt;
30004 unsigned int front_offset;
30005diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30006index 429d5a0..7e899ed 100644
30007--- a/drivers/gpu/drm/r128/r128_irq.c
30008+++ b/drivers/gpu/drm/r128/r128_irq.c
30009@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30010 if (crtc != 0)
30011 return 0;
30012
30013- return atomic_read(&dev_priv->vbl_received);
30014+ return atomic_read_unchecked(&dev_priv->vbl_received);
30015 }
30016
30017 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30018@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30019 /* VBLANK interrupt */
30020 if (status & R128_CRTC_VBLANK_INT) {
30021 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30022- atomic_inc(&dev_priv->vbl_received);
30023+ atomic_inc_unchecked(&dev_priv->vbl_received);
30024 drm_handle_vblank(dev, 0);
30025 return IRQ_HANDLED;
30026 }
30027diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30028index a9e33ce..09edd4b 100644
30029--- a/drivers/gpu/drm/r128/r128_state.c
30030+++ b/drivers/gpu/drm/r128/r128_state.c
30031@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30032
30033 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30034 {
30035- if (atomic_read(&dev_priv->idle_count) == 0)
30036+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30037 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30038 else
30039- atomic_set(&dev_priv->idle_count, 0);
30040+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30041 }
30042
30043 #endif
30044diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30045index 5a82b6b..9e69c73 100644
30046--- a/drivers/gpu/drm/radeon/mkregtable.c
30047+++ b/drivers/gpu/drm/radeon/mkregtable.c
30048@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30049 regex_t mask_rex;
30050 regmatch_t match[4];
30051 char buf[1024];
30052- size_t end;
30053+ long end;
30054 int len;
30055 int done = 0;
30056 int r;
30057 unsigned o;
30058 struct offset *offset;
30059 char last_reg_s[10];
30060- int last_reg;
30061+ unsigned long last_reg;
30062
30063 if (regcomp
30064 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30065diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30066index 1668ec1..30ebdab 100644
30067--- a/drivers/gpu/drm/radeon/radeon.h
30068+++ b/drivers/gpu/drm/radeon/radeon.h
30069@@ -250,7 +250,7 @@ struct radeon_fence_driver {
30070 uint32_t scratch_reg;
30071 uint64_t gpu_addr;
30072 volatile uint32_t *cpu_addr;
30073- atomic_t seq;
30074+ atomic_unchecked_t seq;
30075 uint32_t last_seq;
30076 unsigned long last_jiffies;
30077 unsigned long last_timeout;
30078@@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
30079 int x2, int y2);
30080 void (*draw_auto)(struct radeon_device *rdev);
30081 void (*set_default_state)(struct radeon_device *rdev);
30082-};
30083+} __no_const;
30084
30085 struct r600_blit {
30086 struct mutex mutex;
30087@@ -1201,7 +1201,7 @@ struct radeon_asic {
30088 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30089 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30090 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30091-};
30092+} __no_const;
30093
30094 /*
30095 * Asic structures
30096diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30097index 49f7cb7..2fcb48f 100644
30098--- a/drivers/gpu/drm/radeon/radeon_device.c
30099+++ b/drivers/gpu/drm/radeon/radeon_device.c
30100@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30101 bool can_switch;
30102
30103 spin_lock(&dev->count_lock);
30104- can_switch = (dev->open_count == 0);
30105+ can_switch = (local_read(&dev->open_count) == 0);
30106 spin_unlock(&dev->count_lock);
30107 return can_switch;
30108 }
30109diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30110index a1b59ca..86f2d44 100644
30111--- a/drivers/gpu/drm/radeon/radeon_drv.h
30112+++ b/drivers/gpu/drm/radeon/radeon_drv.h
30113@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30114
30115 /* SW interrupt */
30116 wait_queue_head_t swi_queue;
30117- atomic_t swi_emitted;
30118+ atomic_unchecked_t swi_emitted;
30119 int vblank_crtc;
30120 uint32_t irq_enable_reg;
30121 uint32_t r500_disp_irq_reg;
30122diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30123index 4bd36a3..e66fe9c 100644
30124--- a/drivers/gpu/drm/radeon/radeon_fence.c
30125+++ b/drivers/gpu/drm/radeon/radeon_fence.c
30126@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30127 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30128 return 0;
30129 }
30130- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30131+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30132 if (!rdev->ring[fence->ring].ready)
30133 /* FIXME: cp is not running assume everythings is done right
30134 * away
30135@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30136 }
30137 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30138 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30139- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30140+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30141 rdev->fence_drv[ring].initialized = true;
30142 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30143 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30144@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30145 rdev->fence_drv[ring].scratch_reg = -1;
30146 rdev->fence_drv[ring].cpu_addr = NULL;
30147 rdev->fence_drv[ring].gpu_addr = 0;
30148- atomic_set(&rdev->fence_drv[ring].seq, 0);
30149+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30150 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30151 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30152 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30153diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30154index 48b7cea..342236f 100644
30155--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30156+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30157@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30158 request = compat_alloc_user_space(sizeof(*request));
30159 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30160 || __put_user(req32.param, &request->param)
30161- || __put_user((void __user *)(unsigned long)req32.value,
30162+ || __put_user((unsigned long)req32.value,
30163 &request->value))
30164 return -EFAULT;
30165
30166diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30167index 00da384..32f972d 100644
30168--- a/drivers/gpu/drm/radeon/radeon_irq.c
30169+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30170@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30171 unsigned int ret;
30172 RING_LOCALS;
30173
30174- atomic_inc(&dev_priv->swi_emitted);
30175- ret = atomic_read(&dev_priv->swi_emitted);
30176+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30177+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30178
30179 BEGIN_RING(4);
30180 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30181@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30182 drm_radeon_private_t *dev_priv =
30183 (drm_radeon_private_t *) dev->dev_private;
30184
30185- atomic_set(&dev_priv->swi_emitted, 0);
30186+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30187 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30188
30189 dev->max_vblank_count = 0x001fffff;
30190diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30191index e8422ae..d22d4a8 100644
30192--- a/drivers/gpu/drm/radeon/radeon_state.c
30193+++ b/drivers/gpu/drm/radeon/radeon_state.c
30194@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30195 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30196 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30197
30198- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30199+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30200 sarea_priv->nbox * sizeof(depth_boxes[0])))
30201 return -EFAULT;
30202
30203@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30204 {
30205 drm_radeon_private_t *dev_priv = dev->dev_private;
30206 drm_radeon_getparam_t *param = data;
30207- int value;
30208+ int value = 0;
30209
30210 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30211
30212diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30213index c421e77..e6bf2e8 100644
30214--- a/drivers/gpu/drm/radeon/radeon_ttm.c
30215+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30216@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30217 }
30218 if (unlikely(ttm_vm_ops == NULL)) {
30219 ttm_vm_ops = vma->vm_ops;
30220- radeon_ttm_vm_ops = *ttm_vm_ops;
30221- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30222+ pax_open_kernel();
30223+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30224+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30225+ pax_close_kernel();
30226 }
30227 vma->vm_ops = &radeon_ttm_vm_ops;
30228 return 0;
30229diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30230index f68dff2..8df955c 100644
30231--- a/drivers/gpu/drm/radeon/rs690.c
30232+++ b/drivers/gpu/drm/radeon/rs690.c
30233@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30234 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30235 rdev->pm.sideport_bandwidth.full)
30236 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30237- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30238+ read_delay_latency.full = dfixed_const(800 * 1000);
30239 read_delay_latency.full = dfixed_div(read_delay_latency,
30240 rdev->pm.igp_sideport_mclk);
30241+ a.full = dfixed_const(370);
30242+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30243 } else {
30244 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30245 rdev->pm.k8_bandwidth.full)
30246diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30247index 499debd..66fce72 100644
30248--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30249+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30250@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30251 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30252 struct shrink_control *sc)
30253 {
30254- static atomic_t start_pool = ATOMIC_INIT(0);
30255+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30256 unsigned i;
30257- unsigned pool_offset = atomic_add_return(1, &start_pool);
30258+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30259 struct ttm_page_pool *pool;
30260 int shrink_pages = sc->nr_to_scan;
30261
30262diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30263index 88edacc..1e5412b 100644
30264--- a/drivers/gpu/drm/via/via_drv.h
30265+++ b/drivers/gpu/drm/via/via_drv.h
30266@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30267 typedef uint32_t maskarray_t[5];
30268
30269 typedef struct drm_via_irq {
30270- atomic_t irq_received;
30271+ atomic_unchecked_t irq_received;
30272 uint32_t pending_mask;
30273 uint32_t enable_mask;
30274 wait_queue_head_t irq_queue;
30275@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30276 struct timeval last_vblank;
30277 int last_vblank_valid;
30278 unsigned usec_per_vblank;
30279- atomic_t vbl_received;
30280+ atomic_unchecked_t vbl_received;
30281 drm_via_state_t hc_state;
30282 char pci_buf[VIA_PCI_BUF_SIZE];
30283 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30284diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30285index d391f48..10c8ca3 100644
30286--- a/drivers/gpu/drm/via/via_irq.c
30287+++ b/drivers/gpu/drm/via/via_irq.c
30288@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30289 if (crtc != 0)
30290 return 0;
30291
30292- return atomic_read(&dev_priv->vbl_received);
30293+ return atomic_read_unchecked(&dev_priv->vbl_received);
30294 }
30295
30296 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30297@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30298
30299 status = VIA_READ(VIA_REG_INTERRUPT);
30300 if (status & VIA_IRQ_VBLANK_PENDING) {
30301- atomic_inc(&dev_priv->vbl_received);
30302- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30303+ atomic_inc_unchecked(&dev_priv->vbl_received);
30304+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30305 do_gettimeofday(&cur_vblank);
30306 if (dev_priv->last_vblank_valid) {
30307 dev_priv->usec_per_vblank =
30308@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30309 dev_priv->last_vblank = cur_vblank;
30310 dev_priv->last_vblank_valid = 1;
30311 }
30312- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30313+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30314 DRM_DEBUG("US per vblank is: %u\n",
30315 dev_priv->usec_per_vblank);
30316 }
30317@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30318
30319 for (i = 0; i < dev_priv->num_irqs; ++i) {
30320 if (status & cur_irq->pending_mask) {
30321- atomic_inc(&cur_irq->irq_received);
30322+ atomic_inc_unchecked(&cur_irq->irq_received);
30323 DRM_WAKEUP(&cur_irq->irq_queue);
30324 handled = 1;
30325 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30326@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30327 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30328 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30329 masks[irq][4]));
30330- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30331+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30332 } else {
30333 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30334 (((cur_irq_sequence =
30335- atomic_read(&cur_irq->irq_received)) -
30336+ atomic_read_unchecked(&cur_irq->irq_received)) -
30337 *sequence) <= (1 << 23)));
30338 }
30339 *sequence = cur_irq_sequence;
30340@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30341 }
30342
30343 for (i = 0; i < dev_priv->num_irqs; ++i) {
30344- atomic_set(&cur_irq->irq_received, 0);
30345+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30346 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30347 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30348 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30349@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30350 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30351 case VIA_IRQ_RELATIVE:
30352 irqwait->request.sequence +=
30353- atomic_read(&cur_irq->irq_received);
30354+ atomic_read_unchecked(&cur_irq->irq_received);
30355 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30356 case VIA_IRQ_ABSOLUTE:
30357 break;
30358diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30359index dc27970..f18b008 100644
30360--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30361+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30362@@ -260,7 +260,7 @@ struct vmw_private {
30363 * Fencing and IRQs.
30364 */
30365
30366- atomic_t marker_seq;
30367+ atomic_unchecked_t marker_seq;
30368 wait_queue_head_t fence_queue;
30369 wait_queue_head_t fifo_queue;
30370 int fence_queue_waiters; /* Protected by hw_mutex */
30371diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30372index a0c2f12..68ae6cb 100644
30373--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30374+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30375@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30376 (unsigned int) min,
30377 (unsigned int) fifo->capabilities);
30378
30379- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30380+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30381 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30382 vmw_marker_queue_init(&fifo->marker_queue);
30383 return vmw_fifo_send_fence(dev_priv, &dummy);
30384@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30385 if (reserveable)
30386 iowrite32(bytes, fifo_mem +
30387 SVGA_FIFO_RESERVED);
30388- return fifo_mem + (next_cmd >> 2);
30389+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30390 } else {
30391 need_bounce = true;
30392 }
30393@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30394
30395 fm = vmw_fifo_reserve(dev_priv, bytes);
30396 if (unlikely(fm == NULL)) {
30397- *seqno = atomic_read(&dev_priv->marker_seq);
30398+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30399 ret = -ENOMEM;
30400 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
30401 false, 3*HZ);
30402@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30403 }
30404
30405 do {
30406- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
30407+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
30408 } while (*seqno == 0);
30409
30410 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
30411diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30412index cabc95f..14b3d77 100644
30413--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30414+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30415@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
30416 * emitted. Then the fence is stale and signaled.
30417 */
30418
30419- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
30420+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
30421 > VMW_FENCE_WRAP);
30422
30423 return ret;
30424@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30425
30426 if (fifo_idle)
30427 down_read(&fifo_state->rwsem);
30428- signal_seq = atomic_read(&dev_priv->marker_seq);
30429+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
30430 ret = 0;
30431
30432 for (;;) {
30433diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30434index 8a8725c..afed796 100644
30435--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30436+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30437@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
30438 while (!vmw_lag_lt(queue, us)) {
30439 spin_lock(&queue->lock);
30440 if (list_empty(&queue->head))
30441- seqno = atomic_read(&dev_priv->marker_seq);
30442+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30443 else {
30444 marker = list_first_entry(&queue->head,
30445 struct vmw_marker, head);
30446diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
30447index af08ce7..7a15038 100644
30448--- a/drivers/hid/hid-core.c
30449+++ b/drivers/hid/hid-core.c
30450@@ -2020,7 +2020,7 @@ static bool hid_ignore(struct hid_device *hdev)
30451
30452 int hid_add_device(struct hid_device *hdev)
30453 {
30454- static atomic_t id = ATOMIC_INIT(0);
30455+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30456 int ret;
30457
30458 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30459@@ -2035,7 +2035,7 @@ int hid_add_device(struct hid_device *hdev)
30460 /* XXX hack, any other cleaner solution after the driver core
30461 * is converted to allow more than 20 bytes as the device name? */
30462 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30463- hdev->vendor, hdev->product, atomic_inc_return(&id));
30464+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30465
30466 hid_debug_register(hdev, dev_name(&hdev->dev));
30467 ret = device_add(&hdev->dev);
30468diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
30469index b1ec0e2..c295a61 100644
30470--- a/drivers/hid/usbhid/hiddev.c
30471+++ b/drivers/hid/usbhid/hiddev.c
30472@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
30473 break;
30474
30475 case HIDIOCAPPLICATION:
30476- if (arg < 0 || arg >= hid->maxapplication)
30477+ if (arg >= hid->maxapplication)
30478 break;
30479
30480 for (i = 0; i < hid->maxcollection; i++)
30481diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
30482index 4065374..10ed7dc 100644
30483--- a/drivers/hv/channel.c
30484+++ b/drivers/hv/channel.c
30485@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
30486 int ret = 0;
30487 int t;
30488
30489- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
30490- atomic_inc(&vmbus_connection.next_gpadl_handle);
30491+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
30492+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
30493
30494 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
30495 if (ret)
30496diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
30497index 12aa97f..c0679f7 100644
30498--- a/drivers/hv/hv.c
30499+++ b/drivers/hv/hv.c
30500@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
30501 u64 output_address = (output) ? virt_to_phys(output) : 0;
30502 u32 output_address_hi = output_address >> 32;
30503 u32 output_address_lo = output_address & 0xFFFFFFFF;
30504- void *hypercall_page = hv_context.hypercall_page;
30505+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
30506
30507 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
30508 "=a"(hv_status_lo) : "d" (control_hi),
30509diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
30510index 6d7d286..92b0873 100644
30511--- a/drivers/hv/hyperv_vmbus.h
30512+++ b/drivers/hv/hyperv_vmbus.h
30513@@ -556,7 +556,7 @@ enum vmbus_connect_state {
30514 struct vmbus_connection {
30515 enum vmbus_connect_state conn_state;
30516
30517- atomic_t next_gpadl_handle;
30518+ atomic_unchecked_t next_gpadl_handle;
30519
30520 /*
30521 * Represents channel interrupts. Each bit position represents a
30522diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
30523index a220e57..428f54d 100644
30524--- a/drivers/hv/vmbus_drv.c
30525+++ b/drivers/hv/vmbus_drv.c
30526@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
30527 {
30528 int ret = 0;
30529
30530- static atomic_t device_num = ATOMIC_INIT(0);
30531+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
30532
30533 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
30534- atomic_inc_return(&device_num));
30535+ atomic_inc_return_unchecked(&device_num));
30536
30537 child_device_obj->device.bus = &hv_bus;
30538 child_device_obj->device.parent = &hv_acpi_dev->dev;
30539diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
30540index 554f046..f8b4729 100644
30541--- a/drivers/hwmon/acpi_power_meter.c
30542+++ b/drivers/hwmon/acpi_power_meter.c
30543@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
30544 return res;
30545
30546 temp /= 1000;
30547- if (temp < 0)
30548- return -EINVAL;
30549
30550 mutex_lock(&resource->lock);
30551 resource->trip[attr->index - 7] = temp;
30552diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
30553index 91fdd1f..b66a686 100644
30554--- a/drivers/hwmon/sht15.c
30555+++ b/drivers/hwmon/sht15.c
30556@@ -166,7 +166,7 @@ struct sht15_data {
30557 int supply_uV;
30558 bool supply_uV_valid;
30559 struct work_struct update_supply_work;
30560- atomic_t interrupt_handled;
30561+ atomic_unchecked_t interrupt_handled;
30562 };
30563
30564 /**
30565@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
30566 return ret;
30567
30568 gpio_direction_input(data->pdata->gpio_data);
30569- atomic_set(&data->interrupt_handled, 0);
30570+ atomic_set_unchecked(&data->interrupt_handled, 0);
30571
30572 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30573 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30574 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30575 /* Only relevant if the interrupt hasn't occurred. */
30576- if (!atomic_read(&data->interrupt_handled))
30577+ if (!atomic_read_unchecked(&data->interrupt_handled))
30578 schedule_work(&data->read_work);
30579 }
30580 ret = wait_event_timeout(data->wait_queue,
30581@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
30582
30583 /* First disable the interrupt */
30584 disable_irq_nosync(irq);
30585- atomic_inc(&data->interrupt_handled);
30586+ atomic_inc_unchecked(&data->interrupt_handled);
30587 /* Then schedule a reading work struct */
30588 if (data->state != SHT15_READING_NOTHING)
30589 schedule_work(&data->read_work);
30590@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
30591 * If not, then start the interrupt again - care here as could
30592 * have gone low in meantime so verify it hasn't!
30593 */
30594- atomic_set(&data->interrupt_handled, 0);
30595+ atomic_set_unchecked(&data->interrupt_handled, 0);
30596 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30597 /* If still not occurred or another handler has been scheduled */
30598 if (gpio_get_value(data->pdata->gpio_data)
30599- || atomic_read(&data->interrupt_handled))
30600+ || atomic_read_unchecked(&data->interrupt_handled))
30601 return;
30602 }
30603
30604diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
30605index 378fcb5..5e91fa8 100644
30606--- a/drivers/i2c/busses/i2c-amd756-s4882.c
30607+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
30608@@ -43,7 +43,7 @@
30609 extern struct i2c_adapter amd756_smbus;
30610
30611 static struct i2c_adapter *s4882_adapter;
30612-static struct i2c_algorithm *s4882_algo;
30613+static i2c_algorithm_no_const *s4882_algo;
30614
30615 /* Wrapper access functions for multiplexed SMBus */
30616 static DEFINE_MUTEX(amd756_lock);
30617diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
30618index 29015eb..af2d8e9 100644
30619--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
30620+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
30621@@ -41,7 +41,7 @@
30622 extern struct i2c_adapter *nforce2_smbus;
30623
30624 static struct i2c_adapter *s4985_adapter;
30625-static struct i2c_algorithm *s4985_algo;
30626+static i2c_algorithm_no_const *s4985_algo;
30627
30628 /* Wrapper access functions for multiplexed SMBus */
30629 static DEFINE_MUTEX(nforce2_lock);
30630diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
30631index d7a4833..7fae376 100644
30632--- a/drivers/i2c/i2c-mux.c
30633+++ b/drivers/i2c/i2c-mux.c
30634@@ -28,7 +28,7 @@
30635 /* multiplexer per channel data */
30636 struct i2c_mux_priv {
30637 struct i2c_adapter adap;
30638- struct i2c_algorithm algo;
30639+ i2c_algorithm_no_const algo;
30640
30641 struct i2c_adapter *parent;
30642 void *mux_dev; /* the mux chip/device */
30643diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
30644index 57d00ca..0145194 100644
30645--- a/drivers/ide/aec62xx.c
30646+++ b/drivers/ide/aec62xx.c
30647@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
30648 .cable_detect = atp86x_cable_detect,
30649 };
30650
30651-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
30652+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
30653 { /* 0: AEC6210 */
30654 .name = DRV_NAME,
30655 .init_chipset = init_chipset_aec62xx,
30656diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
30657index 2c8016a..911a27c 100644
30658--- a/drivers/ide/alim15x3.c
30659+++ b/drivers/ide/alim15x3.c
30660@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
30661 .dma_sff_read_status = ide_dma_sff_read_status,
30662 };
30663
30664-static const struct ide_port_info ali15x3_chipset __devinitdata = {
30665+static const struct ide_port_info ali15x3_chipset __devinitconst = {
30666 .name = DRV_NAME,
30667 .init_chipset = init_chipset_ali15x3,
30668 .init_hwif = init_hwif_ali15x3,
30669diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
30670index 3747b25..56fc995 100644
30671--- a/drivers/ide/amd74xx.c
30672+++ b/drivers/ide/amd74xx.c
30673@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
30674 .udma_mask = udma, \
30675 }
30676
30677-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
30678+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
30679 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
30680 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
30681 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
30682diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
30683index 15f0ead..cb43480 100644
30684--- a/drivers/ide/atiixp.c
30685+++ b/drivers/ide/atiixp.c
30686@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
30687 .cable_detect = atiixp_cable_detect,
30688 };
30689
30690-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
30691+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
30692 { /* 0: IXP200/300/400/700 */
30693 .name = DRV_NAME,
30694 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
30695diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
30696index 5f80312..d1fc438 100644
30697--- a/drivers/ide/cmd64x.c
30698+++ b/drivers/ide/cmd64x.c
30699@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
30700 .dma_sff_read_status = ide_dma_sff_read_status,
30701 };
30702
30703-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
30704+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
30705 { /* 0: CMD643 */
30706 .name = DRV_NAME,
30707 .init_chipset = init_chipset_cmd64x,
30708diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
30709index 2c1e5f7..1444762 100644
30710--- a/drivers/ide/cs5520.c
30711+++ b/drivers/ide/cs5520.c
30712@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
30713 .set_dma_mode = cs5520_set_dma_mode,
30714 };
30715
30716-static const struct ide_port_info cyrix_chipset __devinitdata = {
30717+static const struct ide_port_info cyrix_chipset __devinitconst = {
30718 .name = DRV_NAME,
30719 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
30720 .port_ops = &cs5520_port_ops,
30721diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
30722index 4dc4eb9..49b40ad 100644
30723--- a/drivers/ide/cs5530.c
30724+++ b/drivers/ide/cs5530.c
30725@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
30726 .udma_filter = cs5530_udma_filter,
30727 };
30728
30729-static const struct ide_port_info cs5530_chipset __devinitdata = {
30730+static const struct ide_port_info cs5530_chipset __devinitconst = {
30731 .name = DRV_NAME,
30732 .init_chipset = init_chipset_cs5530,
30733 .init_hwif = init_hwif_cs5530,
30734diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
30735index 5059faf..18d4c85 100644
30736--- a/drivers/ide/cs5535.c
30737+++ b/drivers/ide/cs5535.c
30738@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
30739 .cable_detect = cs5535_cable_detect,
30740 };
30741
30742-static const struct ide_port_info cs5535_chipset __devinitdata = {
30743+static const struct ide_port_info cs5535_chipset __devinitconst = {
30744 .name = DRV_NAME,
30745 .port_ops = &cs5535_port_ops,
30746 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
30747diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
30748index 847553f..3ffb49d 100644
30749--- a/drivers/ide/cy82c693.c
30750+++ b/drivers/ide/cy82c693.c
30751@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
30752 .set_dma_mode = cy82c693_set_dma_mode,
30753 };
30754
30755-static const struct ide_port_info cy82c693_chipset __devinitdata = {
30756+static const struct ide_port_info cy82c693_chipset __devinitconst = {
30757 .name = DRV_NAME,
30758 .init_iops = init_iops_cy82c693,
30759 .port_ops = &cy82c693_port_ops,
30760diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
30761index 58c51cd..4aec3b8 100644
30762--- a/drivers/ide/hpt366.c
30763+++ b/drivers/ide/hpt366.c
30764@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
30765 }
30766 };
30767
30768-static const struct hpt_info hpt36x __devinitdata = {
30769+static const struct hpt_info hpt36x __devinitconst = {
30770 .chip_name = "HPT36x",
30771 .chip_type = HPT36x,
30772 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
30773@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
30774 .timings = &hpt36x_timings
30775 };
30776
30777-static const struct hpt_info hpt370 __devinitdata = {
30778+static const struct hpt_info hpt370 __devinitconst = {
30779 .chip_name = "HPT370",
30780 .chip_type = HPT370,
30781 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30782@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
30783 .timings = &hpt37x_timings
30784 };
30785
30786-static const struct hpt_info hpt370a __devinitdata = {
30787+static const struct hpt_info hpt370a __devinitconst = {
30788 .chip_name = "HPT370A",
30789 .chip_type = HPT370A,
30790 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30791@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
30792 .timings = &hpt37x_timings
30793 };
30794
30795-static const struct hpt_info hpt374 __devinitdata = {
30796+static const struct hpt_info hpt374 __devinitconst = {
30797 .chip_name = "HPT374",
30798 .chip_type = HPT374,
30799 .udma_mask = ATA_UDMA5,
30800@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
30801 .timings = &hpt37x_timings
30802 };
30803
30804-static const struct hpt_info hpt372 __devinitdata = {
30805+static const struct hpt_info hpt372 __devinitconst = {
30806 .chip_name = "HPT372",
30807 .chip_type = HPT372,
30808 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30809@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
30810 .timings = &hpt37x_timings
30811 };
30812
30813-static const struct hpt_info hpt372a __devinitdata = {
30814+static const struct hpt_info hpt372a __devinitconst = {
30815 .chip_name = "HPT372A",
30816 .chip_type = HPT372A,
30817 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30818@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
30819 .timings = &hpt37x_timings
30820 };
30821
30822-static const struct hpt_info hpt302 __devinitdata = {
30823+static const struct hpt_info hpt302 __devinitconst = {
30824 .chip_name = "HPT302",
30825 .chip_type = HPT302,
30826 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30827@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
30828 .timings = &hpt37x_timings
30829 };
30830
30831-static const struct hpt_info hpt371 __devinitdata = {
30832+static const struct hpt_info hpt371 __devinitconst = {
30833 .chip_name = "HPT371",
30834 .chip_type = HPT371,
30835 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30836@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
30837 .timings = &hpt37x_timings
30838 };
30839
30840-static const struct hpt_info hpt372n __devinitdata = {
30841+static const struct hpt_info hpt372n __devinitconst = {
30842 .chip_name = "HPT372N",
30843 .chip_type = HPT372N,
30844 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30845@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
30846 .timings = &hpt37x_timings
30847 };
30848
30849-static const struct hpt_info hpt302n __devinitdata = {
30850+static const struct hpt_info hpt302n __devinitconst = {
30851 .chip_name = "HPT302N",
30852 .chip_type = HPT302N,
30853 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30854@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
30855 .timings = &hpt37x_timings
30856 };
30857
30858-static const struct hpt_info hpt371n __devinitdata = {
30859+static const struct hpt_info hpt371n __devinitconst = {
30860 .chip_name = "HPT371N",
30861 .chip_type = HPT371N,
30862 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30863@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
30864 .dma_sff_read_status = ide_dma_sff_read_status,
30865 };
30866
30867-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
30868+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
30869 { /* 0: HPT36x */
30870 .name = DRV_NAME,
30871 .init_chipset = init_chipset_hpt366,
30872diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
30873index 8126824..55a2798 100644
30874--- a/drivers/ide/ide-cd.c
30875+++ b/drivers/ide/ide-cd.c
30876@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
30877 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30878 if ((unsigned long)buf & alignment
30879 || blk_rq_bytes(rq) & q->dma_pad_mask
30880- || object_is_on_stack(buf))
30881+ || object_starts_on_stack(buf))
30882 drive->dma = 0;
30883 }
30884 }
30885diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
30886index 7f56b73..dab5b67 100644
30887--- a/drivers/ide/ide-pci-generic.c
30888+++ b/drivers/ide/ide-pci-generic.c
30889@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
30890 .udma_mask = ATA_UDMA6, \
30891 }
30892
30893-static const struct ide_port_info generic_chipsets[] __devinitdata = {
30894+static const struct ide_port_info generic_chipsets[] __devinitconst = {
30895 /* 0: Unknown */
30896 DECLARE_GENERIC_PCI_DEV(0),
30897
30898diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
30899index 560e66d..d5dd180 100644
30900--- a/drivers/ide/it8172.c
30901+++ b/drivers/ide/it8172.c
30902@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
30903 .set_dma_mode = it8172_set_dma_mode,
30904 };
30905
30906-static const struct ide_port_info it8172_port_info __devinitdata = {
30907+static const struct ide_port_info it8172_port_info __devinitconst = {
30908 .name = DRV_NAME,
30909 .port_ops = &it8172_port_ops,
30910 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
30911diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
30912index 46816ba..1847aeb 100644
30913--- a/drivers/ide/it8213.c
30914+++ b/drivers/ide/it8213.c
30915@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
30916 .cable_detect = it8213_cable_detect,
30917 };
30918
30919-static const struct ide_port_info it8213_chipset __devinitdata = {
30920+static const struct ide_port_info it8213_chipset __devinitconst = {
30921 .name = DRV_NAME,
30922 .enablebits = { {0x41, 0x80, 0x80} },
30923 .port_ops = &it8213_port_ops,
30924diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
30925index 2e3169f..c5611db 100644
30926--- a/drivers/ide/it821x.c
30927+++ b/drivers/ide/it821x.c
30928@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
30929 .cable_detect = it821x_cable_detect,
30930 };
30931
30932-static const struct ide_port_info it821x_chipset __devinitdata = {
30933+static const struct ide_port_info it821x_chipset __devinitconst = {
30934 .name = DRV_NAME,
30935 .init_chipset = init_chipset_it821x,
30936 .init_hwif = init_hwif_it821x,
30937diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
30938index 74c2c4a..efddd7d 100644
30939--- a/drivers/ide/jmicron.c
30940+++ b/drivers/ide/jmicron.c
30941@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
30942 .cable_detect = jmicron_cable_detect,
30943 };
30944
30945-static const struct ide_port_info jmicron_chipset __devinitdata = {
30946+static const struct ide_port_info jmicron_chipset __devinitconst = {
30947 .name = DRV_NAME,
30948 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
30949 .port_ops = &jmicron_port_ops,
30950diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
30951index 95327a2..73f78d8 100644
30952--- a/drivers/ide/ns87415.c
30953+++ b/drivers/ide/ns87415.c
30954@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
30955 .dma_sff_read_status = superio_dma_sff_read_status,
30956 };
30957
30958-static const struct ide_port_info ns87415_chipset __devinitdata = {
30959+static const struct ide_port_info ns87415_chipset __devinitconst = {
30960 .name = DRV_NAME,
30961 .init_hwif = init_hwif_ns87415,
30962 .tp_ops = &ns87415_tp_ops,
30963diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
30964index 1a53a4c..39edc66 100644
30965--- a/drivers/ide/opti621.c
30966+++ b/drivers/ide/opti621.c
30967@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
30968 .set_pio_mode = opti621_set_pio_mode,
30969 };
30970
30971-static const struct ide_port_info opti621_chipset __devinitdata = {
30972+static const struct ide_port_info opti621_chipset __devinitconst = {
30973 .name = DRV_NAME,
30974 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
30975 .port_ops = &opti621_port_ops,
30976diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
30977index 9546fe2..2e5ceb6 100644
30978--- a/drivers/ide/pdc202xx_new.c
30979+++ b/drivers/ide/pdc202xx_new.c
30980@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
30981 .udma_mask = udma, \
30982 }
30983
30984-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
30985+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
30986 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
30987 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
30988 };
30989diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
30990index 3a35ec6..5634510 100644
30991--- a/drivers/ide/pdc202xx_old.c
30992+++ b/drivers/ide/pdc202xx_old.c
30993@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
30994 .max_sectors = sectors, \
30995 }
30996
30997-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
30998+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
30999 { /* 0: PDC20246 */
31000 .name = DRV_NAME,
31001 .init_chipset = init_chipset_pdc202xx,
31002diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31003index 1892e81..fe0fd60 100644
31004--- a/drivers/ide/piix.c
31005+++ b/drivers/ide/piix.c
31006@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31007 .udma_mask = udma, \
31008 }
31009
31010-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31011+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31012 /* 0: MPIIX */
31013 { /*
31014 * MPIIX actually has only a single IDE channel mapped to
31015diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31016index a6414a8..c04173e 100644
31017--- a/drivers/ide/rz1000.c
31018+++ b/drivers/ide/rz1000.c
31019@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31020 }
31021 }
31022
31023-static const struct ide_port_info rz1000_chipset __devinitdata = {
31024+static const struct ide_port_info rz1000_chipset __devinitconst = {
31025 .name = DRV_NAME,
31026 .host_flags = IDE_HFLAG_NO_DMA,
31027 };
31028diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31029index 356b9b5..d4758eb 100644
31030--- a/drivers/ide/sc1200.c
31031+++ b/drivers/ide/sc1200.c
31032@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31033 .dma_sff_read_status = ide_dma_sff_read_status,
31034 };
31035
31036-static const struct ide_port_info sc1200_chipset __devinitdata = {
31037+static const struct ide_port_info sc1200_chipset __devinitconst = {
31038 .name = DRV_NAME,
31039 .port_ops = &sc1200_port_ops,
31040 .dma_ops = &sc1200_dma_ops,
31041diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31042index b7f5b0c..9701038 100644
31043--- a/drivers/ide/scc_pata.c
31044+++ b/drivers/ide/scc_pata.c
31045@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31046 .dma_sff_read_status = scc_dma_sff_read_status,
31047 };
31048
31049-static const struct ide_port_info scc_chipset __devinitdata = {
31050+static const struct ide_port_info scc_chipset __devinitconst = {
31051 .name = "sccIDE",
31052 .init_iops = init_iops_scc,
31053 .init_dma = scc_init_dma,
31054diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31055index 35fb8da..24d72ef 100644
31056--- a/drivers/ide/serverworks.c
31057+++ b/drivers/ide/serverworks.c
31058@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31059 .cable_detect = svwks_cable_detect,
31060 };
31061
31062-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31063+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31064 { /* 0: OSB4 */
31065 .name = DRV_NAME,
31066 .init_chipset = init_chipset_svwks,
31067diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31068index ddeda44..46f7e30 100644
31069--- a/drivers/ide/siimage.c
31070+++ b/drivers/ide/siimage.c
31071@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31072 .udma_mask = ATA_UDMA6, \
31073 }
31074
31075-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31076+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31077 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31078 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31079 };
31080diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31081index 4a00225..09e61b4 100644
31082--- a/drivers/ide/sis5513.c
31083+++ b/drivers/ide/sis5513.c
31084@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31085 .cable_detect = sis_cable_detect,
31086 };
31087
31088-static const struct ide_port_info sis5513_chipset __devinitdata = {
31089+static const struct ide_port_info sis5513_chipset __devinitconst = {
31090 .name = DRV_NAME,
31091 .init_chipset = init_chipset_sis5513,
31092 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31093diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31094index f21dc2a..d051cd2 100644
31095--- a/drivers/ide/sl82c105.c
31096+++ b/drivers/ide/sl82c105.c
31097@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31098 .dma_sff_read_status = ide_dma_sff_read_status,
31099 };
31100
31101-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31102+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31103 .name = DRV_NAME,
31104 .init_chipset = init_chipset_sl82c105,
31105 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31106diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31107index 864ffe0..863a5e9 100644
31108--- a/drivers/ide/slc90e66.c
31109+++ b/drivers/ide/slc90e66.c
31110@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31111 .cable_detect = slc90e66_cable_detect,
31112 };
31113
31114-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31115+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31116 .name = DRV_NAME,
31117 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31118 .port_ops = &slc90e66_port_ops,
31119diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31120index 4799d5c..1794678 100644
31121--- a/drivers/ide/tc86c001.c
31122+++ b/drivers/ide/tc86c001.c
31123@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31124 .dma_sff_read_status = ide_dma_sff_read_status,
31125 };
31126
31127-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31128+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31129 .name = DRV_NAME,
31130 .init_hwif = init_hwif_tc86c001,
31131 .port_ops = &tc86c001_port_ops,
31132diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31133index 281c914..55ce1b8 100644
31134--- a/drivers/ide/triflex.c
31135+++ b/drivers/ide/triflex.c
31136@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31137 .set_dma_mode = triflex_set_mode,
31138 };
31139
31140-static const struct ide_port_info triflex_device __devinitdata = {
31141+static const struct ide_port_info triflex_device __devinitconst = {
31142 .name = DRV_NAME,
31143 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31144 .port_ops = &triflex_port_ops,
31145diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31146index 4b42ca0..e494a98 100644
31147--- a/drivers/ide/trm290.c
31148+++ b/drivers/ide/trm290.c
31149@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31150 .dma_check = trm290_dma_check,
31151 };
31152
31153-static const struct ide_port_info trm290_chipset __devinitdata = {
31154+static const struct ide_port_info trm290_chipset __devinitconst = {
31155 .name = DRV_NAME,
31156 .init_hwif = init_hwif_trm290,
31157 .tp_ops = &trm290_tp_ops,
31158diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31159index f46f49c..eb77678 100644
31160--- a/drivers/ide/via82cxxx.c
31161+++ b/drivers/ide/via82cxxx.c
31162@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31163 .cable_detect = via82cxxx_cable_detect,
31164 };
31165
31166-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31167+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31168 .name = DRV_NAME,
31169 .init_chipset = init_chipset_via82cxxx,
31170 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31171diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31172index 73d4531..c90cd2d 100644
31173--- a/drivers/ieee802154/fakehard.c
31174+++ b/drivers/ieee802154/fakehard.c
31175@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31176 phy->transmit_power = 0xbf;
31177
31178 dev->netdev_ops = &fake_ops;
31179- dev->ml_priv = &fake_mlme;
31180+ dev->ml_priv = (void *)&fake_mlme;
31181
31182 priv = netdev_priv(dev);
31183 priv->phy = phy;
31184diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31185index c889aae..6cf5aa7 100644
31186--- a/drivers/infiniband/core/cm.c
31187+++ b/drivers/infiniband/core/cm.c
31188@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31189
31190 struct cm_counter_group {
31191 struct kobject obj;
31192- atomic_long_t counter[CM_ATTR_COUNT];
31193+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31194 };
31195
31196 struct cm_counter_attribute {
31197@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31198 struct ib_mad_send_buf *msg = NULL;
31199 int ret;
31200
31201- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31202+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31203 counter[CM_REQ_COUNTER]);
31204
31205 /* Quick state check to discard duplicate REQs. */
31206@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31207 if (!cm_id_priv)
31208 return;
31209
31210- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31211+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31212 counter[CM_REP_COUNTER]);
31213 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31214 if (ret)
31215@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31216 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31217 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31218 spin_unlock_irq(&cm_id_priv->lock);
31219- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31220+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31221 counter[CM_RTU_COUNTER]);
31222 goto out;
31223 }
31224@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31225 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31226 dreq_msg->local_comm_id);
31227 if (!cm_id_priv) {
31228- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31229+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31230 counter[CM_DREQ_COUNTER]);
31231 cm_issue_drep(work->port, work->mad_recv_wc);
31232 return -EINVAL;
31233@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31234 case IB_CM_MRA_REP_RCVD:
31235 break;
31236 case IB_CM_TIMEWAIT:
31237- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31238+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31239 counter[CM_DREQ_COUNTER]);
31240 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31241 goto unlock;
31242@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31243 cm_free_msg(msg);
31244 goto deref;
31245 case IB_CM_DREQ_RCVD:
31246- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31247+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31248 counter[CM_DREQ_COUNTER]);
31249 goto unlock;
31250 default:
31251@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31252 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31253 cm_id_priv->msg, timeout)) {
31254 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31255- atomic_long_inc(&work->port->
31256+ atomic_long_inc_unchecked(&work->port->
31257 counter_group[CM_RECV_DUPLICATES].
31258 counter[CM_MRA_COUNTER]);
31259 goto out;
31260@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31261 break;
31262 case IB_CM_MRA_REQ_RCVD:
31263 case IB_CM_MRA_REP_RCVD:
31264- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31265+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31266 counter[CM_MRA_COUNTER]);
31267 /* fall through */
31268 default:
31269@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31270 case IB_CM_LAP_IDLE:
31271 break;
31272 case IB_CM_MRA_LAP_SENT:
31273- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31274+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31275 counter[CM_LAP_COUNTER]);
31276 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31277 goto unlock;
31278@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31279 cm_free_msg(msg);
31280 goto deref;
31281 case IB_CM_LAP_RCVD:
31282- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31283+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31284 counter[CM_LAP_COUNTER]);
31285 goto unlock;
31286 default:
31287@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31288 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31289 if (cur_cm_id_priv) {
31290 spin_unlock_irq(&cm.lock);
31291- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31292+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31293 counter[CM_SIDR_REQ_COUNTER]);
31294 goto out; /* Duplicate message. */
31295 }
31296@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31297 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31298 msg->retries = 1;
31299
31300- atomic_long_add(1 + msg->retries,
31301+ atomic_long_add_unchecked(1 + msg->retries,
31302 &port->counter_group[CM_XMIT].counter[attr_index]);
31303 if (msg->retries)
31304- atomic_long_add(msg->retries,
31305+ atomic_long_add_unchecked(msg->retries,
31306 &port->counter_group[CM_XMIT_RETRIES].
31307 counter[attr_index]);
31308
31309@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31310 }
31311
31312 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31313- atomic_long_inc(&port->counter_group[CM_RECV].
31314+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31315 counter[attr_id - CM_ATTR_ID_OFFSET]);
31316
31317 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31318@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31319 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31320
31321 return sprintf(buf, "%ld\n",
31322- atomic_long_read(&group->counter[cm_attr->index]));
31323+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31324 }
31325
31326 static const struct sysfs_ops cm_counter_ops = {
31327diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31328index 176c8f9..2627b62 100644
31329--- a/drivers/infiniband/core/fmr_pool.c
31330+++ b/drivers/infiniband/core/fmr_pool.c
31331@@ -98,8 +98,8 @@ struct ib_fmr_pool {
31332
31333 struct task_struct *thread;
31334
31335- atomic_t req_ser;
31336- atomic_t flush_ser;
31337+ atomic_unchecked_t req_ser;
31338+ atomic_unchecked_t flush_ser;
31339
31340 wait_queue_head_t force_wait;
31341 };
31342@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31343 struct ib_fmr_pool *pool = pool_ptr;
31344
31345 do {
31346- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31347+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31348 ib_fmr_batch_release(pool);
31349
31350- atomic_inc(&pool->flush_ser);
31351+ atomic_inc_unchecked(&pool->flush_ser);
31352 wake_up_interruptible(&pool->force_wait);
31353
31354 if (pool->flush_function)
31355@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31356 }
31357
31358 set_current_state(TASK_INTERRUPTIBLE);
31359- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31360+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31361 !kthread_should_stop())
31362 schedule();
31363 __set_current_state(TASK_RUNNING);
31364@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31365 pool->dirty_watermark = params->dirty_watermark;
31366 pool->dirty_len = 0;
31367 spin_lock_init(&pool->pool_lock);
31368- atomic_set(&pool->req_ser, 0);
31369- atomic_set(&pool->flush_ser, 0);
31370+ atomic_set_unchecked(&pool->req_ser, 0);
31371+ atomic_set_unchecked(&pool->flush_ser, 0);
31372 init_waitqueue_head(&pool->force_wait);
31373
31374 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31375@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31376 }
31377 spin_unlock_irq(&pool->pool_lock);
31378
31379- serial = atomic_inc_return(&pool->req_ser);
31380+ serial = atomic_inc_return_unchecked(&pool->req_ser);
31381 wake_up_process(pool->thread);
31382
31383 if (wait_event_interruptible(pool->force_wait,
31384- atomic_read(&pool->flush_ser) - serial >= 0))
31385+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31386 return -EINTR;
31387
31388 return 0;
31389@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31390 } else {
31391 list_add_tail(&fmr->list, &pool->dirty_list);
31392 if (++pool->dirty_len >= pool->dirty_watermark) {
31393- atomic_inc(&pool->req_ser);
31394+ atomic_inc_unchecked(&pool->req_ser);
31395 wake_up_process(pool->thread);
31396 }
31397 }
31398diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
31399index 40c8353..946b0e4 100644
31400--- a/drivers/infiniband/hw/cxgb4/mem.c
31401+++ b/drivers/infiniband/hw/cxgb4/mem.c
31402@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31403 int err;
31404 struct fw_ri_tpte tpt;
31405 u32 stag_idx;
31406- static atomic_t key;
31407+ static atomic_unchecked_t key;
31408
31409 if (c4iw_fatal_error(rdev))
31410 return -EIO;
31411@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31412 &rdev->resource.tpt_fifo_lock);
31413 if (!stag_idx)
31414 return -ENOMEM;
31415- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
31416+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
31417 }
31418 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
31419 __func__, stag_state, type, pdid, stag_idx);
31420diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
31421index 79b3dbc..96e5fcc 100644
31422--- a/drivers/infiniband/hw/ipath/ipath_rc.c
31423+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
31424@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31425 struct ib_atomic_eth *ateth;
31426 struct ipath_ack_entry *e;
31427 u64 vaddr;
31428- atomic64_t *maddr;
31429+ atomic64_unchecked_t *maddr;
31430 u64 sdata;
31431 u32 rkey;
31432 u8 next;
31433@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31434 IB_ACCESS_REMOTE_ATOMIC)))
31435 goto nack_acc_unlck;
31436 /* Perform atomic OP and save result. */
31437- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31438+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31439 sdata = be64_to_cpu(ateth->swap_data);
31440 e = &qp->s_ack_queue[qp->r_head_ack_queue];
31441 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
31442- (u64) atomic64_add_return(sdata, maddr) - sdata :
31443+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31444 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31445 be64_to_cpu(ateth->compare_data),
31446 sdata);
31447diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
31448index 1f95bba..9530f87 100644
31449--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
31450+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
31451@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
31452 unsigned long flags;
31453 struct ib_wc wc;
31454 u64 sdata;
31455- atomic64_t *maddr;
31456+ atomic64_unchecked_t *maddr;
31457 enum ib_wc_status send_status;
31458
31459 /*
31460@@ -382,11 +382,11 @@ again:
31461 IB_ACCESS_REMOTE_ATOMIC)))
31462 goto acc_err;
31463 /* Perform atomic OP and save result. */
31464- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31465+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31466 sdata = wqe->wr.wr.atomic.compare_add;
31467 *(u64 *) sqp->s_sge.sge.vaddr =
31468 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
31469- (u64) atomic64_add_return(sdata, maddr) - sdata :
31470+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31471 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31472 sdata, wqe->wr.wr.atomic.swap);
31473 goto send_comp;
31474diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
31475index 7140199..da60063 100644
31476--- a/drivers/infiniband/hw/nes/nes.c
31477+++ b/drivers/infiniband/hw/nes/nes.c
31478@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
31479 LIST_HEAD(nes_adapter_list);
31480 static LIST_HEAD(nes_dev_list);
31481
31482-atomic_t qps_destroyed;
31483+atomic_unchecked_t qps_destroyed;
31484
31485 static unsigned int ee_flsh_adapter;
31486 static unsigned int sysfs_nonidx_addr;
31487@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
31488 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
31489 struct nes_adapter *nesadapter = nesdev->nesadapter;
31490
31491- atomic_inc(&qps_destroyed);
31492+ atomic_inc_unchecked(&qps_destroyed);
31493
31494 /* Free the control structures */
31495
31496diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
31497index c438e46..ca30356 100644
31498--- a/drivers/infiniband/hw/nes/nes.h
31499+++ b/drivers/infiniband/hw/nes/nes.h
31500@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
31501 extern unsigned int wqm_quanta;
31502 extern struct list_head nes_adapter_list;
31503
31504-extern atomic_t cm_connects;
31505-extern atomic_t cm_accepts;
31506-extern atomic_t cm_disconnects;
31507-extern atomic_t cm_closes;
31508-extern atomic_t cm_connecteds;
31509-extern atomic_t cm_connect_reqs;
31510-extern atomic_t cm_rejects;
31511-extern atomic_t mod_qp_timouts;
31512-extern atomic_t qps_created;
31513-extern atomic_t qps_destroyed;
31514-extern atomic_t sw_qps_destroyed;
31515+extern atomic_unchecked_t cm_connects;
31516+extern atomic_unchecked_t cm_accepts;
31517+extern atomic_unchecked_t cm_disconnects;
31518+extern atomic_unchecked_t cm_closes;
31519+extern atomic_unchecked_t cm_connecteds;
31520+extern atomic_unchecked_t cm_connect_reqs;
31521+extern atomic_unchecked_t cm_rejects;
31522+extern atomic_unchecked_t mod_qp_timouts;
31523+extern atomic_unchecked_t qps_created;
31524+extern atomic_unchecked_t qps_destroyed;
31525+extern atomic_unchecked_t sw_qps_destroyed;
31526 extern u32 mh_detected;
31527 extern u32 mh_pauses_sent;
31528 extern u32 cm_packets_sent;
31529@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
31530 extern u32 cm_packets_received;
31531 extern u32 cm_packets_dropped;
31532 extern u32 cm_packets_retrans;
31533-extern atomic_t cm_listens_created;
31534-extern atomic_t cm_listens_destroyed;
31535+extern atomic_unchecked_t cm_listens_created;
31536+extern atomic_unchecked_t cm_listens_destroyed;
31537 extern u32 cm_backlog_drops;
31538-extern atomic_t cm_loopbacks;
31539-extern atomic_t cm_nodes_created;
31540-extern atomic_t cm_nodes_destroyed;
31541-extern atomic_t cm_accel_dropped_pkts;
31542-extern atomic_t cm_resets_recvd;
31543-extern atomic_t pau_qps_created;
31544-extern atomic_t pau_qps_destroyed;
31545+extern atomic_unchecked_t cm_loopbacks;
31546+extern atomic_unchecked_t cm_nodes_created;
31547+extern atomic_unchecked_t cm_nodes_destroyed;
31548+extern atomic_unchecked_t cm_accel_dropped_pkts;
31549+extern atomic_unchecked_t cm_resets_recvd;
31550+extern atomic_unchecked_t pau_qps_created;
31551+extern atomic_unchecked_t pau_qps_destroyed;
31552
31553 extern u32 int_mod_timer_init;
31554 extern u32 int_mod_cq_depth_256;
31555diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
31556index a4972ab..1bcfc31 100644
31557--- a/drivers/infiniband/hw/nes/nes_cm.c
31558+++ b/drivers/infiniband/hw/nes/nes_cm.c
31559@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
31560 u32 cm_packets_retrans;
31561 u32 cm_packets_created;
31562 u32 cm_packets_received;
31563-atomic_t cm_listens_created;
31564-atomic_t cm_listens_destroyed;
31565+atomic_unchecked_t cm_listens_created;
31566+atomic_unchecked_t cm_listens_destroyed;
31567 u32 cm_backlog_drops;
31568-atomic_t cm_loopbacks;
31569-atomic_t cm_nodes_created;
31570-atomic_t cm_nodes_destroyed;
31571-atomic_t cm_accel_dropped_pkts;
31572-atomic_t cm_resets_recvd;
31573+atomic_unchecked_t cm_loopbacks;
31574+atomic_unchecked_t cm_nodes_created;
31575+atomic_unchecked_t cm_nodes_destroyed;
31576+atomic_unchecked_t cm_accel_dropped_pkts;
31577+atomic_unchecked_t cm_resets_recvd;
31578
31579 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
31580 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
31581@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
31582
31583 static struct nes_cm_core *g_cm_core;
31584
31585-atomic_t cm_connects;
31586-atomic_t cm_accepts;
31587-atomic_t cm_disconnects;
31588-atomic_t cm_closes;
31589-atomic_t cm_connecteds;
31590-atomic_t cm_connect_reqs;
31591-atomic_t cm_rejects;
31592+atomic_unchecked_t cm_connects;
31593+atomic_unchecked_t cm_accepts;
31594+atomic_unchecked_t cm_disconnects;
31595+atomic_unchecked_t cm_closes;
31596+atomic_unchecked_t cm_connecteds;
31597+atomic_unchecked_t cm_connect_reqs;
31598+atomic_unchecked_t cm_rejects;
31599
31600 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
31601 {
31602@@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
31603 kfree(listener);
31604 listener = NULL;
31605 ret = 0;
31606- atomic_inc(&cm_listens_destroyed);
31607+ atomic_inc_unchecked(&cm_listens_destroyed);
31608 } else {
31609 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
31610 }
31611@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
31612 cm_node->rem_mac);
31613
31614 add_hte_node(cm_core, cm_node);
31615- atomic_inc(&cm_nodes_created);
31616+ atomic_inc_unchecked(&cm_nodes_created);
31617
31618 return cm_node;
31619 }
31620@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
31621 }
31622
31623 atomic_dec(&cm_core->node_cnt);
31624- atomic_inc(&cm_nodes_destroyed);
31625+ atomic_inc_unchecked(&cm_nodes_destroyed);
31626 nesqp = cm_node->nesqp;
31627 if (nesqp) {
31628 nesqp->cm_node = NULL;
31629@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
31630
31631 static void drop_packet(struct sk_buff *skb)
31632 {
31633- atomic_inc(&cm_accel_dropped_pkts);
31634+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31635 dev_kfree_skb_any(skb);
31636 }
31637
31638@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
31639 {
31640
31641 int reset = 0; /* whether to send reset in case of err.. */
31642- atomic_inc(&cm_resets_recvd);
31643+ atomic_inc_unchecked(&cm_resets_recvd);
31644 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31645 " refcnt=%d\n", cm_node, cm_node->state,
31646 atomic_read(&cm_node->ref_count));
31647@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
31648 rem_ref_cm_node(cm_node->cm_core, cm_node);
31649 return NULL;
31650 }
31651- atomic_inc(&cm_loopbacks);
31652+ atomic_inc_unchecked(&cm_loopbacks);
31653 loopbackremotenode->loopbackpartner = cm_node;
31654 loopbackremotenode->tcp_cntxt.rcv_wscale =
31655 NES_CM_DEFAULT_RCV_WND_SCALE;
31656@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
31657 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
31658 else {
31659 rem_ref_cm_node(cm_core, cm_node);
31660- atomic_inc(&cm_accel_dropped_pkts);
31661+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31662 dev_kfree_skb_any(skb);
31663 }
31664 break;
31665@@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31666
31667 if ((cm_id) && (cm_id->event_handler)) {
31668 if (issue_disconn) {
31669- atomic_inc(&cm_disconnects);
31670+ atomic_inc_unchecked(&cm_disconnects);
31671 cm_event.event = IW_CM_EVENT_DISCONNECT;
31672 cm_event.status = disconn_status;
31673 cm_event.local_addr = cm_id->local_addr;
31674@@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31675 }
31676
31677 if (issue_close) {
31678- atomic_inc(&cm_closes);
31679+ atomic_inc_unchecked(&cm_closes);
31680 nes_disconnect(nesqp, 1);
31681
31682 cm_id->provider_data = nesqp;
31683@@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31684
31685 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31686 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31687- atomic_inc(&cm_accepts);
31688+ atomic_inc_unchecked(&cm_accepts);
31689
31690 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31691 netdev_refcnt_read(nesvnic->netdev));
31692@@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
31693 struct nes_cm_core *cm_core;
31694 u8 *start_buff;
31695
31696- atomic_inc(&cm_rejects);
31697+ atomic_inc_unchecked(&cm_rejects);
31698 cm_node = (struct nes_cm_node *)cm_id->provider_data;
31699 loopback = cm_node->loopbackpartner;
31700 cm_core = cm_node->cm_core;
31701@@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31702 ntohl(cm_id->local_addr.sin_addr.s_addr),
31703 ntohs(cm_id->local_addr.sin_port));
31704
31705- atomic_inc(&cm_connects);
31706+ atomic_inc_unchecked(&cm_connects);
31707 nesqp->active_conn = 1;
31708
31709 /* cache the cm_id in the qp */
31710@@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
31711 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
31712 return err;
31713 }
31714- atomic_inc(&cm_listens_created);
31715+ atomic_inc_unchecked(&cm_listens_created);
31716 }
31717
31718 cm_id->add_ref(cm_id);
31719@@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
31720
31721 if (nesqp->destroyed)
31722 return;
31723- atomic_inc(&cm_connecteds);
31724+ atomic_inc_unchecked(&cm_connecteds);
31725 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31726 " local port 0x%04X. jiffies = %lu.\n",
31727 nesqp->hwqp.qp_id,
31728@@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
31729
31730 cm_id->add_ref(cm_id);
31731 ret = cm_id->event_handler(cm_id, &cm_event);
31732- atomic_inc(&cm_closes);
31733+ atomic_inc_unchecked(&cm_closes);
31734 cm_event.event = IW_CM_EVENT_CLOSE;
31735 cm_event.status = 0;
31736 cm_event.provider_data = cm_id->provider_data;
31737@@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
31738 return;
31739 cm_id = cm_node->cm_id;
31740
31741- atomic_inc(&cm_connect_reqs);
31742+ atomic_inc_unchecked(&cm_connect_reqs);
31743 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31744 cm_node, cm_id, jiffies);
31745
31746@@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
31747 return;
31748 cm_id = cm_node->cm_id;
31749
31750- atomic_inc(&cm_connect_reqs);
31751+ atomic_inc_unchecked(&cm_connect_reqs);
31752 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31753 cm_node, cm_id, jiffies);
31754
31755diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
31756index 3ba7be3..c81f6ff 100644
31757--- a/drivers/infiniband/hw/nes/nes_mgt.c
31758+++ b/drivers/infiniband/hw/nes/nes_mgt.c
31759@@ -40,8 +40,8 @@
31760 #include "nes.h"
31761 #include "nes_mgt.h"
31762
31763-atomic_t pau_qps_created;
31764-atomic_t pau_qps_destroyed;
31765+atomic_unchecked_t pau_qps_created;
31766+atomic_unchecked_t pau_qps_destroyed;
31767
31768 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
31769 {
31770@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
31771 {
31772 struct sk_buff *skb;
31773 unsigned long flags;
31774- atomic_inc(&pau_qps_destroyed);
31775+ atomic_inc_unchecked(&pau_qps_destroyed);
31776
31777 /* Free packets that have not yet been forwarded */
31778 /* Lock is acquired by skb_dequeue when removing the skb */
31779@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
31780 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
31781 skb_queue_head_init(&nesqp->pau_list);
31782 spin_lock_init(&nesqp->pau_lock);
31783- atomic_inc(&pau_qps_created);
31784+ atomic_inc_unchecked(&pau_qps_created);
31785 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
31786 }
31787
31788diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
31789index f3a3ecf..57d311d 100644
31790--- a/drivers/infiniband/hw/nes/nes_nic.c
31791+++ b/drivers/infiniband/hw/nes/nes_nic.c
31792@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
31793 target_stat_values[++index] = mh_detected;
31794 target_stat_values[++index] = mh_pauses_sent;
31795 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31796- target_stat_values[++index] = atomic_read(&cm_connects);
31797- target_stat_values[++index] = atomic_read(&cm_accepts);
31798- target_stat_values[++index] = atomic_read(&cm_disconnects);
31799- target_stat_values[++index] = atomic_read(&cm_connecteds);
31800- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31801- target_stat_values[++index] = atomic_read(&cm_rejects);
31802- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31803- target_stat_values[++index] = atomic_read(&qps_created);
31804- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31805- target_stat_values[++index] = atomic_read(&qps_destroyed);
31806- target_stat_values[++index] = atomic_read(&cm_closes);
31807+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31808+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31809+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31810+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31811+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31812+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31813+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31814+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31815+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31816+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31817+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31818 target_stat_values[++index] = cm_packets_sent;
31819 target_stat_values[++index] = cm_packets_bounced;
31820 target_stat_values[++index] = cm_packets_created;
31821 target_stat_values[++index] = cm_packets_received;
31822 target_stat_values[++index] = cm_packets_dropped;
31823 target_stat_values[++index] = cm_packets_retrans;
31824- target_stat_values[++index] = atomic_read(&cm_listens_created);
31825- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
31826+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
31827+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
31828 target_stat_values[++index] = cm_backlog_drops;
31829- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31830- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31831- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31832- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31833- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31834+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31835+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31836+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31837+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31838+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31839 target_stat_values[++index] = nesadapter->free_4kpbl;
31840 target_stat_values[++index] = nesadapter->free_256pbl;
31841 target_stat_values[++index] = int_mod_timer_init;
31842 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
31843 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
31844 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
31845- target_stat_values[++index] = atomic_read(&pau_qps_created);
31846- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
31847+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
31848+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
31849 }
31850
31851 /**
31852diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
31853index 0927b5c..ed67986 100644
31854--- a/drivers/infiniband/hw/nes/nes_verbs.c
31855+++ b/drivers/infiniband/hw/nes/nes_verbs.c
31856@@ -46,9 +46,9 @@
31857
31858 #include <rdma/ib_umem.h>
31859
31860-atomic_t mod_qp_timouts;
31861-atomic_t qps_created;
31862-atomic_t sw_qps_destroyed;
31863+atomic_unchecked_t mod_qp_timouts;
31864+atomic_unchecked_t qps_created;
31865+atomic_unchecked_t sw_qps_destroyed;
31866
31867 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31868
31869@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
31870 if (init_attr->create_flags)
31871 return ERR_PTR(-EINVAL);
31872
31873- atomic_inc(&qps_created);
31874+ atomic_inc_unchecked(&qps_created);
31875 switch (init_attr->qp_type) {
31876 case IB_QPT_RC:
31877 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31878@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
31879 struct iw_cm_event cm_event;
31880 int ret = 0;
31881
31882- atomic_inc(&sw_qps_destroyed);
31883+ atomic_inc_unchecked(&sw_qps_destroyed);
31884 nesqp->destroyed = 1;
31885
31886 /* Blow away the connection if it exists. */
31887diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
31888index b881bdc..c2e360c 100644
31889--- a/drivers/infiniband/hw/qib/qib.h
31890+++ b/drivers/infiniband/hw/qib/qib.h
31891@@ -51,6 +51,7 @@
31892 #include <linux/completion.h>
31893 #include <linux/kref.h>
31894 #include <linux/sched.h>
31895+#include <linux/slab.h>
31896
31897 #include "qib_common.h"
31898 #include "qib_verbs.h"
31899diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
31900index c351aa4..e6967c2 100644
31901--- a/drivers/input/gameport/gameport.c
31902+++ b/drivers/input/gameport/gameport.c
31903@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
31904 */
31905 static void gameport_init_port(struct gameport *gameport)
31906 {
31907- static atomic_t gameport_no = ATOMIC_INIT(0);
31908+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31909
31910 __module_get(THIS_MODULE);
31911
31912 mutex_init(&gameport->drv_mutex);
31913 device_initialize(&gameport->dev);
31914 dev_set_name(&gameport->dev, "gameport%lu",
31915- (unsigned long)atomic_inc_return(&gameport_no) - 1);
31916+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31917 gameport->dev.bus = &gameport_bus;
31918 gameport->dev.release = gameport_release_port;
31919 if (gameport->parent)
31920diff --git a/drivers/input/input.c b/drivers/input/input.c
31921index 1f78c95..3cddc6c 100644
31922--- a/drivers/input/input.c
31923+++ b/drivers/input/input.c
31924@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
31925 */
31926 int input_register_device(struct input_dev *dev)
31927 {
31928- static atomic_t input_no = ATOMIC_INIT(0);
31929+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31930 struct input_handler *handler;
31931 const char *path;
31932 int error;
31933@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
31934 dev->setkeycode = input_default_setkeycode;
31935
31936 dev_set_name(&dev->dev, "input%ld",
31937- (unsigned long) atomic_inc_return(&input_no) - 1);
31938+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31939
31940 error = device_add(&dev->dev);
31941 if (error)
31942diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
31943index b8d8611..7a4a04b 100644
31944--- a/drivers/input/joystick/sidewinder.c
31945+++ b/drivers/input/joystick/sidewinder.c
31946@@ -30,6 +30,7 @@
31947 #include <linux/kernel.h>
31948 #include <linux/module.h>
31949 #include <linux/slab.h>
31950+#include <linux/sched.h>
31951 #include <linux/init.h>
31952 #include <linux/input.h>
31953 #include <linux/gameport.h>
31954diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
31955index fd7a0d5..a4af10c 100644
31956--- a/drivers/input/joystick/xpad.c
31957+++ b/drivers/input/joystick/xpad.c
31958@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
31959
31960 static int xpad_led_probe(struct usb_xpad *xpad)
31961 {
31962- static atomic_t led_seq = ATOMIC_INIT(0);
31963+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31964 long led_no;
31965 struct xpad_led *led;
31966 struct led_classdev *led_cdev;
31967@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
31968 if (!led)
31969 return -ENOMEM;
31970
31971- led_no = (long)atomic_inc_return(&led_seq) - 1;
31972+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31973
31974 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31975 led->xpad = xpad;
31976diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
31977index 0110b5a..d3ad144 100644
31978--- a/drivers/input/mousedev.c
31979+++ b/drivers/input/mousedev.c
31980@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
31981
31982 spin_unlock_irq(&client->packet_lock);
31983
31984- if (copy_to_user(buffer, data, count))
31985+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
31986 return -EFAULT;
31987
31988 return count;
31989diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
31990index ba70058..571d25d 100644
31991--- a/drivers/input/serio/serio.c
31992+++ b/drivers/input/serio/serio.c
31993@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
31994 */
31995 static void serio_init_port(struct serio *serio)
31996 {
31997- static atomic_t serio_no = ATOMIC_INIT(0);
31998+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31999
32000 __module_get(THIS_MODULE);
32001
32002@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32003 mutex_init(&serio->drv_mutex);
32004 device_initialize(&serio->dev);
32005 dev_set_name(&serio->dev, "serio%ld",
32006- (long)atomic_inc_return(&serio_no) - 1);
32007+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32008 serio->dev.bus = &serio_bus;
32009 serio->dev.release = serio_release_port;
32010 serio->dev.groups = serio_device_attr_groups;
32011diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32012index e44933d..9ba484a 100644
32013--- a/drivers/isdn/capi/capi.c
32014+++ b/drivers/isdn/capi/capi.c
32015@@ -83,8 +83,8 @@ struct capiminor {
32016
32017 struct capi20_appl *ap;
32018 u32 ncci;
32019- atomic_t datahandle;
32020- atomic_t msgid;
32021+ atomic_unchecked_t datahandle;
32022+ atomic_unchecked_t msgid;
32023
32024 struct tty_port port;
32025 int ttyinstop;
32026@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32027 capimsg_setu16(s, 2, mp->ap->applid);
32028 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32029 capimsg_setu8 (s, 5, CAPI_RESP);
32030- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32031+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32032 capimsg_setu32(s, 8, mp->ncci);
32033 capimsg_setu16(s, 12, datahandle);
32034 }
32035@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32036 mp->outbytes -= len;
32037 spin_unlock_bh(&mp->outlock);
32038
32039- datahandle = atomic_inc_return(&mp->datahandle);
32040+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32041 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32042 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32043 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32044 capimsg_setu16(skb->data, 2, mp->ap->applid);
32045 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32046 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32047- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32048+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32049 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32050 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32051 capimsg_setu16(skb->data, 16, len); /* Data length */
32052diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32053index db621db..825ea1a 100644
32054--- a/drivers/isdn/gigaset/common.c
32055+++ b/drivers/isdn/gigaset/common.c
32056@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32057 cs->commands_pending = 0;
32058 cs->cur_at_seq = 0;
32059 cs->gotfwver = -1;
32060- cs->open_count = 0;
32061+ local_set(&cs->open_count, 0);
32062 cs->dev = NULL;
32063 cs->tty = NULL;
32064 cs->tty_dev = NULL;
32065diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32066index 212efaf..f187c6b 100644
32067--- a/drivers/isdn/gigaset/gigaset.h
32068+++ b/drivers/isdn/gigaset/gigaset.h
32069@@ -35,6 +35,7 @@
32070 #include <linux/tty_driver.h>
32071 #include <linux/list.h>
32072 #include <linux/atomic.h>
32073+#include <asm/local.h>
32074
32075 #define GIG_VERSION {0, 5, 0, 0}
32076 #define GIG_COMPAT {0, 4, 0, 0}
32077@@ -433,7 +434,7 @@ struct cardstate {
32078 spinlock_t cmdlock;
32079 unsigned curlen, cmdbytes;
32080
32081- unsigned open_count;
32082+ local_t open_count;
32083 struct tty_struct *tty;
32084 struct tasklet_struct if_wake_tasklet;
32085 unsigned control_state;
32086diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32087index ee0a549..a7c9798 100644
32088--- a/drivers/isdn/gigaset/interface.c
32089+++ b/drivers/isdn/gigaset/interface.c
32090@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32091 }
32092 tty->driver_data = cs;
32093
32094- ++cs->open_count;
32095-
32096- if (cs->open_count == 1) {
32097+ if (local_inc_return(&cs->open_count) == 1) {
32098 spin_lock_irqsave(&cs->lock, flags);
32099 cs->tty = tty;
32100 spin_unlock_irqrestore(&cs->lock, flags);
32101@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32102
32103 if (!cs->connected)
32104 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32105- else if (!cs->open_count)
32106+ else if (!local_read(&cs->open_count))
32107 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32108 else {
32109- if (!--cs->open_count) {
32110+ if (!local_dec_return(&cs->open_count)) {
32111 spin_lock_irqsave(&cs->lock, flags);
32112 cs->tty = NULL;
32113 spin_unlock_irqrestore(&cs->lock, flags);
32114@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32115 if (!cs->connected) {
32116 gig_dbg(DEBUG_IF, "not connected");
32117 retval = -ENODEV;
32118- } else if (!cs->open_count)
32119+ } else if (!local_read(&cs->open_count))
32120 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32121 else {
32122 retval = 0;
32123@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32124 retval = -ENODEV;
32125 goto done;
32126 }
32127- if (!cs->open_count) {
32128+ if (!local_read(&cs->open_count)) {
32129 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32130 retval = -ENODEV;
32131 goto done;
32132@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32133 if (!cs->connected) {
32134 gig_dbg(DEBUG_IF, "not connected");
32135 retval = -ENODEV;
32136- } else if (!cs->open_count)
32137+ } else if (!local_read(&cs->open_count))
32138 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32139 else if (cs->mstate != MS_LOCKED) {
32140 dev_warn(cs->dev, "can't write to unlocked device\n");
32141@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32142
32143 if (!cs->connected)
32144 gig_dbg(DEBUG_IF, "not connected");
32145- else if (!cs->open_count)
32146+ else if (!local_read(&cs->open_count))
32147 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32148 else if (cs->mstate != MS_LOCKED)
32149 dev_warn(cs->dev, "can't write to unlocked device\n");
32150@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32151
32152 if (!cs->connected)
32153 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32154- else if (!cs->open_count)
32155+ else if (!local_read(&cs->open_count))
32156 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32157 else
32158 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32159@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32160
32161 if (!cs->connected)
32162 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32163- else if (!cs->open_count)
32164+ else if (!local_read(&cs->open_count))
32165 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32166 else
32167 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32168@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32169 goto out;
32170 }
32171
32172- if (!cs->open_count) {
32173+ if (!local_read(&cs->open_count)) {
32174 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32175 goto out;
32176 }
32177diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32178index 2a57da59..e7a12ed 100644
32179--- a/drivers/isdn/hardware/avm/b1.c
32180+++ b/drivers/isdn/hardware/avm/b1.c
32181@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32182 }
32183 if (left) {
32184 if (t4file->user) {
32185- if (copy_from_user(buf, dp, left))
32186+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32187 return -EFAULT;
32188 } else {
32189 memcpy(buf, dp, left);
32190@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32191 }
32192 if (left) {
32193 if (config->user) {
32194- if (copy_from_user(buf, dp, left))
32195+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32196 return -EFAULT;
32197 } else {
32198 memcpy(buf, dp, left);
32199diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32200index 85784a7..a19ca98 100644
32201--- a/drivers/isdn/hardware/eicon/divasync.h
32202+++ b/drivers/isdn/hardware/eicon/divasync.h
32203@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32204 } diva_didd_add_adapter_t;
32205 typedef struct _diva_didd_remove_adapter {
32206 IDI_CALL p_request;
32207-} diva_didd_remove_adapter_t;
32208+} __no_const diva_didd_remove_adapter_t;
32209 typedef struct _diva_didd_read_adapter_array {
32210 void * buffer;
32211 dword length;
32212diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32213index a3bd163..8956575 100644
32214--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32215+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32216@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32217 typedef struct _diva_os_idi_adapter_interface {
32218 diva_init_card_proc_t cleanup_adapter_proc;
32219 diva_cmd_card_proc_t cmd_proc;
32220-} diva_os_idi_adapter_interface_t;
32221+} __no_const diva_os_idi_adapter_interface_t;
32222
32223 typedef struct _diva_os_xdi_adapter {
32224 struct list_head link;
32225diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32226index 1f355bb..43f1fea 100644
32227--- a/drivers/isdn/icn/icn.c
32228+++ b/drivers/isdn/icn/icn.c
32229@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32230 if (count > len)
32231 count = len;
32232 if (user) {
32233- if (copy_from_user(msg, buf, count))
32234+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32235 return -EFAULT;
32236 } else
32237 memcpy(msg, buf, count);
32238diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32239index b5fdcb7..5b6c59f 100644
32240--- a/drivers/lguest/core.c
32241+++ b/drivers/lguest/core.c
32242@@ -92,9 +92,17 @@ static __init int map_switcher(void)
32243 * it's worked so far. The end address needs +1 because __get_vm_area
32244 * allocates an extra guard page, so we need space for that.
32245 */
32246+
32247+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32248+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32249+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32250+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32251+#else
32252 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32253 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32254 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32255+#endif
32256+
32257 if (!switcher_vma) {
32258 err = -ENOMEM;
32259 printk("lguest: could not map switcher pages high\n");
32260@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32261 * Now the Switcher is mapped at the right address, we can't fail!
32262 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32263 */
32264- memcpy(switcher_vma->addr, start_switcher_text,
32265+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32266 end_switcher_text - start_switcher_text);
32267
32268 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32269diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32270index 3980903..ce25c5e 100644
32271--- a/drivers/lguest/x86/core.c
32272+++ b/drivers/lguest/x86/core.c
32273@@ -59,7 +59,7 @@ static struct {
32274 /* Offset from where switcher.S was compiled to where we've copied it */
32275 static unsigned long switcher_offset(void)
32276 {
32277- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32278+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32279 }
32280
32281 /* This cpu's struct lguest_pages. */
32282@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32283 * These copies are pretty cheap, so we do them unconditionally: */
32284 /* Save the current Host top-level page directory.
32285 */
32286+
32287+#ifdef CONFIG_PAX_PER_CPU_PGD
32288+ pages->state.host_cr3 = read_cr3();
32289+#else
32290 pages->state.host_cr3 = __pa(current->mm->pgd);
32291+#endif
32292+
32293 /*
32294 * Set up the Guest's page tables to see this CPU's pages (and no
32295 * other CPU's pages).
32296@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32297 * compiled-in switcher code and the high-mapped copy we just made.
32298 */
32299 for (i = 0; i < IDT_ENTRIES; i++)
32300- default_idt_entries[i] += switcher_offset();
32301+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32302
32303 /*
32304 * Set up the Switcher's per-cpu areas.
32305@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32306 * it will be undisturbed when we switch. To change %cs and jump we
32307 * need this structure to feed to Intel's "lcall" instruction.
32308 */
32309- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32310+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32311 lguest_entry.segment = LGUEST_CS;
32312
32313 /*
32314diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32315index 40634b0..4f5855e 100644
32316--- a/drivers/lguest/x86/switcher_32.S
32317+++ b/drivers/lguest/x86/switcher_32.S
32318@@ -87,6 +87,7 @@
32319 #include <asm/page.h>
32320 #include <asm/segment.h>
32321 #include <asm/lguest.h>
32322+#include <asm/processor-flags.h>
32323
32324 // We mark the start of the code to copy
32325 // It's placed in .text tho it's never run here
32326@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32327 // Changes type when we load it: damn Intel!
32328 // For after we switch over our page tables
32329 // That entry will be read-only: we'd crash.
32330+
32331+#ifdef CONFIG_PAX_KERNEXEC
32332+ mov %cr0, %edx
32333+ xor $X86_CR0_WP, %edx
32334+ mov %edx, %cr0
32335+#endif
32336+
32337 movl $(GDT_ENTRY_TSS*8), %edx
32338 ltr %dx
32339
32340@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32341 // Let's clear it again for our return.
32342 // The GDT descriptor of the Host
32343 // Points to the table after two "size" bytes
32344- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32345+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32346 // Clear "used" from type field (byte 5, bit 2)
32347- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32348+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32349+
32350+#ifdef CONFIG_PAX_KERNEXEC
32351+ mov %cr0, %eax
32352+ xor $X86_CR0_WP, %eax
32353+ mov %eax, %cr0
32354+#endif
32355
32356 // Once our page table's switched, the Guest is live!
32357 // The Host fades as we run this final step.
32358@@ -295,13 +309,12 @@ deliver_to_host:
32359 // I consulted gcc, and it gave
32360 // These instructions, which I gladly credit:
32361 leal (%edx,%ebx,8), %eax
32362- movzwl (%eax),%edx
32363- movl 4(%eax), %eax
32364- xorw %ax, %ax
32365- orl %eax, %edx
32366+ movl 4(%eax), %edx
32367+ movw (%eax), %dx
32368 // Now the address of the handler's in %edx
32369 // We call it now: its "iret" drops us home.
32370- jmp *%edx
32371+ ljmp $__KERNEL_CS, $1f
32372+1: jmp *%edx
32373
32374 // Every interrupt can come to us here
32375 // But we must truly tell each apart.
32376diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32377index 4daf9e5..b8d1d0f 100644
32378--- a/drivers/macintosh/macio_asic.c
32379+++ b/drivers/macintosh/macio_asic.c
32380@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32381 * MacIO is matched against any Apple ID, it's probe() function
32382 * will then decide wether it applies or not
32383 */
32384-static const struct pci_device_id __devinitdata pci_ids [] = { {
32385+static const struct pci_device_id __devinitconst pci_ids [] = { {
32386 .vendor = PCI_VENDOR_ID_APPLE,
32387 .device = PCI_ANY_ID,
32388 .subvendor = PCI_ANY_ID,
32389diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32390index 1ce84ed..0fdd40a 100644
32391--- a/drivers/md/dm-ioctl.c
32392+++ b/drivers/md/dm-ioctl.c
32393@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32394 cmd == DM_LIST_VERSIONS_CMD)
32395 return 0;
32396
32397- if ((cmd == DM_DEV_CREATE_CMD)) {
32398+ if (cmd == DM_DEV_CREATE_CMD) {
32399 if (!*param->name) {
32400 DMWARN("name not supplied when creating device");
32401 return -EINVAL;
32402diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32403index 9bfd057..01180bc 100644
32404--- a/drivers/md/dm-raid1.c
32405+++ b/drivers/md/dm-raid1.c
32406@@ -40,7 +40,7 @@ enum dm_raid1_error {
32407
32408 struct mirror {
32409 struct mirror_set *ms;
32410- atomic_t error_count;
32411+ atomic_unchecked_t error_count;
32412 unsigned long error_type;
32413 struct dm_dev *dev;
32414 sector_t offset;
32415@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32416 struct mirror *m;
32417
32418 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32419- if (!atomic_read(&m->error_count))
32420+ if (!atomic_read_unchecked(&m->error_count))
32421 return m;
32422
32423 return NULL;
32424@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32425 * simple way to tell if a device has encountered
32426 * errors.
32427 */
32428- atomic_inc(&m->error_count);
32429+ atomic_inc_unchecked(&m->error_count);
32430
32431 if (test_and_set_bit(error_type, &m->error_type))
32432 return;
32433@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
32434 struct mirror *m = get_default_mirror(ms);
32435
32436 do {
32437- if (likely(!atomic_read(&m->error_count)))
32438+ if (likely(!atomic_read_unchecked(&m->error_count)))
32439 return m;
32440
32441 if (m-- == ms->mirror)
32442@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
32443 {
32444 struct mirror *default_mirror = get_default_mirror(m->ms);
32445
32446- return !atomic_read(&default_mirror->error_count);
32447+ return !atomic_read_unchecked(&default_mirror->error_count);
32448 }
32449
32450 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32451@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
32452 */
32453 if (likely(region_in_sync(ms, region, 1)))
32454 m = choose_mirror(ms, bio->bi_sector);
32455- else if (m && atomic_read(&m->error_count))
32456+ else if (m && atomic_read_unchecked(&m->error_count))
32457 m = NULL;
32458
32459 if (likely(m))
32460@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
32461 }
32462
32463 ms->mirror[mirror].ms = ms;
32464- atomic_set(&(ms->mirror[mirror].error_count), 0);
32465+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32466 ms->mirror[mirror].error_type = 0;
32467 ms->mirror[mirror].offset = offset;
32468
32469@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
32470 */
32471 static char device_status_char(struct mirror *m)
32472 {
32473- if (!atomic_read(&(m->error_count)))
32474+ if (!atomic_read_unchecked(&(m->error_count)))
32475 return 'A';
32476
32477 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
32478diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
32479index 3d80cf0..b77cc47 100644
32480--- a/drivers/md/dm-stripe.c
32481+++ b/drivers/md/dm-stripe.c
32482@@ -20,7 +20,7 @@ struct stripe {
32483 struct dm_dev *dev;
32484 sector_t physical_start;
32485
32486- atomic_t error_count;
32487+ atomic_unchecked_t error_count;
32488 };
32489
32490 struct stripe_c {
32491@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32492 kfree(sc);
32493 return r;
32494 }
32495- atomic_set(&(sc->stripe[i].error_count), 0);
32496+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32497 }
32498
32499 ti->private = sc;
32500@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
32501 DMEMIT("%d ", sc->stripes);
32502 for (i = 0; i < sc->stripes; i++) {
32503 DMEMIT("%s ", sc->stripe[i].dev->name);
32504- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32505+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32506 'D' : 'A';
32507 }
32508 buffer[i] = '\0';
32509@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
32510 */
32511 for (i = 0; i < sc->stripes; i++)
32512 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32513- atomic_inc(&(sc->stripe[i].error_count));
32514- if (atomic_read(&(sc->stripe[i].error_count)) <
32515+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
32516+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32517 DM_IO_ERROR_THRESHOLD)
32518 schedule_work(&sc->trigger_event);
32519 }
32520diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
32521index 63cc542..8d45caf3 100644
32522--- a/drivers/md/dm-table.c
32523+++ b/drivers/md/dm-table.c
32524@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
32525 if (!dev_size)
32526 return 0;
32527
32528- if ((start >= dev_size) || (start + len > dev_size)) {
32529+ if ((start >= dev_size) || (len > dev_size - start)) {
32530 DMWARN("%s: %s too small for target: "
32531 "start=%llu, len=%llu, dev_size=%llu",
32532 dm_device_name(ti->table->md), bdevname(bdev, b),
32533diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
32534index 237571a..fb6d19b 100644
32535--- a/drivers/md/dm-thin-metadata.c
32536+++ b/drivers/md/dm-thin-metadata.c
32537@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32538
32539 pmd->info.tm = tm;
32540 pmd->info.levels = 2;
32541- pmd->info.value_type.context = pmd->data_sm;
32542+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32543 pmd->info.value_type.size = sizeof(__le64);
32544 pmd->info.value_type.inc = data_block_inc;
32545 pmd->info.value_type.dec = data_block_dec;
32546@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32547
32548 pmd->bl_info.tm = tm;
32549 pmd->bl_info.levels = 1;
32550- pmd->bl_info.value_type.context = pmd->data_sm;
32551+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32552 pmd->bl_info.value_type.size = sizeof(__le64);
32553 pmd->bl_info.value_type.inc = data_block_inc;
32554 pmd->bl_info.value_type.dec = data_block_dec;
32555diff --git a/drivers/md/dm.c b/drivers/md/dm.c
32556index b89c548..2af3ce4 100644
32557--- a/drivers/md/dm.c
32558+++ b/drivers/md/dm.c
32559@@ -176,9 +176,9 @@ struct mapped_device {
32560 /*
32561 * Event handling.
32562 */
32563- atomic_t event_nr;
32564+ atomic_unchecked_t event_nr;
32565 wait_queue_head_t eventq;
32566- atomic_t uevent_seq;
32567+ atomic_unchecked_t uevent_seq;
32568 struct list_head uevent_list;
32569 spinlock_t uevent_lock; /* Protect access to uevent_list */
32570
32571@@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
32572 rwlock_init(&md->map_lock);
32573 atomic_set(&md->holders, 1);
32574 atomic_set(&md->open_count, 0);
32575- atomic_set(&md->event_nr, 0);
32576- atomic_set(&md->uevent_seq, 0);
32577+ atomic_set_unchecked(&md->event_nr, 0);
32578+ atomic_set_unchecked(&md->uevent_seq, 0);
32579 INIT_LIST_HEAD(&md->uevent_list);
32580 spin_lock_init(&md->uevent_lock);
32581
32582@@ -1979,7 +1979,7 @@ static void event_callback(void *context)
32583
32584 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32585
32586- atomic_inc(&md->event_nr);
32587+ atomic_inc_unchecked(&md->event_nr);
32588 wake_up(&md->eventq);
32589 }
32590
32591@@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
32592
32593 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32594 {
32595- return atomic_add_return(1, &md->uevent_seq);
32596+ return atomic_add_return_unchecked(1, &md->uevent_seq);
32597 }
32598
32599 uint32_t dm_get_event_nr(struct mapped_device *md)
32600 {
32601- return atomic_read(&md->event_nr);
32602+ return atomic_read_unchecked(&md->event_nr);
32603 }
32604
32605 int dm_wait_event(struct mapped_device *md, int event_nr)
32606 {
32607 return wait_event_interruptible(md->eventq,
32608- (event_nr != atomic_read(&md->event_nr)));
32609+ (event_nr != atomic_read_unchecked(&md->event_nr)));
32610 }
32611
32612 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32613diff --git a/drivers/md/md.c b/drivers/md/md.c
32614index ce88755..4d8686d 100644
32615--- a/drivers/md/md.c
32616+++ b/drivers/md/md.c
32617@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
32618 * start build, activate spare
32619 */
32620 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32621-static atomic_t md_event_count;
32622+static atomic_unchecked_t md_event_count;
32623 void md_new_event(struct mddev *mddev)
32624 {
32625- atomic_inc(&md_event_count);
32626+ atomic_inc_unchecked(&md_event_count);
32627 wake_up(&md_event_waiters);
32628 }
32629 EXPORT_SYMBOL_GPL(md_new_event);
32630@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32631 */
32632 static void md_new_event_inintr(struct mddev *mddev)
32633 {
32634- atomic_inc(&md_event_count);
32635+ atomic_inc_unchecked(&md_event_count);
32636 wake_up(&md_event_waiters);
32637 }
32638
32639@@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
32640
32641 rdev->preferred_minor = 0xffff;
32642 rdev->data_offset = le64_to_cpu(sb->data_offset);
32643- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32644+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32645
32646 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32647 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32648@@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
32649 else
32650 sb->resync_offset = cpu_to_le64(0);
32651
32652- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32653+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32654
32655 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32656 sb->size = cpu_to_le64(mddev->dev_sectors);
32657@@ -2688,7 +2688,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
32658 static ssize_t
32659 errors_show(struct md_rdev *rdev, char *page)
32660 {
32661- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32662+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32663 }
32664
32665 static ssize_t
32666@@ -2697,7 +2697,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
32667 char *e;
32668 unsigned long n = simple_strtoul(buf, &e, 10);
32669 if (*buf && (*e == 0 || *e == '\n')) {
32670- atomic_set(&rdev->corrected_errors, n);
32671+ atomic_set_unchecked(&rdev->corrected_errors, n);
32672 return len;
32673 }
32674 return -EINVAL;
32675@@ -3083,8 +3083,8 @@ int md_rdev_init(struct md_rdev *rdev)
32676 rdev->sb_loaded = 0;
32677 rdev->bb_page = NULL;
32678 atomic_set(&rdev->nr_pending, 0);
32679- atomic_set(&rdev->read_errors, 0);
32680- atomic_set(&rdev->corrected_errors, 0);
32681+ atomic_set_unchecked(&rdev->read_errors, 0);
32682+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32683
32684 INIT_LIST_HEAD(&rdev->same_set);
32685 init_waitqueue_head(&rdev->blocked_wait);
32686@@ -6735,7 +6735,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32687
32688 spin_unlock(&pers_lock);
32689 seq_printf(seq, "\n");
32690- seq->poll_event = atomic_read(&md_event_count);
32691+ seq->poll_event = atomic_read_unchecked(&md_event_count);
32692 return 0;
32693 }
32694 if (v == (void*)2) {
32695@@ -6827,7 +6827,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32696 chunk_kb ? "KB" : "B");
32697 if (bitmap->file) {
32698 seq_printf(seq, ", file: ");
32699- seq_path(seq, &bitmap->file->f_path, " \t\n");
32700+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32701 }
32702
32703 seq_printf(seq, "\n");
32704@@ -6858,7 +6858,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
32705 return error;
32706
32707 seq = file->private_data;
32708- seq->poll_event = atomic_read(&md_event_count);
32709+ seq->poll_event = atomic_read_unchecked(&md_event_count);
32710 return error;
32711 }
32712
32713@@ -6872,7 +6872,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
32714 /* always allow read */
32715 mask = POLLIN | POLLRDNORM;
32716
32717- if (seq->poll_event != atomic_read(&md_event_count))
32718+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
32719 mask |= POLLERR | POLLPRI;
32720 return mask;
32721 }
32722@@ -6916,7 +6916,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
32723 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32724 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32725 (int)part_stat_read(&disk->part0, sectors[1]) -
32726- atomic_read(&disk->sync_io);
32727+ atomic_read_unchecked(&disk->sync_io);
32728 /* sync IO will cause sync_io to increase before the disk_stats
32729 * as sync_io is counted when a request starts, and
32730 * disk_stats is counted when it completes.
32731diff --git a/drivers/md/md.h b/drivers/md/md.h
32732index 44c63df..b795d1a 100644
32733--- a/drivers/md/md.h
32734+++ b/drivers/md/md.h
32735@@ -93,13 +93,13 @@ struct md_rdev {
32736 * only maintained for arrays that
32737 * support hot removal
32738 */
32739- atomic_t read_errors; /* number of consecutive read errors that
32740+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32741 * we have tried to ignore.
32742 */
32743 struct timespec last_read_error; /* monotonic time since our
32744 * last read error
32745 */
32746- atomic_t corrected_errors; /* number of corrected read errors,
32747+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32748 * for reporting to userspace and storing
32749 * in superblock.
32750 */
32751@@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
32752
32753 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32754 {
32755- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32756+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32757 }
32758
32759 struct md_personality
32760diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
32761index 50ed53b..4f29d7d 100644
32762--- a/drivers/md/persistent-data/dm-space-map-checker.c
32763+++ b/drivers/md/persistent-data/dm-space-map-checker.c
32764@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
32765 /*----------------------------------------------------------------*/
32766
32767 struct sm_checker {
32768- struct dm_space_map sm;
32769+ dm_space_map_no_const sm;
32770
32771 struct count_array old_counts;
32772 struct count_array counts;
32773diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
32774index fc469ba..2d91555 100644
32775--- a/drivers/md/persistent-data/dm-space-map-disk.c
32776+++ b/drivers/md/persistent-data/dm-space-map-disk.c
32777@@ -23,7 +23,7 @@
32778 * Space map interface.
32779 */
32780 struct sm_disk {
32781- struct dm_space_map sm;
32782+ dm_space_map_no_const sm;
32783
32784 struct ll_disk ll;
32785 struct ll_disk old_ll;
32786diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
32787index e89ae5e..062e4c2 100644
32788--- a/drivers/md/persistent-data/dm-space-map-metadata.c
32789+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
32790@@ -43,7 +43,7 @@ struct block_op {
32791 };
32792
32793 struct sm_metadata {
32794- struct dm_space_map sm;
32795+ dm_space_map_no_const sm;
32796
32797 struct ll_disk ll;
32798 struct ll_disk old_ll;
32799diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
32800index 1cbfc6b..56e1dbb 100644
32801--- a/drivers/md/persistent-data/dm-space-map.h
32802+++ b/drivers/md/persistent-data/dm-space-map.h
32803@@ -60,6 +60,7 @@ struct dm_space_map {
32804 int (*root_size)(struct dm_space_map *sm, size_t *result);
32805 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
32806 };
32807+typedef struct dm_space_map __no_const dm_space_map_no_const;
32808
32809 /*----------------------------------------------------------------*/
32810
32811diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
32812index a0b225e..a9be913 100644
32813--- a/drivers/md/raid1.c
32814+++ b/drivers/md/raid1.c
32815@@ -1632,7 +1632,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
32816 if (r1_sync_page_io(rdev, sect, s,
32817 bio->bi_io_vec[idx].bv_page,
32818 READ) != 0)
32819- atomic_add(s, &rdev->corrected_errors);
32820+ atomic_add_unchecked(s, &rdev->corrected_errors);
32821 }
32822 sectors -= s;
32823 sect += s;
32824@@ -1845,7 +1845,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
32825 test_bit(In_sync, &rdev->flags)) {
32826 if (r1_sync_page_io(rdev, sect, s,
32827 conf->tmppage, READ)) {
32828- atomic_add(s, &rdev->corrected_errors);
32829+ atomic_add_unchecked(s, &rdev->corrected_errors);
32830 printk(KERN_INFO
32831 "md/raid1:%s: read error corrected "
32832 "(%d sectors at %llu on %s)\n",
32833diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
32834index 58c44d6..f090bad 100644
32835--- a/drivers/md/raid10.c
32836+++ b/drivers/md/raid10.c
32837@@ -1623,7 +1623,7 @@ static void end_sync_read(struct bio *bio, int error)
32838 /* The write handler will notice the lack of
32839 * R10BIO_Uptodate and record any errors etc
32840 */
32841- atomic_add(r10_bio->sectors,
32842+ atomic_add_unchecked(r10_bio->sectors,
32843 &conf->mirrors[d].rdev->corrected_errors);
32844
32845 /* for reconstruct, we always reschedule after a read.
32846@@ -1974,7 +1974,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32847 {
32848 struct timespec cur_time_mon;
32849 unsigned long hours_since_last;
32850- unsigned int read_errors = atomic_read(&rdev->read_errors);
32851+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
32852
32853 ktime_get_ts(&cur_time_mon);
32854
32855@@ -1996,9 +1996,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32856 * overflowing the shift of read_errors by hours_since_last.
32857 */
32858 if (hours_since_last >= 8 * sizeof(read_errors))
32859- atomic_set(&rdev->read_errors, 0);
32860+ atomic_set_unchecked(&rdev->read_errors, 0);
32861 else
32862- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
32863+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
32864 }
32865
32866 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
32867@@ -2052,8 +2052,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32868 return;
32869
32870 check_decay_read_errors(mddev, rdev);
32871- atomic_inc(&rdev->read_errors);
32872- if (atomic_read(&rdev->read_errors) > max_read_errors) {
32873+ atomic_inc_unchecked(&rdev->read_errors);
32874+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
32875 char b[BDEVNAME_SIZE];
32876 bdevname(rdev->bdev, b);
32877
32878@@ -2061,7 +2061,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32879 "md/raid10:%s: %s: Raid device exceeded "
32880 "read_error threshold [cur %d:max %d]\n",
32881 mdname(mddev), b,
32882- atomic_read(&rdev->read_errors), max_read_errors);
32883+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
32884 printk(KERN_NOTICE
32885 "md/raid10:%s: %s: Failing raid device\n",
32886 mdname(mddev), b);
32887@@ -2210,7 +2210,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32888 (unsigned long long)(
32889 sect + rdev->data_offset),
32890 bdevname(rdev->bdev, b));
32891- atomic_add(s, &rdev->corrected_errors);
32892+ atomic_add_unchecked(s, &rdev->corrected_errors);
32893 }
32894
32895 rdev_dec_pending(rdev, mddev);
32896diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
32897index 360f2b9..08b5382 100644
32898--- a/drivers/md/raid5.c
32899+++ b/drivers/md/raid5.c
32900@@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
32901 (unsigned long long)(sh->sector
32902 + rdev->data_offset),
32903 bdevname(rdev->bdev, b));
32904- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
32905+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
32906 clear_bit(R5_ReadError, &sh->dev[i].flags);
32907 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32908 }
32909- if (atomic_read(&rdev->read_errors))
32910- atomic_set(&rdev->read_errors, 0);
32911+ if (atomic_read_unchecked(&rdev->read_errors))
32912+ atomic_set_unchecked(&rdev->read_errors, 0);
32913 } else {
32914 const char *bdn = bdevname(rdev->bdev, b);
32915 int retry = 0;
32916
32917 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32918- atomic_inc(&rdev->read_errors);
32919+ atomic_inc_unchecked(&rdev->read_errors);
32920 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
32921 printk_ratelimited(
32922 KERN_WARNING
32923@@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
32924 (unsigned long long)(sh->sector
32925 + rdev->data_offset),
32926 bdn);
32927- else if (atomic_read(&rdev->read_errors)
32928+ else if (atomic_read_unchecked(&rdev->read_errors)
32929 > conf->max_nr_stripes)
32930 printk(KERN_WARNING
32931 "md/raid:%s: Too many read errors, failing device %s.\n",
32932diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
32933index ce4f858..7bcfb46 100644
32934--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
32935+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
32936@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
32937 .subvendor = _subvend, .subdevice = _subdev, \
32938 .driver_data = (unsigned long)&_driverdata }
32939
32940-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
32941+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
32942 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
32943 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
32944 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
32945diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
32946index a7d876f..8c21b61 100644
32947--- a/drivers/media/dvb/dvb-core/dvb_demux.h
32948+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
32949@@ -73,7 +73,7 @@ struct dvb_demux_feed {
32950 union {
32951 dmx_ts_cb ts;
32952 dmx_section_cb sec;
32953- } cb;
32954+ } __no_const cb;
32955
32956 struct dvb_demux *demux;
32957 void *priv;
32958diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
32959index 00a6732..70a682e 100644
32960--- a/drivers/media/dvb/dvb-core/dvbdev.c
32961+++ b/drivers/media/dvb/dvb-core/dvbdev.c
32962@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
32963 const struct dvb_device *template, void *priv, int type)
32964 {
32965 struct dvb_device *dvbdev;
32966- struct file_operations *dvbdevfops;
32967+ file_operations_no_const *dvbdevfops;
32968 struct device *clsdev;
32969 int minor;
32970 int id;
32971diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
32972index 3940bb0..fb3952a 100644
32973--- a/drivers/media/dvb/dvb-usb/cxusb.c
32974+++ b/drivers/media/dvb/dvb-usb/cxusb.c
32975@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
32976
32977 struct dib0700_adapter_state {
32978 int (*set_param_save) (struct dvb_frontend *);
32979-};
32980+} __no_const;
32981
32982 static int dib7070_set_param_override(struct dvb_frontend *fe)
32983 {
32984diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
32985index 451c5a7..649f711 100644
32986--- a/drivers/media/dvb/dvb-usb/dw2102.c
32987+++ b/drivers/media/dvb/dvb-usb/dw2102.c
32988@@ -95,7 +95,7 @@ struct su3000_state {
32989
32990 struct s6x0_state {
32991 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
32992-};
32993+} __no_const;
32994
32995 /* debug */
32996 static int dvb_usb_dw2102_debug;
32997diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
32998index 404f63a..4796533 100644
32999--- a/drivers/media/dvb/frontends/dib3000.h
33000+++ b/drivers/media/dvb/frontends/dib3000.h
33001@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33002 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33003 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33004 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33005-};
33006+} __no_const;
33007
33008 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33009 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33010diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33011index 8418c02..8555013 100644
33012--- a/drivers/media/dvb/ngene/ngene-cards.c
33013+++ b/drivers/media/dvb/ngene/ngene-cards.c
33014@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33015
33016 /****************************************************************************/
33017
33018-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33019+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33020 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33021 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33022 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33023diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33024index 16a089f..ab1667d 100644
33025--- a/drivers/media/radio/radio-cadet.c
33026+++ b/drivers/media/radio/radio-cadet.c
33027@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33028 unsigned char readbuf[RDS_BUFFER];
33029 int i = 0;
33030
33031+ if (count > RDS_BUFFER)
33032+ return -EFAULT;
33033 mutex_lock(&dev->lock);
33034 if (dev->rdsstat == 0) {
33035 dev->rdsstat = 1;
33036diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33037index 9cde353..8c6a1c3 100644
33038--- a/drivers/media/video/au0828/au0828.h
33039+++ b/drivers/media/video/au0828/au0828.h
33040@@ -191,7 +191,7 @@ struct au0828_dev {
33041
33042 /* I2C */
33043 struct i2c_adapter i2c_adap;
33044- struct i2c_algorithm i2c_algo;
33045+ i2c_algorithm_no_const i2c_algo;
33046 struct i2c_client i2c_client;
33047 u32 i2c_rc;
33048
33049diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33050index 04bf662..e0ac026 100644
33051--- a/drivers/media/video/cx88/cx88-alsa.c
33052+++ b/drivers/media/video/cx88/cx88-alsa.c
33053@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33054 * Only boards with eeprom and byte 1 at eeprom=1 have it
33055 */
33056
33057-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33058+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33059 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33060 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33061 {0, }
33062diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33063index 1fb7d5b..3901e77 100644
33064--- a/drivers/media/video/omap/omap_vout.c
33065+++ b/drivers/media/video/omap/omap_vout.c
33066@@ -64,7 +64,6 @@ enum omap_vout_channels {
33067 OMAP_VIDEO2,
33068 };
33069
33070-static struct videobuf_queue_ops video_vbq_ops;
33071 /* Variables configurable through module params*/
33072 static u32 video1_numbuffers = 3;
33073 static u32 video2_numbuffers = 3;
33074@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33075 {
33076 struct videobuf_queue *q;
33077 struct omap_vout_device *vout = NULL;
33078+ static struct videobuf_queue_ops video_vbq_ops = {
33079+ .buf_setup = omap_vout_buffer_setup,
33080+ .buf_prepare = omap_vout_buffer_prepare,
33081+ .buf_release = omap_vout_buffer_release,
33082+ .buf_queue = omap_vout_buffer_queue,
33083+ };
33084
33085 vout = video_drvdata(file);
33086 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33087@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33088 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33089
33090 q = &vout->vbq;
33091- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33092- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33093- video_vbq_ops.buf_release = omap_vout_buffer_release;
33094- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33095 spin_lock_init(&vout->vbq_lock);
33096
33097 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33098diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33099index 305e6aa..0143317 100644
33100--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33101+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33102@@ -196,7 +196,7 @@ struct pvr2_hdw {
33103
33104 /* I2C stuff */
33105 struct i2c_adapter i2c_adap;
33106- struct i2c_algorithm i2c_algo;
33107+ i2c_algorithm_no_const i2c_algo;
33108 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33109 int i2c_cx25840_hack_state;
33110 int i2c_linked;
33111diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33112index 4ed1c7c2..8f15e13 100644
33113--- a/drivers/media/video/timblogiw.c
33114+++ b/drivers/media/video/timblogiw.c
33115@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33116
33117 /* Platform device functions */
33118
33119-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33120+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33121 .vidioc_querycap = timblogiw_querycap,
33122 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33123 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33124@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33125 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33126 };
33127
33128-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33129+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33130 .owner = THIS_MODULE,
33131 .open = timblogiw_open,
33132 .release = timblogiw_close,
33133diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33134index a7dc467..a55c423 100644
33135--- a/drivers/message/fusion/mptbase.c
33136+++ b/drivers/message/fusion/mptbase.c
33137@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33138 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33139 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33140
33141+#ifdef CONFIG_GRKERNSEC_HIDESYM
33142+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33143+#else
33144 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33145 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33146+#endif
33147+
33148 /*
33149 * Rounding UP to nearest 4-kB boundary here...
33150 */
33151diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33152index 551262e..7551198 100644
33153--- a/drivers/message/fusion/mptsas.c
33154+++ b/drivers/message/fusion/mptsas.c
33155@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33156 return 0;
33157 }
33158
33159+static inline void
33160+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33161+{
33162+ if (phy_info->port_details) {
33163+ phy_info->port_details->rphy = rphy;
33164+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33165+ ioc->name, rphy));
33166+ }
33167+
33168+ if (rphy) {
33169+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33170+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33171+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33172+ ioc->name, rphy, rphy->dev.release));
33173+ }
33174+}
33175+
33176 /* no mutex */
33177 static void
33178 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33179@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33180 return NULL;
33181 }
33182
33183-static inline void
33184-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33185-{
33186- if (phy_info->port_details) {
33187- phy_info->port_details->rphy = rphy;
33188- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33189- ioc->name, rphy));
33190- }
33191-
33192- if (rphy) {
33193- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33194- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33195- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33196- ioc->name, rphy, rphy->dev.release));
33197- }
33198-}
33199-
33200 static inline struct sas_port *
33201 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33202 {
33203diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33204index 0c3ced7..1fe34ec 100644
33205--- a/drivers/message/fusion/mptscsih.c
33206+++ b/drivers/message/fusion/mptscsih.c
33207@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33208
33209 h = shost_priv(SChost);
33210
33211- if (h) {
33212- if (h->info_kbuf == NULL)
33213- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33214- return h->info_kbuf;
33215- h->info_kbuf[0] = '\0';
33216+ if (!h)
33217+ return NULL;
33218
33219- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33220- h->info_kbuf[size-1] = '\0';
33221- }
33222+ if (h->info_kbuf == NULL)
33223+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33224+ return h->info_kbuf;
33225+ h->info_kbuf[0] = '\0';
33226+
33227+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33228+ h->info_kbuf[size-1] = '\0';
33229
33230 return h->info_kbuf;
33231 }
33232diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33233index 6d115c7..58ff7fd 100644
33234--- a/drivers/message/i2o/i2o_proc.c
33235+++ b/drivers/message/i2o/i2o_proc.c
33236@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33237 "Array Controller Device"
33238 };
33239
33240-static char *chtostr(u8 * chars, int n)
33241-{
33242- char tmp[256];
33243- tmp[0] = 0;
33244- return strncat(tmp, (char *)chars, n);
33245-}
33246-
33247 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33248 char *group)
33249 {
33250@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33251
33252 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33253 seq_printf(seq, "%-#8x", ddm_table.module_id);
33254- seq_printf(seq, "%-29s",
33255- chtostr(ddm_table.module_name_version, 28));
33256+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33257 seq_printf(seq, "%9d ", ddm_table.data_size);
33258 seq_printf(seq, "%8d", ddm_table.code_size);
33259
33260@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33261
33262 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33263 seq_printf(seq, "%-#8x", dst->module_id);
33264- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33265- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33266+ seq_printf(seq, "%-.28s", dst->module_name_version);
33267+ seq_printf(seq, "%-.8s", dst->date);
33268 seq_printf(seq, "%8d ", dst->module_size);
33269 seq_printf(seq, "%8d ", dst->mpb_size);
33270 seq_printf(seq, "0x%04x", dst->module_flags);
33271@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33272 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33273 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33274 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33275- seq_printf(seq, "Vendor info : %s\n",
33276- chtostr((u8 *) (work32 + 2), 16));
33277- seq_printf(seq, "Product info : %s\n",
33278- chtostr((u8 *) (work32 + 6), 16));
33279- seq_printf(seq, "Description : %s\n",
33280- chtostr((u8 *) (work32 + 10), 16));
33281- seq_printf(seq, "Product rev. : %s\n",
33282- chtostr((u8 *) (work32 + 14), 8));
33283+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33284+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33285+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33286+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33287
33288 seq_printf(seq, "Serial number : ");
33289 print_serial_number(seq, (u8 *) (work32 + 16),
33290@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33291 }
33292
33293 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33294- seq_printf(seq, "Module name : %s\n",
33295- chtostr(result.module_name, 24));
33296- seq_printf(seq, "Module revision : %s\n",
33297- chtostr(result.module_rev, 8));
33298+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
33299+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33300
33301 seq_printf(seq, "Serial number : ");
33302 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33303@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33304 return 0;
33305 }
33306
33307- seq_printf(seq, "Device name : %s\n",
33308- chtostr(result.device_name, 64));
33309- seq_printf(seq, "Service name : %s\n",
33310- chtostr(result.service_name, 64));
33311- seq_printf(seq, "Physical name : %s\n",
33312- chtostr(result.physical_location, 64));
33313- seq_printf(seq, "Instance number : %s\n",
33314- chtostr(result.instance_number, 4));
33315+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
33316+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
33317+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33318+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33319
33320 return 0;
33321 }
33322diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33323index a8c08f3..155fe3d 100644
33324--- a/drivers/message/i2o/iop.c
33325+++ b/drivers/message/i2o/iop.c
33326@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33327
33328 spin_lock_irqsave(&c->context_list_lock, flags);
33329
33330- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33331- atomic_inc(&c->context_list_counter);
33332+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33333+ atomic_inc_unchecked(&c->context_list_counter);
33334
33335- entry->context = atomic_read(&c->context_list_counter);
33336+ entry->context = atomic_read_unchecked(&c->context_list_counter);
33337
33338 list_add(&entry->list, &c->context_list);
33339
33340@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33341
33342 #if BITS_PER_LONG == 64
33343 spin_lock_init(&c->context_list_lock);
33344- atomic_set(&c->context_list_counter, 0);
33345+ atomic_set_unchecked(&c->context_list_counter, 0);
33346 INIT_LIST_HEAD(&c->context_list);
33347 #endif
33348
33349diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33350index 7ce65f4..e66e9bc 100644
33351--- a/drivers/mfd/abx500-core.c
33352+++ b/drivers/mfd/abx500-core.c
33353@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33354
33355 struct abx500_device_entry {
33356 struct list_head list;
33357- struct abx500_ops ops;
33358+ abx500_ops_no_const ops;
33359 struct device *dev;
33360 };
33361
33362diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33363index a9223ed..4127b13 100644
33364--- a/drivers/mfd/janz-cmodio.c
33365+++ b/drivers/mfd/janz-cmodio.c
33366@@ -13,6 +13,7 @@
33367
33368 #include <linux/kernel.h>
33369 #include <linux/module.h>
33370+#include <linux/slab.h>
33371 #include <linux/init.h>
33372 #include <linux/pci.h>
33373 #include <linux/interrupt.h>
33374diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33375index a981e2a..5ca0c8b 100644
33376--- a/drivers/misc/lis3lv02d/lis3lv02d.c
33377+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33378@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33379 * the lid is closed. This leads to interrupts as soon as a little move
33380 * is done.
33381 */
33382- atomic_inc(&lis3->count);
33383+ atomic_inc_unchecked(&lis3->count);
33384
33385 wake_up_interruptible(&lis3->misc_wait);
33386 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33387@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33388 if (lis3->pm_dev)
33389 pm_runtime_get_sync(lis3->pm_dev);
33390
33391- atomic_set(&lis3->count, 0);
33392+ atomic_set_unchecked(&lis3->count, 0);
33393 return 0;
33394 }
33395
33396@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33397 add_wait_queue(&lis3->misc_wait, &wait);
33398 while (true) {
33399 set_current_state(TASK_INTERRUPTIBLE);
33400- data = atomic_xchg(&lis3->count, 0);
33401+ data = atomic_xchg_unchecked(&lis3->count, 0);
33402 if (data)
33403 break;
33404
33405@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33406 struct lis3lv02d, miscdev);
33407
33408 poll_wait(file, &lis3->misc_wait, wait);
33409- if (atomic_read(&lis3->count))
33410+ if (atomic_read_unchecked(&lis3->count))
33411 return POLLIN | POLLRDNORM;
33412 return 0;
33413 }
33414diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33415index 2b1482a..5d33616 100644
33416--- a/drivers/misc/lis3lv02d/lis3lv02d.h
33417+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33418@@ -266,7 +266,7 @@ struct lis3lv02d {
33419 struct input_polled_dev *idev; /* input device */
33420 struct platform_device *pdev; /* platform device */
33421 struct regulator_bulk_data regulators[2];
33422- atomic_t count; /* interrupt count after last read */
33423+ atomic_unchecked_t count; /* interrupt count after last read */
33424 union axis_conversion ac; /* hw -> logical axis */
33425 int mapped_btns[3];
33426
33427diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33428index 2f30bad..c4c13d0 100644
33429--- a/drivers/misc/sgi-gru/gruhandles.c
33430+++ b/drivers/misc/sgi-gru/gruhandles.c
33431@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33432 unsigned long nsec;
33433
33434 nsec = CLKS2NSEC(clks);
33435- atomic_long_inc(&mcs_op_statistics[op].count);
33436- atomic_long_add(nsec, &mcs_op_statistics[op].total);
33437+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33438+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
33439 if (mcs_op_statistics[op].max < nsec)
33440 mcs_op_statistics[op].max = nsec;
33441 }
33442diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
33443index 950dbe9..eeef0f8 100644
33444--- a/drivers/misc/sgi-gru/gruprocfs.c
33445+++ b/drivers/misc/sgi-gru/gruprocfs.c
33446@@ -32,9 +32,9 @@
33447
33448 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33449
33450-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33451+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33452 {
33453- unsigned long val = atomic_long_read(v);
33454+ unsigned long val = atomic_long_read_unchecked(v);
33455
33456 seq_printf(s, "%16lu %s\n", val, id);
33457 }
33458@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
33459
33460 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
33461 for (op = 0; op < mcsop_last; op++) {
33462- count = atomic_long_read(&mcs_op_statistics[op].count);
33463- total = atomic_long_read(&mcs_op_statistics[op].total);
33464+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33465+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33466 max = mcs_op_statistics[op].max;
33467 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33468 count ? total / count : 0, max);
33469diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
33470index 5c3ce24..4915ccb 100644
33471--- a/drivers/misc/sgi-gru/grutables.h
33472+++ b/drivers/misc/sgi-gru/grutables.h
33473@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
33474 * GRU statistics.
33475 */
33476 struct gru_stats_s {
33477- atomic_long_t vdata_alloc;
33478- atomic_long_t vdata_free;
33479- atomic_long_t gts_alloc;
33480- atomic_long_t gts_free;
33481- atomic_long_t gms_alloc;
33482- atomic_long_t gms_free;
33483- atomic_long_t gts_double_allocate;
33484- atomic_long_t assign_context;
33485- atomic_long_t assign_context_failed;
33486- atomic_long_t free_context;
33487- atomic_long_t load_user_context;
33488- atomic_long_t load_kernel_context;
33489- atomic_long_t lock_kernel_context;
33490- atomic_long_t unlock_kernel_context;
33491- atomic_long_t steal_user_context;
33492- atomic_long_t steal_kernel_context;
33493- atomic_long_t steal_context_failed;
33494- atomic_long_t nopfn;
33495- atomic_long_t asid_new;
33496- atomic_long_t asid_next;
33497- atomic_long_t asid_wrap;
33498- atomic_long_t asid_reuse;
33499- atomic_long_t intr;
33500- atomic_long_t intr_cbr;
33501- atomic_long_t intr_tfh;
33502- atomic_long_t intr_spurious;
33503- atomic_long_t intr_mm_lock_failed;
33504- atomic_long_t call_os;
33505- atomic_long_t call_os_wait_queue;
33506- atomic_long_t user_flush_tlb;
33507- atomic_long_t user_unload_context;
33508- atomic_long_t user_exception;
33509- atomic_long_t set_context_option;
33510- atomic_long_t check_context_retarget_intr;
33511- atomic_long_t check_context_unload;
33512- atomic_long_t tlb_dropin;
33513- atomic_long_t tlb_preload_page;
33514- atomic_long_t tlb_dropin_fail_no_asid;
33515- atomic_long_t tlb_dropin_fail_upm;
33516- atomic_long_t tlb_dropin_fail_invalid;
33517- atomic_long_t tlb_dropin_fail_range_active;
33518- atomic_long_t tlb_dropin_fail_idle;
33519- atomic_long_t tlb_dropin_fail_fmm;
33520- atomic_long_t tlb_dropin_fail_no_exception;
33521- atomic_long_t tfh_stale_on_fault;
33522- atomic_long_t mmu_invalidate_range;
33523- atomic_long_t mmu_invalidate_page;
33524- atomic_long_t flush_tlb;
33525- atomic_long_t flush_tlb_gru;
33526- atomic_long_t flush_tlb_gru_tgh;
33527- atomic_long_t flush_tlb_gru_zero_asid;
33528+ atomic_long_unchecked_t vdata_alloc;
33529+ atomic_long_unchecked_t vdata_free;
33530+ atomic_long_unchecked_t gts_alloc;
33531+ atomic_long_unchecked_t gts_free;
33532+ atomic_long_unchecked_t gms_alloc;
33533+ atomic_long_unchecked_t gms_free;
33534+ atomic_long_unchecked_t gts_double_allocate;
33535+ atomic_long_unchecked_t assign_context;
33536+ atomic_long_unchecked_t assign_context_failed;
33537+ atomic_long_unchecked_t free_context;
33538+ atomic_long_unchecked_t load_user_context;
33539+ atomic_long_unchecked_t load_kernel_context;
33540+ atomic_long_unchecked_t lock_kernel_context;
33541+ atomic_long_unchecked_t unlock_kernel_context;
33542+ atomic_long_unchecked_t steal_user_context;
33543+ atomic_long_unchecked_t steal_kernel_context;
33544+ atomic_long_unchecked_t steal_context_failed;
33545+ atomic_long_unchecked_t nopfn;
33546+ atomic_long_unchecked_t asid_new;
33547+ atomic_long_unchecked_t asid_next;
33548+ atomic_long_unchecked_t asid_wrap;
33549+ atomic_long_unchecked_t asid_reuse;
33550+ atomic_long_unchecked_t intr;
33551+ atomic_long_unchecked_t intr_cbr;
33552+ atomic_long_unchecked_t intr_tfh;
33553+ atomic_long_unchecked_t intr_spurious;
33554+ atomic_long_unchecked_t intr_mm_lock_failed;
33555+ atomic_long_unchecked_t call_os;
33556+ atomic_long_unchecked_t call_os_wait_queue;
33557+ atomic_long_unchecked_t user_flush_tlb;
33558+ atomic_long_unchecked_t user_unload_context;
33559+ atomic_long_unchecked_t user_exception;
33560+ atomic_long_unchecked_t set_context_option;
33561+ atomic_long_unchecked_t check_context_retarget_intr;
33562+ atomic_long_unchecked_t check_context_unload;
33563+ atomic_long_unchecked_t tlb_dropin;
33564+ atomic_long_unchecked_t tlb_preload_page;
33565+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33566+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33567+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33568+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33569+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33570+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33571+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33572+ atomic_long_unchecked_t tfh_stale_on_fault;
33573+ atomic_long_unchecked_t mmu_invalidate_range;
33574+ atomic_long_unchecked_t mmu_invalidate_page;
33575+ atomic_long_unchecked_t flush_tlb;
33576+ atomic_long_unchecked_t flush_tlb_gru;
33577+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33578+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33579
33580- atomic_long_t copy_gpa;
33581- atomic_long_t read_gpa;
33582+ atomic_long_unchecked_t copy_gpa;
33583+ atomic_long_unchecked_t read_gpa;
33584
33585- atomic_long_t mesq_receive;
33586- atomic_long_t mesq_receive_none;
33587- atomic_long_t mesq_send;
33588- atomic_long_t mesq_send_failed;
33589- atomic_long_t mesq_noop;
33590- atomic_long_t mesq_send_unexpected_error;
33591- atomic_long_t mesq_send_lb_overflow;
33592- atomic_long_t mesq_send_qlimit_reached;
33593- atomic_long_t mesq_send_amo_nacked;
33594- atomic_long_t mesq_send_put_nacked;
33595- atomic_long_t mesq_page_overflow;
33596- atomic_long_t mesq_qf_locked;
33597- atomic_long_t mesq_qf_noop_not_full;
33598- atomic_long_t mesq_qf_switch_head_failed;
33599- atomic_long_t mesq_qf_unexpected_error;
33600- atomic_long_t mesq_noop_unexpected_error;
33601- atomic_long_t mesq_noop_lb_overflow;
33602- atomic_long_t mesq_noop_qlimit_reached;
33603- atomic_long_t mesq_noop_amo_nacked;
33604- atomic_long_t mesq_noop_put_nacked;
33605- atomic_long_t mesq_noop_page_overflow;
33606+ atomic_long_unchecked_t mesq_receive;
33607+ atomic_long_unchecked_t mesq_receive_none;
33608+ atomic_long_unchecked_t mesq_send;
33609+ atomic_long_unchecked_t mesq_send_failed;
33610+ atomic_long_unchecked_t mesq_noop;
33611+ atomic_long_unchecked_t mesq_send_unexpected_error;
33612+ atomic_long_unchecked_t mesq_send_lb_overflow;
33613+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33614+ atomic_long_unchecked_t mesq_send_amo_nacked;
33615+ atomic_long_unchecked_t mesq_send_put_nacked;
33616+ atomic_long_unchecked_t mesq_page_overflow;
33617+ atomic_long_unchecked_t mesq_qf_locked;
33618+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33619+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33620+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33621+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33622+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33623+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33624+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33625+ atomic_long_unchecked_t mesq_noop_put_nacked;
33626+ atomic_long_unchecked_t mesq_noop_page_overflow;
33627
33628 };
33629
33630@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
33631 tghop_invalidate, mcsop_last};
33632
33633 struct mcs_op_statistic {
33634- atomic_long_t count;
33635- atomic_long_t total;
33636+ atomic_long_unchecked_t count;
33637+ atomic_long_unchecked_t total;
33638 unsigned long max;
33639 };
33640
33641@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
33642
33643 #define STAT(id) do { \
33644 if (gru_options & OPT_STATS) \
33645- atomic_long_inc(&gru_stats.id); \
33646+ atomic_long_inc_unchecked(&gru_stats.id); \
33647 } while (0)
33648
33649 #ifdef CONFIG_SGI_GRU_DEBUG
33650diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
33651index 851b2f2..a4ec097 100644
33652--- a/drivers/misc/sgi-xp/xp.h
33653+++ b/drivers/misc/sgi-xp/xp.h
33654@@ -289,7 +289,7 @@ struct xpc_interface {
33655 xpc_notify_func, void *);
33656 void (*received) (short, int, void *);
33657 enum xp_retval (*partid_to_nasids) (short, void *);
33658-};
33659+} __no_const;
33660
33661 extern struct xpc_interface xpc_interface;
33662
33663diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
33664index b94d5f7..7f494c5 100644
33665--- a/drivers/misc/sgi-xp/xpc.h
33666+++ b/drivers/misc/sgi-xp/xpc.h
33667@@ -835,6 +835,7 @@ struct xpc_arch_operations {
33668 void (*received_payload) (struct xpc_channel *, void *);
33669 void (*notify_senders_of_disconnect) (struct xpc_channel *);
33670 };
33671+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
33672
33673 /* struct xpc_partition act_state values (for XPC HB) */
33674
33675@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
33676 /* found in xpc_main.c */
33677 extern struct device *xpc_part;
33678 extern struct device *xpc_chan;
33679-extern struct xpc_arch_operations xpc_arch_ops;
33680+extern xpc_arch_operations_no_const xpc_arch_ops;
33681 extern int xpc_disengage_timelimit;
33682 extern int xpc_disengage_timedout;
33683 extern int xpc_activate_IRQ_rcvd;
33684diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
33685index 8d082b4..aa749ae 100644
33686--- a/drivers/misc/sgi-xp/xpc_main.c
33687+++ b/drivers/misc/sgi-xp/xpc_main.c
33688@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
33689 .notifier_call = xpc_system_die,
33690 };
33691
33692-struct xpc_arch_operations xpc_arch_ops;
33693+xpc_arch_operations_no_const xpc_arch_ops;
33694
33695 /*
33696 * Timer function to enforce the timelimit on the partition disengage.
33697diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
33698index 6ebdc40..9edf5d8 100644
33699--- a/drivers/mmc/host/sdhci-pci.c
33700+++ b/drivers/mmc/host/sdhci-pci.c
33701@@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
33702 .probe = via_probe,
33703 };
33704
33705-static const struct pci_device_id pci_ids[] __devinitdata = {
33706+static const struct pci_device_id pci_ids[] __devinitconst = {
33707 {
33708 .vendor = PCI_VENDOR_ID_RICOH,
33709 .device = PCI_DEVICE_ID_RICOH_R5C822,
33710diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
33711index b1cdf64..ce6e438 100644
33712--- a/drivers/mtd/devices/doc2000.c
33713+++ b/drivers/mtd/devices/doc2000.c
33714@@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
33715
33716 /* The ECC will not be calculated correctly if less than 512 is written */
33717 /* DBB-
33718- if (len != 0x200 && eccbuf)
33719+ if (len != 0x200)
33720 printk(KERN_WARNING
33721 "ECC needs a full sector write (adr: %lx size %lx)\n",
33722 (long) to, (long) len);
33723diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
33724index 7543b98..7069947 100644
33725--- a/drivers/mtd/devices/doc2001.c
33726+++ b/drivers/mtd/devices/doc2001.c
33727@@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
33728 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33729
33730 /* Don't allow read past end of device */
33731- if (from >= this->totlen)
33732+ if (from >= this->totlen || !len)
33733 return -EINVAL;
33734
33735 /* Don't allow a single read to cross a 512-byte block boundary */
33736diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33737index 3984d48..28aa897 100644
33738--- a/drivers/mtd/nand/denali.c
33739+++ b/drivers/mtd/nand/denali.c
33740@@ -26,6 +26,7 @@
33741 #include <linux/pci.h>
33742 #include <linux/mtd/mtd.h>
33743 #include <linux/module.h>
33744+#include <linux/slab.h>
33745
33746 #include "denali.h"
33747
33748diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33749index 51b9d6a..52af9a7 100644
33750--- a/drivers/mtd/nftlmount.c
33751+++ b/drivers/mtd/nftlmount.c
33752@@ -24,6 +24,7 @@
33753 #include <asm/errno.h>
33754 #include <linux/delay.h>
33755 #include <linux/slab.h>
33756+#include <linux/sched.h>
33757 #include <linux/mtd/mtd.h>
33758 #include <linux/mtd/nand.h>
33759 #include <linux/mtd/nftl.h>
33760diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33761index 115749f..3021f01 100644
33762--- a/drivers/mtd/ubi/build.c
33763+++ b/drivers/mtd/ubi/build.c
33764@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33765 static int __init bytes_str_to_int(const char *str)
33766 {
33767 char *endp;
33768- unsigned long result;
33769+ unsigned long result, scale = 1;
33770
33771 result = simple_strtoul(str, &endp, 0);
33772 if (str == endp || result >= INT_MAX) {
33773@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33774
33775 switch (*endp) {
33776 case 'G':
33777- result *= 1024;
33778+ scale *= 1024;
33779 case 'M':
33780- result *= 1024;
33781+ scale *= 1024;
33782 case 'K':
33783- result *= 1024;
33784+ scale *= 1024;
33785 if (endp[1] == 'i' && endp[2] == 'B')
33786 endp += 2;
33787 case '\0':
33788@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33789 return -EINVAL;
33790 }
33791
33792- return result;
33793+ if ((intoverflow_t)result*scale >= INT_MAX) {
33794+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33795+ str);
33796+ return -EINVAL;
33797+ }
33798+
33799+ return result*scale;
33800 }
33801
33802 /**
33803diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
33804index 071f4c8..440862e 100644
33805--- a/drivers/net/ethernet/atheros/atlx/atl2.c
33806+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
33807@@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33808 */
33809
33810 #define ATL2_PARAM(X, desc) \
33811- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33812+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33813 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33814 MODULE_PARM_DESC(X, desc);
33815 #else
33816diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33817index 66da39f..5dc436d 100644
33818--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33819+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33820@@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
33821
33822 int (*wait_comp)(struct bnx2x *bp,
33823 struct bnx2x_rx_mode_ramrod_params *p);
33824-};
33825+} __no_const;
33826
33827 /********************** Set multicast group ***********************************/
33828
33829diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
33830index aea8f72..fcebf75 100644
33831--- a/drivers/net/ethernet/broadcom/tg3.h
33832+++ b/drivers/net/ethernet/broadcom/tg3.h
33833@@ -140,6 +140,7 @@
33834 #define CHIPREV_ID_5750_A0 0x4000
33835 #define CHIPREV_ID_5750_A1 0x4001
33836 #define CHIPREV_ID_5750_A3 0x4003
33837+#define CHIPREV_ID_5750_C1 0x4201
33838 #define CHIPREV_ID_5750_C2 0x4202
33839 #define CHIPREV_ID_5752_A0_HW 0x5000
33840 #define CHIPREV_ID_5752_A0 0x6000
33841diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33842index c4e8643..0979484 100644
33843--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33844+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33845@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33846 */
33847 struct l2t_skb_cb {
33848 arp_failure_handler_func arp_failure_handler;
33849-};
33850+} __no_const;
33851
33852 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33853
33854diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
33855index 4d71f5a..8004440 100644
33856--- a/drivers/net/ethernet/dec/tulip/de4x5.c
33857+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
33858@@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33859 for (i=0; i<ETH_ALEN; i++) {
33860 tmp.addr[i] = dev->dev_addr[i];
33861 }
33862- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33863+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33864 break;
33865
33866 case DE4X5_SET_HWADDR: /* Set the hardware address */
33867@@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33868 spin_lock_irqsave(&lp->lock, flags);
33869 memcpy(&statbuf, &lp->pktStats, ioc->len);
33870 spin_unlock_irqrestore(&lp->lock, flags);
33871- if (copy_to_user(ioc->data, &statbuf, ioc->len))
33872+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
33873 return -EFAULT;
33874 break;
33875 }
33876diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
33877index 14d5b61..1398636 100644
33878--- a/drivers/net/ethernet/dec/tulip/eeprom.c
33879+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
33880@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
33881 {NULL}};
33882
33883
33884-static const char *block_name[] __devinitdata = {
33885+static const char *block_name[] __devinitconst = {
33886 "21140 non-MII",
33887 "21140 MII PHY",
33888 "21142 Serial PHY",
33889diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
33890index 52da7b2..4ddfe1c 100644
33891--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
33892+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
33893@@ -236,7 +236,7 @@ struct pci_id_info {
33894 int drv_flags; /* Driver use, intended as capability flags. */
33895 };
33896
33897-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33898+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33899 { /* Sometime a Level-One switch card. */
33900 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
33901 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
33902diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
33903index 28a3a9b..d96cb63 100644
33904--- a/drivers/net/ethernet/dlink/sundance.c
33905+++ b/drivers/net/ethernet/dlink/sundance.c
33906@@ -218,7 +218,7 @@ enum {
33907 struct pci_id_info {
33908 const char *name;
33909 };
33910-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33911+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33912 {"D-Link DFE-550TX FAST Ethernet Adapter"},
33913 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
33914 {"D-Link DFE-580TX 4 port Server Adapter"},
33915diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
33916index e703d64..d62ecf9 100644
33917--- a/drivers/net/ethernet/emulex/benet/be_main.c
33918+++ b/drivers/net/ethernet/emulex/benet/be_main.c
33919@@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
33920
33921 if (wrapped)
33922 newacc += 65536;
33923- ACCESS_ONCE(*acc) = newacc;
33924+ ACCESS_ONCE_RW(*acc) = newacc;
33925 }
33926
33927 void be_parse_stats(struct be_adapter *adapter)
33928diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
33929index 47f85c3..82ab6c4 100644
33930--- a/drivers/net/ethernet/faraday/ftgmac100.c
33931+++ b/drivers/net/ethernet/faraday/ftgmac100.c
33932@@ -31,6 +31,8 @@
33933 #include <linux/netdevice.h>
33934 #include <linux/phy.h>
33935 #include <linux/platform_device.h>
33936+#include <linux/interrupt.h>
33937+#include <linux/irqreturn.h>
33938 #include <net/ip.h>
33939
33940 #include "ftgmac100.h"
33941diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
33942index bb336a0..4b472da 100644
33943--- a/drivers/net/ethernet/faraday/ftmac100.c
33944+++ b/drivers/net/ethernet/faraday/ftmac100.c
33945@@ -31,6 +31,8 @@
33946 #include <linux/module.h>
33947 #include <linux/netdevice.h>
33948 #include <linux/platform_device.h>
33949+#include <linux/interrupt.h>
33950+#include <linux/irqreturn.h>
33951
33952 #include "ftmac100.h"
33953
33954diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
33955index c82d444..0007fb4 100644
33956--- a/drivers/net/ethernet/fealnx.c
33957+++ b/drivers/net/ethernet/fealnx.c
33958@@ -150,7 +150,7 @@ struct chip_info {
33959 int flags;
33960 };
33961
33962-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33963+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33964 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33965 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33966 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33967diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33968index e1159e5..e18684d 100644
33969--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33970+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33971@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
33972 {
33973 struct e1000_hw *hw = &adapter->hw;
33974 struct e1000_mac_info *mac = &hw->mac;
33975- struct e1000_mac_operations *func = &mac->ops;
33976+ e1000_mac_operations_no_const *func = &mac->ops;
33977
33978 /* Set media type */
33979 switch (adapter->pdev->device) {
33980diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
33981index a3e65fd..f451444 100644
33982--- a/drivers/net/ethernet/intel/e1000e/82571.c
33983+++ b/drivers/net/ethernet/intel/e1000e/82571.c
33984@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
33985 {
33986 struct e1000_hw *hw = &adapter->hw;
33987 struct e1000_mac_info *mac = &hw->mac;
33988- struct e1000_mac_operations *func = &mac->ops;
33989+ e1000_mac_operations_no_const *func = &mac->ops;
33990 u32 swsm = 0;
33991 u32 swsm2 = 0;
33992 bool force_clear_smbi = false;
33993diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
33994index 2967039..ca8c40c 100644
33995--- a/drivers/net/ethernet/intel/e1000e/hw.h
33996+++ b/drivers/net/ethernet/intel/e1000e/hw.h
33997@@ -778,6 +778,7 @@ struct e1000_mac_operations {
33998 void (*write_vfta)(struct e1000_hw *, u32, u32);
33999 s32 (*read_mac_addr)(struct e1000_hw *);
34000 };
34001+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34002
34003 /*
34004 * When to use various PHY register access functions:
34005@@ -818,6 +819,7 @@ struct e1000_phy_operations {
34006 void (*power_up)(struct e1000_hw *);
34007 void (*power_down)(struct e1000_hw *);
34008 };
34009+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34010
34011 /* Function pointers for the NVM. */
34012 struct e1000_nvm_operations {
34013@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34014 s32 (*validate)(struct e1000_hw *);
34015 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34016 };
34017+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34018
34019 struct e1000_mac_info {
34020- struct e1000_mac_operations ops;
34021+ e1000_mac_operations_no_const ops;
34022 u8 addr[ETH_ALEN];
34023 u8 perm_addr[ETH_ALEN];
34024
34025@@ -872,7 +875,7 @@ struct e1000_mac_info {
34026 };
34027
34028 struct e1000_phy_info {
34029- struct e1000_phy_operations ops;
34030+ e1000_phy_operations_no_const ops;
34031
34032 enum e1000_phy_type type;
34033
34034@@ -906,7 +909,7 @@ struct e1000_phy_info {
34035 };
34036
34037 struct e1000_nvm_info {
34038- struct e1000_nvm_operations ops;
34039+ e1000_nvm_operations_no_const ops;
34040
34041 enum e1000_nvm_type type;
34042 enum e1000_nvm_override override;
34043diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34044index f67cbd3..cef9e3d 100644
34045--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34046+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34047@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34048 s32 (*read_mac_addr)(struct e1000_hw *);
34049 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34050 };
34051+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34052
34053 struct e1000_phy_operations {
34054 s32 (*acquire)(struct e1000_hw *);
34055@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34056 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34057 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34058 };
34059+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34060
34061 struct e1000_nvm_operations {
34062 s32 (*acquire)(struct e1000_hw *);
34063@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34064 s32 (*update)(struct e1000_hw *);
34065 s32 (*validate)(struct e1000_hw *);
34066 };
34067+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34068
34069 struct e1000_info {
34070 s32 (*get_invariants)(struct e1000_hw *);
34071@@ -350,7 +353,7 @@ struct e1000_info {
34072 extern const struct e1000_info e1000_82575_info;
34073
34074 struct e1000_mac_info {
34075- struct e1000_mac_operations ops;
34076+ e1000_mac_operations_no_const ops;
34077
34078 u8 addr[6];
34079 u8 perm_addr[6];
34080@@ -388,7 +391,7 @@ struct e1000_mac_info {
34081 };
34082
34083 struct e1000_phy_info {
34084- struct e1000_phy_operations ops;
34085+ e1000_phy_operations_no_const ops;
34086
34087 enum e1000_phy_type type;
34088
34089@@ -423,7 +426,7 @@ struct e1000_phy_info {
34090 };
34091
34092 struct e1000_nvm_info {
34093- struct e1000_nvm_operations ops;
34094+ e1000_nvm_operations_no_const ops;
34095 enum e1000_nvm_type type;
34096 enum e1000_nvm_override override;
34097
34098@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34099 s32 (*check_for_ack)(struct e1000_hw *, u16);
34100 s32 (*check_for_rst)(struct e1000_hw *, u16);
34101 };
34102+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34103
34104 struct e1000_mbx_stats {
34105 u32 msgs_tx;
34106@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34107 };
34108
34109 struct e1000_mbx_info {
34110- struct e1000_mbx_operations ops;
34111+ e1000_mbx_operations_no_const ops;
34112 struct e1000_mbx_stats stats;
34113 u32 timeout;
34114 u32 usec_delay;
34115diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34116index 57db3c6..aa825fc 100644
34117--- a/drivers/net/ethernet/intel/igbvf/vf.h
34118+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34119@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34120 s32 (*read_mac_addr)(struct e1000_hw *);
34121 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34122 };
34123+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34124
34125 struct e1000_mac_info {
34126- struct e1000_mac_operations ops;
34127+ e1000_mac_operations_no_const ops;
34128 u8 addr[6];
34129 u8 perm_addr[6];
34130
34131@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34132 s32 (*check_for_ack)(struct e1000_hw *);
34133 s32 (*check_for_rst)(struct e1000_hw *);
34134 };
34135+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34136
34137 struct e1000_mbx_stats {
34138 u32 msgs_tx;
34139@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34140 };
34141
34142 struct e1000_mbx_info {
34143- struct e1000_mbx_operations ops;
34144+ e1000_mbx_operations_no_const ops;
34145 struct e1000_mbx_stats stats;
34146 u32 timeout;
34147 u32 usec_delay;
34148diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34149index 9b95bef..7e254ee 100644
34150--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34151+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34152@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34153 s32 (*update_checksum)(struct ixgbe_hw *);
34154 u16 (*calc_checksum)(struct ixgbe_hw *);
34155 };
34156+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34157
34158 struct ixgbe_mac_operations {
34159 s32 (*init_hw)(struct ixgbe_hw *);
34160@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34161 /* Manageability interface */
34162 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34163 };
34164+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34165
34166 struct ixgbe_phy_operations {
34167 s32 (*identify)(struct ixgbe_hw *);
34168@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
34169 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34170 s32 (*check_overtemp)(struct ixgbe_hw *);
34171 };
34172+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34173
34174 struct ixgbe_eeprom_info {
34175- struct ixgbe_eeprom_operations ops;
34176+ ixgbe_eeprom_operations_no_const ops;
34177 enum ixgbe_eeprom_type type;
34178 u32 semaphore_delay;
34179 u16 word_size;
34180@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
34181
34182 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34183 struct ixgbe_mac_info {
34184- struct ixgbe_mac_operations ops;
34185+ ixgbe_mac_operations_no_const ops;
34186 enum ixgbe_mac_type type;
34187 u8 addr[ETH_ALEN];
34188 u8 perm_addr[ETH_ALEN];
34189@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
34190 };
34191
34192 struct ixgbe_phy_info {
34193- struct ixgbe_phy_operations ops;
34194+ ixgbe_phy_operations_no_const ops;
34195 struct mdio_if_info mdio;
34196 enum ixgbe_phy_type type;
34197 u32 id;
34198@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
34199 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34200 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34201 };
34202+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34203
34204 struct ixgbe_mbx_stats {
34205 u32 msgs_tx;
34206@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
34207 };
34208
34209 struct ixgbe_mbx_info {
34210- struct ixgbe_mbx_operations ops;
34211+ ixgbe_mbx_operations_no_const ops;
34212 struct ixgbe_mbx_stats stats;
34213 u32 timeout;
34214 u32 usec_delay;
34215diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34216index 25c951d..cc7cf33 100644
34217--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34218+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34219@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34220 s32 (*clear_vfta)(struct ixgbe_hw *);
34221 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34222 };
34223+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34224
34225 enum ixgbe_mac_type {
34226 ixgbe_mac_unknown = 0,
34227@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34228 };
34229
34230 struct ixgbe_mac_info {
34231- struct ixgbe_mac_operations ops;
34232+ ixgbe_mac_operations_no_const ops;
34233 u8 addr[6];
34234 u8 perm_addr[6];
34235
34236@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34237 s32 (*check_for_ack)(struct ixgbe_hw *);
34238 s32 (*check_for_rst)(struct ixgbe_hw *);
34239 };
34240+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34241
34242 struct ixgbe_mbx_stats {
34243 u32 msgs_tx;
34244@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34245 };
34246
34247 struct ixgbe_mbx_info {
34248- struct ixgbe_mbx_operations ops;
34249+ ixgbe_mbx_operations_no_const ops;
34250 struct ixgbe_mbx_stats stats;
34251 u32 timeout;
34252 u32 udelay;
34253diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34254index d498f04..1b49bed 100644
34255--- a/drivers/net/ethernet/mellanox/mlx4/main.c
34256+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34257@@ -41,6 +41,7 @@
34258 #include <linux/slab.h>
34259 #include <linux/io-mapping.h>
34260 #include <linux/delay.h>
34261+#include <linux/sched.h>
34262
34263 #include <linux/mlx4/device.h>
34264 #include <linux/mlx4/doorbell.h>
34265diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34266index 5046a64..71ca936 100644
34267--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34268+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34269@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34270 void (*link_down)(struct __vxge_hw_device *devh);
34271 void (*crit_err)(struct __vxge_hw_device *devh,
34272 enum vxge_hw_event type, u64 ext_data);
34273-};
34274+} __no_const;
34275
34276 /*
34277 * struct __vxge_hw_blockpool_entry - Block private data structure
34278diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34279index 4a518a3..936b334 100644
34280--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34281+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34282@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34283 struct vxge_hw_mempool_dma *dma_object,
34284 u32 index,
34285 u32 is_last);
34286-};
34287+} __no_const;
34288
34289 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34290 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34291diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34292index bbacb37..d60887d 100644
34293--- a/drivers/net/ethernet/realtek/r8169.c
34294+++ b/drivers/net/ethernet/realtek/r8169.c
34295@@ -695,17 +695,17 @@ struct rtl8169_private {
34296 struct mdio_ops {
34297 void (*write)(void __iomem *, int, int);
34298 int (*read)(void __iomem *, int);
34299- } mdio_ops;
34300+ } __no_const mdio_ops;
34301
34302 struct pll_power_ops {
34303 void (*down)(struct rtl8169_private *);
34304 void (*up)(struct rtl8169_private *);
34305- } pll_power_ops;
34306+ } __no_const pll_power_ops;
34307
34308 struct jumbo_ops {
34309 void (*enable)(struct rtl8169_private *);
34310 void (*disable)(struct rtl8169_private *);
34311- } jumbo_ops;
34312+ } __no_const jumbo_ops;
34313
34314 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34315 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34316diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34317index 5b118cd..858b523 100644
34318--- a/drivers/net/ethernet/sis/sis190.c
34319+++ b/drivers/net/ethernet/sis/sis190.c
34320@@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34321 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34322 struct net_device *dev)
34323 {
34324- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34325+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34326 struct sis190_private *tp = netdev_priv(dev);
34327 struct pci_dev *isa_bridge;
34328 u8 reg, tmp8;
34329diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34330index c07cfe9..81cbf7e 100644
34331--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34332+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34333@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34334
34335 writel(value, ioaddr + MMC_CNTRL);
34336
34337- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34338- MMC_CNTRL, value);
34339+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34340+// MMC_CNTRL, value);
34341 }
34342
34343 /* To mask all all interrupts.*/
34344diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34345index dec5836..6d4db7d 100644
34346--- a/drivers/net/hyperv/hyperv_net.h
34347+++ b/drivers/net/hyperv/hyperv_net.h
34348@@ -97,7 +97,7 @@ struct rndis_device {
34349
34350 enum rndis_device_state state;
34351 bool link_state;
34352- atomic_t new_req_id;
34353+ atomic_unchecked_t new_req_id;
34354
34355 spinlock_t request_lock;
34356 struct list_head req_list;
34357diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34358index 133b7fb..d58c559 100644
34359--- a/drivers/net/hyperv/rndis_filter.c
34360+++ b/drivers/net/hyperv/rndis_filter.c
34361@@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34362 * template
34363 */
34364 set = &rndis_msg->msg.set_req;
34365- set->req_id = atomic_inc_return(&dev->new_req_id);
34366+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34367
34368 /* Add to the request list */
34369 spin_lock_irqsave(&dev->request_lock, flags);
34370@@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34371
34372 /* Setup the rndis set */
34373 halt = &request->request_msg.msg.halt_req;
34374- halt->req_id = atomic_inc_return(&dev->new_req_id);
34375+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34376
34377 /* Ignore return since this msg is optional. */
34378 rndis_filter_send_request(dev, request);
34379diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34380index 486b404..0d6677d 100644
34381--- a/drivers/net/ppp/ppp_generic.c
34382+++ b/drivers/net/ppp/ppp_generic.c
34383@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34384 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34385 struct ppp_stats stats;
34386 struct ppp_comp_stats cstats;
34387- char *vers;
34388
34389 switch (cmd) {
34390 case SIOCGPPPSTATS:
34391@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34392 break;
34393
34394 case SIOCGPPPVER:
34395- vers = PPP_VERSION;
34396- if (copy_to_user(addr, vers, strlen(vers) + 1))
34397+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34398 break;
34399 err = 0;
34400 break;
34401diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34402index 515f122..41dd273 100644
34403--- a/drivers/net/tokenring/abyss.c
34404+++ b/drivers/net/tokenring/abyss.c
34405@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34406
34407 static int __init abyss_init (void)
34408 {
34409- abyss_netdev_ops = tms380tr_netdev_ops;
34410+ pax_open_kernel();
34411+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34412
34413- abyss_netdev_ops.ndo_open = abyss_open;
34414- abyss_netdev_ops.ndo_stop = abyss_close;
34415+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34416+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34417+ pax_close_kernel();
34418
34419 return pci_register_driver(&abyss_driver);
34420 }
34421diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34422index 6153cfd..cf69c1c 100644
34423--- a/drivers/net/tokenring/madgemc.c
34424+++ b/drivers/net/tokenring/madgemc.c
34425@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34426
34427 static int __init madgemc_init (void)
34428 {
34429- madgemc_netdev_ops = tms380tr_netdev_ops;
34430- madgemc_netdev_ops.ndo_open = madgemc_open;
34431- madgemc_netdev_ops.ndo_stop = madgemc_close;
34432+ pax_open_kernel();
34433+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34434+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34435+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34436+ pax_close_kernel();
34437
34438 return mca_register_driver (&madgemc_driver);
34439 }
34440diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34441index 8d362e6..f91cc52 100644
34442--- a/drivers/net/tokenring/proteon.c
34443+++ b/drivers/net/tokenring/proteon.c
34444@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34445 struct platform_device *pdev;
34446 int i, num = 0, err = 0;
34447
34448- proteon_netdev_ops = tms380tr_netdev_ops;
34449- proteon_netdev_ops.ndo_open = proteon_open;
34450- proteon_netdev_ops.ndo_stop = tms380tr_close;
34451+ pax_open_kernel();
34452+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34453+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34454+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34455+ pax_close_kernel();
34456
34457 err = platform_driver_register(&proteon_driver);
34458 if (err)
34459diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34460index 46db5c5..37c1536 100644
34461--- a/drivers/net/tokenring/skisa.c
34462+++ b/drivers/net/tokenring/skisa.c
34463@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34464 struct platform_device *pdev;
34465 int i, num = 0, err = 0;
34466
34467- sk_isa_netdev_ops = tms380tr_netdev_ops;
34468- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34469- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34470+ pax_open_kernel();
34471+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34472+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34473+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34474+ pax_close_kernel();
34475
34476 err = platform_driver_register(&sk_isa_driver);
34477 if (err)
34478diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34479index e1324b4..e1b0041 100644
34480--- a/drivers/net/usb/hso.c
34481+++ b/drivers/net/usb/hso.c
34482@@ -71,7 +71,7 @@
34483 #include <asm/byteorder.h>
34484 #include <linux/serial_core.h>
34485 #include <linux/serial.h>
34486-
34487+#include <asm/local.h>
34488
34489 #define MOD_AUTHOR "Option Wireless"
34490 #define MOD_DESCRIPTION "USB High Speed Option driver"
34491@@ -257,7 +257,7 @@ struct hso_serial {
34492
34493 /* from usb_serial_port */
34494 struct tty_struct *tty;
34495- int open_count;
34496+ local_t open_count;
34497 spinlock_t serial_lock;
34498
34499 int (*write_data) (struct hso_serial *serial);
34500@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34501 struct urb *urb;
34502
34503 urb = serial->rx_urb[0];
34504- if (serial->open_count > 0) {
34505+ if (local_read(&serial->open_count) > 0) {
34506 count = put_rxbuf_data(urb, serial);
34507 if (count == -1)
34508 return;
34509@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34510 DUMP1(urb->transfer_buffer, urb->actual_length);
34511
34512 /* Anyone listening? */
34513- if (serial->open_count == 0)
34514+ if (local_read(&serial->open_count) == 0)
34515 return;
34516
34517 if (status == 0) {
34518@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34519 spin_unlock_irq(&serial->serial_lock);
34520
34521 /* check for port already opened, if not set the termios */
34522- serial->open_count++;
34523- if (serial->open_count == 1) {
34524+ if (local_inc_return(&serial->open_count) == 1) {
34525 serial->rx_state = RX_IDLE;
34526 /* Force default termio settings */
34527 _hso_serial_set_termios(tty, NULL);
34528@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34529 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34530 if (result) {
34531 hso_stop_serial_device(serial->parent);
34532- serial->open_count--;
34533+ local_dec(&serial->open_count);
34534 kref_put(&serial->parent->ref, hso_serial_ref_free);
34535 }
34536 } else {
34537@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34538
34539 /* reset the rts and dtr */
34540 /* do the actual close */
34541- serial->open_count--;
34542+ local_dec(&serial->open_count);
34543
34544- if (serial->open_count <= 0) {
34545- serial->open_count = 0;
34546+ if (local_read(&serial->open_count) <= 0) {
34547+ local_set(&serial->open_count, 0);
34548 spin_lock_irq(&serial->serial_lock);
34549 if (serial->tty == tty) {
34550 serial->tty->driver_data = NULL;
34551@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34552
34553 /* the actual setup */
34554 spin_lock_irqsave(&serial->serial_lock, flags);
34555- if (serial->open_count)
34556+ if (local_read(&serial->open_count))
34557 _hso_serial_set_termios(tty, old);
34558 else
34559 tty->termios = old;
34560@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34561 D1("Pending read interrupt on port %d\n", i);
34562 spin_lock(&serial->serial_lock);
34563 if (serial->rx_state == RX_IDLE &&
34564- serial->open_count > 0) {
34565+ local_read(&serial->open_count) > 0) {
34566 /* Setup and send a ctrl req read on
34567 * port i */
34568 if (!serial->rx_urb_filled[0]) {
34569@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34570 /* Start all serial ports */
34571 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34572 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34573- if (dev2ser(serial_table[i])->open_count) {
34574+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
34575 result =
34576 hso_start_serial_device(serial_table[i], GFP_NOIO);
34577 hso_kick_transmit(dev2ser(serial_table[i]));
34578diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34579index efc0111..79c8f5b 100644
34580--- a/drivers/net/wireless/ath/ath.h
34581+++ b/drivers/net/wireless/ath/ath.h
34582@@ -119,6 +119,7 @@ struct ath_ops {
34583 void (*write_flush) (void *);
34584 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34585 };
34586+typedef struct ath_ops __no_const ath_ops_no_const;
34587
34588 struct ath_common;
34589 struct ath_bus_ops;
34590diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34591index 7b6417b..ab5db98 100644
34592--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34593+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34594@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34595 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
34596 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
34597
34598- ACCESS_ONCE(ads->ds_link) = i->link;
34599- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
34600+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
34601+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
34602
34603 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
34604 ctl6 = SM(i->keytype, AR_EncrType);
34605@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34606
34607 if ((i->is_first || i->is_last) &&
34608 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
34609- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
34610+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
34611 | set11nTries(i->rates, 1)
34612 | set11nTries(i->rates, 2)
34613 | set11nTries(i->rates, 3)
34614 | (i->dur_update ? AR_DurUpdateEna : 0)
34615 | SM(0, AR_BurstDur);
34616
34617- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
34618+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
34619 | set11nRate(i->rates, 1)
34620 | set11nRate(i->rates, 2)
34621 | set11nRate(i->rates, 3);
34622 } else {
34623- ACCESS_ONCE(ads->ds_ctl2) = 0;
34624- ACCESS_ONCE(ads->ds_ctl3) = 0;
34625+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
34626+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
34627 }
34628
34629 if (!i->is_first) {
34630- ACCESS_ONCE(ads->ds_ctl0) = 0;
34631- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34632- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34633+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
34634+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34635+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34636 return;
34637 }
34638
34639@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34640 break;
34641 }
34642
34643- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34644+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34645 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34646 | SM(i->txpower, AR_XmitPower)
34647 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34648@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34649 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
34650 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
34651
34652- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34653- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34654+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34655+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34656
34657 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
34658 return;
34659
34660- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34661+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34662 | set11nPktDurRTSCTS(i->rates, 1);
34663
34664- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34665+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34666 | set11nPktDurRTSCTS(i->rates, 3);
34667
34668- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34669+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34670 | set11nRateFlags(i->rates, 1)
34671 | set11nRateFlags(i->rates, 2)
34672 | set11nRateFlags(i->rates, 3)
34673diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34674index 09b8c9d..905339e 100644
34675--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34676+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34677@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34678 (i->qcu << AR_TxQcuNum_S) | 0x17;
34679
34680 checksum += val;
34681- ACCESS_ONCE(ads->info) = val;
34682+ ACCESS_ONCE_RW(ads->info) = val;
34683
34684 checksum += i->link;
34685- ACCESS_ONCE(ads->link) = i->link;
34686+ ACCESS_ONCE_RW(ads->link) = i->link;
34687
34688 checksum += i->buf_addr[0];
34689- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
34690+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
34691 checksum += i->buf_addr[1];
34692- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
34693+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
34694 checksum += i->buf_addr[2];
34695- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
34696+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
34697 checksum += i->buf_addr[3];
34698- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
34699+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
34700
34701 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
34702- ACCESS_ONCE(ads->ctl3) = val;
34703+ ACCESS_ONCE_RW(ads->ctl3) = val;
34704 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
34705- ACCESS_ONCE(ads->ctl5) = val;
34706+ ACCESS_ONCE_RW(ads->ctl5) = val;
34707 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
34708- ACCESS_ONCE(ads->ctl7) = val;
34709+ ACCESS_ONCE_RW(ads->ctl7) = val;
34710 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
34711- ACCESS_ONCE(ads->ctl9) = val;
34712+ ACCESS_ONCE_RW(ads->ctl9) = val;
34713
34714 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
34715- ACCESS_ONCE(ads->ctl10) = checksum;
34716+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
34717
34718 if (i->is_first || i->is_last) {
34719- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
34720+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
34721 | set11nTries(i->rates, 1)
34722 | set11nTries(i->rates, 2)
34723 | set11nTries(i->rates, 3)
34724 | (i->dur_update ? AR_DurUpdateEna : 0)
34725 | SM(0, AR_BurstDur);
34726
34727- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
34728+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
34729 | set11nRate(i->rates, 1)
34730 | set11nRate(i->rates, 2)
34731 | set11nRate(i->rates, 3);
34732 } else {
34733- ACCESS_ONCE(ads->ctl13) = 0;
34734- ACCESS_ONCE(ads->ctl14) = 0;
34735+ ACCESS_ONCE_RW(ads->ctl13) = 0;
34736+ ACCESS_ONCE_RW(ads->ctl14) = 0;
34737 }
34738
34739 ads->ctl20 = 0;
34740@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34741
34742 ctl17 = SM(i->keytype, AR_EncrType);
34743 if (!i->is_first) {
34744- ACCESS_ONCE(ads->ctl11) = 0;
34745- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34746- ACCESS_ONCE(ads->ctl15) = 0;
34747- ACCESS_ONCE(ads->ctl16) = 0;
34748- ACCESS_ONCE(ads->ctl17) = ctl17;
34749- ACCESS_ONCE(ads->ctl18) = 0;
34750- ACCESS_ONCE(ads->ctl19) = 0;
34751+ ACCESS_ONCE_RW(ads->ctl11) = 0;
34752+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34753+ ACCESS_ONCE_RW(ads->ctl15) = 0;
34754+ ACCESS_ONCE_RW(ads->ctl16) = 0;
34755+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34756+ ACCESS_ONCE_RW(ads->ctl18) = 0;
34757+ ACCESS_ONCE_RW(ads->ctl19) = 0;
34758 return;
34759 }
34760
34761- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34762+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34763 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34764 | SM(i->txpower, AR_XmitPower)
34765 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34766@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34767 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
34768 ctl12 |= SM(val, AR_PAPRDChainMask);
34769
34770- ACCESS_ONCE(ads->ctl12) = ctl12;
34771- ACCESS_ONCE(ads->ctl17) = ctl17;
34772+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
34773+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34774
34775- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34776+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34777 | set11nPktDurRTSCTS(i->rates, 1);
34778
34779- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34780+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34781 | set11nPktDurRTSCTS(i->rates, 3);
34782
34783- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
34784+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
34785 | set11nRateFlags(i->rates, 1)
34786 | set11nRateFlags(i->rates, 2)
34787 | set11nRateFlags(i->rates, 3)
34788 | SM(i->rtscts_rate, AR_RTSCTSRate);
34789
34790- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
34791+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
34792 }
34793
34794 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
34795diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34796index c8261d4..8d88929 100644
34797--- a/drivers/net/wireless/ath/ath9k/hw.h
34798+++ b/drivers/net/wireless/ath/ath9k/hw.h
34799@@ -773,7 +773,7 @@ struct ath_hw_private_ops {
34800
34801 /* ANI */
34802 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34803-};
34804+} __no_const;
34805
34806 /**
34807 * struct ath_hw_ops - callbacks used by hardware code and driver code
34808@@ -803,7 +803,7 @@ struct ath_hw_ops {
34809 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34810 struct ath_hw_antcomb_conf *antconf);
34811
34812-};
34813+} __no_const;
34814
34815 struct ath_nf_limits {
34816 s16 max;
34817@@ -823,7 +823,7 @@ enum ath_cal_list {
34818 #define AH_FASTCC 0x4
34819
34820 struct ath_hw {
34821- struct ath_ops reg_ops;
34822+ ath_ops_no_const reg_ops;
34823
34824 struct ieee80211_hw *hw;
34825 struct ath_common common;
34826diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34827index af00e2c..ab04d34 100644
34828--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34829+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34830@@ -545,7 +545,7 @@ struct phy_func_ptr {
34831 void (*carrsuppr)(struct brcms_phy *);
34832 s32 (*rxsigpwr)(struct brcms_phy *, s32);
34833 void (*detach)(struct brcms_phy *);
34834-};
34835+} __no_const;
34836
34837 struct brcms_phy {
34838 struct brcms_phy_pub pubpi_ro;
34839diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
34840index a7dfba8..e28eacd 100644
34841--- a/drivers/net/wireless/iwlegacy/3945-mac.c
34842+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
34843@@ -3647,7 +3647,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
34844 */
34845 if (il3945_mod_params.disable_hw_scan) {
34846 D_INFO("Disabling hw_scan\n");
34847- il3945_hw_ops.hw_scan = NULL;
34848+ pax_open_kernel();
34849+ *(void **)&il3945_hw_ops.hw_scan = NULL;
34850+ pax_close_kernel();
34851 }
34852
34853 D_INFO("*** LOAD DRIVER ***\n");
34854diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34855index f8fc239..8cade22 100644
34856--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34857+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34858@@ -86,8 +86,8 @@ do { \
34859 } while (0)
34860
34861 #else
34862-#define IWL_DEBUG(m, level, fmt, args...)
34863-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
34864+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
34865+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
34866 #define iwl_print_hex_dump(m, level, p, len)
34867 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
34868 do { \
34869diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34870index 4b9e730..7603659 100644
34871--- a/drivers/net/wireless/mac80211_hwsim.c
34872+++ b/drivers/net/wireless/mac80211_hwsim.c
34873@@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
34874 return -EINVAL;
34875
34876 if (fake_hw_scan) {
34877- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34878- mac80211_hwsim_ops.sw_scan_start = NULL;
34879- mac80211_hwsim_ops.sw_scan_complete = NULL;
34880+ pax_open_kernel();
34881+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34882+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34883+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34884+ pax_close_kernel();
34885 }
34886
34887 spin_lock_init(&hwsim_radio_lock);
34888diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34889index 3186aa4..b35b09f 100644
34890--- a/drivers/net/wireless/mwifiex/main.h
34891+++ b/drivers/net/wireless/mwifiex/main.h
34892@@ -536,7 +536,7 @@ struct mwifiex_if_ops {
34893 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34894 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
34895 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
34896-};
34897+} __no_const;
34898
34899 struct mwifiex_adapter {
34900 u8 iface_type;
34901diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34902index a330c69..a81540f 100644
34903--- a/drivers/net/wireless/rndis_wlan.c
34904+++ b/drivers/net/wireless/rndis_wlan.c
34905@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34906
34907 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34908
34909- if (rts_threshold < 0 || rts_threshold > 2347)
34910+ if (rts_threshold > 2347)
34911 rts_threshold = 2347;
34912
34913 tmp = cpu_to_le32(rts_threshold);
34914diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34915index a77f1bb..c608b2b 100644
34916--- a/drivers/net/wireless/wl1251/wl1251.h
34917+++ b/drivers/net/wireless/wl1251/wl1251.h
34918@@ -266,7 +266,7 @@ struct wl1251_if_operations {
34919 void (*reset)(struct wl1251 *wl);
34920 void (*enable_irq)(struct wl1251 *wl);
34921 void (*disable_irq)(struct wl1251 *wl);
34922-};
34923+} __no_const;
34924
34925 struct wl1251 {
34926 struct ieee80211_hw *hw;
34927diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34928index f34b5b2..b5abb9f 100644
34929--- a/drivers/oprofile/buffer_sync.c
34930+++ b/drivers/oprofile/buffer_sync.c
34931@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34932 if (cookie == NO_COOKIE)
34933 offset = pc;
34934 if (cookie == INVALID_COOKIE) {
34935- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34936+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34937 offset = pc;
34938 }
34939 if (cookie != last_cookie) {
34940@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34941 /* add userspace sample */
34942
34943 if (!mm) {
34944- atomic_inc(&oprofile_stats.sample_lost_no_mm);
34945+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34946 return 0;
34947 }
34948
34949 cookie = lookup_dcookie(mm, s->eip, &offset);
34950
34951 if (cookie == INVALID_COOKIE) {
34952- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34953+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34954 return 0;
34955 }
34956
34957@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
34958 /* ignore backtraces if failed to add a sample */
34959 if (state == sb_bt_start) {
34960 state = sb_bt_ignore;
34961- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
34962+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
34963 }
34964 }
34965 release_mm(mm);
34966diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
34967index c0cc4e7..44d4e54 100644
34968--- a/drivers/oprofile/event_buffer.c
34969+++ b/drivers/oprofile/event_buffer.c
34970@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
34971 }
34972
34973 if (buffer_pos == buffer_size) {
34974- atomic_inc(&oprofile_stats.event_lost_overflow);
34975+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
34976 return;
34977 }
34978
34979diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
34980index ed2c3ec..deda85a 100644
34981--- a/drivers/oprofile/oprof.c
34982+++ b/drivers/oprofile/oprof.c
34983@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
34984 if (oprofile_ops.switch_events())
34985 return;
34986
34987- atomic_inc(&oprofile_stats.multiplex_counter);
34988+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
34989 start_switch_worker();
34990 }
34991
34992diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
34993index 917d28e..d62d981 100644
34994--- a/drivers/oprofile/oprofile_stats.c
34995+++ b/drivers/oprofile/oprofile_stats.c
34996@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
34997 cpu_buf->sample_invalid_eip = 0;
34998 }
34999
35000- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35001- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35002- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35003- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35004- atomic_set(&oprofile_stats.multiplex_counter, 0);
35005+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35006+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35007+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35008+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35009+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35010 }
35011
35012
35013diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35014index 38b6fc0..b5cbfce 100644
35015--- a/drivers/oprofile/oprofile_stats.h
35016+++ b/drivers/oprofile/oprofile_stats.h
35017@@ -13,11 +13,11 @@
35018 #include <linux/atomic.h>
35019
35020 struct oprofile_stat_struct {
35021- atomic_t sample_lost_no_mm;
35022- atomic_t sample_lost_no_mapping;
35023- atomic_t bt_lost_no_mapping;
35024- atomic_t event_lost_overflow;
35025- atomic_t multiplex_counter;
35026+ atomic_unchecked_t sample_lost_no_mm;
35027+ atomic_unchecked_t sample_lost_no_mapping;
35028+ atomic_unchecked_t bt_lost_no_mapping;
35029+ atomic_unchecked_t event_lost_overflow;
35030+ atomic_unchecked_t multiplex_counter;
35031 };
35032
35033 extern struct oprofile_stat_struct oprofile_stats;
35034diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35035index 2f0aa0f..90fab02 100644
35036--- a/drivers/oprofile/oprofilefs.c
35037+++ b/drivers/oprofile/oprofilefs.c
35038@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35039
35040
35041 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35042- char const *name, atomic_t *val)
35043+ char const *name, atomic_unchecked_t *val)
35044 {
35045 return __oprofilefs_create_file(sb, root, name,
35046 &atomic_ro_fops, 0444, val);
35047diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35048index 3f56bc0..707d642 100644
35049--- a/drivers/parport/procfs.c
35050+++ b/drivers/parport/procfs.c
35051@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35052
35053 *ppos += len;
35054
35055- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35056+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35057 }
35058
35059 #ifdef CONFIG_PARPORT_1284
35060@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35061
35062 *ppos += len;
35063
35064- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35065+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35066 }
35067 #endif /* IEEE1284.3 support. */
35068
35069diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35070index 9fff878..ad0ad53 100644
35071--- a/drivers/pci/hotplug/cpci_hotplug.h
35072+++ b/drivers/pci/hotplug/cpci_hotplug.h
35073@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35074 int (*hardware_test) (struct slot* slot, u32 value);
35075 u8 (*get_power) (struct slot* slot);
35076 int (*set_power) (struct slot* slot, int value);
35077-};
35078+} __no_const;
35079
35080 struct cpci_hp_controller {
35081 unsigned int irq;
35082diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35083index 76ba8a1..20ca857 100644
35084--- a/drivers/pci/hotplug/cpqphp_nvram.c
35085+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35086@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35087
35088 void compaq_nvram_init (void __iomem *rom_start)
35089 {
35090+
35091+#ifndef CONFIG_PAX_KERNEXEC
35092 if (rom_start) {
35093 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35094 }
35095+#endif
35096+
35097 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35098
35099 /* initialize our int15 lock */
35100diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35101index 24f049e..051f66e 100644
35102--- a/drivers/pci/pcie/aspm.c
35103+++ b/drivers/pci/pcie/aspm.c
35104@@ -27,9 +27,9 @@
35105 #define MODULE_PARAM_PREFIX "pcie_aspm."
35106
35107 /* Note: those are not register definitions */
35108-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35109-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35110-#define ASPM_STATE_L1 (4) /* L1 state */
35111+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35112+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35113+#define ASPM_STATE_L1 (4U) /* L1 state */
35114 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35115 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35116
35117diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35118index 71eac9c..2de27ef 100644
35119--- a/drivers/pci/probe.c
35120+++ b/drivers/pci/probe.c
35121@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35122 u32 l, sz, mask;
35123 u16 orig_cmd;
35124
35125- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35126+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35127
35128 if (!dev->mmio_always_on) {
35129 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35130diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35131index 27911b5..5b6db88 100644
35132--- a/drivers/pci/proc.c
35133+++ b/drivers/pci/proc.c
35134@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35135 static int __init pci_proc_init(void)
35136 {
35137 struct pci_dev *dev = NULL;
35138+
35139+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35140+#ifdef CONFIG_GRKERNSEC_PROC_USER
35141+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35142+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35143+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35144+#endif
35145+#else
35146 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35147+#endif
35148 proc_create("devices", 0, proc_bus_pci_dir,
35149 &proc_bus_pci_dev_operations);
35150 proc_initialized = 1;
35151diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35152index ea0c607..58c4628 100644
35153--- a/drivers/platform/x86/thinkpad_acpi.c
35154+++ b/drivers/platform/x86/thinkpad_acpi.c
35155@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35156 return 0;
35157 }
35158
35159-void static hotkey_mask_warn_incomplete_mask(void)
35160+static void hotkey_mask_warn_incomplete_mask(void)
35161 {
35162 /* log only what the user can fix... */
35163 const u32 wantedmask = hotkey_driver_mask &
35164@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35165 }
35166 }
35167
35168-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35169- struct tp_nvram_state *newn,
35170- const u32 event_mask)
35171-{
35172-
35173 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35174 do { \
35175 if ((event_mask & (1 << __scancode)) && \
35176@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35177 tpacpi_hotkey_send_key(__scancode); \
35178 } while (0)
35179
35180- void issue_volchange(const unsigned int oldvol,
35181- const unsigned int newvol)
35182- {
35183- unsigned int i = oldvol;
35184+static void issue_volchange(const unsigned int oldvol,
35185+ const unsigned int newvol,
35186+ const u32 event_mask)
35187+{
35188+ unsigned int i = oldvol;
35189
35190- while (i > newvol) {
35191- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35192- i--;
35193- }
35194- while (i < newvol) {
35195- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35196- i++;
35197- }
35198+ while (i > newvol) {
35199+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35200+ i--;
35201 }
35202+ while (i < newvol) {
35203+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35204+ i++;
35205+ }
35206+}
35207
35208- void issue_brightnesschange(const unsigned int oldbrt,
35209- const unsigned int newbrt)
35210- {
35211- unsigned int i = oldbrt;
35212+static void issue_brightnesschange(const unsigned int oldbrt,
35213+ const unsigned int newbrt,
35214+ const u32 event_mask)
35215+{
35216+ unsigned int i = oldbrt;
35217
35218- while (i > newbrt) {
35219- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35220- i--;
35221- }
35222- while (i < newbrt) {
35223- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35224- i++;
35225- }
35226+ while (i > newbrt) {
35227+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35228+ i--;
35229+ }
35230+ while (i < newbrt) {
35231+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35232+ i++;
35233 }
35234+}
35235
35236+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35237+ struct tp_nvram_state *newn,
35238+ const u32 event_mask)
35239+{
35240 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35241 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35242 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35243@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35244 oldn->volume_level != newn->volume_level) {
35245 /* recently muted, or repeated mute keypress, or
35246 * multiple presses ending in mute */
35247- issue_volchange(oldn->volume_level, newn->volume_level);
35248+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35249 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35250 }
35251 } else {
35252@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35253 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35254 }
35255 if (oldn->volume_level != newn->volume_level) {
35256- issue_volchange(oldn->volume_level, newn->volume_level);
35257+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35258 } else if (oldn->volume_toggle != newn->volume_toggle) {
35259 /* repeated vol up/down keypress at end of scale ? */
35260 if (newn->volume_level == 0)
35261@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35262 /* handle brightness */
35263 if (oldn->brightness_level != newn->brightness_level) {
35264 issue_brightnesschange(oldn->brightness_level,
35265- newn->brightness_level);
35266+ newn->brightness_level,
35267+ event_mask);
35268 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35269 /* repeated key presses that didn't change state */
35270 if (newn->brightness_level == 0)
35271@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35272 && !tp_features.bright_unkfw)
35273 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35274 }
35275+}
35276
35277 #undef TPACPI_COMPARE_KEY
35278 #undef TPACPI_MAY_SEND_KEY
35279-}
35280
35281 /*
35282 * Polling driver
35283diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35284index b859d16..5cc6b1a 100644
35285--- a/drivers/pnp/pnpbios/bioscalls.c
35286+++ b/drivers/pnp/pnpbios/bioscalls.c
35287@@ -59,7 +59,7 @@ do { \
35288 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35289 } while(0)
35290
35291-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35292+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35293 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35294
35295 /*
35296@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35297
35298 cpu = get_cpu();
35299 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35300+
35301+ pax_open_kernel();
35302 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35303+ pax_close_kernel();
35304
35305 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35306 spin_lock_irqsave(&pnp_bios_lock, flags);
35307@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35308 :"memory");
35309 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35310
35311+ pax_open_kernel();
35312 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35313+ pax_close_kernel();
35314+
35315 put_cpu();
35316
35317 /* If we get here and this is set then the PnP BIOS faulted on us. */
35318@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35319 return status;
35320 }
35321
35322-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35323+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35324 {
35325 int i;
35326
35327@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35328 pnp_bios_callpoint.offset = header->fields.pm16offset;
35329 pnp_bios_callpoint.segment = PNP_CS16;
35330
35331+ pax_open_kernel();
35332+
35333 for_each_possible_cpu(i) {
35334 struct desc_struct *gdt = get_cpu_gdt_table(i);
35335 if (!gdt)
35336@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35337 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35338 (unsigned long)__va(header->fields.pm16dseg));
35339 }
35340+
35341+ pax_close_kernel();
35342 }
35343diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35344index b0ecacb..7c9da2e 100644
35345--- a/drivers/pnp/resource.c
35346+++ b/drivers/pnp/resource.c
35347@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35348 return 1;
35349
35350 /* check if the resource is valid */
35351- if (*irq < 0 || *irq > 15)
35352+ if (*irq > 15)
35353 return 0;
35354
35355 /* check if the resource is reserved */
35356@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35357 return 1;
35358
35359 /* check if the resource is valid */
35360- if (*dma < 0 || *dma == 4 || *dma > 7)
35361+ if (*dma == 4 || *dma > 7)
35362 return 0;
35363
35364 /* check if the resource is reserved */
35365diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35366index 1ed6ea0..77c0bd2 100644
35367--- a/drivers/power/bq27x00_battery.c
35368+++ b/drivers/power/bq27x00_battery.c
35369@@ -72,7 +72,7 @@
35370 struct bq27x00_device_info;
35371 struct bq27x00_access_methods {
35372 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35373-};
35374+} __no_const;
35375
35376 enum bq27x00_chip { BQ27000, BQ27500 };
35377
35378diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35379index a838e66..a9e1665 100644
35380--- a/drivers/regulator/max8660.c
35381+++ b/drivers/regulator/max8660.c
35382@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35383 max8660->shadow_regs[MAX8660_OVER1] = 5;
35384 } else {
35385 /* Otherwise devices can be toggled via software */
35386- max8660_dcdc_ops.enable = max8660_dcdc_enable;
35387- max8660_dcdc_ops.disable = max8660_dcdc_disable;
35388+ pax_open_kernel();
35389+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35390+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35391+ pax_close_kernel();
35392 }
35393
35394 /*
35395diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35396index e8cfc99..072aee2 100644
35397--- a/drivers/regulator/mc13892-regulator.c
35398+++ b/drivers/regulator/mc13892-regulator.c
35399@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35400 }
35401 mc13xxx_unlock(mc13892);
35402
35403- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35404+ pax_open_kernel();
35405+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35406 = mc13892_vcam_set_mode;
35407- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35408+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35409 = mc13892_vcam_get_mode;
35410+ pax_close_kernel();
35411
35412 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35413 ARRAY_SIZE(mc13892_regulators));
35414diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35415index cace6d3..f623fda 100644
35416--- a/drivers/rtc/rtc-dev.c
35417+++ b/drivers/rtc/rtc-dev.c
35418@@ -14,6 +14,7 @@
35419 #include <linux/module.h>
35420 #include <linux/rtc.h>
35421 #include <linux/sched.h>
35422+#include <linux/grsecurity.h>
35423 #include "rtc-core.h"
35424
35425 static dev_t rtc_devt;
35426@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35427 if (copy_from_user(&tm, uarg, sizeof(tm)))
35428 return -EFAULT;
35429
35430+ gr_log_timechange();
35431+
35432 return rtc_set_time(rtc, &tm);
35433
35434 case RTC_PIE_ON:
35435diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35436index ffb5878..e6d785c 100644
35437--- a/drivers/scsi/aacraid/aacraid.h
35438+++ b/drivers/scsi/aacraid/aacraid.h
35439@@ -492,7 +492,7 @@ struct adapter_ops
35440 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35441 /* Administrative operations */
35442 int (*adapter_comm)(struct aac_dev * dev, int comm);
35443-};
35444+} __no_const;
35445
35446 /*
35447 * Define which interrupt handler needs to be installed
35448diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35449index 705e13e..91c873c 100644
35450--- a/drivers/scsi/aacraid/linit.c
35451+++ b/drivers/scsi/aacraid/linit.c
35452@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35453 #elif defined(__devinitconst)
35454 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35455 #else
35456-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35457+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35458 #endif
35459 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35460 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35461diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35462index d5ff142..49c0ebb 100644
35463--- a/drivers/scsi/aic94xx/aic94xx_init.c
35464+++ b/drivers/scsi/aic94xx/aic94xx_init.c
35465@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35466 .lldd_control_phy = asd_control_phy,
35467 };
35468
35469-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35470+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35471 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35472 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35473 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35474diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35475index a796de9..1ef20e1 100644
35476--- a/drivers/scsi/bfa/bfa.h
35477+++ b/drivers/scsi/bfa/bfa.h
35478@@ -196,7 +196,7 @@ struct bfa_hwif_s {
35479 u32 *end);
35480 int cpe_vec_q0;
35481 int rme_vec_q0;
35482-};
35483+} __no_const;
35484 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35485
35486 struct bfa_faa_cbfn_s {
35487diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35488index f0f80e2..8ec946b 100644
35489--- a/drivers/scsi/bfa/bfa_fcpim.c
35490+++ b/drivers/scsi/bfa/bfa_fcpim.c
35491@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35492
35493 bfa_iotag_attach(fcp);
35494
35495- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
35496+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
35497 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
35498 (fcp->num_itns * sizeof(struct bfa_itn_s));
35499 memset(fcp->itn_arr, 0,
35500@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35501 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35502 {
35503 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35504- struct bfa_itn_s *itn;
35505+ bfa_itn_s_no_const *itn;
35506
35507 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35508 itn->isr = isr;
35509diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35510index 36f26da..38a34a8 100644
35511--- a/drivers/scsi/bfa/bfa_fcpim.h
35512+++ b/drivers/scsi/bfa/bfa_fcpim.h
35513@@ -37,6 +37,7 @@ struct bfa_iotag_s {
35514 struct bfa_itn_s {
35515 bfa_isr_func_t isr;
35516 };
35517+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35518
35519 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35520 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35521@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
35522 struct list_head iotag_tio_free_q; /* free IO resources */
35523 struct list_head iotag_unused_q; /* unused IO resources*/
35524 struct bfa_iotag_s *iotag_arr;
35525- struct bfa_itn_s *itn_arr;
35526+ bfa_itn_s_no_const *itn_arr;
35527 int num_ioim_reqs;
35528 int num_fwtio_reqs;
35529 int num_itns;
35530diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35531index 546d46b..642fa5b 100644
35532--- a/drivers/scsi/bfa/bfa_ioc.h
35533+++ b/drivers/scsi/bfa/bfa_ioc.h
35534@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35535 bfa_ioc_disable_cbfn_t disable_cbfn;
35536 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35537 bfa_ioc_reset_cbfn_t reset_cbfn;
35538-};
35539+} __no_const;
35540
35541 /*
35542 * IOC event notification mechanism.
35543@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35544 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35545 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35546 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35547-};
35548+} __no_const;
35549
35550 /*
35551 * Queue element to wait for room in request queue. FIFO order is
35552diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35553index 351dc0b..951dc32 100644
35554--- a/drivers/scsi/hosts.c
35555+++ b/drivers/scsi/hosts.c
35556@@ -42,7 +42,7 @@
35557 #include "scsi_logging.h"
35558
35559
35560-static atomic_t scsi_host_next_hn; /* host_no for next new host */
35561+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35562
35563
35564 static void scsi_host_cls_release(struct device *dev)
35565@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35566 * subtract one because we increment first then return, but we need to
35567 * know what the next host number was before increment
35568 */
35569- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35570+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35571 shost->dma_channel = 0xff;
35572
35573 /* These three are default values which can be overridden */
35574diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35575index b96962c..0c82ec2 100644
35576--- a/drivers/scsi/hpsa.c
35577+++ b/drivers/scsi/hpsa.c
35578@@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
35579 u32 a;
35580
35581 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35582- return h->access.command_completed(h);
35583+ return h->access->command_completed(h);
35584
35585 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35586 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35587@@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
35588 while (!list_empty(&h->reqQ)) {
35589 c = list_entry(h->reqQ.next, struct CommandList, list);
35590 /* can't do anything if fifo is full */
35591- if ((h->access.fifo_full(h))) {
35592+ if ((h->access->fifo_full(h))) {
35593 dev_warn(&h->pdev->dev, "fifo full\n");
35594 break;
35595 }
35596@@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
35597 h->Qdepth--;
35598
35599 /* Tell the controller execute command */
35600- h->access.submit_command(h, c);
35601+ h->access->submit_command(h, c);
35602
35603 /* Put job onto the completed Q */
35604 addQ(&h->cmpQ, c);
35605@@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
35606
35607 static inline unsigned long get_next_completion(struct ctlr_info *h)
35608 {
35609- return h->access.command_completed(h);
35610+ return h->access->command_completed(h);
35611 }
35612
35613 static inline bool interrupt_pending(struct ctlr_info *h)
35614 {
35615- return h->access.intr_pending(h);
35616+ return h->access->intr_pending(h);
35617 }
35618
35619 static inline long interrupt_not_for_us(struct ctlr_info *h)
35620 {
35621- return (h->access.intr_pending(h) == 0) ||
35622+ return (h->access->intr_pending(h) == 0) ||
35623 (h->interrupts_enabled == 0);
35624 }
35625
35626@@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35627 if (prod_index < 0)
35628 return -ENODEV;
35629 h->product_name = products[prod_index].product_name;
35630- h->access = *(products[prod_index].access);
35631+ h->access = products[prod_index].access;
35632
35633 if (hpsa_board_disabled(h->pdev)) {
35634 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35635@@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
35636
35637 assert_spin_locked(&lockup_detector_lock);
35638 remove_ctlr_from_lockup_detector_list(h);
35639- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35640+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35641 spin_lock_irqsave(&h->lock, flags);
35642 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
35643 spin_unlock_irqrestore(&h->lock, flags);
35644@@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
35645 }
35646
35647 /* make sure the board interrupts are off */
35648- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35649+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35650
35651 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35652 goto clean2;
35653@@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
35654 * fake ones to scoop up any residual completions.
35655 */
35656 spin_lock_irqsave(&h->lock, flags);
35657- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35658+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35659 spin_unlock_irqrestore(&h->lock, flags);
35660 free_irq(h->intr[h->intr_mode], h);
35661 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35662@@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
35663 dev_info(&h->pdev->dev, "Board READY.\n");
35664 dev_info(&h->pdev->dev,
35665 "Waiting for stale completions to drain.\n");
35666- h->access.set_intr_mask(h, HPSA_INTR_ON);
35667+ h->access->set_intr_mask(h, HPSA_INTR_ON);
35668 msleep(10000);
35669- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35670+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35671
35672 rc = controller_reset_failed(h->cfgtable);
35673 if (rc)
35674@@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
35675 }
35676
35677 /* Turn the interrupts on so we can service requests */
35678- h->access.set_intr_mask(h, HPSA_INTR_ON);
35679+ h->access->set_intr_mask(h, HPSA_INTR_ON);
35680
35681 hpsa_hba_inquiry(h);
35682 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35683@@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35684 * To write all data in the battery backed cache to disks
35685 */
35686 hpsa_flush_cache(h);
35687- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35688+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35689 free_irq(h->intr[h->intr_mode], h);
35690 #ifdef CONFIG_PCI_MSI
35691 if (h->msix_vector)
35692@@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35693 return;
35694 }
35695 /* Change the access methods to the performant access methods */
35696- h->access = SA5_performant_access;
35697+ h->access = &SA5_performant_access;
35698 h->transMethod = CFGTBL_Trans_Performant;
35699 }
35700
35701diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35702index 91edafb..a9b88ec 100644
35703--- a/drivers/scsi/hpsa.h
35704+++ b/drivers/scsi/hpsa.h
35705@@ -73,7 +73,7 @@ struct ctlr_info {
35706 unsigned int msix_vector;
35707 unsigned int msi_vector;
35708 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35709- struct access_method access;
35710+ struct access_method *access;
35711
35712 /* queue and queue Info */
35713 struct list_head reqQ;
35714diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35715index f2df059..a3a9930 100644
35716--- a/drivers/scsi/ips.h
35717+++ b/drivers/scsi/ips.h
35718@@ -1027,7 +1027,7 @@ typedef struct {
35719 int (*intr)(struct ips_ha *);
35720 void (*enableint)(struct ips_ha *);
35721 uint32_t (*statupd)(struct ips_ha *);
35722-} ips_hw_func_t;
35723+} __no_const ips_hw_func_t;
35724
35725 typedef struct ips_ha {
35726 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35727diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35728index 4d70d96..84d0573 100644
35729--- a/drivers/scsi/libfc/fc_exch.c
35730+++ b/drivers/scsi/libfc/fc_exch.c
35731@@ -105,12 +105,12 @@ struct fc_exch_mgr {
35732 * all together if not used XXX
35733 */
35734 struct {
35735- atomic_t no_free_exch;
35736- atomic_t no_free_exch_xid;
35737- atomic_t xid_not_found;
35738- atomic_t xid_busy;
35739- atomic_t seq_not_found;
35740- atomic_t non_bls_resp;
35741+ atomic_unchecked_t no_free_exch;
35742+ atomic_unchecked_t no_free_exch_xid;
35743+ atomic_unchecked_t xid_not_found;
35744+ atomic_unchecked_t xid_busy;
35745+ atomic_unchecked_t seq_not_found;
35746+ atomic_unchecked_t non_bls_resp;
35747 } stats;
35748 };
35749
35750@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35751 /* allocate memory for exchange */
35752 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35753 if (!ep) {
35754- atomic_inc(&mp->stats.no_free_exch);
35755+ atomic_inc_unchecked(&mp->stats.no_free_exch);
35756 goto out;
35757 }
35758 memset(ep, 0, sizeof(*ep));
35759@@ -780,7 +780,7 @@ out:
35760 return ep;
35761 err:
35762 spin_unlock_bh(&pool->lock);
35763- atomic_inc(&mp->stats.no_free_exch_xid);
35764+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35765 mempool_free(ep, mp->ep_pool);
35766 return NULL;
35767 }
35768@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35769 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35770 ep = fc_exch_find(mp, xid);
35771 if (!ep) {
35772- atomic_inc(&mp->stats.xid_not_found);
35773+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35774 reject = FC_RJT_OX_ID;
35775 goto out;
35776 }
35777@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35778 ep = fc_exch_find(mp, xid);
35779 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35780 if (ep) {
35781- atomic_inc(&mp->stats.xid_busy);
35782+ atomic_inc_unchecked(&mp->stats.xid_busy);
35783 reject = FC_RJT_RX_ID;
35784 goto rel;
35785 }
35786@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35787 }
35788 xid = ep->xid; /* get our XID */
35789 } else if (!ep) {
35790- atomic_inc(&mp->stats.xid_not_found);
35791+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35792 reject = FC_RJT_RX_ID; /* XID not found */
35793 goto out;
35794 }
35795@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35796 } else {
35797 sp = &ep->seq;
35798 if (sp->id != fh->fh_seq_id) {
35799- atomic_inc(&mp->stats.seq_not_found);
35800+ atomic_inc_unchecked(&mp->stats.seq_not_found);
35801 if (f_ctl & FC_FC_END_SEQ) {
35802 /*
35803 * Update sequence_id based on incoming last
35804@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35805
35806 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35807 if (!ep) {
35808- atomic_inc(&mp->stats.xid_not_found);
35809+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35810 goto out;
35811 }
35812 if (ep->esb_stat & ESB_ST_COMPLETE) {
35813- atomic_inc(&mp->stats.xid_not_found);
35814+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35815 goto rel;
35816 }
35817 if (ep->rxid == FC_XID_UNKNOWN)
35818 ep->rxid = ntohs(fh->fh_rx_id);
35819 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35820- atomic_inc(&mp->stats.xid_not_found);
35821+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35822 goto rel;
35823 }
35824 if (ep->did != ntoh24(fh->fh_s_id) &&
35825 ep->did != FC_FID_FLOGI) {
35826- atomic_inc(&mp->stats.xid_not_found);
35827+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35828 goto rel;
35829 }
35830 sof = fr_sof(fp);
35831@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35832 sp->ssb_stat |= SSB_ST_RESP;
35833 sp->id = fh->fh_seq_id;
35834 } else if (sp->id != fh->fh_seq_id) {
35835- atomic_inc(&mp->stats.seq_not_found);
35836+ atomic_inc_unchecked(&mp->stats.seq_not_found);
35837 goto rel;
35838 }
35839
35840@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35841 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35842
35843 if (!sp)
35844- atomic_inc(&mp->stats.xid_not_found);
35845+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35846 else
35847- atomic_inc(&mp->stats.non_bls_resp);
35848+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
35849
35850 fc_frame_free(fp);
35851 }
35852diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35853index db9238f..4378ed2 100644
35854--- a/drivers/scsi/libsas/sas_ata.c
35855+++ b/drivers/scsi/libsas/sas_ata.c
35856@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35857 .postreset = ata_std_postreset,
35858 .error_handler = ata_std_error_handler,
35859 .post_internal_cmd = sas_ata_post_internal,
35860- .qc_defer = ata_std_qc_defer,
35861+ .qc_defer = ata_std_qc_defer,
35862 .qc_prep = ata_noop_qc_prep,
35863 .qc_issue = sas_ata_qc_issue,
35864 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35865diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35866index 825f930..ce42672 100644
35867--- a/drivers/scsi/lpfc/lpfc.h
35868+++ b/drivers/scsi/lpfc/lpfc.h
35869@@ -413,7 +413,7 @@ struct lpfc_vport {
35870 struct dentry *debug_nodelist;
35871 struct dentry *vport_debugfs_root;
35872 struct lpfc_debugfs_trc *disc_trc;
35873- atomic_t disc_trc_cnt;
35874+ atomic_unchecked_t disc_trc_cnt;
35875 #endif
35876 uint8_t stat_data_enabled;
35877 uint8_t stat_data_blocked;
35878@@ -821,8 +821,8 @@ struct lpfc_hba {
35879 struct timer_list fabric_block_timer;
35880 unsigned long bit_flags;
35881 #define FABRIC_COMANDS_BLOCKED 0
35882- atomic_t num_rsrc_err;
35883- atomic_t num_cmd_success;
35884+ atomic_unchecked_t num_rsrc_err;
35885+ atomic_unchecked_t num_cmd_success;
35886 unsigned long last_rsrc_error_time;
35887 unsigned long last_ramp_down_time;
35888 unsigned long last_ramp_up_time;
35889@@ -852,7 +852,7 @@ struct lpfc_hba {
35890
35891 struct dentry *debug_slow_ring_trc;
35892 struct lpfc_debugfs_trc *slow_ring_trc;
35893- atomic_t slow_ring_trc_cnt;
35894+ atomic_unchecked_t slow_ring_trc_cnt;
35895 /* iDiag debugfs sub-directory */
35896 struct dentry *idiag_root;
35897 struct dentry *idiag_pci_cfg;
35898diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
35899index 3587a3f..d45b81b 100644
35900--- a/drivers/scsi/lpfc/lpfc_debugfs.c
35901+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
35902@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
35903
35904 #include <linux/debugfs.h>
35905
35906-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35907+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35908 static unsigned long lpfc_debugfs_start_time = 0L;
35909
35910 /* iDiag */
35911@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
35912 lpfc_debugfs_enable = 0;
35913
35914 len = 0;
35915- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
35916+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
35917 (lpfc_debugfs_max_disc_trc - 1);
35918 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
35919 dtp = vport->disc_trc + i;
35920@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
35921 lpfc_debugfs_enable = 0;
35922
35923 len = 0;
35924- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
35925+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35926 (lpfc_debugfs_max_slow_ring_trc - 1);
35927 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
35928 dtp = phba->slow_ring_trc + i;
35929@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
35930 !vport || !vport->disc_trc)
35931 return;
35932
35933- index = atomic_inc_return(&vport->disc_trc_cnt) &
35934+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
35935 (lpfc_debugfs_max_disc_trc - 1);
35936 dtp = vport->disc_trc + index;
35937 dtp->fmt = fmt;
35938 dtp->data1 = data1;
35939 dtp->data2 = data2;
35940 dtp->data3 = data3;
35941- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35942+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35943 dtp->jif = jiffies;
35944 #endif
35945 return;
35946@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
35947 !phba || !phba->slow_ring_trc)
35948 return;
35949
35950- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
35951+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
35952 (lpfc_debugfs_max_slow_ring_trc - 1);
35953 dtp = phba->slow_ring_trc + index;
35954 dtp->fmt = fmt;
35955 dtp->data1 = data1;
35956 dtp->data2 = data2;
35957 dtp->data3 = data3;
35958- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35959+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35960 dtp->jif = jiffies;
35961 #endif
35962 return;
35963@@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
35964 "slow_ring buffer\n");
35965 goto debug_failed;
35966 }
35967- atomic_set(&phba->slow_ring_trc_cnt, 0);
35968+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
35969 memset(phba->slow_ring_trc, 0,
35970 (sizeof(struct lpfc_debugfs_trc) *
35971 lpfc_debugfs_max_slow_ring_trc));
35972@@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
35973 "buffer\n");
35974 goto debug_failed;
35975 }
35976- atomic_set(&vport->disc_trc_cnt, 0);
35977+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
35978
35979 snprintf(name, sizeof(name), "discovery_trace");
35980 vport->debug_disc_trc =
35981diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
35982index dfea2da..8e17227 100644
35983--- a/drivers/scsi/lpfc/lpfc_init.c
35984+++ b/drivers/scsi/lpfc/lpfc_init.c
35985@@ -10145,8 +10145,10 @@ lpfc_init(void)
35986 printk(LPFC_COPYRIGHT "\n");
35987
35988 if (lpfc_enable_npiv) {
35989- lpfc_transport_functions.vport_create = lpfc_vport_create;
35990- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
35991+ pax_open_kernel();
35992+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
35993+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
35994+ pax_close_kernel();
35995 }
35996 lpfc_transport_template =
35997 fc_attach_transport(&lpfc_transport_functions);
35998diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
35999index c60f5d0..751535c 100644
36000--- a/drivers/scsi/lpfc/lpfc_scsi.c
36001+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36002@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36003 uint32_t evt_posted;
36004
36005 spin_lock_irqsave(&phba->hbalock, flags);
36006- atomic_inc(&phba->num_rsrc_err);
36007+ atomic_inc_unchecked(&phba->num_rsrc_err);
36008 phba->last_rsrc_error_time = jiffies;
36009
36010 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36011@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36012 unsigned long flags;
36013 struct lpfc_hba *phba = vport->phba;
36014 uint32_t evt_posted;
36015- atomic_inc(&phba->num_cmd_success);
36016+ atomic_inc_unchecked(&phba->num_cmd_success);
36017
36018 if (vport->cfg_lun_queue_depth <= queue_depth)
36019 return;
36020@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36021 unsigned long num_rsrc_err, num_cmd_success;
36022 int i;
36023
36024- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36025- num_cmd_success = atomic_read(&phba->num_cmd_success);
36026+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36027+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36028
36029 vports = lpfc_create_vport_work_array(phba);
36030 if (vports != NULL)
36031@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36032 }
36033 }
36034 lpfc_destroy_vport_work_array(phba, vports);
36035- atomic_set(&phba->num_rsrc_err, 0);
36036- atomic_set(&phba->num_cmd_success, 0);
36037+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36038+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36039 }
36040
36041 /**
36042@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36043 }
36044 }
36045 lpfc_destroy_vport_work_array(phba, vports);
36046- atomic_set(&phba->num_rsrc_err, 0);
36047- atomic_set(&phba->num_cmd_success, 0);
36048+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36049+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36050 }
36051
36052 /**
36053diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36054index ea8a0b4..812a124 100644
36055--- a/drivers/scsi/pmcraid.c
36056+++ b/drivers/scsi/pmcraid.c
36057@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36058 res->scsi_dev = scsi_dev;
36059 scsi_dev->hostdata = res;
36060 res->change_detected = 0;
36061- atomic_set(&res->read_failures, 0);
36062- atomic_set(&res->write_failures, 0);
36063+ atomic_set_unchecked(&res->read_failures, 0);
36064+ atomic_set_unchecked(&res->write_failures, 0);
36065 rc = 0;
36066 }
36067 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36068@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36069
36070 /* If this was a SCSI read/write command keep count of errors */
36071 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36072- atomic_inc(&res->read_failures);
36073+ atomic_inc_unchecked(&res->read_failures);
36074 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36075- atomic_inc(&res->write_failures);
36076+ atomic_inc_unchecked(&res->write_failures);
36077
36078 if (!RES_IS_GSCSI(res->cfg_entry) &&
36079 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36080@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36081 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36082 * hrrq_id assigned here in queuecommand
36083 */
36084- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36085+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36086 pinstance->num_hrrq;
36087 cmd->cmd_done = pmcraid_io_done;
36088
36089@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36090 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36091 * hrrq_id assigned here in queuecommand
36092 */
36093- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36094+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36095 pinstance->num_hrrq;
36096
36097 if (request_size) {
36098@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36099
36100 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36101 /* add resources only after host is added into system */
36102- if (!atomic_read(&pinstance->expose_resources))
36103+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36104 return;
36105
36106 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36107@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36108 init_waitqueue_head(&pinstance->reset_wait_q);
36109
36110 atomic_set(&pinstance->outstanding_cmds, 0);
36111- atomic_set(&pinstance->last_message_id, 0);
36112- atomic_set(&pinstance->expose_resources, 0);
36113+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36114+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36115
36116 INIT_LIST_HEAD(&pinstance->free_res_q);
36117 INIT_LIST_HEAD(&pinstance->used_res_q);
36118@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36119 /* Schedule worker thread to handle CCN and take care of adding and
36120 * removing devices to OS
36121 */
36122- atomic_set(&pinstance->expose_resources, 1);
36123+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36124 schedule_work(&pinstance->worker_q);
36125 return rc;
36126
36127diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36128index ca496c7..9c791d5 100644
36129--- a/drivers/scsi/pmcraid.h
36130+++ b/drivers/scsi/pmcraid.h
36131@@ -748,7 +748,7 @@ struct pmcraid_instance {
36132 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36133
36134 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36135- atomic_t last_message_id;
36136+ atomic_unchecked_t last_message_id;
36137
36138 /* configuration table */
36139 struct pmcraid_config_table *cfg_table;
36140@@ -777,7 +777,7 @@ struct pmcraid_instance {
36141 atomic_t outstanding_cmds;
36142
36143 /* should add/delete resources to mid-layer now ?*/
36144- atomic_t expose_resources;
36145+ atomic_unchecked_t expose_resources;
36146
36147
36148
36149@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36150 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36151 };
36152 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36153- atomic_t read_failures; /* count of failed READ commands */
36154- atomic_t write_failures; /* count of failed WRITE commands */
36155+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36156+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36157
36158 /* To indicate add/delete/modify during CCN */
36159 u8 change_detected;
36160diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36161index af1003f..be55a75 100644
36162--- a/drivers/scsi/qla2xxx/qla_def.h
36163+++ b/drivers/scsi/qla2xxx/qla_def.h
36164@@ -2247,7 +2247,7 @@ struct isp_operations {
36165 int (*start_scsi) (srb_t *);
36166 int (*abort_isp) (struct scsi_qla_host *);
36167 int (*iospace_config)(struct qla_hw_data*);
36168-};
36169+} __no_const;
36170
36171 /* MSI-X Support *************************************************************/
36172
36173diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36174index bfe6854..ceac088 100644
36175--- a/drivers/scsi/qla4xxx/ql4_def.h
36176+++ b/drivers/scsi/qla4xxx/ql4_def.h
36177@@ -261,7 +261,7 @@ struct ddb_entry {
36178 * (4000 only) */
36179 atomic_t relogin_timer; /* Max Time to wait for
36180 * relogin to complete */
36181- atomic_t relogin_retry_count; /* Num of times relogin has been
36182+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36183 * retried */
36184 uint32_t default_time2wait; /* Default Min time between
36185 * relogins (+aens) */
36186diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36187index ce6d3b7..73fac54 100644
36188--- a/drivers/scsi/qla4xxx/ql4_os.c
36189+++ b/drivers/scsi/qla4xxx/ql4_os.c
36190@@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36191 */
36192 if (!iscsi_is_session_online(cls_sess)) {
36193 /* Reset retry relogin timer */
36194- atomic_inc(&ddb_entry->relogin_retry_count);
36195+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36196 DEBUG2(ql4_printk(KERN_INFO, ha,
36197 "%s: index[%d] relogin timed out-retrying"
36198 " relogin (%d), retry (%d)\n", __func__,
36199 ddb_entry->fw_ddb_index,
36200- atomic_read(&ddb_entry->relogin_retry_count),
36201+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36202 ddb_entry->default_time2wait + 4));
36203 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36204 atomic_set(&ddb_entry->retry_relogin_timer,
36205@@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36206
36207 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36208 atomic_set(&ddb_entry->relogin_timer, 0);
36209- atomic_set(&ddb_entry->relogin_retry_count, 0);
36210+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36211 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36212 ddb_entry->default_relogin_timeout =
36213 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36214diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36215index 2aeb2e9..46e3925 100644
36216--- a/drivers/scsi/scsi.c
36217+++ b/drivers/scsi/scsi.c
36218@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36219 unsigned long timeout;
36220 int rtn = 0;
36221
36222- atomic_inc(&cmd->device->iorequest_cnt);
36223+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36224
36225 /* check if the device is still usable */
36226 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36227diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36228index b2c95db..227d74e 100644
36229--- a/drivers/scsi/scsi_lib.c
36230+++ b/drivers/scsi/scsi_lib.c
36231@@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36232 shost = sdev->host;
36233 scsi_init_cmd_errh(cmd);
36234 cmd->result = DID_NO_CONNECT << 16;
36235- atomic_inc(&cmd->device->iorequest_cnt);
36236+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36237
36238 /*
36239 * SCSI request completion path will do scsi_device_unbusy(),
36240@@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
36241
36242 INIT_LIST_HEAD(&cmd->eh_entry);
36243
36244- atomic_inc(&cmd->device->iodone_cnt);
36245+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36246 if (cmd->result)
36247- atomic_inc(&cmd->device->ioerr_cnt);
36248+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36249
36250 disposition = scsi_decide_disposition(cmd);
36251 if (disposition != SUCCESS &&
36252diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36253index 04c2a27..9d8bd66 100644
36254--- a/drivers/scsi/scsi_sysfs.c
36255+++ b/drivers/scsi/scsi_sysfs.c
36256@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36257 char *buf) \
36258 { \
36259 struct scsi_device *sdev = to_scsi_device(dev); \
36260- unsigned long long count = atomic_read(&sdev->field); \
36261+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36262 return snprintf(buf, 20, "0x%llx\n", count); \
36263 } \
36264 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36265diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36266index 84a1fdf..693b0d6 100644
36267--- a/drivers/scsi/scsi_tgt_lib.c
36268+++ b/drivers/scsi/scsi_tgt_lib.c
36269@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36270 int err;
36271
36272 dprintk("%lx %u\n", uaddr, len);
36273- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36274+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36275 if (err) {
36276 /*
36277 * TODO: need to fixup sg_tablesize, max_segment_size,
36278diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36279index f59d4a0..1d89407 100644
36280--- a/drivers/scsi/scsi_transport_fc.c
36281+++ b/drivers/scsi/scsi_transport_fc.c
36282@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36283 * Netlink Infrastructure
36284 */
36285
36286-static atomic_t fc_event_seq;
36287+static atomic_unchecked_t fc_event_seq;
36288
36289 /**
36290 * fc_get_event_number - Obtain the next sequential FC event number
36291@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36292 u32
36293 fc_get_event_number(void)
36294 {
36295- return atomic_add_return(1, &fc_event_seq);
36296+ return atomic_add_return_unchecked(1, &fc_event_seq);
36297 }
36298 EXPORT_SYMBOL(fc_get_event_number);
36299
36300@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36301 {
36302 int error;
36303
36304- atomic_set(&fc_event_seq, 0);
36305+ atomic_set_unchecked(&fc_event_seq, 0);
36306
36307 error = transport_class_register(&fc_host_class);
36308 if (error)
36309@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36310 char *cp;
36311
36312 *val = simple_strtoul(buf, &cp, 0);
36313- if ((*cp && (*cp != '\n')) || (*val < 0))
36314+ if (*cp && (*cp != '\n'))
36315 return -EINVAL;
36316 /*
36317 * Check for overflow; dev_loss_tmo is u32
36318diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36319index cfd4914..ddd7129 100644
36320--- a/drivers/scsi/scsi_transport_iscsi.c
36321+++ b/drivers/scsi/scsi_transport_iscsi.c
36322@@ -79,7 +79,7 @@ struct iscsi_internal {
36323 struct transport_container session_cont;
36324 };
36325
36326-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36327+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36328 static struct workqueue_struct *iscsi_eh_timer_workq;
36329
36330 static DEFINE_IDA(iscsi_sess_ida);
36331@@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36332 int err;
36333
36334 ihost = shost->shost_data;
36335- session->sid = atomic_add_return(1, &iscsi_session_nr);
36336+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36337
36338 if (target_id == ISCSI_MAX_TARGET) {
36339 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36340@@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
36341 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36342 ISCSI_TRANSPORT_VERSION);
36343
36344- atomic_set(&iscsi_session_nr, 0);
36345+ atomic_set_unchecked(&iscsi_session_nr, 0);
36346
36347 err = class_register(&iscsi_transport_class);
36348 if (err)
36349diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36350index 21a045e..ec89e03 100644
36351--- a/drivers/scsi/scsi_transport_srp.c
36352+++ b/drivers/scsi/scsi_transport_srp.c
36353@@ -33,7 +33,7 @@
36354 #include "scsi_transport_srp_internal.h"
36355
36356 struct srp_host_attrs {
36357- atomic_t next_port_id;
36358+ atomic_unchecked_t next_port_id;
36359 };
36360 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36361
36362@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36363 struct Scsi_Host *shost = dev_to_shost(dev);
36364 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36365
36366- atomic_set(&srp_host->next_port_id, 0);
36367+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36368 return 0;
36369 }
36370
36371@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36372 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36373 rport->roles = ids->roles;
36374
36375- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36376+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36377 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36378
36379 transport_setup_device(&rport->dev);
36380diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36381index eacd46b..e3f4d62 100644
36382--- a/drivers/scsi/sg.c
36383+++ b/drivers/scsi/sg.c
36384@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36385 sdp->disk->disk_name,
36386 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36387 NULL,
36388- (char *)arg);
36389+ (char __user *)arg);
36390 case BLKTRACESTART:
36391 return blk_trace_startstop(sdp->device->request_queue, 1);
36392 case BLKTRACESTOP:
36393@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36394 const struct file_operations * fops;
36395 };
36396
36397-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36398+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36399 {"allow_dio", &adio_fops},
36400 {"debug", &debug_fops},
36401 {"def_reserved_size", &dressz_fops},
36402@@ -2332,7 +2332,7 @@ sg_proc_init(void)
36403 if (!sg_proc_sgp)
36404 return 1;
36405 for (k = 0; k < num_leaves; ++k) {
36406- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36407+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36408 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36409 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36410 }
36411diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36412index f64250e..1ee3049 100644
36413--- a/drivers/spi/spi-dw-pci.c
36414+++ b/drivers/spi/spi-dw-pci.c
36415@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
36416 #define spi_resume NULL
36417 #endif
36418
36419-static const struct pci_device_id pci_ids[] __devinitdata = {
36420+static const struct pci_device_id pci_ids[] __devinitconst = {
36421 /* Intel MID platform SPI controller 0 */
36422 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36423 {},
36424diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36425index b2ccdea..84cde75 100644
36426--- a/drivers/spi/spi.c
36427+++ b/drivers/spi/spi.c
36428@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
36429 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36430
36431 /* portable code must never pass more than 32 bytes */
36432-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36433+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36434
36435 static u8 *buf;
36436
36437diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36438index 400df8c..065d4f4 100644
36439--- a/drivers/staging/octeon/ethernet-rx.c
36440+++ b/drivers/staging/octeon/ethernet-rx.c
36441@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36442 /* Increment RX stats for virtual ports */
36443 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36444 #ifdef CONFIG_64BIT
36445- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36446- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36447+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36448+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36449 #else
36450- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36451- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36452+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36453+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36454 #endif
36455 }
36456 netif_receive_skb(skb);
36457@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36458 dev->name);
36459 */
36460 #ifdef CONFIG_64BIT
36461- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36462+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36463 #else
36464- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36465+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36466 #endif
36467 dev_kfree_skb_irq(skb);
36468 }
36469diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36470index 9112cd8..92f8d51 100644
36471--- a/drivers/staging/octeon/ethernet.c
36472+++ b/drivers/staging/octeon/ethernet.c
36473@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36474 * since the RX tasklet also increments it.
36475 */
36476 #ifdef CONFIG_64BIT
36477- atomic64_add(rx_status.dropped_packets,
36478- (atomic64_t *)&priv->stats.rx_dropped);
36479+ atomic64_add_unchecked(rx_status.dropped_packets,
36480+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36481 #else
36482- atomic_add(rx_status.dropped_packets,
36483- (atomic_t *)&priv->stats.rx_dropped);
36484+ atomic_add_unchecked(rx_status.dropped_packets,
36485+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
36486 #endif
36487 }
36488
36489diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36490index 86308a0..feaa925 100644
36491--- a/drivers/staging/rtl8712/rtl871x_io.h
36492+++ b/drivers/staging/rtl8712/rtl871x_io.h
36493@@ -108,7 +108,7 @@ struct _io_ops {
36494 u8 *pmem);
36495 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36496 u8 *pmem);
36497-};
36498+} __no_const;
36499
36500 struct io_req {
36501 struct list_head list;
36502diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36503index c7b5e8b..783d6cb 100644
36504--- a/drivers/staging/sbe-2t3e3/netdev.c
36505+++ b/drivers/staging/sbe-2t3e3/netdev.c
36506@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36507 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36508
36509 if (rlen)
36510- if (copy_to_user(data, &resp, rlen))
36511+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36512 return -EFAULT;
36513
36514 return 0;
36515diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
36516index 42cdafe..2769103 100644
36517--- a/drivers/staging/speakup/speakup_soft.c
36518+++ b/drivers/staging/speakup/speakup_soft.c
36519@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
36520 break;
36521 } else if (!initialized) {
36522 if (*init) {
36523- ch = *init;
36524 init++;
36525 } else {
36526 initialized = 1;
36527 }
36528+ ch = *init;
36529 } else {
36530 ch = synth_buffer_getc();
36531 }
36532diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
36533index b8f8c48..1fc5025 100644
36534--- a/drivers/staging/usbip/usbip_common.h
36535+++ b/drivers/staging/usbip/usbip_common.h
36536@@ -289,7 +289,7 @@ struct usbip_device {
36537 void (*shutdown)(struct usbip_device *);
36538 void (*reset)(struct usbip_device *);
36539 void (*unusable)(struct usbip_device *);
36540- } eh_ops;
36541+ } __no_const eh_ops;
36542 };
36543
36544 /* usbip_common.c */
36545diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
36546index 88b3298..3783eee 100644
36547--- a/drivers/staging/usbip/vhci.h
36548+++ b/drivers/staging/usbip/vhci.h
36549@@ -88,7 +88,7 @@ struct vhci_hcd {
36550 unsigned resuming:1;
36551 unsigned long re_timeout;
36552
36553- atomic_t seqnum;
36554+ atomic_unchecked_t seqnum;
36555
36556 /*
36557 * NOTE:
36558diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
36559index 2ee97e2..0420b86 100644
36560--- a/drivers/staging/usbip/vhci_hcd.c
36561+++ b/drivers/staging/usbip/vhci_hcd.c
36562@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36563 return;
36564 }
36565
36566- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36567+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36568 if (priv->seqnum == 0xffff)
36569 dev_info(&urb->dev->dev, "seqnum max\n");
36570
36571@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
36572 return -ENOMEM;
36573 }
36574
36575- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36576+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36577 if (unlink->seqnum == 0xffff)
36578 pr_info("seqnum max\n");
36579
36580@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
36581 vdev->rhport = rhport;
36582 }
36583
36584- atomic_set(&vhci->seqnum, 0);
36585+ atomic_set_unchecked(&vhci->seqnum, 0);
36586 spin_lock_init(&vhci->lock);
36587
36588 hcd->power_budget = 0; /* no limit */
36589diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
36590index 3f511b4..d3dbc1e 100644
36591--- a/drivers/staging/usbip/vhci_rx.c
36592+++ b/drivers/staging/usbip/vhci_rx.c
36593@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
36594 if (!urb) {
36595 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36596 pr_info("max seqnum %d\n",
36597- atomic_read(&the_controller->seqnum));
36598+ atomic_read_unchecked(&the_controller->seqnum));
36599 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36600 return;
36601 }
36602diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
36603index 7735027..30eed13 100644
36604--- a/drivers/staging/vt6655/hostap.c
36605+++ b/drivers/staging/vt6655/hostap.c
36606@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
36607 *
36608 */
36609
36610+static net_device_ops_no_const apdev_netdev_ops;
36611+
36612 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36613 {
36614 PSDevice apdev_priv;
36615 struct net_device *dev = pDevice->dev;
36616 int ret;
36617- const struct net_device_ops apdev_netdev_ops = {
36618- .ndo_start_xmit = pDevice->tx_80211,
36619- };
36620
36621 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36622
36623@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36624 *apdev_priv = *pDevice;
36625 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36626
36627+ /* only half broken now */
36628+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36629 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36630
36631 pDevice->apdev->type = ARPHRD_IEEE80211;
36632diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
36633index 51b5adf..098e320 100644
36634--- a/drivers/staging/vt6656/hostap.c
36635+++ b/drivers/staging/vt6656/hostap.c
36636@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
36637 *
36638 */
36639
36640+static net_device_ops_no_const apdev_netdev_ops;
36641+
36642 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36643 {
36644 PSDevice apdev_priv;
36645 struct net_device *dev = pDevice->dev;
36646 int ret;
36647- const struct net_device_ops apdev_netdev_ops = {
36648- .ndo_start_xmit = pDevice->tx_80211,
36649- };
36650
36651 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36652
36653@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36654 *apdev_priv = *pDevice;
36655 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36656
36657+ /* only half broken now */
36658+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36659 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36660
36661 pDevice->apdev->type = ARPHRD_IEEE80211;
36662diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
36663index 7843dfd..3db105f 100644
36664--- a/drivers/staging/wlan-ng/hfa384x_usb.c
36665+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
36666@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
36667
36668 struct usbctlx_completor {
36669 int (*complete) (struct usbctlx_completor *);
36670-};
36671+} __no_const;
36672
36673 static int
36674 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36675diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
36676index 1ca66ea..76f1343 100644
36677--- a/drivers/staging/zcache/tmem.c
36678+++ b/drivers/staging/zcache/tmem.c
36679@@ -39,7 +39,7 @@
36680 * A tmem host implementation must use this function to register callbacks
36681 * for memory allocation.
36682 */
36683-static struct tmem_hostops tmem_hostops;
36684+static tmem_hostops_no_const tmem_hostops;
36685
36686 static void tmem_objnode_tree_init(void);
36687
36688@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
36689 * A tmem host implementation must use this function to register
36690 * callbacks for a page-accessible memory (PAM) implementation
36691 */
36692-static struct tmem_pamops tmem_pamops;
36693+static tmem_pamops_no_const tmem_pamops;
36694
36695 void tmem_register_pamops(struct tmem_pamops *m)
36696 {
36697diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
36698index ed147c4..94fc3c6 100644
36699--- a/drivers/staging/zcache/tmem.h
36700+++ b/drivers/staging/zcache/tmem.h
36701@@ -180,6 +180,7 @@ struct tmem_pamops {
36702 void (*new_obj)(struct tmem_obj *);
36703 int (*replace_in_obj)(void *, struct tmem_obj *);
36704 };
36705+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36706 extern void tmem_register_pamops(struct tmem_pamops *m);
36707
36708 /* memory allocation methods provided by the host implementation */
36709@@ -189,6 +190,7 @@ struct tmem_hostops {
36710 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36711 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36712 };
36713+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36714 extern void tmem_register_hostops(struct tmem_hostops *m);
36715
36716 /* core tmem accessor functions */
36717diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
36718index 501b27c..39dc3d3 100644
36719--- a/drivers/target/iscsi/iscsi_target.c
36720+++ b/drivers/target/iscsi/iscsi_target.c
36721@@ -1363,7 +1363,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
36722 * outstanding_r2ts reaches zero, go ahead and send the delayed
36723 * TASK_ABORTED status.
36724 */
36725- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36726+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36727 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36728 if (--cmd->outstanding_r2ts < 1) {
36729 iscsit_stop_dataout_timer(cmd);
36730diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
36731index dcb0618..97e3d85 100644
36732--- a/drivers/target/target_core_tmr.c
36733+++ b/drivers/target/target_core_tmr.c
36734@@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
36735 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36736 cmd->t_task_list_num,
36737 atomic_read(&cmd->t_task_cdbs_left),
36738- atomic_read(&cmd->t_task_cdbs_sent),
36739+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36740 atomic_read(&cmd->t_transport_active),
36741 atomic_read(&cmd->t_transport_stop),
36742 atomic_read(&cmd->t_transport_sent));
36743@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
36744 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36745 " task: %p, t_fe_count: %d dev: %p\n", task,
36746 fe_count, dev);
36747- atomic_set(&cmd->t_transport_aborted, 1);
36748+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36749 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36750
36751 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36752@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
36753 }
36754 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36755 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36756- atomic_set(&cmd->t_transport_aborted, 1);
36757+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36758 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36759
36760 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36761diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
36762index cd5cd95..5249d30 100644
36763--- a/drivers/target/target_core_transport.c
36764+++ b/drivers/target/target_core_transport.c
36765@@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
36766 spin_lock_init(&dev->se_port_lock);
36767 spin_lock_init(&dev->se_tmr_lock);
36768 spin_lock_init(&dev->qf_cmd_lock);
36769- atomic_set(&dev->dev_ordered_id, 0);
36770+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
36771
36772 se_dev_set_default_attribs(dev, dev_limits);
36773
36774@@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
36775 * Used to determine when ORDERED commands should go from
36776 * Dormant to Active status.
36777 */
36778- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
36779+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
36780 smp_mb__after_atomic_inc();
36781 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
36782 cmd->se_ordered_id, cmd->sam_task_attr,
36783@@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
36784 " t_transport_active: %d t_transport_stop: %d"
36785 " t_transport_sent: %d\n", cmd->t_task_list_num,
36786 atomic_read(&cmd->t_task_cdbs_left),
36787- atomic_read(&cmd->t_task_cdbs_sent),
36788+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36789 atomic_read(&cmd->t_task_cdbs_ex_left),
36790 atomic_read(&cmd->t_transport_active),
36791 atomic_read(&cmd->t_transport_stop),
36792@@ -2121,9 +2121,9 @@ check_depth:
36793 cmd = task->task_se_cmd;
36794 spin_lock_irqsave(&cmd->t_state_lock, flags);
36795 task->task_flags |= (TF_ACTIVE | TF_SENT);
36796- atomic_inc(&cmd->t_task_cdbs_sent);
36797+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
36798
36799- if (atomic_read(&cmd->t_task_cdbs_sent) ==
36800+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
36801 cmd->t_task_list_num)
36802 atomic_set(&cmd->t_transport_sent, 1);
36803
36804@@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
36805 atomic_set(&cmd->transport_lun_stop, 0);
36806 }
36807 if (!atomic_read(&cmd->t_transport_active) ||
36808- atomic_read(&cmd->t_transport_aborted)) {
36809+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
36810 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36811 return false;
36812 }
36813@@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
36814 {
36815 int ret = 0;
36816
36817- if (atomic_read(&cmd->t_transport_aborted) != 0) {
36818+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
36819 if (!send_status ||
36820 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
36821 return 1;
36822@@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
36823 */
36824 if (cmd->data_direction == DMA_TO_DEVICE) {
36825 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
36826- atomic_inc(&cmd->t_transport_aborted);
36827+ atomic_inc_unchecked(&cmd->t_transport_aborted);
36828 smp_mb__after_atomic_inc();
36829 }
36830 }
36831diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
36832index b9040be..e3f5aab 100644
36833--- a/drivers/tty/hvc/hvcs.c
36834+++ b/drivers/tty/hvc/hvcs.c
36835@@ -83,6 +83,7 @@
36836 #include <asm/hvcserver.h>
36837 #include <asm/uaccess.h>
36838 #include <asm/vio.h>
36839+#include <asm/local.h>
36840
36841 /*
36842 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
36843@@ -270,7 +271,7 @@ struct hvcs_struct {
36844 unsigned int index;
36845
36846 struct tty_struct *tty;
36847- int open_count;
36848+ local_t open_count;
36849
36850 /*
36851 * Used to tell the driver kernel_thread what operations need to take
36852@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
36853
36854 spin_lock_irqsave(&hvcsd->lock, flags);
36855
36856- if (hvcsd->open_count > 0) {
36857+ if (local_read(&hvcsd->open_count) > 0) {
36858 spin_unlock_irqrestore(&hvcsd->lock, flags);
36859 printk(KERN_INFO "HVCS: vterm state unchanged. "
36860 "The hvcs device node is still in use.\n");
36861@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
36862 if ((retval = hvcs_partner_connect(hvcsd)))
36863 goto error_release;
36864
36865- hvcsd->open_count = 1;
36866+ local_set(&hvcsd->open_count, 1);
36867 hvcsd->tty = tty;
36868 tty->driver_data = hvcsd;
36869
36870@@ -1179,7 +1180,7 @@ fast_open:
36871
36872 spin_lock_irqsave(&hvcsd->lock, flags);
36873 kref_get(&hvcsd->kref);
36874- hvcsd->open_count++;
36875+ local_inc(&hvcsd->open_count);
36876 hvcsd->todo_mask |= HVCS_SCHED_READ;
36877 spin_unlock_irqrestore(&hvcsd->lock, flags);
36878
36879@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36880 hvcsd = tty->driver_data;
36881
36882 spin_lock_irqsave(&hvcsd->lock, flags);
36883- if (--hvcsd->open_count == 0) {
36884+ if (local_dec_and_test(&hvcsd->open_count)) {
36885
36886 vio_disable_interrupts(hvcsd->vdev);
36887
36888@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36889 free_irq(irq, hvcsd);
36890 kref_put(&hvcsd->kref, destroy_hvcs_struct);
36891 return;
36892- } else if (hvcsd->open_count < 0) {
36893+ } else if (local_read(&hvcsd->open_count) < 0) {
36894 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
36895 " is missmanaged.\n",
36896- hvcsd->vdev->unit_address, hvcsd->open_count);
36897+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
36898 }
36899
36900 spin_unlock_irqrestore(&hvcsd->lock, flags);
36901@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36902
36903 spin_lock_irqsave(&hvcsd->lock, flags);
36904 /* Preserve this so that we know how many kref refs to put */
36905- temp_open_count = hvcsd->open_count;
36906+ temp_open_count = local_read(&hvcsd->open_count);
36907
36908 /*
36909 * Don't kref put inside the spinlock because the destruction
36910@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36911 hvcsd->tty->driver_data = NULL;
36912 hvcsd->tty = NULL;
36913
36914- hvcsd->open_count = 0;
36915+ local_set(&hvcsd->open_count, 0);
36916
36917 /* This will drop any buffered data on the floor which is OK in a hangup
36918 * scenario. */
36919@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
36920 * the middle of a write operation? This is a crummy place to do this
36921 * but we want to keep it all in the spinlock.
36922 */
36923- if (hvcsd->open_count <= 0) {
36924+ if (local_read(&hvcsd->open_count) <= 0) {
36925 spin_unlock_irqrestore(&hvcsd->lock, flags);
36926 return -ENODEV;
36927 }
36928@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
36929 {
36930 struct hvcs_struct *hvcsd = tty->driver_data;
36931
36932- if (!hvcsd || hvcsd->open_count <= 0)
36933+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
36934 return 0;
36935
36936 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
36937diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
36938index ef92869..f4ebd88 100644
36939--- a/drivers/tty/ipwireless/tty.c
36940+++ b/drivers/tty/ipwireless/tty.c
36941@@ -29,6 +29,7 @@
36942 #include <linux/tty_driver.h>
36943 #include <linux/tty_flip.h>
36944 #include <linux/uaccess.h>
36945+#include <asm/local.h>
36946
36947 #include "tty.h"
36948 #include "network.h"
36949@@ -51,7 +52,7 @@ struct ipw_tty {
36950 int tty_type;
36951 struct ipw_network *network;
36952 struct tty_struct *linux_tty;
36953- int open_count;
36954+ local_t open_count;
36955 unsigned int control_lines;
36956 struct mutex ipw_tty_mutex;
36957 int tx_bytes_queued;
36958@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
36959 mutex_unlock(&tty->ipw_tty_mutex);
36960 return -ENODEV;
36961 }
36962- if (tty->open_count == 0)
36963+ if (local_read(&tty->open_count) == 0)
36964 tty->tx_bytes_queued = 0;
36965
36966- tty->open_count++;
36967+ local_inc(&tty->open_count);
36968
36969 tty->linux_tty = linux_tty;
36970 linux_tty->driver_data = tty;
36971@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
36972
36973 static void do_ipw_close(struct ipw_tty *tty)
36974 {
36975- tty->open_count--;
36976-
36977- if (tty->open_count == 0) {
36978+ if (local_dec_return(&tty->open_count) == 0) {
36979 struct tty_struct *linux_tty = tty->linux_tty;
36980
36981 if (linux_tty != NULL) {
36982@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
36983 return;
36984
36985 mutex_lock(&tty->ipw_tty_mutex);
36986- if (tty->open_count == 0) {
36987+ if (local_read(&tty->open_count) == 0) {
36988 mutex_unlock(&tty->ipw_tty_mutex);
36989 return;
36990 }
36991@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
36992 return;
36993 }
36994
36995- if (!tty->open_count) {
36996+ if (!local_read(&tty->open_count)) {
36997 mutex_unlock(&tty->ipw_tty_mutex);
36998 return;
36999 }
37000@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37001 return -ENODEV;
37002
37003 mutex_lock(&tty->ipw_tty_mutex);
37004- if (!tty->open_count) {
37005+ if (!local_read(&tty->open_count)) {
37006 mutex_unlock(&tty->ipw_tty_mutex);
37007 return -EINVAL;
37008 }
37009@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37010 if (!tty)
37011 return -ENODEV;
37012
37013- if (!tty->open_count)
37014+ if (!local_read(&tty->open_count))
37015 return -EINVAL;
37016
37017 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37018@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37019 if (!tty)
37020 return 0;
37021
37022- if (!tty->open_count)
37023+ if (!local_read(&tty->open_count))
37024 return 0;
37025
37026 return tty->tx_bytes_queued;
37027@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37028 if (!tty)
37029 return -ENODEV;
37030
37031- if (!tty->open_count)
37032+ if (!local_read(&tty->open_count))
37033 return -EINVAL;
37034
37035 return get_control_lines(tty);
37036@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37037 if (!tty)
37038 return -ENODEV;
37039
37040- if (!tty->open_count)
37041+ if (!local_read(&tty->open_count))
37042 return -EINVAL;
37043
37044 return set_control_lines(tty, set, clear);
37045@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37046 if (!tty)
37047 return -ENODEV;
37048
37049- if (!tty->open_count)
37050+ if (!local_read(&tty->open_count))
37051 return -EINVAL;
37052
37053 /* FIXME: Exactly how is the tty object locked here .. */
37054@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37055 against a parallel ioctl etc */
37056 mutex_lock(&ttyj->ipw_tty_mutex);
37057 }
37058- while (ttyj->open_count)
37059+ while (local_read(&ttyj->open_count))
37060 do_ipw_close(ttyj);
37061 ipwireless_disassociate_network_ttys(network,
37062 ttyj->channel_idx);
37063diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37064index fc7bbba..9527e93 100644
37065--- a/drivers/tty/n_gsm.c
37066+++ b/drivers/tty/n_gsm.c
37067@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37068 kref_init(&dlci->ref);
37069 mutex_init(&dlci->mutex);
37070 dlci->fifo = &dlci->_fifo;
37071- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37072+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37073 kfree(dlci);
37074 return NULL;
37075 }
37076diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37077index d2256d0..97476fa 100644
37078--- a/drivers/tty/n_tty.c
37079+++ b/drivers/tty/n_tty.c
37080@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37081 {
37082 *ops = tty_ldisc_N_TTY;
37083 ops->owner = NULL;
37084- ops->refcount = ops->flags = 0;
37085+ atomic_set(&ops->refcount, 0);
37086+ ops->flags = 0;
37087 }
37088 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37089diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37090index d8653ab..f8afd9d 100644
37091--- a/drivers/tty/pty.c
37092+++ b/drivers/tty/pty.c
37093@@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
37094 register_sysctl_table(pty_root_table);
37095
37096 /* Now create the /dev/ptmx special device */
37097+ pax_open_kernel();
37098 tty_default_fops(&ptmx_fops);
37099- ptmx_fops.open = ptmx_open;
37100+ *(void **)&ptmx_fops.open = ptmx_open;
37101+ pax_close_kernel();
37102
37103 cdev_init(&ptmx_cdev, &ptmx_fops);
37104 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37105diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37106index 2b42a01..32a2ed3 100644
37107--- a/drivers/tty/serial/kgdboc.c
37108+++ b/drivers/tty/serial/kgdboc.c
37109@@ -24,8 +24,9 @@
37110 #define MAX_CONFIG_LEN 40
37111
37112 static struct kgdb_io kgdboc_io_ops;
37113+static struct kgdb_io kgdboc_io_ops_console;
37114
37115-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37116+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37117 static int configured = -1;
37118
37119 static char config[MAX_CONFIG_LEN];
37120@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37121 kgdboc_unregister_kbd();
37122 if (configured == 1)
37123 kgdb_unregister_io_module(&kgdboc_io_ops);
37124+ else if (configured == 2)
37125+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
37126 }
37127
37128 static int configure_kgdboc(void)
37129@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37130 int err;
37131 char *cptr = config;
37132 struct console *cons;
37133+ int is_console = 0;
37134
37135 err = kgdboc_option_setup(config);
37136 if (err || !strlen(config) || isspace(config[0]))
37137 goto noconfig;
37138
37139 err = -ENODEV;
37140- kgdboc_io_ops.is_console = 0;
37141 kgdb_tty_driver = NULL;
37142
37143 kgdboc_use_kms = 0;
37144@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37145 int idx;
37146 if (cons->device && cons->device(cons, &idx) == p &&
37147 idx == tty_line) {
37148- kgdboc_io_ops.is_console = 1;
37149+ is_console = 1;
37150 break;
37151 }
37152 cons = cons->next;
37153@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37154 kgdb_tty_line = tty_line;
37155
37156 do_register:
37157- err = kgdb_register_io_module(&kgdboc_io_ops);
37158+ if (is_console) {
37159+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
37160+ configured = 2;
37161+ } else {
37162+ err = kgdb_register_io_module(&kgdboc_io_ops);
37163+ configured = 1;
37164+ }
37165 if (err)
37166 goto noconfig;
37167
37168- configured = 1;
37169-
37170 return 0;
37171
37172 noconfig:
37173@@ -213,7 +220,7 @@ noconfig:
37174 static int __init init_kgdboc(void)
37175 {
37176 /* Already configured? */
37177- if (configured == 1)
37178+ if (configured >= 1)
37179 return 0;
37180
37181 return configure_kgdboc();
37182@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37183 if (config[len - 1] == '\n')
37184 config[len - 1] = '\0';
37185
37186- if (configured == 1)
37187+ if (configured >= 1)
37188 cleanup_kgdboc();
37189
37190 /* Go and configure with the new params. */
37191@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37192 .post_exception = kgdboc_post_exp_handler,
37193 };
37194
37195+static struct kgdb_io kgdboc_io_ops_console = {
37196+ .name = "kgdboc",
37197+ .read_char = kgdboc_get_char,
37198+ .write_char = kgdboc_put_char,
37199+ .pre_exception = kgdboc_pre_exp_handler,
37200+ .post_exception = kgdboc_post_exp_handler,
37201+ .is_console = 1
37202+};
37203+
37204 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37205 /* This is only available if kgdboc is a built in for early debugging */
37206 static int __init kgdboc_early_init(char *opt)
37207diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37208index e41b9bb..84002fb 100644
37209--- a/drivers/tty/tty_io.c
37210+++ b/drivers/tty/tty_io.c
37211@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37212
37213 void tty_default_fops(struct file_operations *fops)
37214 {
37215- *fops = tty_fops;
37216+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37217 }
37218
37219 /*
37220diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37221index 24b95db..9c078d0 100644
37222--- a/drivers/tty/tty_ldisc.c
37223+++ b/drivers/tty/tty_ldisc.c
37224@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37225 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37226 struct tty_ldisc_ops *ldo = ld->ops;
37227
37228- ldo->refcount--;
37229+ atomic_dec(&ldo->refcount);
37230 module_put(ldo->owner);
37231 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37232
37233@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37234 spin_lock_irqsave(&tty_ldisc_lock, flags);
37235 tty_ldiscs[disc] = new_ldisc;
37236 new_ldisc->num = disc;
37237- new_ldisc->refcount = 0;
37238+ atomic_set(&new_ldisc->refcount, 0);
37239 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37240
37241 return ret;
37242@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37243 return -EINVAL;
37244
37245 spin_lock_irqsave(&tty_ldisc_lock, flags);
37246- if (tty_ldiscs[disc]->refcount)
37247+ if (atomic_read(&tty_ldiscs[disc]->refcount))
37248 ret = -EBUSY;
37249 else
37250 tty_ldiscs[disc] = NULL;
37251@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37252 if (ldops) {
37253 ret = ERR_PTR(-EAGAIN);
37254 if (try_module_get(ldops->owner)) {
37255- ldops->refcount++;
37256+ atomic_inc(&ldops->refcount);
37257 ret = ldops;
37258 }
37259 }
37260@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37261 unsigned long flags;
37262
37263 spin_lock_irqsave(&tty_ldisc_lock, flags);
37264- ldops->refcount--;
37265+ atomic_dec(&ldops->refcount);
37266 module_put(ldops->owner);
37267 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37268 }
37269diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37270index a605549..6bd3c96 100644
37271--- a/drivers/tty/vt/keyboard.c
37272+++ b/drivers/tty/vt/keyboard.c
37273@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37274 kbd->kbdmode == VC_OFF) &&
37275 value != KVAL(K_SAK))
37276 return; /* SAK is allowed even in raw mode */
37277+
37278+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37279+ {
37280+ void *func = fn_handler[value];
37281+ if (func == fn_show_state || func == fn_show_ptregs ||
37282+ func == fn_show_mem)
37283+ return;
37284+ }
37285+#endif
37286+
37287 fn_handler[value](vc);
37288 }
37289
37290diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37291index 65447c5..0526f0a 100644
37292--- a/drivers/tty/vt/vt_ioctl.c
37293+++ b/drivers/tty/vt/vt_ioctl.c
37294@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37295 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37296 return -EFAULT;
37297
37298- if (!capable(CAP_SYS_TTY_CONFIG))
37299- perm = 0;
37300-
37301 switch (cmd) {
37302 case KDGKBENT:
37303 key_map = key_maps[s];
37304@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37305 val = (i ? K_HOLE : K_NOSUCHMAP);
37306 return put_user(val, &user_kbe->kb_value);
37307 case KDSKBENT:
37308+ if (!capable(CAP_SYS_TTY_CONFIG))
37309+ perm = 0;
37310+
37311 if (!perm)
37312 return -EPERM;
37313 if (!i && v == K_NOSUCHMAP) {
37314@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37315 int i, j, k;
37316 int ret;
37317
37318- if (!capable(CAP_SYS_TTY_CONFIG))
37319- perm = 0;
37320-
37321 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37322 if (!kbs) {
37323 ret = -ENOMEM;
37324@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37325 kfree(kbs);
37326 return ((p && *p) ? -EOVERFLOW : 0);
37327 case KDSKBSENT:
37328+ if (!capable(CAP_SYS_TTY_CONFIG))
37329+ perm = 0;
37330+
37331 if (!perm) {
37332 ret = -EPERM;
37333 goto reterr;
37334diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37335index a783d53..cb30d94 100644
37336--- a/drivers/uio/uio.c
37337+++ b/drivers/uio/uio.c
37338@@ -25,6 +25,7 @@
37339 #include <linux/kobject.h>
37340 #include <linux/cdev.h>
37341 #include <linux/uio_driver.h>
37342+#include <asm/local.h>
37343
37344 #define UIO_MAX_DEVICES (1U << MINORBITS)
37345
37346@@ -32,10 +33,10 @@ struct uio_device {
37347 struct module *owner;
37348 struct device *dev;
37349 int minor;
37350- atomic_t event;
37351+ atomic_unchecked_t event;
37352 struct fasync_struct *async_queue;
37353 wait_queue_head_t wait;
37354- int vma_count;
37355+ local_t vma_count;
37356 struct uio_info *info;
37357 struct kobject *map_dir;
37358 struct kobject *portio_dir;
37359@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37360 struct device_attribute *attr, char *buf)
37361 {
37362 struct uio_device *idev = dev_get_drvdata(dev);
37363- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37364+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37365 }
37366
37367 static struct device_attribute uio_class_attributes[] = {
37368@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37369 {
37370 struct uio_device *idev = info->uio_dev;
37371
37372- atomic_inc(&idev->event);
37373+ atomic_inc_unchecked(&idev->event);
37374 wake_up_interruptible(&idev->wait);
37375 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37376 }
37377@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37378 }
37379
37380 listener->dev = idev;
37381- listener->event_count = atomic_read(&idev->event);
37382+ listener->event_count = atomic_read_unchecked(&idev->event);
37383 filep->private_data = listener;
37384
37385 if (idev->info->open) {
37386@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37387 return -EIO;
37388
37389 poll_wait(filep, &idev->wait, wait);
37390- if (listener->event_count != atomic_read(&idev->event))
37391+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37392 return POLLIN | POLLRDNORM;
37393 return 0;
37394 }
37395@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37396 do {
37397 set_current_state(TASK_INTERRUPTIBLE);
37398
37399- event_count = atomic_read(&idev->event);
37400+ event_count = atomic_read_unchecked(&idev->event);
37401 if (event_count != listener->event_count) {
37402 if (copy_to_user(buf, &event_count, count))
37403 retval = -EFAULT;
37404@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37405 static void uio_vma_open(struct vm_area_struct *vma)
37406 {
37407 struct uio_device *idev = vma->vm_private_data;
37408- idev->vma_count++;
37409+ local_inc(&idev->vma_count);
37410 }
37411
37412 static void uio_vma_close(struct vm_area_struct *vma)
37413 {
37414 struct uio_device *idev = vma->vm_private_data;
37415- idev->vma_count--;
37416+ local_dec(&idev->vma_count);
37417 }
37418
37419 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37420@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37421 idev->owner = owner;
37422 idev->info = info;
37423 init_waitqueue_head(&idev->wait);
37424- atomic_set(&idev->event, 0);
37425+ atomic_set_unchecked(&idev->event, 0);
37426
37427 ret = uio_get_minor(idev);
37428 if (ret)
37429diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37430index 98b89fe..aff824e 100644
37431--- a/drivers/usb/atm/cxacru.c
37432+++ b/drivers/usb/atm/cxacru.c
37433@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37434 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37435 if (ret < 2)
37436 return -EINVAL;
37437- if (index < 0 || index > 0x7f)
37438+ if (index > 0x7f)
37439 return -EINVAL;
37440 pos += tmp;
37441
37442diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37443index d3448ca..d2864ca 100644
37444--- a/drivers/usb/atm/usbatm.c
37445+++ b/drivers/usb/atm/usbatm.c
37446@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37447 if (printk_ratelimit())
37448 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37449 __func__, vpi, vci);
37450- atomic_inc(&vcc->stats->rx_err);
37451+ atomic_inc_unchecked(&vcc->stats->rx_err);
37452 return;
37453 }
37454
37455@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37456 if (length > ATM_MAX_AAL5_PDU) {
37457 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37458 __func__, length, vcc);
37459- atomic_inc(&vcc->stats->rx_err);
37460+ atomic_inc_unchecked(&vcc->stats->rx_err);
37461 goto out;
37462 }
37463
37464@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37465 if (sarb->len < pdu_length) {
37466 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37467 __func__, pdu_length, sarb->len, vcc);
37468- atomic_inc(&vcc->stats->rx_err);
37469+ atomic_inc_unchecked(&vcc->stats->rx_err);
37470 goto out;
37471 }
37472
37473 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37474 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37475 __func__, vcc);
37476- atomic_inc(&vcc->stats->rx_err);
37477+ atomic_inc_unchecked(&vcc->stats->rx_err);
37478 goto out;
37479 }
37480
37481@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37482 if (printk_ratelimit())
37483 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37484 __func__, length);
37485- atomic_inc(&vcc->stats->rx_drop);
37486+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37487 goto out;
37488 }
37489
37490@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37491
37492 vcc->push(vcc, skb);
37493
37494- atomic_inc(&vcc->stats->rx);
37495+ atomic_inc_unchecked(&vcc->stats->rx);
37496 out:
37497 skb_trim(sarb, 0);
37498 }
37499@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37500 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37501
37502 usbatm_pop(vcc, skb);
37503- atomic_inc(&vcc->stats->tx);
37504+ atomic_inc_unchecked(&vcc->stats->tx);
37505
37506 skb = skb_dequeue(&instance->sndqueue);
37507 }
37508@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37509 if (!left--)
37510 return sprintf(page,
37511 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37512- atomic_read(&atm_dev->stats.aal5.tx),
37513- atomic_read(&atm_dev->stats.aal5.tx_err),
37514- atomic_read(&atm_dev->stats.aal5.rx),
37515- atomic_read(&atm_dev->stats.aal5.rx_err),
37516- atomic_read(&atm_dev->stats.aal5.rx_drop));
37517+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37518+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37519+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37520+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37521+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37522
37523 if (!left--) {
37524 if (instance->disconnected)
37525diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37526index d956965..4179a77 100644
37527--- a/drivers/usb/core/devices.c
37528+++ b/drivers/usb/core/devices.c
37529@@ -126,7 +126,7 @@ static const char format_endpt[] =
37530 * time it gets called.
37531 */
37532 static struct device_connect_event {
37533- atomic_t count;
37534+ atomic_unchecked_t count;
37535 wait_queue_head_t wait;
37536 } device_event = {
37537 .count = ATOMIC_INIT(1),
37538@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37539
37540 void usbfs_conn_disc_event(void)
37541 {
37542- atomic_add(2, &device_event.count);
37543+ atomic_add_unchecked(2, &device_event.count);
37544 wake_up(&device_event.wait);
37545 }
37546
37547@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37548
37549 poll_wait(file, &device_event.wait, wait);
37550
37551- event_count = atomic_read(&device_event.count);
37552+ event_count = atomic_read_unchecked(&device_event.count);
37553 if (file->f_version != event_count) {
37554 file->f_version = event_count;
37555 return POLLIN | POLLRDNORM;
37556diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
37557index b3bdfed..a9460e0 100644
37558--- a/drivers/usb/core/message.c
37559+++ b/drivers/usb/core/message.c
37560@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
37561 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37562 if (buf) {
37563 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37564- if (len > 0) {
37565- smallbuf = kmalloc(++len, GFP_NOIO);
37566+ if (len++ > 0) {
37567+ smallbuf = kmalloc(len, GFP_NOIO);
37568 if (!smallbuf)
37569 return buf;
37570 memcpy(smallbuf, buf, len);
37571diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37572index 1fc8f12..20647c1 100644
37573--- a/drivers/usb/early/ehci-dbgp.c
37574+++ b/drivers/usb/early/ehci-dbgp.c
37575@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
37576
37577 #ifdef CONFIG_KGDB
37578 static struct kgdb_io kgdbdbgp_io_ops;
37579-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37580+static struct kgdb_io kgdbdbgp_io_ops_console;
37581+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37582 #else
37583 #define dbgp_kgdb_mode (0)
37584 #endif
37585@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
37586 .write_char = kgdbdbgp_write_char,
37587 };
37588
37589+static struct kgdb_io kgdbdbgp_io_ops_console = {
37590+ .name = "kgdbdbgp",
37591+ .read_char = kgdbdbgp_read_char,
37592+ .write_char = kgdbdbgp_write_char,
37593+ .is_console = 1
37594+};
37595+
37596 static int kgdbdbgp_wait_time;
37597
37598 static int __init kgdbdbgp_parse_config(char *str)
37599@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
37600 ptr++;
37601 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37602 }
37603- kgdb_register_io_module(&kgdbdbgp_io_ops);
37604- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37605+ if (early_dbgp_console.index != -1)
37606+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37607+ else
37608+ kgdb_register_io_module(&kgdbdbgp_io_ops);
37609
37610 return 0;
37611 }
37612diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
37613index d6bea3e..60b250e 100644
37614--- a/drivers/usb/wusbcore/wa-hc.h
37615+++ b/drivers/usb/wusbcore/wa-hc.h
37616@@ -192,7 +192,7 @@ struct wahc {
37617 struct list_head xfer_delayed_list;
37618 spinlock_t xfer_list_lock;
37619 struct work_struct xfer_work;
37620- atomic_t xfer_id_count;
37621+ atomic_unchecked_t xfer_id_count;
37622 };
37623
37624
37625@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
37626 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37627 spin_lock_init(&wa->xfer_list_lock);
37628 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37629- atomic_set(&wa->xfer_id_count, 1);
37630+ atomic_set_unchecked(&wa->xfer_id_count, 1);
37631 }
37632
37633 /**
37634diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
37635index 57c01ab..8a05959 100644
37636--- a/drivers/usb/wusbcore/wa-xfer.c
37637+++ b/drivers/usb/wusbcore/wa-xfer.c
37638@@ -296,7 +296,7 @@ out:
37639 */
37640 static void wa_xfer_id_init(struct wa_xfer *xfer)
37641 {
37642- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37643+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37644 }
37645
37646 /*
37647diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
37648index c14c42b..f955cc2 100644
37649--- a/drivers/vhost/vhost.c
37650+++ b/drivers/vhost/vhost.c
37651@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
37652 return 0;
37653 }
37654
37655-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37656+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37657 {
37658 struct file *eventfp, *filep = NULL,
37659 *pollstart = NULL, *pollstop = NULL;
37660diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
37661index b0b2ac3..89a4399 100644
37662--- a/drivers/video/aty/aty128fb.c
37663+++ b/drivers/video/aty/aty128fb.c
37664@@ -148,7 +148,7 @@ enum {
37665 };
37666
37667 /* Must match above enum */
37668-static const char *r128_family[] __devinitdata = {
37669+static const char *r128_family[] __devinitconst = {
37670 "AGP",
37671 "PCI",
37672 "PRO AGP",
37673diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
37674index 5c3960d..15cf8fc 100644
37675--- a/drivers/video/fbcmap.c
37676+++ b/drivers/video/fbcmap.c
37677@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
37678 rc = -ENODEV;
37679 goto out;
37680 }
37681- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37682- !info->fbops->fb_setcmap)) {
37683+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37684 rc = -EINVAL;
37685 goto out1;
37686 }
37687diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
37688index ac9141b..9f07583 100644
37689--- a/drivers/video/fbmem.c
37690+++ b/drivers/video/fbmem.c
37691@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37692 image->dx += image->width + 8;
37693 }
37694 } else if (rotate == FB_ROTATE_UD) {
37695- for (x = 0; x < num && image->dx >= 0; x++) {
37696+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37697 info->fbops->fb_imageblit(info, image);
37698 image->dx -= image->width + 8;
37699 }
37700@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37701 image->dy += image->height + 8;
37702 }
37703 } else if (rotate == FB_ROTATE_CCW) {
37704- for (x = 0; x < num && image->dy >= 0; x++) {
37705+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37706 info->fbops->fb_imageblit(info, image);
37707 image->dy -= image->height + 8;
37708 }
37709@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
37710 return -EFAULT;
37711 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37712 return -EINVAL;
37713- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37714+ if (con2fb.framebuffer >= FB_MAX)
37715 return -EINVAL;
37716 if (!registered_fb[con2fb.framebuffer])
37717 request_module("fb%d", con2fb.framebuffer);
37718diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
37719index 5a5d092..265c5ed 100644
37720--- a/drivers/video/geode/gx1fb_core.c
37721+++ b/drivers/video/geode/gx1fb_core.c
37722@@ -29,7 +29,7 @@ static int crt_option = 1;
37723 static char panel_option[32] = "";
37724
37725 /* Modes relevant to the GX1 (taken from modedb.c) */
37726-static const struct fb_videomode __devinitdata gx1_modedb[] = {
37727+static const struct fb_videomode __devinitconst gx1_modedb[] = {
37728 /* 640x480-60 VESA */
37729 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37730 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37731diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
37732index 0fad23f..0e9afa4 100644
37733--- a/drivers/video/gxt4500.c
37734+++ b/drivers/video/gxt4500.c
37735@@ -156,7 +156,7 @@ struct gxt4500_par {
37736 static char *mode_option;
37737
37738 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37739-static const struct fb_videomode defaultmode __devinitdata = {
37740+static const struct fb_videomode defaultmode __devinitconst = {
37741 .refresh = 60,
37742 .xres = 1280,
37743 .yres = 1024,
37744@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
37745 return 0;
37746 }
37747
37748-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37749+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37750 .id = "IBM GXT4500P",
37751 .type = FB_TYPE_PACKED_PIXELS,
37752 .visual = FB_VISUAL_PSEUDOCOLOR,
37753diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
37754index 7672d2e..b56437f 100644
37755--- a/drivers/video/i810/i810_accel.c
37756+++ b/drivers/video/i810/i810_accel.c
37757@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
37758 }
37759 }
37760 printk("ringbuffer lockup!!!\n");
37761+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
37762 i810_report_error(mmio);
37763 par->dev_flags |= LOCKUP;
37764 info->pixmap.scan_align = 1;
37765diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
37766index b83f361..2b05a91 100644
37767--- a/drivers/video/i810/i810_main.c
37768+++ b/drivers/video/i810/i810_main.c
37769@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
37770 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
37771
37772 /* PCI */
37773-static const char *i810_pci_list[] __devinitdata = {
37774+static const char *i810_pci_list[] __devinitconst = {
37775 "Intel(R) 810 Framebuffer Device" ,
37776 "Intel(R) 810-DC100 Framebuffer Device" ,
37777 "Intel(R) 810E Framebuffer Device" ,
37778diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
37779index de36693..3c63fc2 100644
37780--- a/drivers/video/jz4740_fb.c
37781+++ b/drivers/video/jz4740_fb.c
37782@@ -136,7 +136,7 @@ struct jzfb {
37783 uint32_t pseudo_palette[16];
37784 };
37785
37786-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
37787+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
37788 .id = "JZ4740 FB",
37789 .type = FB_TYPE_PACKED_PIXELS,
37790 .visual = FB_VISUAL_TRUECOLOR,
37791diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
37792index 3c14e43..eafa544 100644
37793--- a/drivers/video/logo/logo_linux_clut224.ppm
37794+++ b/drivers/video/logo/logo_linux_clut224.ppm
37795@@ -1,1604 +1,1123 @@
37796 P3
37797-# Standard 224-color Linux logo
37798 80 80
37799 255
37800- 0 0 0 0 0 0 0 0 0 0 0 0
37801- 0 0 0 0 0 0 0 0 0 0 0 0
37802- 0 0 0 0 0 0 0 0 0 0 0 0
37803- 0 0 0 0 0 0 0 0 0 0 0 0
37804- 0 0 0 0 0 0 0 0 0 0 0 0
37805- 0 0 0 0 0 0 0 0 0 0 0 0
37806- 0 0 0 0 0 0 0 0 0 0 0 0
37807- 0 0 0 0 0 0 0 0 0 0 0 0
37808- 0 0 0 0 0 0 0 0 0 0 0 0
37809- 6 6 6 6 6 6 10 10 10 10 10 10
37810- 10 10 10 6 6 6 6 6 6 6 6 6
37811- 0 0 0 0 0 0 0 0 0 0 0 0
37812- 0 0 0 0 0 0 0 0 0 0 0 0
37813- 0 0 0 0 0 0 0 0 0 0 0 0
37814- 0 0 0 0 0 0 0 0 0 0 0 0
37815- 0 0 0 0 0 0 0 0 0 0 0 0
37816- 0 0 0 0 0 0 0 0 0 0 0 0
37817- 0 0 0 0 0 0 0 0 0 0 0 0
37818- 0 0 0 0 0 0 0 0 0 0 0 0
37819- 0 0 0 0 0 0 0 0 0 0 0 0
37820- 0 0 0 0 0 0 0 0 0 0 0 0
37821- 0 0 0 0 0 0 0 0 0 0 0 0
37822- 0 0 0 0 0 0 0 0 0 0 0 0
37823- 0 0 0 0 0 0 0 0 0 0 0 0
37824- 0 0 0 0 0 0 0 0 0 0 0 0
37825- 0 0 0 0 0 0 0 0 0 0 0 0
37826- 0 0 0 0 0 0 0 0 0 0 0 0
37827- 0 0 0 0 0 0 0 0 0 0 0 0
37828- 0 0 0 6 6 6 10 10 10 14 14 14
37829- 22 22 22 26 26 26 30 30 30 34 34 34
37830- 30 30 30 30 30 30 26 26 26 18 18 18
37831- 14 14 14 10 10 10 6 6 6 0 0 0
37832- 0 0 0 0 0 0 0 0 0 0 0 0
37833- 0 0 0 0 0 0 0 0 0 0 0 0
37834- 0 0 0 0 0 0 0 0 0 0 0 0
37835- 0 0 0 0 0 0 0 0 0 0 0 0
37836- 0 0 0 0 0 0 0 0 0 0 0 0
37837- 0 0 0 0 0 0 0 0 0 0 0 0
37838- 0 0 0 0 0 0 0 0 0 0 0 0
37839- 0 0 0 0 0 0 0 0 0 0 0 0
37840- 0 0 0 0 0 0 0 0 0 0 0 0
37841- 0 0 0 0 0 1 0 0 1 0 0 0
37842- 0 0 0 0 0 0 0 0 0 0 0 0
37843- 0 0 0 0 0 0 0 0 0 0 0 0
37844- 0 0 0 0 0 0 0 0 0 0 0 0
37845- 0 0 0 0 0 0 0 0 0 0 0 0
37846- 0 0 0 0 0 0 0 0 0 0 0 0
37847- 0 0 0 0 0 0 0 0 0 0 0 0
37848- 6 6 6 14 14 14 26 26 26 42 42 42
37849- 54 54 54 66 66 66 78 78 78 78 78 78
37850- 78 78 78 74 74 74 66 66 66 54 54 54
37851- 42 42 42 26 26 26 18 18 18 10 10 10
37852- 6 6 6 0 0 0 0 0 0 0 0 0
37853- 0 0 0 0 0 0 0 0 0 0 0 0
37854- 0 0 0 0 0 0 0 0 0 0 0 0
37855- 0 0 0 0 0 0 0 0 0 0 0 0
37856- 0 0 0 0 0 0 0 0 0 0 0 0
37857- 0 0 0 0 0 0 0 0 0 0 0 0
37858- 0 0 0 0 0 0 0 0 0 0 0 0
37859- 0 0 0 0 0 0 0 0 0 0 0 0
37860- 0 0 0 0 0 0 0 0 0 0 0 0
37861- 0 0 1 0 0 0 0 0 0 0 0 0
37862- 0 0 0 0 0 0 0 0 0 0 0 0
37863- 0 0 0 0 0 0 0 0 0 0 0 0
37864- 0 0 0 0 0 0 0 0 0 0 0 0
37865- 0 0 0 0 0 0 0 0 0 0 0 0
37866- 0 0 0 0 0 0 0 0 0 0 0 0
37867- 0 0 0 0 0 0 0 0 0 10 10 10
37868- 22 22 22 42 42 42 66 66 66 86 86 86
37869- 66 66 66 38 38 38 38 38 38 22 22 22
37870- 26 26 26 34 34 34 54 54 54 66 66 66
37871- 86 86 86 70 70 70 46 46 46 26 26 26
37872- 14 14 14 6 6 6 0 0 0 0 0 0
37873- 0 0 0 0 0 0 0 0 0 0 0 0
37874- 0 0 0 0 0 0 0 0 0 0 0 0
37875- 0 0 0 0 0 0 0 0 0 0 0 0
37876- 0 0 0 0 0 0 0 0 0 0 0 0
37877- 0 0 0 0 0 0 0 0 0 0 0 0
37878- 0 0 0 0 0 0 0 0 0 0 0 0
37879- 0 0 0 0 0 0 0 0 0 0 0 0
37880- 0 0 0 0 0 0 0 0 0 0 0 0
37881- 0 0 1 0 0 1 0 0 1 0 0 0
37882- 0 0 0 0 0 0 0 0 0 0 0 0
37883- 0 0 0 0 0 0 0 0 0 0 0 0
37884- 0 0 0 0 0 0 0 0 0 0 0 0
37885- 0 0 0 0 0 0 0 0 0 0 0 0
37886- 0 0 0 0 0 0 0 0 0 0 0 0
37887- 0 0 0 0 0 0 10 10 10 26 26 26
37888- 50 50 50 82 82 82 58 58 58 6 6 6
37889- 2 2 6 2 2 6 2 2 6 2 2 6
37890- 2 2 6 2 2 6 2 2 6 2 2 6
37891- 6 6 6 54 54 54 86 86 86 66 66 66
37892- 38 38 38 18 18 18 6 6 6 0 0 0
37893- 0 0 0 0 0 0 0 0 0 0 0 0
37894- 0 0 0 0 0 0 0 0 0 0 0 0
37895- 0 0 0 0 0 0 0 0 0 0 0 0
37896- 0 0 0 0 0 0 0 0 0 0 0 0
37897- 0 0 0 0 0 0 0 0 0 0 0 0
37898- 0 0 0 0 0 0 0 0 0 0 0 0
37899- 0 0 0 0 0 0 0 0 0 0 0 0
37900- 0 0 0 0 0 0 0 0 0 0 0 0
37901- 0 0 0 0 0 0 0 0 0 0 0 0
37902- 0 0 0 0 0 0 0 0 0 0 0 0
37903- 0 0 0 0 0 0 0 0 0 0 0 0
37904- 0 0 0 0 0 0 0 0 0 0 0 0
37905- 0 0 0 0 0 0 0 0 0 0 0 0
37906- 0 0 0 0 0 0 0 0 0 0 0 0
37907- 0 0 0 6 6 6 22 22 22 50 50 50
37908- 78 78 78 34 34 34 2 2 6 2 2 6
37909- 2 2 6 2 2 6 2 2 6 2 2 6
37910- 2 2 6 2 2 6 2 2 6 2 2 6
37911- 2 2 6 2 2 6 6 6 6 70 70 70
37912- 78 78 78 46 46 46 22 22 22 6 6 6
37913- 0 0 0 0 0 0 0 0 0 0 0 0
37914- 0 0 0 0 0 0 0 0 0 0 0 0
37915- 0 0 0 0 0 0 0 0 0 0 0 0
37916- 0 0 0 0 0 0 0 0 0 0 0 0
37917- 0 0 0 0 0 0 0 0 0 0 0 0
37918- 0 0 0 0 0 0 0 0 0 0 0 0
37919- 0 0 0 0 0 0 0 0 0 0 0 0
37920- 0 0 0 0 0 0 0 0 0 0 0 0
37921- 0 0 1 0 0 1 0 0 1 0 0 0
37922- 0 0 0 0 0 0 0 0 0 0 0 0
37923- 0 0 0 0 0 0 0 0 0 0 0 0
37924- 0 0 0 0 0 0 0 0 0 0 0 0
37925- 0 0 0 0 0 0 0 0 0 0 0 0
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 6 6 6 18 18 18 42 42 42 82 82 82
37928- 26 26 26 2 2 6 2 2 6 2 2 6
37929- 2 2 6 2 2 6 2 2 6 2 2 6
37930- 2 2 6 2 2 6 2 2 6 14 14 14
37931- 46 46 46 34 34 34 6 6 6 2 2 6
37932- 42 42 42 78 78 78 42 42 42 18 18 18
37933- 6 6 6 0 0 0 0 0 0 0 0 0
37934- 0 0 0 0 0 0 0 0 0 0 0 0
37935- 0 0 0 0 0 0 0 0 0 0 0 0
37936- 0 0 0 0 0 0 0 0 0 0 0 0
37937- 0 0 0 0 0 0 0 0 0 0 0 0
37938- 0 0 0 0 0 0 0 0 0 0 0 0
37939- 0 0 0 0 0 0 0 0 0 0 0 0
37940- 0 0 0 0 0 0 0 0 0 0 0 0
37941- 0 0 1 0 0 0 0 0 1 0 0 0
37942- 0 0 0 0 0 0 0 0 0 0 0 0
37943- 0 0 0 0 0 0 0 0 0 0 0 0
37944- 0 0 0 0 0 0 0 0 0 0 0 0
37945- 0 0 0 0 0 0 0 0 0 0 0 0
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 10 10 10 30 30 30 66 66 66 58 58 58
37948- 2 2 6 2 2 6 2 2 6 2 2 6
37949- 2 2 6 2 2 6 2 2 6 2 2 6
37950- 2 2 6 2 2 6 2 2 6 26 26 26
37951- 86 86 86 101 101 101 46 46 46 10 10 10
37952- 2 2 6 58 58 58 70 70 70 34 34 34
37953- 10 10 10 0 0 0 0 0 0 0 0 0
37954- 0 0 0 0 0 0 0 0 0 0 0 0
37955- 0 0 0 0 0 0 0 0 0 0 0 0
37956- 0 0 0 0 0 0 0 0 0 0 0 0
37957- 0 0 0 0 0 0 0 0 0 0 0 0
37958- 0 0 0 0 0 0 0 0 0 0 0 0
37959- 0 0 0 0 0 0 0 0 0 0 0 0
37960- 0 0 0 0 0 0 0 0 0 0 0 0
37961- 0 0 1 0 0 1 0 0 1 0 0 0
37962- 0 0 0 0 0 0 0 0 0 0 0 0
37963- 0 0 0 0 0 0 0 0 0 0 0 0
37964- 0 0 0 0 0 0 0 0 0 0 0 0
37965- 0 0 0 0 0 0 0 0 0 0 0 0
37966- 0 0 0 0 0 0 0 0 0 0 0 0
37967- 14 14 14 42 42 42 86 86 86 10 10 10
37968- 2 2 6 2 2 6 2 2 6 2 2 6
37969- 2 2 6 2 2 6 2 2 6 2 2 6
37970- 2 2 6 2 2 6 2 2 6 30 30 30
37971- 94 94 94 94 94 94 58 58 58 26 26 26
37972- 2 2 6 6 6 6 78 78 78 54 54 54
37973- 22 22 22 6 6 6 0 0 0 0 0 0
37974- 0 0 0 0 0 0 0 0 0 0 0 0
37975- 0 0 0 0 0 0 0 0 0 0 0 0
37976- 0 0 0 0 0 0 0 0 0 0 0 0
37977- 0 0 0 0 0 0 0 0 0 0 0 0
37978- 0 0 0 0 0 0 0 0 0 0 0 0
37979- 0 0 0 0 0 0 0 0 0 0 0 0
37980- 0 0 0 0 0 0 0 0 0 0 0 0
37981- 0 0 0 0 0 0 0 0 0 0 0 0
37982- 0 0 0 0 0 0 0 0 0 0 0 0
37983- 0 0 0 0 0 0 0 0 0 0 0 0
37984- 0 0 0 0 0 0 0 0 0 0 0 0
37985- 0 0 0 0 0 0 0 0 0 0 0 0
37986- 0 0 0 0 0 0 0 0 0 6 6 6
37987- 22 22 22 62 62 62 62 62 62 2 2 6
37988- 2 2 6 2 2 6 2 2 6 2 2 6
37989- 2 2 6 2 2 6 2 2 6 2 2 6
37990- 2 2 6 2 2 6 2 2 6 26 26 26
37991- 54 54 54 38 38 38 18 18 18 10 10 10
37992- 2 2 6 2 2 6 34 34 34 82 82 82
37993- 38 38 38 14 14 14 0 0 0 0 0 0
37994- 0 0 0 0 0 0 0 0 0 0 0 0
37995- 0 0 0 0 0 0 0 0 0 0 0 0
37996- 0 0 0 0 0 0 0 0 0 0 0 0
37997- 0 0 0 0 0 0 0 0 0 0 0 0
37998- 0 0 0 0 0 0 0 0 0 0 0 0
37999- 0 0 0 0 0 0 0 0 0 0 0 0
38000- 0 0 0 0 0 0 0 0 0 0 0 0
38001- 0 0 0 0 0 1 0 0 1 0 0 0
38002- 0 0 0 0 0 0 0 0 0 0 0 0
38003- 0 0 0 0 0 0 0 0 0 0 0 0
38004- 0 0 0 0 0 0 0 0 0 0 0 0
38005- 0 0 0 0 0 0 0 0 0 0 0 0
38006- 0 0 0 0 0 0 0 0 0 6 6 6
38007- 30 30 30 78 78 78 30 30 30 2 2 6
38008- 2 2 6 2 2 6 2 2 6 2 2 6
38009- 2 2 6 2 2 6 2 2 6 2 2 6
38010- 2 2 6 2 2 6 2 2 6 10 10 10
38011- 10 10 10 2 2 6 2 2 6 2 2 6
38012- 2 2 6 2 2 6 2 2 6 78 78 78
38013- 50 50 50 18 18 18 6 6 6 0 0 0
38014- 0 0 0 0 0 0 0 0 0 0 0 0
38015- 0 0 0 0 0 0 0 0 0 0 0 0
38016- 0 0 0 0 0 0 0 0 0 0 0 0
38017- 0 0 0 0 0 0 0 0 0 0 0 0
38018- 0 0 0 0 0 0 0 0 0 0 0 0
38019- 0 0 0 0 0 0 0 0 0 0 0 0
38020- 0 0 0 0 0 0 0 0 0 0 0 0
38021- 0 0 1 0 0 0 0 0 0 0 0 0
38022- 0 0 0 0 0 0 0 0 0 0 0 0
38023- 0 0 0 0 0 0 0 0 0 0 0 0
38024- 0 0 0 0 0 0 0 0 0 0 0 0
38025- 0 0 0 0 0 0 0 0 0 0 0 0
38026- 0 0 0 0 0 0 0 0 0 10 10 10
38027- 38 38 38 86 86 86 14 14 14 2 2 6
38028- 2 2 6 2 2 6 2 2 6 2 2 6
38029- 2 2 6 2 2 6 2 2 6 2 2 6
38030- 2 2 6 2 2 6 2 2 6 2 2 6
38031- 2 2 6 2 2 6 2 2 6 2 2 6
38032- 2 2 6 2 2 6 2 2 6 54 54 54
38033- 66 66 66 26 26 26 6 6 6 0 0 0
38034- 0 0 0 0 0 0 0 0 0 0 0 0
38035- 0 0 0 0 0 0 0 0 0 0 0 0
38036- 0 0 0 0 0 0 0 0 0 0 0 0
38037- 0 0 0 0 0 0 0 0 0 0 0 0
38038- 0 0 0 0 0 0 0 0 0 0 0 0
38039- 0 0 0 0 0 0 0 0 0 0 0 0
38040- 0 0 0 0 0 0 0 0 0 0 0 0
38041- 0 0 0 0 0 1 0 0 1 0 0 0
38042- 0 0 0 0 0 0 0 0 0 0 0 0
38043- 0 0 0 0 0 0 0 0 0 0 0 0
38044- 0 0 0 0 0 0 0 0 0 0 0 0
38045- 0 0 0 0 0 0 0 0 0 0 0 0
38046- 0 0 0 0 0 0 0 0 0 14 14 14
38047- 42 42 42 82 82 82 2 2 6 2 2 6
38048- 2 2 6 6 6 6 10 10 10 2 2 6
38049- 2 2 6 2 2 6 2 2 6 2 2 6
38050- 2 2 6 2 2 6 2 2 6 6 6 6
38051- 14 14 14 10 10 10 2 2 6 2 2 6
38052- 2 2 6 2 2 6 2 2 6 18 18 18
38053- 82 82 82 34 34 34 10 10 10 0 0 0
38054- 0 0 0 0 0 0 0 0 0 0 0 0
38055- 0 0 0 0 0 0 0 0 0 0 0 0
38056- 0 0 0 0 0 0 0 0 0 0 0 0
38057- 0 0 0 0 0 0 0 0 0 0 0 0
38058- 0 0 0 0 0 0 0 0 0 0 0 0
38059- 0 0 0 0 0 0 0 0 0 0 0 0
38060- 0 0 0 0 0 0 0 0 0 0 0 0
38061- 0 0 1 0 0 0 0 0 0 0 0 0
38062- 0 0 0 0 0 0 0 0 0 0 0 0
38063- 0 0 0 0 0 0 0 0 0 0 0 0
38064- 0 0 0 0 0 0 0 0 0 0 0 0
38065- 0 0 0 0 0 0 0 0 0 0 0 0
38066- 0 0 0 0 0 0 0 0 0 14 14 14
38067- 46 46 46 86 86 86 2 2 6 2 2 6
38068- 6 6 6 6 6 6 22 22 22 34 34 34
38069- 6 6 6 2 2 6 2 2 6 2 2 6
38070- 2 2 6 2 2 6 18 18 18 34 34 34
38071- 10 10 10 50 50 50 22 22 22 2 2 6
38072- 2 2 6 2 2 6 2 2 6 10 10 10
38073- 86 86 86 42 42 42 14 14 14 0 0 0
38074- 0 0 0 0 0 0 0 0 0 0 0 0
38075- 0 0 0 0 0 0 0 0 0 0 0 0
38076- 0 0 0 0 0 0 0 0 0 0 0 0
38077- 0 0 0 0 0 0 0 0 0 0 0 0
38078- 0 0 0 0 0 0 0 0 0 0 0 0
38079- 0 0 0 0 0 0 0 0 0 0 0 0
38080- 0 0 0 0 0 0 0 0 0 0 0 0
38081- 0 0 1 0 0 1 0 0 1 0 0 0
38082- 0 0 0 0 0 0 0 0 0 0 0 0
38083- 0 0 0 0 0 0 0 0 0 0 0 0
38084- 0 0 0 0 0 0 0 0 0 0 0 0
38085- 0 0 0 0 0 0 0 0 0 0 0 0
38086- 0 0 0 0 0 0 0 0 0 14 14 14
38087- 46 46 46 86 86 86 2 2 6 2 2 6
38088- 38 38 38 116 116 116 94 94 94 22 22 22
38089- 22 22 22 2 2 6 2 2 6 2 2 6
38090- 14 14 14 86 86 86 138 138 138 162 162 162
38091-154 154 154 38 38 38 26 26 26 6 6 6
38092- 2 2 6 2 2 6 2 2 6 2 2 6
38093- 86 86 86 46 46 46 14 14 14 0 0 0
38094- 0 0 0 0 0 0 0 0 0 0 0 0
38095- 0 0 0 0 0 0 0 0 0 0 0 0
38096- 0 0 0 0 0 0 0 0 0 0 0 0
38097- 0 0 0 0 0 0 0 0 0 0 0 0
38098- 0 0 0 0 0 0 0 0 0 0 0 0
38099- 0 0 0 0 0 0 0 0 0 0 0 0
38100- 0 0 0 0 0 0 0 0 0 0 0 0
38101- 0 0 0 0 0 0 0 0 0 0 0 0
38102- 0 0 0 0 0 0 0 0 0 0 0 0
38103- 0 0 0 0 0 0 0 0 0 0 0 0
38104- 0 0 0 0 0 0 0 0 0 0 0 0
38105- 0 0 0 0 0 0 0 0 0 0 0 0
38106- 0 0 0 0 0 0 0 0 0 14 14 14
38107- 46 46 46 86 86 86 2 2 6 14 14 14
38108-134 134 134 198 198 198 195 195 195 116 116 116
38109- 10 10 10 2 2 6 2 2 6 6 6 6
38110-101 98 89 187 187 187 210 210 210 218 218 218
38111-214 214 214 134 134 134 14 14 14 6 6 6
38112- 2 2 6 2 2 6 2 2 6 2 2 6
38113- 86 86 86 50 50 50 18 18 18 6 6 6
38114- 0 0 0 0 0 0 0 0 0 0 0 0
38115- 0 0 0 0 0 0 0 0 0 0 0 0
38116- 0 0 0 0 0 0 0 0 0 0 0 0
38117- 0 0 0 0 0 0 0 0 0 0 0 0
38118- 0 0 0 0 0 0 0 0 0 0 0 0
38119- 0 0 0 0 0 0 0 0 0 0 0 0
38120- 0 0 0 0 0 0 0 0 1 0 0 0
38121- 0 0 1 0 0 1 0 0 1 0 0 0
38122- 0 0 0 0 0 0 0 0 0 0 0 0
38123- 0 0 0 0 0 0 0 0 0 0 0 0
38124- 0 0 0 0 0 0 0 0 0 0 0 0
38125- 0 0 0 0 0 0 0 0 0 0 0 0
38126- 0 0 0 0 0 0 0 0 0 14 14 14
38127- 46 46 46 86 86 86 2 2 6 54 54 54
38128-218 218 218 195 195 195 226 226 226 246 246 246
38129- 58 58 58 2 2 6 2 2 6 30 30 30
38130-210 210 210 253 253 253 174 174 174 123 123 123
38131-221 221 221 234 234 234 74 74 74 2 2 6
38132- 2 2 6 2 2 6 2 2 6 2 2 6
38133- 70 70 70 58 58 58 22 22 22 6 6 6
38134- 0 0 0 0 0 0 0 0 0 0 0 0
38135- 0 0 0 0 0 0 0 0 0 0 0 0
38136- 0 0 0 0 0 0 0 0 0 0 0 0
38137- 0 0 0 0 0 0 0 0 0 0 0 0
38138- 0 0 0 0 0 0 0 0 0 0 0 0
38139- 0 0 0 0 0 0 0 0 0 0 0 0
38140- 0 0 0 0 0 0 0 0 0 0 0 0
38141- 0 0 0 0 0 0 0 0 0 0 0 0
38142- 0 0 0 0 0 0 0 0 0 0 0 0
38143- 0 0 0 0 0 0 0 0 0 0 0 0
38144- 0 0 0 0 0 0 0 0 0 0 0 0
38145- 0 0 0 0 0 0 0 0 0 0 0 0
38146- 0 0 0 0 0 0 0 0 0 14 14 14
38147- 46 46 46 82 82 82 2 2 6 106 106 106
38148-170 170 170 26 26 26 86 86 86 226 226 226
38149-123 123 123 10 10 10 14 14 14 46 46 46
38150-231 231 231 190 190 190 6 6 6 70 70 70
38151- 90 90 90 238 238 238 158 158 158 2 2 6
38152- 2 2 6 2 2 6 2 2 6 2 2 6
38153- 70 70 70 58 58 58 22 22 22 6 6 6
38154- 0 0 0 0 0 0 0 0 0 0 0 0
38155- 0 0 0 0 0 0 0 0 0 0 0 0
38156- 0 0 0 0 0 0 0 0 0 0 0 0
38157- 0 0 0 0 0 0 0 0 0 0 0 0
38158- 0 0 0 0 0 0 0 0 0 0 0 0
38159- 0 0 0 0 0 0 0 0 0 0 0 0
38160- 0 0 0 0 0 0 0 0 1 0 0 0
38161- 0 0 1 0 0 1 0 0 1 0 0 0
38162- 0 0 0 0 0 0 0 0 0 0 0 0
38163- 0 0 0 0 0 0 0 0 0 0 0 0
38164- 0 0 0 0 0 0 0 0 0 0 0 0
38165- 0 0 0 0 0 0 0 0 0 0 0 0
38166- 0 0 0 0 0 0 0 0 0 14 14 14
38167- 42 42 42 86 86 86 6 6 6 116 116 116
38168-106 106 106 6 6 6 70 70 70 149 149 149
38169-128 128 128 18 18 18 38 38 38 54 54 54
38170-221 221 221 106 106 106 2 2 6 14 14 14
38171- 46 46 46 190 190 190 198 198 198 2 2 6
38172- 2 2 6 2 2 6 2 2 6 2 2 6
38173- 74 74 74 62 62 62 22 22 22 6 6 6
38174- 0 0 0 0 0 0 0 0 0 0 0 0
38175- 0 0 0 0 0 0 0 0 0 0 0 0
38176- 0 0 0 0 0 0 0 0 0 0 0 0
38177- 0 0 0 0 0 0 0 0 0 0 0 0
38178- 0 0 0 0 0 0 0 0 0 0 0 0
38179- 0 0 0 0 0 0 0 0 0 0 0 0
38180- 0 0 0 0 0 0 0 0 1 0 0 0
38181- 0 0 1 0 0 0 0 0 1 0 0 0
38182- 0 0 0 0 0 0 0 0 0 0 0 0
38183- 0 0 0 0 0 0 0 0 0 0 0 0
38184- 0 0 0 0 0 0 0 0 0 0 0 0
38185- 0 0 0 0 0 0 0 0 0 0 0 0
38186- 0 0 0 0 0 0 0 0 0 14 14 14
38187- 42 42 42 94 94 94 14 14 14 101 101 101
38188-128 128 128 2 2 6 18 18 18 116 116 116
38189-118 98 46 121 92 8 121 92 8 98 78 10
38190-162 162 162 106 106 106 2 2 6 2 2 6
38191- 2 2 6 195 195 195 195 195 195 6 6 6
38192- 2 2 6 2 2 6 2 2 6 2 2 6
38193- 74 74 74 62 62 62 22 22 22 6 6 6
38194- 0 0 0 0 0 0 0 0 0 0 0 0
38195- 0 0 0 0 0 0 0 0 0 0 0 0
38196- 0 0 0 0 0 0 0 0 0 0 0 0
38197- 0 0 0 0 0 0 0 0 0 0 0 0
38198- 0 0 0 0 0 0 0 0 0 0 0 0
38199- 0 0 0 0 0 0 0 0 0 0 0 0
38200- 0 0 0 0 0 0 0 0 1 0 0 1
38201- 0 0 1 0 0 0 0 0 1 0 0 0
38202- 0 0 0 0 0 0 0 0 0 0 0 0
38203- 0 0 0 0 0 0 0 0 0 0 0 0
38204- 0 0 0 0 0 0 0 0 0 0 0 0
38205- 0 0 0 0 0 0 0 0 0 0 0 0
38206- 0 0 0 0 0 0 0 0 0 10 10 10
38207- 38 38 38 90 90 90 14 14 14 58 58 58
38208-210 210 210 26 26 26 54 38 6 154 114 10
38209-226 170 11 236 186 11 225 175 15 184 144 12
38210-215 174 15 175 146 61 37 26 9 2 2 6
38211- 70 70 70 246 246 246 138 138 138 2 2 6
38212- 2 2 6 2 2 6 2 2 6 2 2 6
38213- 70 70 70 66 66 66 26 26 26 6 6 6
38214- 0 0 0 0 0 0 0 0 0 0 0 0
38215- 0 0 0 0 0 0 0 0 0 0 0 0
38216- 0 0 0 0 0 0 0 0 0 0 0 0
38217- 0 0 0 0 0 0 0 0 0 0 0 0
38218- 0 0 0 0 0 0 0 0 0 0 0 0
38219- 0 0 0 0 0 0 0 0 0 0 0 0
38220- 0 0 0 0 0 0 0 0 0 0 0 0
38221- 0 0 0 0 0 0 0 0 0 0 0 0
38222- 0 0 0 0 0 0 0 0 0 0 0 0
38223- 0 0 0 0 0 0 0 0 0 0 0 0
38224- 0 0 0 0 0 0 0 0 0 0 0 0
38225- 0 0 0 0 0 0 0 0 0 0 0 0
38226- 0 0 0 0 0 0 0 0 0 10 10 10
38227- 38 38 38 86 86 86 14 14 14 10 10 10
38228-195 195 195 188 164 115 192 133 9 225 175 15
38229-239 182 13 234 190 10 232 195 16 232 200 30
38230-245 207 45 241 208 19 232 195 16 184 144 12
38231-218 194 134 211 206 186 42 42 42 2 2 6
38232- 2 2 6 2 2 6 2 2 6 2 2 6
38233- 50 50 50 74 74 74 30 30 30 6 6 6
38234- 0 0 0 0 0 0 0 0 0 0 0 0
38235- 0 0 0 0 0 0 0 0 0 0 0 0
38236- 0 0 0 0 0 0 0 0 0 0 0 0
38237- 0 0 0 0 0 0 0 0 0 0 0 0
38238- 0 0 0 0 0 0 0 0 0 0 0 0
38239- 0 0 0 0 0 0 0 0 0 0 0 0
38240- 0 0 0 0 0 0 0 0 0 0 0 0
38241- 0 0 0 0 0 0 0 0 0 0 0 0
38242- 0 0 0 0 0 0 0 0 0 0 0 0
38243- 0 0 0 0 0 0 0 0 0 0 0 0
38244- 0 0 0 0 0 0 0 0 0 0 0 0
38245- 0 0 0 0 0 0 0 0 0 0 0 0
38246- 0 0 0 0 0 0 0 0 0 10 10 10
38247- 34 34 34 86 86 86 14 14 14 2 2 6
38248-121 87 25 192 133 9 219 162 10 239 182 13
38249-236 186 11 232 195 16 241 208 19 244 214 54
38250-246 218 60 246 218 38 246 215 20 241 208 19
38251-241 208 19 226 184 13 121 87 25 2 2 6
38252- 2 2 6 2 2 6 2 2 6 2 2 6
38253- 50 50 50 82 82 82 34 34 34 10 10 10
38254- 0 0 0 0 0 0 0 0 0 0 0 0
38255- 0 0 0 0 0 0 0 0 0 0 0 0
38256- 0 0 0 0 0 0 0 0 0 0 0 0
38257- 0 0 0 0 0 0 0 0 0 0 0 0
38258- 0 0 0 0 0 0 0 0 0 0 0 0
38259- 0 0 0 0 0 0 0 0 0 0 0 0
38260- 0 0 0 0 0 0 0 0 0 0 0 0
38261- 0 0 0 0 0 0 0 0 0 0 0 0
38262- 0 0 0 0 0 0 0 0 0 0 0 0
38263- 0 0 0 0 0 0 0 0 0 0 0 0
38264- 0 0 0 0 0 0 0 0 0 0 0 0
38265- 0 0 0 0 0 0 0 0 0 0 0 0
38266- 0 0 0 0 0 0 0 0 0 10 10 10
38267- 34 34 34 82 82 82 30 30 30 61 42 6
38268-180 123 7 206 145 10 230 174 11 239 182 13
38269-234 190 10 238 202 15 241 208 19 246 218 74
38270-246 218 38 246 215 20 246 215 20 246 215 20
38271-226 184 13 215 174 15 184 144 12 6 6 6
38272- 2 2 6 2 2 6 2 2 6 2 2 6
38273- 26 26 26 94 94 94 42 42 42 14 14 14
38274- 0 0 0 0 0 0 0 0 0 0 0 0
38275- 0 0 0 0 0 0 0 0 0 0 0 0
38276- 0 0 0 0 0 0 0 0 0 0 0 0
38277- 0 0 0 0 0 0 0 0 0 0 0 0
38278- 0 0 0 0 0 0 0 0 0 0 0 0
38279- 0 0 0 0 0 0 0 0 0 0 0 0
38280- 0 0 0 0 0 0 0 0 0 0 0 0
38281- 0 0 0 0 0 0 0 0 0 0 0 0
38282- 0 0 0 0 0 0 0 0 0 0 0 0
38283- 0 0 0 0 0 0 0 0 0 0 0 0
38284- 0 0 0 0 0 0 0 0 0 0 0 0
38285- 0 0 0 0 0 0 0 0 0 0 0 0
38286- 0 0 0 0 0 0 0 0 0 10 10 10
38287- 30 30 30 78 78 78 50 50 50 104 69 6
38288-192 133 9 216 158 10 236 178 12 236 186 11
38289-232 195 16 241 208 19 244 214 54 245 215 43
38290-246 215 20 246 215 20 241 208 19 198 155 10
38291-200 144 11 216 158 10 156 118 10 2 2 6
38292- 2 2 6 2 2 6 2 2 6 2 2 6
38293- 6 6 6 90 90 90 54 54 54 18 18 18
38294- 6 6 6 0 0 0 0 0 0 0 0 0
38295- 0 0 0 0 0 0 0 0 0 0 0 0
38296- 0 0 0 0 0 0 0 0 0 0 0 0
38297- 0 0 0 0 0 0 0 0 0 0 0 0
38298- 0 0 0 0 0 0 0 0 0 0 0 0
38299- 0 0 0 0 0 0 0 0 0 0 0 0
38300- 0 0 0 0 0 0 0 0 0 0 0 0
38301- 0 0 0 0 0 0 0 0 0 0 0 0
38302- 0 0 0 0 0 0 0 0 0 0 0 0
38303- 0 0 0 0 0 0 0 0 0 0 0 0
38304- 0 0 0 0 0 0 0 0 0 0 0 0
38305- 0 0 0 0 0 0 0 0 0 0 0 0
38306- 0 0 0 0 0 0 0 0 0 10 10 10
38307- 30 30 30 78 78 78 46 46 46 22 22 22
38308-137 92 6 210 162 10 239 182 13 238 190 10
38309-238 202 15 241 208 19 246 215 20 246 215 20
38310-241 208 19 203 166 17 185 133 11 210 150 10
38311-216 158 10 210 150 10 102 78 10 2 2 6
38312- 6 6 6 54 54 54 14 14 14 2 2 6
38313- 2 2 6 62 62 62 74 74 74 30 30 30
38314- 10 10 10 0 0 0 0 0 0 0 0 0
38315- 0 0 0 0 0 0 0 0 0 0 0 0
38316- 0 0 0 0 0 0 0 0 0 0 0 0
38317- 0 0 0 0 0 0 0 0 0 0 0 0
38318- 0 0 0 0 0 0 0 0 0 0 0 0
38319- 0 0 0 0 0 0 0 0 0 0 0 0
38320- 0 0 0 0 0 0 0 0 0 0 0 0
38321- 0 0 0 0 0 0 0 0 0 0 0 0
38322- 0 0 0 0 0 0 0 0 0 0 0 0
38323- 0 0 0 0 0 0 0 0 0 0 0 0
38324- 0 0 0 0 0 0 0 0 0 0 0 0
38325- 0 0 0 0 0 0 0 0 0 0 0 0
38326- 0 0 0 0 0 0 0 0 0 10 10 10
38327- 34 34 34 78 78 78 50 50 50 6 6 6
38328- 94 70 30 139 102 15 190 146 13 226 184 13
38329-232 200 30 232 195 16 215 174 15 190 146 13
38330-168 122 10 192 133 9 210 150 10 213 154 11
38331-202 150 34 182 157 106 101 98 89 2 2 6
38332- 2 2 6 78 78 78 116 116 116 58 58 58
38333- 2 2 6 22 22 22 90 90 90 46 46 46
38334- 18 18 18 6 6 6 0 0 0 0 0 0
38335- 0 0 0 0 0 0 0 0 0 0 0 0
38336- 0 0 0 0 0 0 0 0 0 0 0 0
38337- 0 0 0 0 0 0 0 0 0 0 0 0
38338- 0 0 0 0 0 0 0 0 0 0 0 0
38339- 0 0 0 0 0 0 0 0 0 0 0 0
38340- 0 0 0 0 0 0 0 0 0 0 0 0
38341- 0 0 0 0 0 0 0 0 0 0 0 0
38342- 0 0 0 0 0 0 0 0 0 0 0 0
38343- 0 0 0 0 0 0 0 0 0 0 0 0
38344- 0 0 0 0 0 0 0 0 0 0 0 0
38345- 0 0 0 0 0 0 0 0 0 0 0 0
38346- 0 0 0 0 0 0 0 0 0 10 10 10
38347- 38 38 38 86 86 86 50 50 50 6 6 6
38348-128 128 128 174 154 114 156 107 11 168 122 10
38349-198 155 10 184 144 12 197 138 11 200 144 11
38350-206 145 10 206 145 10 197 138 11 188 164 115
38351-195 195 195 198 198 198 174 174 174 14 14 14
38352- 2 2 6 22 22 22 116 116 116 116 116 116
38353- 22 22 22 2 2 6 74 74 74 70 70 70
38354- 30 30 30 10 10 10 0 0 0 0 0 0
38355- 0 0 0 0 0 0 0 0 0 0 0 0
38356- 0 0 0 0 0 0 0 0 0 0 0 0
38357- 0 0 0 0 0 0 0 0 0 0 0 0
38358- 0 0 0 0 0 0 0 0 0 0 0 0
38359- 0 0 0 0 0 0 0 0 0 0 0 0
38360- 0 0 0 0 0 0 0 0 0 0 0 0
38361- 0 0 0 0 0 0 0 0 0 0 0 0
38362- 0 0 0 0 0 0 0 0 0 0 0 0
38363- 0 0 0 0 0 0 0 0 0 0 0 0
38364- 0 0 0 0 0 0 0 0 0 0 0 0
38365- 0 0 0 0 0 0 0 0 0 0 0 0
38366- 0 0 0 0 0 0 6 6 6 18 18 18
38367- 50 50 50 101 101 101 26 26 26 10 10 10
38368-138 138 138 190 190 190 174 154 114 156 107 11
38369-197 138 11 200 144 11 197 138 11 192 133 9
38370-180 123 7 190 142 34 190 178 144 187 187 187
38371-202 202 202 221 221 221 214 214 214 66 66 66
38372- 2 2 6 2 2 6 50 50 50 62 62 62
38373- 6 6 6 2 2 6 10 10 10 90 90 90
38374- 50 50 50 18 18 18 6 6 6 0 0 0
38375- 0 0 0 0 0 0 0 0 0 0 0 0
38376- 0 0 0 0 0 0 0 0 0 0 0 0
38377- 0 0 0 0 0 0 0 0 0 0 0 0
38378- 0 0 0 0 0 0 0 0 0 0 0 0
38379- 0 0 0 0 0 0 0 0 0 0 0 0
38380- 0 0 0 0 0 0 0 0 0 0 0 0
38381- 0 0 0 0 0 0 0 0 0 0 0 0
38382- 0 0 0 0 0 0 0 0 0 0 0 0
38383- 0 0 0 0 0 0 0 0 0 0 0 0
38384- 0 0 0 0 0 0 0 0 0 0 0 0
38385- 0 0 0 0 0 0 0 0 0 0 0 0
38386- 0 0 0 0 0 0 10 10 10 34 34 34
38387- 74 74 74 74 74 74 2 2 6 6 6 6
38388-144 144 144 198 198 198 190 190 190 178 166 146
38389-154 121 60 156 107 11 156 107 11 168 124 44
38390-174 154 114 187 187 187 190 190 190 210 210 210
38391-246 246 246 253 253 253 253 253 253 182 182 182
38392- 6 6 6 2 2 6 2 2 6 2 2 6
38393- 2 2 6 2 2 6 2 2 6 62 62 62
38394- 74 74 74 34 34 34 14 14 14 0 0 0
38395- 0 0 0 0 0 0 0 0 0 0 0 0
38396- 0 0 0 0 0 0 0 0 0 0 0 0
38397- 0 0 0 0 0 0 0 0 0 0 0 0
38398- 0 0 0 0 0 0 0 0 0 0 0 0
38399- 0 0 0 0 0 0 0 0 0 0 0 0
38400- 0 0 0 0 0 0 0 0 0 0 0 0
38401- 0 0 0 0 0 0 0 0 0 0 0 0
38402- 0 0 0 0 0 0 0 0 0 0 0 0
38403- 0 0 0 0 0 0 0 0 0 0 0 0
38404- 0 0 0 0 0 0 0 0 0 0 0 0
38405- 0 0 0 0 0 0 0 0 0 0 0 0
38406- 0 0 0 10 10 10 22 22 22 54 54 54
38407- 94 94 94 18 18 18 2 2 6 46 46 46
38408-234 234 234 221 221 221 190 190 190 190 190 190
38409-190 190 190 187 187 187 187 187 187 190 190 190
38410-190 190 190 195 195 195 214 214 214 242 242 242
38411-253 253 253 253 253 253 253 253 253 253 253 253
38412- 82 82 82 2 2 6 2 2 6 2 2 6
38413- 2 2 6 2 2 6 2 2 6 14 14 14
38414- 86 86 86 54 54 54 22 22 22 6 6 6
38415- 0 0 0 0 0 0 0 0 0 0 0 0
38416- 0 0 0 0 0 0 0 0 0 0 0 0
38417- 0 0 0 0 0 0 0 0 0 0 0 0
38418- 0 0 0 0 0 0 0 0 0 0 0 0
38419- 0 0 0 0 0 0 0 0 0 0 0 0
38420- 0 0 0 0 0 0 0 0 0 0 0 0
38421- 0 0 0 0 0 0 0 0 0 0 0 0
38422- 0 0 0 0 0 0 0 0 0 0 0 0
38423- 0 0 0 0 0 0 0 0 0 0 0 0
38424- 0 0 0 0 0 0 0 0 0 0 0 0
38425- 0 0 0 0 0 0 0 0 0 0 0 0
38426- 6 6 6 18 18 18 46 46 46 90 90 90
38427- 46 46 46 18 18 18 6 6 6 182 182 182
38428-253 253 253 246 246 246 206 206 206 190 190 190
38429-190 190 190 190 190 190 190 190 190 190 190 190
38430-206 206 206 231 231 231 250 250 250 253 253 253
38431-253 253 253 253 253 253 253 253 253 253 253 253
38432-202 202 202 14 14 14 2 2 6 2 2 6
38433- 2 2 6 2 2 6 2 2 6 2 2 6
38434- 42 42 42 86 86 86 42 42 42 18 18 18
38435- 6 6 6 0 0 0 0 0 0 0 0 0
38436- 0 0 0 0 0 0 0 0 0 0 0 0
38437- 0 0 0 0 0 0 0 0 0 0 0 0
38438- 0 0 0 0 0 0 0 0 0 0 0 0
38439- 0 0 0 0 0 0 0 0 0 0 0 0
38440- 0 0 0 0 0 0 0 0 0 0 0 0
38441- 0 0 0 0 0 0 0 0 0 0 0 0
38442- 0 0 0 0 0 0 0 0 0 0 0 0
38443- 0 0 0 0 0 0 0 0 0 0 0 0
38444- 0 0 0 0 0 0 0 0 0 0 0 0
38445- 0 0 0 0 0 0 0 0 0 6 6 6
38446- 14 14 14 38 38 38 74 74 74 66 66 66
38447- 2 2 6 6 6 6 90 90 90 250 250 250
38448-253 253 253 253 253 253 238 238 238 198 198 198
38449-190 190 190 190 190 190 195 195 195 221 221 221
38450-246 246 246 253 253 253 253 253 253 253 253 253
38451-253 253 253 253 253 253 253 253 253 253 253 253
38452-253 253 253 82 82 82 2 2 6 2 2 6
38453- 2 2 6 2 2 6 2 2 6 2 2 6
38454- 2 2 6 78 78 78 70 70 70 34 34 34
38455- 14 14 14 6 6 6 0 0 0 0 0 0
38456- 0 0 0 0 0 0 0 0 0 0 0 0
38457- 0 0 0 0 0 0 0 0 0 0 0 0
38458- 0 0 0 0 0 0 0 0 0 0 0 0
38459- 0 0 0 0 0 0 0 0 0 0 0 0
38460- 0 0 0 0 0 0 0 0 0 0 0 0
38461- 0 0 0 0 0 0 0 0 0 0 0 0
38462- 0 0 0 0 0 0 0 0 0 0 0 0
38463- 0 0 0 0 0 0 0 0 0 0 0 0
38464- 0 0 0 0 0 0 0 0 0 0 0 0
38465- 0 0 0 0 0 0 0 0 0 14 14 14
38466- 34 34 34 66 66 66 78 78 78 6 6 6
38467- 2 2 6 18 18 18 218 218 218 253 253 253
38468-253 253 253 253 253 253 253 253 253 246 246 246
38469-226 226 226 231 231 231 246 246 246 253 253 253
38470-253 253 253 253 253 253 253 253 253 253 253 253
38471-253 253 253 253 253 253 253 253 253 253 253 253
38472-253 253 253 178 178 178 2 2 6 2 2 6
38473- 2 2 6 2 2 6 2 2 6 2 2 6
38474- 2 2 6 18 18 18 90 90 90 62 62 62
38475- 30 30 30 10 10 10 0 0 0 0 0 0
38476- 0 0 0 0 0 0 0 0 0 0 0 0
38477- 0 0 0 0 0 0 0 0 0 0 0 0
38478- 0 0 0 0 0 0 0 0 0 0 0 0
38479- 0 0 0 0 0 0 0 0 0 0 0 0
38480- 0 0 0 0 0 0 0 0 0 0 0 0
38481- 0 0 0 0 0 0 0 0 0 0 0 0
38482- 0 0 0 0 0 0 0 0 0 0 0 0
38483- 0 0 0 0 0 0 0 0 0 0 0 0
38484- 0 0 0 0 0 0 0 0 0 0 0 0
38485- 0 0 0 0 0 0 10 10 10 26 26 26
38486- 58 58 58 90 90 90 18 18 18 2 2 6
38487- 2 2 6 110 110 110 253 253 253 253 253 253
38488-253 253 253 253 253 253 253 253 253 253 253 253
38489-250 250 250 253 253 253 253 253 253 253 253 253
38490-253 253 253 253 253 253 253 253 253 253 253 253
38491-253 253 253 253 253 253 253 253 253 253 253 253
38492-253 253 253 231 231 231 18 18 18 2 2 6
38493- 2 2 6 2 2 6 2 2 6 2 2 6
38494- 2 2 6 2 2 6 18 18 18 94 94 94
38495- 54 54 54 26 26 26 10 10 10 0 0 0
38496- 0 0 0 0 0 0 0 0 0 0 0 0
38497- 0 0 0 0 0 0 0 0 0 0 0 0
38498- 0 0 0 0 0 0 0 0 0 0 0 0
38499- 0 0 0 0 0 0 0 0 0 0 0 0
38500- 0 0 0 0 0 0 0 0 0 0 0 0
38501- 0 0 0 0 0 0 0 0 0 0 0 0
38502- 0 0 0 0 0 0 0 0 0 0 0 0
38503- 0 0 0 0 0 0 0 0 0 0 0 0
38504- 0 0 0 0 0 0 0 0 0 0 0 0
38505- 0 0 0 6 6 6 22 22 22 50 50 50
38506- 90 90 90 26 26 26 2 2 6 2 2 6
38507- 14 14 14 195 195 195 250 250 250 253 253 253
38508-253 253 253 253 253 253 253 253 253 253 253 253
38509-253 253 253 253 253 253 253 253 253 253 253 253
38510-253 253 253 253 253 253 253 253 253 253 253 253
38511-253 253 253 253 253 253 253 253 253 253 253 253
38512-250 250 250 242 242 242 54 54 54 2 2 6
38513- 2 2 6 2 2 6 2 2 6 2 2 6
38514- 2 2 6 2 2 6 2 2 6 38 38 38
38515- 86 86 86 50 50 50 22 22 22 6 6 6
38516- 0 0 0 0 0 0 0 0 0 0 0 0
38517- 0 0 0 0 0 0 0 0 0 0 0 0
38518- 0 0 0 0 0 0 0 0 0 0 0 0
38519- 0 0 0 0 0 0 0 0 0 0 0 0
38520- 0 0 0 0 0 0 0 0 0 0 0 0
38521- 0 0 0 0 0 0 0 0 0 0 0 0
38522- 0 0 0 0 0 0 0 0 0 0 0 0
38523- 0 0 0 0 0 0 0 0 0 0 0 0
38524- 0 0 0 0 0 0 0 0 0 0 0 0
38525- 6 6 6 14 14 14 38 38 38 82 82 82
38526- 34 34 34 2 2 6 2 2 6 2 2 6
38527- 42 42 42 195 195 195 246 246 246 253 253 253
38528-253 253 253 253 253 253 253 253 253 250 250 250
38529-242 242 242 242 242 242 250 250 250 253 253 253
38530-253 253 253 253 253 253 253 253 253 253 253 253
38531-253 253 253 250 250 250 246 246 246 238 238 238
38532-226 226 226 231 231 231 101 101 101 6 6 6
38533- 2 2 6 2 2 6 2 2 6 2 2 6
38534- 2 2 6 2 2 6 2 2 6 2 2 6
38535- 38 38 38 82 82 82 42 42 42 14 14 14
38536- 6 6 6 0 0 0 0 0 0 0 0 0
38537- 0 0 0 0 0 0 0 0 0 0 0 0
38538- 0 0 0 0 0 0 0 0 0 0 0 0
38539- 0 0 0 0 0 0 0 0 0 0 0 0
38540- 0 0 0 0 0 0 0 0 0 0 0 0
38541- 0 0 0 0 0 0 0 0 0 0 0 0
38542- 0 0 0 0 0 0 0 0 0 0 0 0
38543- 0 0 0 0 0 0 0 0 0 0 0 0
38544- 0 0 0 0 0 0 0 0 0 0 0 0
38545- 10 10 10 26 26 26 62 62 62 66 66 66
38546- 2 2 6 2 2 6 2 2 6 6 6 6
38547- 70 70 70 170 170 170 206 206 206 234 234 234
38548-246 246 246 250 250 250 250 250 250 238 238 238
38549-226 226 226 231 231 231 238 238 238 250 250 250
38550-250 250 250 250 250 250 246 246 246 231 231 231
38551-214 214 214 206 206 206 202 202 202 202 202 202
38552-198 198 198 202 202 202 182 182 182 18 18 18
38553- 2 2 6 2 2 6 2 2 6 2 2 6
38554- 2 2 6 2 2 6 2 2 6 2 2 6
38555- 2 2 6 62 62 62 66 66 66 30 30 30
38556- 10 10 10 0 0 0 0 0 0 0 0 0
38557- 0 0 0 0 0 0 0 0 0 0 0 0
38558- 0 0 0 0 0 0 0 0 0 0 0 0
38559- 0 0 0 0 0 0 0 0 0 0 0 0
38560- 0 0 0 0 0 0 0 0 0 0 0 0
38561- 0 0 0 0 0 0 0 0 0 0 0 0
38562- 0 0 0 0 0 0 0 0 0 0 0 0
38563- 0 0 0 0 0 0 0 0 0 0 0 0
38564- 0 0 0 0 0 0 0 0 0 0 0 0
38565- 14 14 14 42 42 42 82 82 82 18 18 18
38566- 2 2 6 2 2 6 2 2 6 10 10 10
38567- 94 94 94 182 182 182 218 218 218 242 242 242
38568-250 250 250 253 253 253 253 253 253 250 250 250
38569-234 234 234 253 253 253 253 253 253 253 253 253
38570-253 253 253 253 253 253 253 253 253 246 246 246
38571-238 238 238 226 226 226 210 210 210 202 202 202
38572-195 195 195 195 195 195 210 210 210 158 158 158
38573- 6 6 6 14 14 14 50 50 50 14 14 14
38574- 2 2 6 2 2 6 2 2 6 2 2 6
38575- 2 2 6 6 6 6 86 86 86 46 46 46
38576- 18 18 18 6 6 6 0 0 0 0 0 0
38577- 0 0 0 0 0 0 0 0 0 0 0 0
38578- 0 0 0 0 0 0 0 0 0 0 0 0
38579- 0 0 0 0 0 0 0 0 0 0 0 0
38580- 0 0 0 0 0 0 0 0 0 0 0 0
38581- 0 0 0 0 0 0 0 0 0 0 0 0
38582- 0 0 0 0 0 0 0 0 0 0 0 0
38583- 0 0 0 0 0 0 0 0 0 0 0 0
38584- 0 0 0 0 0 0 0 0 0 6 6 6
38585- 22 22 22 54 54 54 70 70 70 2 2 6
38586- 2 2 6 10 10 10 2 2 6 22 22 22
38587-166 166 166 231 231 231 250 250 250 253 253 253
38588-253 253 253 253 253 253 253 253 253 250 250 250
38589-242 242 242 253 253 253 253 253 253 253 253 253
38590-253 253 253 253 253 253 253 253 253 253 253 253
38591-253 253 253 253 253 253 253 253 253 246 246 246
38592-231 231 231 206 206 206 198 198 198 226 226 226
38593- 94 94 94 2 2 6 6 6 6 38 38 38
38594- 30 30 30 2 2 6 2 2 6 2 2 6
38595- 2 2 6 2 2 6 62 62 62 66 66 66
38596- 26 26 26 10 10 10 0 0 0 0 0 0
38597- 0 0 0 0 0 0 0 0 0 0 0 0
38598- 0 0 0 0 0 0 0 0 0 0 0 0
38599- 0 0 0 0 0 0 0 0 0 0 0 0
38600- 0 0 0 0 0 0 0 0 0 0 0 0
38601- 0 0 0 0 0 0 0 0 0 0 0 0
38602- 0 0 0 0 0 0 0 0 0 0 0 0
38603- 0 0 0 0 0 0 0 0 0 0 0 0
38604- 0 0 0 0 0 0 0 0 0 10 10 10
38605- 30 30 30 74 74 74 50 50 50 2 2 6
38606- 26 26 26 26 26 26 2 2 6 106 106 106
38607-238 238 238 253 253 253 253 253 253 253 253 253
38608-253 253 253 253 253 253 253 253 253 253 253 253
38609-253 253 253 253 253 253 253 253 253 253 253 253
38610-253 253 253 253 253 253 253 253 253 253 253 253
38611-253 253 253 253 253 253 253 253 253 253 253 253
38612-253 253 253 246 246 246 218 218 218 202 202 202
38613-210 210 210 14 14 14 2 2 6 2 2 6
38614- 30 30 30 22 22 22 2 2 6 2 2 6
38615- 2 2 6 2 2 6 18 18 18 86 86 86
38616- 42 42 42 14 14 14 0 0 0 0 0 0
38617- 0 0 0 0 0 0 0 0 0 0 0 0
38618- 0 0 0 0 0 0 0 0 0 0 0 0
38619- 0 0 0 0 0 0 0 0 0 0 0 0
38620- 0 0 0 0 0 0 0 0 0 0 0 0
38621- 0 0 0 0 0 0 0 0 0 0 0 0
38622- 0 0 0 0 0 0 0 0 0 0 0 0
38623- 0 0 0 0 0 0 0 0 0 0 0 0
38624- 0 0 0 0 0 0 0 0 0 14 14 14
38625- 42 42 42 90 90 90 22 22 22 2 2 6
38626- 42 42 42 2 2 6 18 18 18 218 218 218
38627-253 253 253 253 253 253 253 253 253 253 253 253
38628-253 253 253 253 253 253 253 253 253 253 253 253
38629-253 253 253 253 253 253 253 253 253 253 253 253
38630-253 253 253 253 253 253 253 253 253 253 253 253
38631-253 253 253 253 253 253 253 253 253 253 253 253
38632-253 253 253 253 253 253 250 250 250 221 221 221
38633-218 218 218 101 101 101 2 2 6 14 14 14
38634- 18 18 18 38 38 38 10 10 10 2 2 6
38635- 2 2 6 2 2 6 2 2 6 78 78 78
38636- 58 58 58 22 22 22 6 6 6 0 0 0
38637- 0 0 0 0 0 0 0 0 0 0 0 0
38638- 0 0 0 0 0 0 0 0 0 0 0 0
38639- 0 0 0 0 0 0 0 0 0 0 0 0
38640- 0 0 0 0 0 0 0 0 0 0 0 0
38641- 0 0 0 0 0 0 0 0 0 0 0 0
38642- 0 0 0 0 0 0 0 0 0 0 0 0
38643- 0 0 0 0 0 0 0 0 0 0 0 0
38644- 0 0 0 0 0 0 6 6 6 18 18 18
38645- 54 54 54 82 82 82 2 2 6 26 26 26
38646- 22 22 22 2 2 6 123 123 123 253 253 253
38647-253 253 253 253 253 253 253 253 253 253 253 253
38648-253 253 253 253 253 253 253 253 253 253 253 253
38649-253 253 253 253 253 253 253 253 253 253 253 253
38650-253 253 253 253 253 253 253 253 253 253 253 253
38651-253 253 253 253 253 253 253 253 253 253 253 253
38652-253 253 253 253 253 253 253 253 253 250 250 250
38653-238 238 238 198 198 198 6 6 6 38 38 38
38654- 58 58 58 26 26 26 38 38 38 2 2 6
38655- 2 2 6 2 2 6 2 2 6 46 46 46
38656- 78 78 78 30 30 30 10 10 10 0 0 0
38657- 0 0 0 0 0 0 0 0 0 0 0 0
38658- 0 0 0 0 0 0 0 0 0 0 0 0
38659- 0 0 0 0 0 0 0 0 0 0 0 0
38660- 0 0 0 0 0 0 0 0 0 0 0 0
38661- 0 0 0 0 0 0 0 0 0 0 0 0
38662- 0 0 0 0 0 0 0 0 0 0 0 0
38663- 0 0 0 0 0 0 0 0 0 0 0 0
38664- 0 0 0 0 0 0 10 10 10 30 30 30
38665- 74 74 74 58 58 58 2 2 6 42 42 42
38666- 2 2 6 22 22 22 231 231 231 253 253 253
38667-253 253 253 253 253 253 253 253 253 253 253 253
38668-253 253 253 253 253 253 253 253 253 250 250 250
38669-253 253 253 253 253 253 253 253 253 253 253 253
38670-253 253 253 253 253 253 253 253 253 253 253 253
38671-253 253 253 253 253 253 253 253 253 253 253 253
38672-253 253 253 253 253 253 253 253 253 253 253 253
38673-253 253 253 246 246 246 46 46 46 38 38 38
38674- 42 42 42 14 14 14 38 38 38 14 14 14
38675- 2 2 6 2 2 6 2 2 6 6 6 6
38676- 86 86 86 46 46 46 14 14 14 0 0 0
38677- 0 0 0 0 0 0 0 0 0 0 0 0
38678- 0 0 0 0 0 0 0 0 0 0 0 0
38679- 0 0 0 0 0 0 0 0 0 0 0 0
38680- 0 0 0 0 0 0 0 0 0 0 0 0
38681- 0 0 0 0 0 0 0 0 0 0 0 0
38682- 0 0 0 0 0 0 0 0 0 0 0 0
38683- 0 0 0 0 0 0 0 0 0 0 0 0
38684- 0 0 0 6 6 6 14 14 14 42 42 42
38685- 90 90 90 18 18 18 18 18 18 26 26 26
38686- 2 2 6 116 116 116 253 253 253 253 253 253
38687-253 253 253 253 253 253 253 253 253 253 253 253
38688-253 253 253 253 253 253 250 250 250 238 238 238
38689-253 253 253 253 253 253 253 253 253 253 253 253
38690-253 253 253 253 253 253 253 253 253 253 253 253
38691-253 253 253 253 253 253 253 253 253 253 253 253
38692-253 253 253 253 253 253 253 253 253 253 253 253
38693-253 253 253 253 253 253 94 94 94 6 6 6
38694- 2 2 6 2 2 6 10 10 10 34 34 34
38695- 2 2 6 2 2 6 2 2 6 2 2 6
38696- 74 74 74 58 58 58 22 22 22 6 6 6
38697- 0 0 0 0 0 0 0 0 0 0 0 0
38698- 0 0 0 0 0 0 0 0 0 0 0 0
38699- 0 0 0 0 0 0 0 0 0 0 0 0
38700- 0 0 0 0 0 0 0 0 0 0 0 0
38701- 0 0 0 0 0 0 0 0 0 0 0 0
38702- 0 0 0 0 0 0 0 0 0 0 0 0
38703- 0 0 0 0 0 0 0 0 0 0 0 0
38704- 0 0 0 10 10 10 26 26 26 66 66 66
38705- 82 82 82 2 2 6 38 38 38 6 6 6
38706- 14 14 14 210 210 210 253 253 253 253 253 253
38707-253 253 253 253 253 253 253 253 253 253 253 253
38708-253 253 253 253 253 253 246 246 246 242 242 242
38709-253 253 253 253 253 253 253 253 253 253 253 253
38710-253 253 253 253 253 253 253 253 253 253 253 253
38711-253 253 253 253 253 253 253 253 253 253 253 253
38712-253 253 253 253 253 253 253 253 253 253 253 253
38713-253 253 253 253 253 253 144 144 144 2 2 6
38714- 2 2 6 2 2 6 2 2 6 46 46 46
38715- 2 2 6 2 2 6 2 2 6 2 2 6
38716- 42 42 42 74 74 74 30 30 30 10 10 10
38717- 0 0 0 0 0 0 0 0 0 0 0 0
38718- 0 0 0 0 0 0 0 0 0 0 0 0
38719- 0 0 0 0 0 0 0 0 0 0 0 0
38720- 0 0 0 0 0 0 0 0 0 0 0 0
38721- 0 0 0 0 0 0 0 0 0 0 0 0
38722- 0 0 0 0 0 0 0 0 0 0 0 0
38723- 0 0 0 0 0 0 0 0 0 0 0 0
38724- 6 6 6 14 14 14 42 42 42 90 90 90
38725- 26 26 26 6 6 6 42 42 42 2 2 6
38726- 74 74 74 250 250 250 253 253 253 253 253 253
38727-253 253 253 253 253 253 253 253 253 253 253 253
38728-253 253 253 253 253 253 242 242 242 242 242 242
38729-253 253 253 253 253 253 253 253 253 253 253 253
38730-253 253 253 253 253 253 253 253 253 253 253 253
38731-253 253 253 253 253 253 253 253 253 253 253 253
38732-253 253 253 253 253 253 253 253 253 253 253 253
38733-253 253 253 253 253 253 182 182 182 2 2 6
38734- 2 2 6 2 2 6 2 2 6 46 46 46
38735- 2 2 6 2 2 6 2 2 6 2 2 6
38736- 10 10 10 86 86 86 38 38 38 10 10 10
38737- 0 0 0 0 0 0 0 0 0 0 0 0
38738- 0 0 0 0 0 0 0 0 0 0 0 0
38739- 0 0 0 0 0 0 0 0 0 0 0 0
38740- 0 0 0 0 0 0 0 0 0 0 0 0
38741- 0 0 0 0 0 0 0 0 0 0 0 0
38742- 0 0 0 0 0 0 0 0 0 0 0 0
38743- 0 0 0 0 0 0 0 0 0 0 0 0
38744- 10 10 10 26 26 26 66 66 66 82 82 82
38745- 2 2 6 22 22 22 18 18 18 2 2 6
38746-149 149 149 253 253 253 253 253 253 253 253 253
38747-253 253 253 253 253 253 253 253 253 253 253 253
38748-253 253 253 253 253 253 234 234 234 242 242 242
38749-253 253 253 253 253 253 253 253 253 253 253 253
38750-253 253 253 253 253 253 253 253 253 253 253 253
38751-253 253 253 253 253 253 253 253 253 253 253 253
38752-253 253 253 253 253 253 253 253 253 253 253 253
38753-253 253 253 253 253 253 206 206 206 2 2 6
38754- 2 2 6 2 2 6 2 2 6 38 38 38
38755- 2 2 6 2 2 6 2 2 6 2 2 6
38756- 6 6 6 86 86 86 46 46 46 14 14 14
38757- 0 0 0 0 0 0 0 0 0 0 0 0
38758- 0 0 0 0 0 0 0 0 0 0 0 0
38759- 0 0 0 0 0 0 0 0 0 0 0 0
38760- 0 0 0 0 0 0 0 0 0 0 0 0
38761- 0 0 0 0 0 0 0 0 0 0 0 0
38762- 0 0 0 0 0 0 0 0 0 0 0 0
38763- 0 0 0 0 0 0 0 0 0 6 6 6
38764- 18 18 18 46 46 46 86 86 86 18 18 18
38765- 2 2 6 34 34 34 10 10 10 6 6 6
38766-210 210 210 253 253 253 253 253 253 253 253 253
38767-253 253 253 253 253 253 253 253 253 253 253 253
38768-253 253 253 253 253 253 234 234 234 242 242 242
38769-253 253 253 253 253 253 253 253 253 253 253 253
38770-253 253 253 253 253 253 253 253 253 253 253 253
38771-253 253 253 253 253 253 253 253 253 253 253 253
38772-253 253 253 253 253 253 253 253 253 253 253 253
38773-253 253 253 253 253 253 221 221 221 6 6 6
38774- 2 2 6 2 2 6 6 6 6 30 30 30
38775- 2 2 6 2 2 6 2 2 6 2 2 6
38776- 2 2 6 82 82 82 54 54 54 18 18 18
38777- 6 6 6 0 0 0 0 0 0 0 0 0
38778- 0 0 0 0 0 0 0 0 0 0 0 0
38779- 0 0 0 0 0 0 0 0 0 0 0 0
38780- 0 0 0 0 0 0 0 0 0 0 0 0
38781- 0 0 0 0 0 0 0 0 0 0 0 0
38782- 0 0 0 0 0 0 0 0 0 0 0 0
38783- 0 0 0 0 0 0 0 0 0 10 10 10
38784- 26 26 26 66 66 66 62 62 62 2 2 6
38785- 2 2 6 38 38 38 10 10 10 26 26 26
38786-238 238 238 253 253 253 253 253 253 253 253 253
38787-253 253 253 253 253 253 253 253 253 253 253 253
38788-253 253 253 253 253 253 231 231 231 238 238 238
38789-253 253 253 253 253 253 253 253 253 253 253 253
38790-253 253 253 253 253 253 253 253 253 253 253 253
38791-253 253 253 253 253 253 253 253 253 253 253 253
38792-253 253 253 253 253 253 253 253 253 253 253 253
38793-253 253 253 253 253 253 231 231 231 6 6 6
38794- 2 2 6 2 2 6 10 10 10 30 30 30
38795- 2 2 6 2 2 6 2 2 6 2 2 6
38796- 2 2 6 66 66 66 58 58 58 22 22 22
38797- 6 6 6 0 0 0 0 0 0 0 0 0
38798- 0 0 0 0 0 0 0 0 0 0 0 0
38799- 0 0 0 0 0 0 0 0 0 0 0 0
38800- 0 0 0 0 0 0 0 0 0 0 0 0
38801- 0 0 0 0 0 0 0 0 0 0 0 0
38802- 0 0 0 0 0 0 0 0 0 0 0 0
38803- 0 0 0 0 0 0 0 0 0 10 10 10
38804- 38 38 38 78 78 78 6 6 6 2 2 6
38805- 2 2 6 46 46 46 14 14 14 42 42 42
38806-246 246 246 253 253 253 253 253 253 253 253 253
38807-253 253 253 253 253 253 253 253 253 253 253 253
38808-253 253 253 253 253 253 231 231 231 242 242 242
38809-253 253 253 253 253 253 253 253 253 253 253 253
38810-253 253 253 253 253 253 253 253 253 253 253 253
38811-253 253 253 253 253 253 253 253 253 253 253 253
38812-253 253 253 253 253 253 253 253 253 253 253 253
38813-253 253 253 253 253 253 234 234 234 10 10 10
38814- 2 2 6 2 2 6 22 22 22 14 14 14
38815- 2 2 6 2 2 6 2 2 6 2 2 6
38816- 2 2 6 66 66 66 62 62 62 22 22 22
38817- 6 6 6 0 0 0 0 0 0 0 0 0
38818- 0 0 0 0 0 0 0 0 0 0 0 0
38819- 0 0 0 0 0 0 0 0 0 0 0 0
38820- 0 0 0 0 0 0 0 0 0 0 0 0
38821- 0 0 0 0 0 0 0 0 0 0 0 0
38822- 0 0 0 0 0 0 0 0 0 0 0 0
38823- 0 0 0 0 0 0 6 6 6 18 18 18
38824- 50 50 50 74 74 74 2 2 6 2 2 6
38825- 14 14 14 70 70 70 34 34 34 62 62 62
38826-250 250 250 253 253 253 253 253 253 253 253 253
38827-253 253 253 253 253 253 253 253 253 253 253 253
38828-253 253 253 253 253 253 231 231 231 246 246 246
38829-253 253 253 253 253 253 253 253 253 253 253 253
38830-253 253 253 253 253 253 253 253 253 253 253 253
38831-253 253 253 253 253 253 253 253 253 253 253 253
38832-253 253 253 253 253 253 253 253 253 253 253 253
38833-253 253 253 253 253 253 234 234 234 14 14 14
38834- 2 2 6 2 2 6 30 30 30 2 2 6
38835- 2 2 6 2 2 6 2 2 6 2 2 6
38836- 2 2 6 66 66 66 62 62 62 22 22 22
38837- 6 6 6 0 0 0 0 0 0 0 0 0
38838- 0 0 0 0 0 0 0 0 0 0 0 0
38839- 0 0 0 0 0 0 0 0 0 0 0 0
38840- 0 0 0 0 0 0 0 0 0 0 0 0
38841- 0 0 0 0 0 0 0 0 0 0 0 0
38842- 0 0 0 0 0 0 0 0 0 0 0 0
38843- 0 0 0 0 0 0 6 6 6 18 18 18
38844- 54 54 54 62 62 62 2 2 6 2 2 6
38845- 2 2 6 30 30 30 46 46 46 70 70 70
38846-250 250 250 253 253 253 253 253 253 253 253 253
38847-253 253 253 253 253 253 253 253 253 253 253 253
38848-253 253 253 253 253 253 231 231 231 246 246 246
38849-253 253 253 253 253 253 253 253 253 253 253 253
38850-253 253 253 253 253 253 253 253 253 253 253 253
38851-253 253 253 253 253 253 253 253 253 253 253 253
38852-253 253 253 253 253 253 253 253 253 253 253 253
38853-253 253 253 253 253 253 226 226 226 10 10 10
38854- 2 2 6 6 6 6 30 30 30 2 2 6
38855- 2 2 6 2 2 6 2 2 6 2 2 6
38856- 2 2 6 66 66 66 58 58 58 22 22 22
38857- 6 6 6 0 0 0 0 0 0 0 0 0
38858- 0 0 0 0 0 0 0 0 0 0 0 0
38859- 0 0 0 0 0 0 0 0 0 0 0 0
38860- 0 0 0 0 0 0 0 0 0 0 0 0
38861- 0 0 0 0 0 0 0 0 0 0 0 0
38862- 0 0 0 0 0 0 0 0 0 0 0 0
38863- 0 0 0 0 0 0 6 6 6 22 22 22
38864- 58 58 58 62 62 62 2 2 6 2 2 6
38865- 2 2 6 2 2 6 30 30 30 78 78 78
38866-250 250 250 253 253 253 253 253 253 253 253 253
38867-253 253 253 253 253 253 253 253 253 253 253 253
38868-253 253 253 253 253 253 231 231 231 246 246 246
38869-253 253 253 253 253 253 253 253 253 253 253 253
38870-253 253 253 253 253 253 253 253 253 253 253 253
38871-253 253 253 253 253 253 253 253 253 253 253 253
38872-253 253 253 253 253 253 253 253 253 253 253 253
38873-253 253 253 253 253 253 206 206 206 2 2 6
38874- 22 22 22 34 34 34 18 14 6 22 22 22
38875- 26 26 26 18 18 18 6 6 6 2 2 6
38876- 2 2 6 82 82 82 54 54 54 18 18 18
38877- 6 6 6 0 0 0 0 0 0 0 0 0
38878- 0 0 0 0 0 0 0 0 0 0 0 0
38879- 0 0 0 0 0 0 0 0 0 0 0 0
38880- 0 0 0 0 0 0 0 0 0 0 0 0
38881- 0 0 0 0 0 0 0 0 0 0 0 0
38882- 0 0 0 0 0 0 0 0 0 0 0 0
38883- 0 0 0 0 0 0 6 6 6 26 26 26
38884- 62 62 62 106 106 106 74 54 14 185 133 11
38885-210 162 10 121 92 8 6 6 6 62 62 62
38886-238 238 238 253 253 253 253 253 253 253 253 253
38887-253 253 253 253 253 253 253 253 253 253 253 253
38888-253 253 253 253 253 253 231 231 231 246 246 246
38889-253 253 253 253 253 253 253 253 253 253 253 253
38890-253 253 253 253 253 253 253 253 253 253 253 253
38891-253 253 253 253 253 253 253 253 253 253 253 253
38892-253 253 253 253 253 253 253 253 253 253 253 253
38893-253 253 253 253 253 253 158 158 158 18 18 18
38894- 14 14 14 2 2 6 2 2 6 2 2 6
38895- 6 6 6 18 18 18 66 66 66 38 38 38
38896- 6 6 6 94 94 94 50 50 50 18 18 18
38897- 6 6 6 0 0 0 0 0 0 0 0 0
38898- 0 0 0 0 0 0 0 0 0 0 0 0
38899- 0 0 0 0 0 0 0 0 0 0 0 0
38900- 0 0 0 0 0 0 0 0 0 0 0 0
38901- 0 0 0 0 0 0 0 0 0 0 0 0
38902- 0 0 0 0 0 0 0 0 0 6 6 6
38903- 10 10 10 10 10 10 18 18 18 38 38 38
38904- 78 78 78 142 134 106 216 158 10 242 186 14
38905-246 190 14 246 190 14 156 118 10 10 10 10
38906- 90 90 90 238 238 238 253 253 253 253 253 253
38907-253 253 253 253 253 253 253 253 253 253 253 253
38908-253 253 253 253 253 253 231 231 231 250 250 250
38909-253 253 253 253 253 253 253 253 253 253 253 253
38910-253 253 253 253 253 253 253 253 253 253 253 253
38911-253 253 253 253 253 253 253 253 253 253 253 253
38912-253 253 253 253 253 253 253 253 253 246 230 190
38913-238 204 91 238 204 91 181 142 44 37 26 9
38914- 2 2 6 2 2 6 2 2 6 2 2 6
38915- 2 2 6 2 2 6 38 38 38 46 46 46
38916- 26 26 26 106 106 106 54 54 54 18 18 18
38917- 6 6 6 0 0 0 0 0 0 0 0 0
38918- 0 0 0 0 0 0 0 0 0 0 0 0
38919- 0 0 0 0 0 0 0 0 0 0 0 0
38920- 0 0 0 0 0 0 0 0 0 0 0 0
38921- 0 0 0 0 0 0 0 0 0 0 0 0
38922- 0 0 0 6 6 6 14 14 14 22 22 22
38923- 30 30 30 38 38 38 50 50 50 70 70 70
38924-106 106 106 190 142 34 226 170 11 242 186 14
38925-246 190 14 246 190 14 246 190 14 154 114 10
38926- 6 6 6 74 74 74 226 226 226 253 253 253
38927-253 253 253 253 253 253 253 253 253 253 253 253
38928-253 253 253 253 253 253 231 231 231 250 250 250
38929-253 253 253 253 253 253 253 253 253 253 253 253
38930-253 253 253 253 253 253 253 253 253 253 253 253
38931-253 253 253 253 253 253 253 253 253 253 253 253
38932-253 253 253 253 253 253 253 253 253 228 184 62
38933-241 196 14 241 208 19 232 195 16 38 30 10
38934- 2 2 6 2 2 6 2 2 6 2 2 6
38935- 2 2 6 6 6 6 30 30 30 26 26 26
38936-203 166 17 154 142 90 66 66 66 26 26 26
38937- 6 6 6 0 0 0 0 0 0 0 0 0
38938- 0 0 0 0 0 0 0 0 0 0 0 0
38939- 0 0 0 0 0 0 0 0 0 0 0 0
38940- 0 0 0 0 0 0 0 0 0 0 0 0
38941- 0 0 0 0 0 0 0 0 0 0 0 0
38942- 6 6 6 18 18 18 38 38 38 58 58 58
38943- 78 78 78 86 86 86 101 101 101 123 123 123
38944-175 146 61 210 150 10 234 174 13 246 186 14
38945-246 190 14 246 190 14 246 190 14 238 190 10
38946-102 78 10 2 2 6 46 46 46 198 198 198
38947-253 253 253 253 253 253 253 253 253 253 253 253
38948-253 253 253 253 253 253 234 234 234 242 242 242
38949-253 253 253 253 253 253 253 253 253 253 253 253
38950-253 253 253 253 253 253 253 253 253 253 253 253
38951-253 253 253 253 253 253 253 253 253 253 253 253
38952-253 253 253 253 253 253 253 253 253 224 178 62
38953-242 186 14 241 196 14 210 166 10 22 18 6
38954- 2 2 6 2 2 6 2 2 6 2 2 6
38955- 2 2 6 2 2 6 6 6 6 121 92 8
38956-238 202 15 232 195 16 82 82 82 34 34 34
38957- 10 10 10 0 0 0 0 0 0 0 0 0
38958- 0 0 0 0 0 0 0 0 0 0 0 0
38959- 0 0 0 0 0 0 0 0 0 0 0 0
38960- 0 0 0 0 0 0 0 0 0 0 0 0
38961- 0 0 0 0 0 0 0 0 0 0 0 0
38962- 14 14 14 38 38 38 70 70 70 154 122 46
38963-190 142 34 200 144 11 197 138 11 197 138 11
38964-213 154 11 226 170 11 242 186 14 246 190 14
38965-246 190 14 246 190 14 246 190 14 246 190 14
38966-225 175 15 46 32 6 2 2 6 22 22 22
38967-158 158 158 250 250 250 253 253 253 253 253 253
38968-253 253 253 253 253 253 253 253 253 253 253 253
38969-253 253 253 253 253 253 253 253 253 253 253 253
38970-253 253 253 253 253 253 253 253 253 253 253 253
38971-253 253 253 253 253 253 253 253 253 253 253 253
38972-253 253 253 250 250 250 242 242 242 224 178 62
38973-239 182 13 236 186 11 213 154 11 46 32 6
38974- 2 2 6 2 2 6 2 2 6 2 2 6
38975- 2 2 6 2 2 6 61 42 6 225 175 15
38976-238 190 10 236 186 11 112 100 78 42 42 42
38977- 14 14 14 0 0 0 0 0 0 0 0 0
38978- 0 0 0 0 0 0 0 0 0 0 0 0
38979- 0 0 0 0 0 0 0 0 0 0 0 0
38980- 0 0 0 0 0 0 0 0 0 0 0 0
38981- 0 0 0 0 0 0 0 0 0 6 6 6
38982- 22 22 22 54 54 54 154 122 46 213 154 11
38983-226 170 11 230 174 11 226 170 11 226 170 11
38984-236 178 12 242 186 14 246 190 14 246 190 14
38985-246 190 14 246 190 14 246 190 14 246 190 14
38986-241 196 14 184 144 12 10 10 10 2 2 6
38987- 6 6 6 116 116 116 242 242 242 253 253 253
38988-253 253 253 253 253 253 253 253 253 253 253 253
38989-253 253 253 253 253 253 253 253 253 253 253 253
38990-253 253 253 253 253 253 253 253 253 253 253 253
38991-253 253 253 253 253 253 253 253 253 253 253 253
38992-253 253 253 231 231 231 198 198 198 214 170 54
38993-236 178 12 236 178 12 210 150 10 137 92 6
38994- 18 14 6 2 2 6 2 2 6 2 2 6
38995- 6 6 6 70 47 6 200 144 11 236 178 12
38996-239 182 13 239 182 13 124 112 88 58 58 58
38997- 22 22 22 6 6 6 0 0 0 0 0 0
38998- 0 0 0 0 0 0 0 0 0 0 0 0
38999- 0 0 0 0 0 0 0 0 0 0 0 0
39000- 0 0 0 0 0 0 0 0 0 0 0 0
39001- 0 0 0 0 0 0 0 0 0 10 10 10
39002- 30 30 30 70 70 70 180 133 36 226 170 11
39003-239 182 13 242 186 14 242 186 14 246 186 14
39004-246 190 14 246 190 14 246 190 14 246 190 14
39005-246 190 14 246 190 14 246 190 14 246 190 14
39006-246 190 14 232 195 16 98 70 6 2 2 6
39007- 2 2 6 2 2 6 66 66 66 221 221 221
39008-253 253 253 253 253 253 253 253 253 253 253 253
39009-253 253 253 253 253 253 253 253 253 253 253 253
39010-253 253 253 253 253 253 253 253 253 253 253 253
39011-253 253 253 253 253 253 253 253 253 253 253 253
39012-253 253 253 206 206 206 198 198 198 214 166 58
39013-230 174 11 230 174 11 216 158 10 192 133 9
39014-163 110 8 116 81 8 102 78 10 116 81 8
39015-167 114 7 197 138 11 226 170 11 239 182 13
39016-242 186 14 242 186 14 162 146 94 78 78 78
39017- 34 34 34 14 14 14 6 6 6 0 0 0
39018- 0 0 0 0 0 0 0 0 0 0 0 0
39019- 0 0 0 0 0 0 0 0 0 0 0 0
39020- 0 0 0 0 0 0 0 0 0 0 0 0
39021- 0 0 0 0 0 0 0 0 0 6 6 6
39022- 30 30 30 78 78 78 190 142 34 226 170 11
39023-239 182 13 246 190 14 246 190 14 246 190 14
39024-246 190 14 246 190 14 246 190 14 246 190 14
39025-246 190 14 246 190 14 246 190 14 246 190 14
39026-246 190 14 241 196 14 203 166 17 22 18 6
39027- 2 2 6 2 2 6 2 2 6 38 38 38
39028-218 218 218 253 253 253 253 253 253 253 253 253
39029-253 253 253 253 253 253 253 253 253 253 253 253
39030-253 253 253 253 253 253 253 253 253 253 253 253
39031-253 253 253 253 253 253 253 253 253 253 253 253
39032-250 250 250 206 206 206 198 198 198 202 162 69
39033-226 170 11 236 178 12 224 166 10 210 150 10
39034-200 144 11 197 138 11 192 133 9 197 138 11
39035-210 150 10 226 170 11 242 186 14 246 190 14
39036-246 190 14 246 186 14 225 175 15 124 112 88
39037- 62 62 62 30 30 30 14 14 14 6 6 6
39038- 0 0 0 0 0 0 0 0 0 0 0 0
39039- 0 0 0 0 0 0 0 0 0 0 0 0
39040- 0 0 0 0 0 0 0 0 0 0 0 0
39041- 0 0 0 0 0 0 0 0 0 10 10 10
39042- 30 30 30 78 78 78 174 135 50 224 166 10
39043-239 182 13 246 190 14 246 190 14 246 190 14
39044-246 190 14 246 190 14 246 190 14 246 190 14
39045-246 190 14 246 190 14 246 190 14 246 190 14
39046-246 190 14 246 190 14 241 196 14 139 102 15
39047- 2 2 6 2 2 6 2 2 6 2 2 6
39048- 78 78 78 250 250 250 253 253 253 253 253 253
39049-253 253 253 253 253 253 253 253 253 253 253 253
39050-253 253 253 253 253 253 253 253 253 253 253 253
39051-253 253 253 253 253 253 253 253 253 253 253 253
39052-250 250 250 214 214 214 198 198 198 190 150 46
39053-219 162 10 236 178 12 234 174 13 224 166 10
39054-216 158 10 213 154 11 213 154 11 216 158 10
39055-226 170 11 239 182 13 246 190 14 246 190 14
39056-246 190 14 246 190 14 242 186 14 206 162 42
39057-101 101 101 58 58 58 30 30 30 14 14 14
39058- 6 6 6 0 0 0 0 0 0 0 0 0
39059- 0 0 0 0 0 0 0 0 0 0 0 0
39060- 0 0 0 0 0 0 0 0 0 0 0 0
39061- 0 0 0 0 0 0 0 0 0 10 10 10
39062- 30 30 30 74 74 74 174 135 50 216 158 10
39063-236 178 12 246 190 14 246 190 14 246 190 14
39064-246 190 14 246 190 14 246 190 14 246 190 14
39065-246 190 14 246 190 14 246 190 14 246 190 14
39066-246 190 14 246 190 14 241 196 14 226 184 13
39067- 61 42 6 2 2 6 2 2 6 2 2 6
39068- 22 22 22 238 238 238 253 253 253 253 253 253
39069-253 253 253 253 253 253 253 253 253 253 253 253
39070-253 253 253 253 253 253 253 253 253 253 253 253
39071-253 253 253 253 253 253 253 253 253 253 253 253
39072-253 253 253 226 226 226 187 187 187 180 133 36
39073-216 158 10 236 178 12 239 182 13 236 178 12
39074-230 174 11 226 170 11 226 170 11 230 174 11
39075-236 178 12 242 186 14 246 190 14 246 190 14
39076-246 190 14 246 190 14 246 186 14 239 182 13
39077-206 162 42 106 106 106 66 66 66 34 34 34
39078- 14 14 14 6 6 6 0 0 0 0 0 0
39079- 0 0 0 0 0 0 0 0 0 0 0 0
39080- 0 0 0 0 0 0 0 0 0 0 0 0
39081- 0 0 0 0 0 0 0 0 0 6 6 6
39082- 26 26 26 70 70 70 163 133 67 213 154 11
39083-236 178 12 246 190 14 246 190 14 246 190 14
39084-246 190 14 246 190 14 246 190 14 246 190 14
39085-246 190 14 246 190 14 246 190 14 246 190 14
39086-246 190 14 246 190 14 246 190 14 241 196 14
39087-190 146 13 18 14 6 2 2 6 2 2 6
39088- 46 46 46 246 246 246 253 253 253 253 253 253
39089-253 253 253 253 253 253 253 253 253 253 253 253
39090-253 253 253 253 253 253 253 253 253 253 253 253
39091-253 253 253 253 253 253 253 253 253 253 253 253
39092-253 253 253 221 221 221 86 86 86 156 107 11
39093-216 158 10 236 178 12 242 186 14 246 186 14
39094-242 186 14 239 182 13 239 182 13 242 186 14
39095-242 186 14 246 186 14 246 190 14 246 190 14
39096-246 190 14 246 190 14 246 190 14 246 190 14
39097-242 186 14 225 175 15 142 122 72 66 66 66
39098- 30 30 30 10 10 10 0 0 0 0 0 0
39099- 0 0 0 0 0 0 0 0 0 0 0 0
39100- 0 0 0 0 0 0 0 0 0 0 0 0
39101- 0 0 0 0 0 0 0 0 0 6 6 6
39102- 26 26 26 70 70 70 163 133 67 210 150 10
39103-236 178 12 246 190 14 246 190 14 246 190 14
39104-246 190 14 246 190 14 246 190 14 246 190 14
39105-246 190 14 246 190 14 246 190 14 246 190 14
39106-246 190 14 246 190 14 246 190 14 246 190 14
39107-232 195 16 121 92 8 34 34 34 106 106 106
39108-221 221 221 253 253 253 253 253 253 253 253 253
39109-253 253 253 253 253 253 253 253 253 253 253 253
39110-253 253 253 253 253 253 253 253 253 253 253 253
39111-253 253 253 253 253 253 253 253 253 253 253 253
39112-242 242 242 82 82 82 18 14 6 163 110 8
39113-216 158 10 236 178 12 242 186 14 246 190 14
39114-246 190 14 246 190 14 246 190 14 246 190 14
39115-246 190 14 246 190 14 246 190 14 246 190 14
39116-246 190 14 246 190 14 246 190 14 246 190 14
39117-246 190 14 246 190 14 242 186 14 163 133 67
39118- 46 46 46 18 18 18 6 6 6 0 0 0
39119- 0 0 0 0 0 0 0 0 0 0 0 0
39120- 0 0 0 0 0 0 0 0 0 0 0 0
39121- 0 0 0 0 0 0 0 0 0 10 10 10
39122- 30 30 30 78 78 78 163 133 67 210 150 10
39123-236 178 12 246 186 14 246 190 14 246 190 14
39124-246 190 14 246 190 14 246 190 14 246 190 14
39125-246 190 14 246 190 14 246 190 14 246 190 14
39126-246 190 14 246 190 14 246 190 14 246 190 14
39127-241 196 14 215 174 15 190 178 144 253 253 253
39128-253 253 253 253 253 253 253 253 253 253 253 253
39129-253 253 253 253 253 253 253 253 253 253 253 253
39130-253 253 253 253 253 253 253 253 253 253 253 253
39131-253 253 253 253 253 253 253 253 253 218 218 218
39132- 58 58 58 2 2 6 22 18 6 167 114 7
39133-216 158 10 236 178 12 246 186 14 246 190 14
39134-246 190 14 246 190 14 246 190 14 246 190 14
39135-246 190 14 246 190 14 246 190 14 246 190 14
39136-246 190 14 246 190 14 246 190 14 246 190 14
39137-246 190 14 246 186 14 242 186 14 190 150 46
39138- 54 54 54 22 22 22 6 6 6 0 0 0
39139- 0 0 0 0 0 0 0 0 0 0 0 0
39140- 0 0 0 0 0 0 0 0 0 0 0 0
39141- 0 0 0 0 0 0 0 0 0 14 14 14
39142- 38 38 38 86 86 86 180 133 36 213 154 11
39143-236 178 12 246 186 14 246 190 14 246 190 14
39144-246 190 14 246 190 14 246 190 14 246 190 14
39145-246 190 14 246 190 14 246 190 14 246 190 14
39146-246 190 14 246 190 14 246 190 14 246 190 14
39147-246 190 14 232 195 16 190 146 13 214 214 214
39148-253 253 253 253 253 253 253 253 253 253 253 253
39149-253 253 253 253 253 253 253 253 253 253 253 253
39150-253 253 253 253 253 253 253 253 253 253 253 253
39151-253 253 253 250 250 250 170 170 170 26 26 26
39152- 2 2 6 2 2 6 37 26 9 163 110 8
39153-219 162 10 239 182 13 246 186 14 246 190 14
39154-246 190 14 246 190 14 246 190 14 246 190 14
39155-246 190 14 246 190 14 246 190 14 246 190 14
39156-246 190 14 246 190 14 246 190 14 246 190 14
39157-246 186 14 236 178 12 224 166 10 142 122 72
39158- 46 46 46 18 18 18 6 6 6 0 0 0
39159- 0 0 0 0 0 0 0 0 0 0 0 0
39160- 0 0 0 0 0 0 0 0 0 0 0 0
39161- 0 0 0 0 0 0 6 6 6 18 18 18
39162- 50 50 50 109 106 95 192 133 9 224 166 10
39163-242 186 14 246 190 14 246 190 14 246 190 14
39164-246 190 14 246 190 14 246 190 14 246 190 14
39165-246 190 14 246 190 14 246 190 14 246 190 14
39166-246 190 14 246 190 14 246 190 14 246 190 14
39167-242 186 14 226 184 13 210 162 10 142 110 46
39168-226 226 226 253 253 253 253 253 253 253 253 253
39169-253 253 253 253 253 253 253 253 253 253 253 253
39170-253 253 253 253 253 253 253 253 253 253 253 253
39171-198 198 198 66 66 66 2 2 6 2 2 6
39172- 2 2 6 2 2 6 50 34 6 156 107 11
39173-219 162 10 239 182 13 246 186 14 246 190 14
39174-246 190 14 246 190 14 246 190 14 246 190 14
39175-246 190 14 246 190 14 246 190 14 246 190 14
39176-246 190 14 246 190 14 246 190 14 242 186 14
39177-234 174 13 213 154 11 154 122 46 66 66 66
39178- 30 30 30 10 10 10 0 0 0 0 0 0
39179- 0 0 0 0 0 0 0 0 0 0 0 0
39180- 0 0 0 0 0 0 0 0 0 0 0 0
39181- 0 0 0 0 0 0 6 6 6 22 22 22
39182- 58 58 58 154 121 60 206 145 10 234 174 13
39183-242 186 14 246 186 14 246 190 14 246 190 14
39184-246 190 14 246 190 14 246 190 14 246 190 14
39185-246 190 14 246 190 14 246 190 14 246 190 14
39186-246 190 14 246 190 14 246 190 14 246 190 14
39187-246 186 14 236 178 12 210 162 10 163 110 8
39188- 61 42 6 138 138 138 218 218 218 250 250 250
39189-253 253 253 253 253 253 253 253 253 250 250 250
39190-242 242 242 210 210 210 144 144 144 66 66 66
39191- 6 6 6 2 2 6 2 2 6 2 2 6
39192- 2 2 6 2 2 6 61 42 6 163 110 8
39193-216 158 10 236 178 12 246 190 14 246 190 14
39194-246 190 14 246 190 14 246 190 14 246 190 14
39195-246 190 14 246 190 14 246 190 14 246 190 14
39196-246 190 14 239 182 13 230 174 11 216 158 10
39197-190 142 34 124 112 88 70 70 70 38 38 38
39198- 18 18 18 6 6 6 0 0 0 0 0 0
39199- 0 0 0 0 0 0 0 0 0 0 0 0
39200- 0 0 0 0 0 0 0 0 0 0 0 0
39201- 0 0 0 0 0 0 6 6 6 22 22 22
39202- 62 62 62 168 124 44 206 145 10 224 166 10
39203-236 178 12 239 182 13 242 186 14 242 186 14
39204-246 186 14 246 190 14 246 190 14 246 190 14
39205-246 190 14 246 190 14 246 190 14 246 190 14
39206-246 190 14 246 190 14 246 190 14 246 190 14
39207-246 190 14 236 178 12 216 158 10 175 118 6
39208- 80 54 7 2 2 6 6 6 6 30 30 30
39209- 54 54 54 62 62 62 50 50 50 38 38 38
39210- 14 14 14 2 2 6 2 2 6 2 2 6
39211- 2 2 6 2 2 6 2 2 6 2 2 6
39212- 2 2 6 6 6 6 80 54 7 167 114 7
39213-213 154 11 236 178 12 246 190 14 246 190 14
39214-246 190 14 246 190 14 246 190 14 246 190 14
39215-246 190 14 242 186 14 239 182 13 239 182 13
39216-230 174 11 210 150 10 174 135 50 124 112 88
39217- 82 82 82 54 54 54 34 34 34 18 18 18
39218- 6 6 6 0 0 0 0 0 0 0 0 0
39219- 0 0 0 0 0 0 0 0 0 0 0 0
39220- 0 0 0 0 0 0 0 0 0 0 0 0
39221- 0 0 0 0 0 0 6 6 6 18 18 18
39222- 50 50 50 158 118 36 192 133 9 200 144 11
39223-216 158 10 219 162 10 224 166 10 226 170 11
39224-230 174 11 236 178 12 239 182 13 239 182 13
39225-242 186 14 246 186 14 246 190 14 246 190 14
39226-246 190 14 246 190 14 246 190 14 246 190 14
39227-246 186 14 230 174 11 210 150 10 163 110 8
39228-104 69 6 10 10 10 2 2 6 2 2 6
39229- 2 2 6 2 2 6 2 2 6 2 2 6
39230- 2 2 6 2 2 6 2 2 6 2 2 6
39231- 2 2 6 2 2 6 2 2 6 2 2 6
39232- 2 2 6 6 6 6 91 60 6 167 114 7
39233-206 145 10 230 174 11 242 186 14 246 190 14
39234-246 190 14 246 190 14 246 186 14 242 186 14
39235-239 182 13 230 174 11 224 166 10 213 154 11
39236-180 133 36 124 112 88 86 86 86 58 58 58
39237- 38 38 38 22 22 22 10 10 10 6 6 6
39238- 0 0 0 0 0 0 0 0 0 0 0 0
39239- 0 0 0 0 0 0 0 0 0 0 0 0
39240- 0 0 0 0 0 0 0 0 0 0 0 0
39241- 0 0 0 0 0 0 0 0 0 14 14 14
39242- 34 34 34 70 70 70 138 110 50 158 118 36
39243-167 114 7 180 123 7 192 133 9 197 138 11
39244-200 144 11 206 145 10 213 154 11 219 162 10
39245-224 166 10 230 174 11 239 182 13 242 186 14
39246-246 186 14 246 186 14 246 186 14 246 186 14
39247-239 182 13 216 158 10 185 133 11 152 99 6
39248-104 69 6 18 14 6 2 2 6 2 2 6
39249- 2 2 6 2 2 6 2 2 6 2 2 6
39250- 2 2 6 2 2 6 2 2 6 2 2 6
39251- 2 2 6 2 2 6 2 2 6 2 2 6
39252- 2 2 6 6 6 6 80 54 7 152 99 6
39253-192 133 9 219 162 10 236 178 12 239 182 13
39254-246 186 14 242 186 14 239 182 13 236 178 12
39255-224 166 10 206 145 10 192 133 9 154 121 60
39256- 94 94 94 62 62 62 42 42 42 22 22 22
39257- 14 14 14 6 6 6 0 0 0 0 0 0
39258- 0 0 0 0 0 0 0 0 0 0 0 0
39259- 0 0 0 0 0 0 0 0 0 0 0 0
39260- 0 0 0 0 0 0 0 0 0 0 0 0
39261- 0 0 0 0 0 0 0 0 0 6 6 6
39262- 18 18 18 34 34 34 58 58 58 78 78 78
39263-101 98 89 124 112 88 142 110 46 156 107 11
39264-163 110 8 167 114 7 175 118 6 180 123 7
39265-185 133 11 197 138 11 210 150 10 219 162 10
39266-226 170 11 236 178 12 236 178 12 234 174 13
39267-219 162 10 197 138 11 163 110 8 130 83 6
39268- 91 60 6 10 10 10 2 2 6 2 2 6
39269- 18 18 18 38 38 38 38 38 38 38 38 38
39270- 38 38 38 38 38 38 38 38 38 38 38 38
39271- 38 38 38 38 38 38 26 26 26 2 2 6
39272- 2 2 6 6 6 6 70 47 6 137 92 6
39273-175 118 6 200 144 11 219 162 10 230 174 11
39274-234 174 13 230 174 11 219 162 10 210 150 10
39275-192 133 9 163 110 8 124 112 88 82 82 82
39276- 50 50 50 30 30 30 14 14 14 6 6 6
39277- 0 0 0 0 0 0 0 0 0 0 0 0
39278- 0 0 0 0 0 0 0 0 0 0 0 0
39279- 0 0 0 0 0 0 0 0 0 0 0 0
39280- 0 0 0 0 0 0 0 0 0 0 0 0
39281- 0 0 0 0 0 0 0 0 0 0 0 0
39282- 6 6 6 14 14 14 22 22 22 34 34 34
39283- 42 42 42 58 58 58 74 74 74 86 86 86
39284-101 98 89 122 102 70 130 98 46 121 87 25
39285-137 92 6 152 99 6 163 110 8 180 123 7
39286-185 133 11 197 138 11 206 145 10 200 144 11
39287-180 123 7 156 107 11 130 83 6 104 69 6
39288- 50 34 6 54 54 54 110 110 110 101 98 89
39289- 86 86 86 82 82 82 78 78 78 78 78 78
39290- 78 78 78 78 78 78 78 78 78 78 78 78
39291- 78 78 78 82 82 82 86 86 86 94 94 94
39292-106 106 106 101 101 101 86 66 34 124 80 6
39293-156 107 11 180 123 7 192 133 9 200 144 11
39294-206 145 10 200 144 11 192 133 9 175 118 6
39295-139 102 15 109 106 95 70 70 70 42 42 42
39296- 22 22 22 10 10 10 0 0 0 0 0 0
39297- 0 0 0 0 0 0 0 0 0 0 0 0
39298- 0 0 0 0 0 0 0 0 0 0 0 0
39299- 0 0 0 0 0 0 0 0 0 0 0 0
39300- 0 0 0 0 0 0 0 0 0 0 0 0
39301- 0 0 0 0 0 0 0 0 0 0 0 0
39302- 0 0 0 0 0 0 6 6 6 10 10 10
39303- 14 14 14 22 22 22 30 30 30 38 38 38
39304- 50 50 50 62 62 62 74 74 74 90 90 90
39305-101 98 89 112 100 78 121 87 25 124 80 6
39306-137 92 6 152 99 6 152 99 6 152 99 6
39307-138 86 6 124 80 6 98 70 6 86 66 30
39308-101 98 89 82 82 82 58 58 58 46 46 46
39309- 38 38 38 34 34 34 34 34 34 34 34 34
39310- 34 34 34 34 34 34 34 34 34 34 34 34
39311- 34 34 34 34 34 34 38 38 38 42 42 42
39312- 54 54 54 82 82 82 94 86 76 91 60 6
39313-134 86 6 156 107 11 167 114 7 175 118 6
39314-175 118 6 167 114 7 152 99 6 121 87 25
39315-101 98 89 62 62 62 34 34 34 18 18 18
39316- 6 6 6 0 0 0 0 0 0 0 0 0
39317- 0 0 0 0 0 0 0 0 0 0 0 0
39318- 0 0 0 0 0 0 0 0 0 0 0 0
39319- 0 0 0 0 0 0 0 0 0 0 0 0
39320- 0 0 0 0 0 0 0 0 0 0 0 0
39321- 0 0 0 0 0 0 0 0 0 0 0 0
39322- 0 0 0 0 0 0 0 0 0 0 0 0
39323- 0 0 0 6 6 6 6 6 6 10 10 10
39324- 18 18 18 22 22 22 30 30 30 42 42 42
39325- 50 50 50 66 66 66 86 86 86 101 98 89
39326-106 86 58 98 70 6 104 69 6 104 69 6
39327-104 69 6 91 60 6 82 62 34 90 90 90
39328- 62 62 62 38 38 38 22 22 22 14 14 14
39329- 10 10 10 10 10 10 10 10 10 10 10 10
39330- 10 10 10 10 10 10 6 6 6 10 10 10
39331- 10 10 10 10 10 10 10 10 10 14 14 14
39332- 22 22 22 42 42 42 70 70 70 89 81 66
39333- 80 54 7 104 69 6 124 80 6 137 92 6
39334-134 86 6 116 81 8 100 82 52 86 86 86
39335- 58 58 58 30 30 30 14 14 14 6 6 6
39336- 0 0 0 0 0 0 0 0 0 0 0 0
39337- 0 0 0 0 0 0 0 0 0 0 0 0
39338- 0 0 0 0 0 0 0 0 0 0 0 0
39339- 0 0 0 0 0 0 0 0 0 0 0 0
39340- 0 0 0 0 0 0 0 0 0 0 0 0
39341- 0 0 0 0 0 0 0 0 0 0 0 0
39342- 0 0 0 0 0 0 0 0 0 0 0 0
39343- 0 0 0 0 0 0 0 0 0 0 0 0
39344- 0 0 0 6 6 6 10 10 10 14 14 14
39345- 18 18 18 26 26 26 38 38 38 54 54 54
39346- 70 70 70 86 86 86 94 86 76 89 81 66
39347- 89 81 66 86 86 86 74 74 74 50 50 50
39348- 30 30 30 14 14 14 6 6 6 0 0 0
39349- 0 0 0 0 0 0 0 0 0 0 0 0
39350- 0 0 0 0 0 0 0 0 0 0 0 0
39351- 0 0 0 0 0 0 0 0 0 0 0 0
39352- 6 6 6 18 18 18 34 34 34 58 58 58
39353- 82 82 82 89 81 66 89 81 66 89 81 66
39354- 94 86 66 94 86 76 74 74 74 50 50 50
39355- 26 26 26 14 14 14 6 6 6 0 0 0
39356- 0 0 0 0 0 0 0 0 0 0 0 0
39357- 0 0 0 0 0 0 0 0 0 0 0 0
39358- 0 0 0 0 0 0 0 0 0 0 0 0
39359- 0 0 0 0 0 0 0 0 0 0 0 0
39360- 0 0 0 0 0 0 0 0 0 0 0 0
39361- 0 0 0 0 0 0 0 0 0 0 0 0
39362- 0 0 0 0 0 0 0 0 0 0 0 0
39363- 0 0 0 0 0 0 0 0 0 0 0 0
39364- 0 0 0 0 0 0 0 0 0 0 0 0
39365- 6 6 6 6 6 6 14 14 14 18 18 18
39366- 30 30 30 38 38 38 46 46 46 54 54 54
39367- 50 50 50 42 42 42 30 30 30 18 18 18
39368- 10 10 10 0 0 0 0 0 0 0 0 0
39369- 0 0 0 0 0 0 0 0 0 0 0 0
39370- 0 0 0 0 0 0 0 0 0 0 0 0
39371- 0 0 0 0 0 0 0 0 0 0 0 0
39372- 0 0 0 6 6 6 14 14 14 26 26 26
39373- 38 38 38 50 50 50 58 58 58 58 58 58
39374- 54 54 54 42 42 42 30 30 30 18 18 18
39375- 10 10 10 0 0 0 0 0 0 0 0 0
39376- 0 0 0 0 0 0 0 0 0 0 0 0
39377- 0 0 0 0 0 0 0 0 0 0 0 0
39378- 0 0 0 0 0 0 0 0 0 0 0 0
39379- 0 0 0 0 0 0 0 0 0 0 0 0
39380- 0 0 0 0 0 0 0 0 0 0 0 0
39381- 0 0 0 0 0 0 0 0 0 0 0 0
39382- 0 0 0 0 0 0 0 0 0 0 0 0
39383- 0 0 0 0 0 0 0 0 0 0 0 0
39384- 0 0 0 0 0 0 0 0 0 0 0 0
39385- 0 0 0 0 0 0 0 0 0 6 6 6
39386- 6 6 6 10 10 10 14 14 14 18 18 18
39387- 18 18 18 14 14 14 10 10 10 6 6 6
39388- 0 0 0 0 0 0 0 0 0 0 0 0
39389- 0 0 0 0 0 0 0 0 0 0 0 0
39390- 0 0 0 0 0 0 0 0 0 0 0 0
39391- 0 0 0 0 0 0 0 0 0 0 0 0
39392- 0 0 0 0 0 0 0 0 0 6 6 6
39393- 14 14 14 18 18 18 22 22 22 22 22 22
39394- 18 18 18 14 14 14 10 10 10 6 6 6
39395- 0 0 0 0 0 0 0 0 0 0 0 0
39396- 0 0 0 0 0 0 0 0 0 0 0 0
39397- 0 0 0 0 0 0 0 0 0 0 0 0
39398- 0 0 0 0 0 0 0 0 0 0 0 0
39399- 0 0 0 0 0 0 0 0 0 0 0 0
39400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39413+4 4 4 4 4 4
39414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39427+4 4 4 4 4 4
39428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39441+4 4 4 4 4 4
39442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39455+4 4 4 4 4 4
39456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39469+4 4 4 4 4 4
39470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39483+4 4 4 4 4 4
39484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39488+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39489+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39493+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39494+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39495+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39497+4 4 4 4 4 4
39498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39502+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39503+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39504+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39507+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39508+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39509+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39510+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39511+4 4 4 4 4 4
39512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39516+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39517+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39518+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39521+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39522+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39523+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39524+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39525+4 4 4 4 4 4
39526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39529+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39530+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39531+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39532+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39534+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39535+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39536+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39537+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39538+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39539+4 4 4 4 4 4
39540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39543+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39544+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39545+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39546+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39547+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39548+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39549+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39550+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39551+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39552+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39553+4 4 4 4 4 4
39554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39557+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39558+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39559+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39560+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39561+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39562+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39563+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39564+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39565+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39566+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39567+4 4 4 4 4 4
39568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39570+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39571+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39572+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39573+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39574+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39575+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39576+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39577+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39578+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39579+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39580+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39581+4 4 4 4 4 4
39582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39584+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39585+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39586+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39587+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39588+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39589+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39590+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39591+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39592+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39593+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39594+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39595+4 4 4 4 4 4
39596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39598+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39599+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39600+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39601+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39602+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39603+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39604+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39605+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39606+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39607+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39608+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39609+4 4 4 4 4 4
39610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39612+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39613+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39614+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39615+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39616+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39617+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39618+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39619+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39620+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39621+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39622+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39623+4 4 4 4 4 4
39624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39625+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39626+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39627+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39628+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39629+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39630+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39631+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39632+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39633+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39634+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39635+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39636+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39637+4 4 4 4 4 4
39638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39639+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39640+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39641+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39642+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39643+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39644+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39645+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39646+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39647+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39648+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39649+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39650+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39651+0 0 0 4 4 4
39652+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39653+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39654+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39655+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39656+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39657+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39658+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39659+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39660+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39661+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39662+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39663+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39664+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39665+2 0 0 0 0 0
39666+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39667+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39668+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39669+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39670+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39671+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39672+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39673+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39674+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39675+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39676+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39677+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39678+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39679+37 38 37 0 0 0
39680+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39681+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39682+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39683+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39684+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39685+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39686+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39687+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39688+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39689+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39690+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39691+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39692+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39693+85 115 134 4 0 0
39694+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39695+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39696+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39697+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39698+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39699+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39700+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39701+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39702+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39703+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39704+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39705+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39706+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39707+60 73 81 4 0 0
39708+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39709+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39710+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39711+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39712+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39713+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39714+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39715+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39716+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39717+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39718+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39719+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39720+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39721+16 19 21 4 0 0
39722+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39723+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39724+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39725+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39726+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39727+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39728+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39729+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39730+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39731+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39732+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39733+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39734+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39735+4 0 0 4 3 3
39736+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39737+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39738+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39740+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39741+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39742+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39743+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39744+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39745+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39746+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39747+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39748+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39749+3 2 2 4 4 4
39750+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39751+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39752+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39753+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39754+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39755+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39756+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39757+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39758+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39759+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39760+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39761+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
39762+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
39763+4 4 4 4 4 4
39764+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
39765+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
39766+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
39767+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
39768+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
39769+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
39770+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
39771+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
39772+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
39773+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
39774+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
39775+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
39776+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
39777+4 4 4 4 4 4
39778+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
39779+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
39780+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
39781+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
39782+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
39783+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39784+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
39785+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
39786+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
39787+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
39788+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
39789+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
39790+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
39791+5 5 5 5 5 5
39792+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
39793+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
39794+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
39795+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
39796+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
39797+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39798+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
39799+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
39800+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
39801+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
39802+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
39803+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
39804+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39805+5 5 5 4 4 4
39806+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
39807+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
39808+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
39809+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
39810+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39811+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
39812+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
39813+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
39814+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
39815+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
39816+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
39817+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39819+4 4 4 4 4 4
39820+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
39821+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
39822+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
39823+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
39824+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
39825+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39826+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39827+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
39828+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
39829+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
39830+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
39831+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
39832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39833+4 4 4 4 4 4
39834+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
39835+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
39836+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
39837+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
39838+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39839+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
39840+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
39841+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
39842+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
39843+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
39844+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
39845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39847+4 4 4 4 4 4
39848+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
39849+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
39850+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
39851+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
39852+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39853+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39854+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39855+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
39856+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
39857+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
39858+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
39859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39861+4 4 4 4 4 4
39862+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
39863+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
39864+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
39865+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
39866+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39867+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
39868+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39869+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
39870+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
39871+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
39872+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39875+4 4 4 4 4 4
39876+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
39877+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
39878+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
39879+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
39880+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39881+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
39882+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
39883+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
39884+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
39885+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
39886+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
39887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39889+4 4 4 4 4 4
39890+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
39891+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
39892+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
39893+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
39894+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39895+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
39896+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
39897+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
39898+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
39899+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
39900+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
39901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39903+4 4 4 4 4 4
39904+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
39905+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
39906+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
39907+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39908+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
39909+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
39910+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
39911+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
39912+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
39913+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
39914+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39917+4 4 4 4 4 4
39918+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
39919+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
39920+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
39921+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39922+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39923+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
39924+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
39925+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
39926+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
39927+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
39928+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39931+4 4 4 4 4 4
39932+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
39933+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
39934+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39935+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39936+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39937+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
39938+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
39939+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
39940+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
39941+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
39942+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39945+4 4 4 4 4 4
39946+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
39947+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
39948+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39949+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39950+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39951+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
39952+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
39953+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
39954+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
39955+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39956+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39959+4 4 4 4 4 4
39960+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
39961+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
39962+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39963+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
39964+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
39965+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
39966+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
39967+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
39968+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39969+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39970+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39973+4 4 4 4 4 4
39974+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
39975+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
39976+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39977+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
39978+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39979+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
39980+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
39981+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
39982+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
39983+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39984+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39987+4 4 4 4 4 4
39988+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
39989+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
39990+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
39991+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
39992+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
39993+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
39994+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
39995+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
39996+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39997+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39998+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40001+4 4 4 4 4 4
40002+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40003+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40004+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40005+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40006+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40007+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40008+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40009+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40010+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40011+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40012+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40015+4 4 4 4 4 4
40016+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40017+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40018+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40019+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40020+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40021+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40022+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40023+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40024+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40025+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40026+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40029+4 4 4 4 4 4
40030+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40031+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40032+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40033+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40034+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40035+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40036+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40037+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40038+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40039+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40040+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40043+4 4 4 4 4 4
40044+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40045+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40046+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40047+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40048+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40049+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40050+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40051+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40052+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40053+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40054+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40057+4 4 4 4 4 4
40058+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40059+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40060+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40061+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40062+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40063+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40064+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40065+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40066+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40067+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40068+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40071+4 4 4 4 4 4
40072+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40073+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40074+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40075+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40076+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40077+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40078+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40079+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40080+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40081+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40082+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40085+4 4 4 4 4 4
40086+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40087+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40088+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40089+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40090+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40091+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40092+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40093+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40094+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40095+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40096+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099+4 4 4 4 4 4
40100+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40101+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40102+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40103+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40104+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40105+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40106+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40107+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40108+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40109+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40110+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113+4 4 4 4 4 4
40114+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40115+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40116+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40117+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40118+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40119+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40120+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40121+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40122+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40123+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40124+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127+4 4 4 4 4 4
40128+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40129+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40130+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40131+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40132+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40133+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40134+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40135+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40136+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40137+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40138+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40141+4 4 4 4 4 4
40142+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40143+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40144+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40145+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40146+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40147+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40148+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40149+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40150+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40151+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40152+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40155+4 4 4 4 4 4
40156+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40157+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40158+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40159+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40160+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40161+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40162+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40163+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40164+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40165+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40166+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40169+4 4 4 4 4 4
40170+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40171+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40172+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40173+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40174+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40175+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40176+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40177+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40178+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40179+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40180+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40183+4 4 4 4 4 4
40184+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40185+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40186+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40187+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40188+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40189+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40190+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40191+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40192+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40193+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40194+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40197+4 4 4 4 4 4
40198+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40199+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40200+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40201+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40202+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40203+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40204+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40205+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40206+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40207+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40208+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40211+4 4 4 4 4 4
40212+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40213+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40214+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40215+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40216+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40217+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40218+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40219+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40220+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40221+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40225+4 4 4 4 4 4
40226+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40227+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40228+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40229+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40230+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40231+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40232+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40233+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40234+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40235+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40239+4 4 4 4 4 4
40240+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40241+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40242+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40243+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40244+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40245+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40246+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40247+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40248+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40249+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253+4 4 4 4 4 4
40254+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40255+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40256+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40257+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40258+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40259+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40260+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40261+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40262+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40263+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267+4 4 4 4 4 4
40268+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40269+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40270+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40271+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40272+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40273+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40274+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40275+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40276+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281+4 4 4 4 4 4
40282+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40283+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40284+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40285+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40286+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40287+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40288+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40289+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40290+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295+4 4 4 4 4 4
40296+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40297+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40298+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40299+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40300+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40301+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40302+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40303+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40304+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309+4 4 4 4 4 4
40310+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40311+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40312+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40313+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40314+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40315+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40316+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40317+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323+4 4 4 4 4 4
40324+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40325+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40326+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40327+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40328+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40329+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40330+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40331+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40337+4 4 4 4 4 4
40338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40339+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40340+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40341+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40342+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40343+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40344+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40345+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351+4 4 4 4 4 4
40352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40354+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40355+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40356+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40357+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40358+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40359+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365+4 4 4 4 4 4
40366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40368+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40369+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40370+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40371+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40372+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40373+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379+4 4 4 4 4 4
40380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40382+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40383+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40384+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40385+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40386+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40387+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393+4 4 4 4 4 4
40394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40397+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40398+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40399+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40400+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407+4 4 4 4 4 4
40408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40411+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40412+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40413+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40414+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421+4 4 4 4 4 4
40422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40426+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40427+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40428+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435+4 4 4 4 4 4
40436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40440+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40441+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40442+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449+4 4 4 4 4 4
40450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40454+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40455+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40456+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463+4 4 4 4 4 4
40464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40468+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40469+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40470+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477+4 4 4 4 4 4
40478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40482+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40483+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40484+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491+4 4 4 4 4 4
40492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40496+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40497+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505+4 4 4 4 4 4
40506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40510+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40511+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40519+4 4 4 4 4 4
40520diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40521index a197731..6c3af9d 100644
40522--- a/drivers/video/udlfb.c
40523+++ b/drivers/video/udlfb.c
40524@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40525 dlfb_urb_completion(urb);
40526
40527 error:
40528- atomic_add(bytes_sent, &dev->bytes_sent);
40529- atomic_add(bytes_identical, &dev->bytes_identical);
40530- atomic_add(width*height*2, &dev->bytes_rendered);
40531+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40532+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40533+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40534 end_cycles = get_cycles();
40535- atomic_add(((unsigned int) ((end_cycles - start_cycles)
40536+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40537 >> 10)), /* Kcycles */
40538 &dev->cpu_kcycles_used);
40539
40540@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40541 dlfb_urb_completion(urb);
40542
40543 error:
40544- atomic_add(bytes_sent, &dev->bytes_sent);
40545- atomic_add(bytes_identical, &dev->bytes_identical);
40546- atomic_add(bytes_rendered, &dev->bytes_rendered);
40547+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40548+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40549+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40550 end_cycles = get_cycles();
40551- atomic_add(((unsigned int) ((end_cycles - start_cycles)
40552+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40553 >> 10)), /* Kcycles */
40554 &dev->cpu_kcycles_used);
40555 }
40556@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40557 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40558 struct dlfb_data *dev = fb_info->par;
40559 return snprintf(buf, PAGE_SIZE, "%u\n",
40560- atomic_read(&dev->bytes_rendered));
40561+ atomic_read_unchecked(&dev->bytes_rendered));
40562 }
40563
40564 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40565@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40566 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40567 struct dlfb_data *dev = fb_info->par;
40568 return snprintf(buf, PAGE_SIZE, "%u\n",
40569- atomic_read(&dev->bytes_identical));
40570+ atomic_read_unchecked(&dev->bytes_identical));
40571 }
40572
40573 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40574@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40575 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40576 struct dlfb_data *dev = fb_info->par;
40577 return snprintf(buf, PAGE_SIZE, "%u\n",
40578- atomic_read(&dev->bytes_sent));
40579+ atomic_read_unchecked(&dev->bytes_sent));
40580 }
40581
40582 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40583@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40584 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40585 struct dlfb_data *dev = fb_info->par;
40586 return snprintf(buf, PAGE_SIZE, "%u\n",
40587- atomic_read(&dev->cpu_kcycles_used));
40588+ atomic_read_unchecked(&dev->cpu_kcycles_used));
40589 }
40590
40591 static ssize_t edid_show(
40592@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
40593 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40594 struct dlfb_data *dev = fb_info->par;
40595
40596- atomic_set(&dev->bytes_rendered, 0);
40597- atomic_set(&dev->bytes_identical, 0);
40598- atomic_set(&dev->bytes_sent, 0);
40599- atomic_set(&dev->cpu_kcycles_used, 0);
40600+ atomic_set_unchecked(&dev->bytes_rendered, 0);
40601+ atomic_set_unchecked(&dev->bytes_identical, 0);
40602+ atomic_set_unchecked(&dev->bytes_sent, 0);
40603+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40604
40605 return count;
40606 }
40607diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
40608index e7f69ef..83af4fd 100644
40609--- a/drivers/video/uvesafb.c
40610+++ b/drivers/video/uvesafb.c
40611@@ -19,6 +19,7 @@
40612 #include <linux/io.h>
40613 #include <linux/mutex.h>
40614 #include <linux/slab.h>
40615+#include <linux/moduleloader.h>
40616 #include <video/edid.h>
40617 #include <video/uvesafb.h>
40618 #ifdef CONFIG_X86
40619@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40620 NULL,
40621 };
40622
40623- return call_usermodehelper(v86d_path, argv, envp, 1);
40624+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40625 }
40626
40627 /*
40628@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
40629 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40630 par->pmi_setpal = par->ypan = 0;
40631 } else {
40632+
40633+#ifdef CONFIG_PAX_KERNEXEC
40634+#ifdef CONFIG_MODULES
40635+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40636+#endif
40637+ if (!par->pmi_code) {
40638+ par->pmi_setpal = par->ypan = 0;
40639+ return 0;
40640+ }
40641+#endif
40642+
40643 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40644 + task->t.regs.edi);
40645+
40646+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40647+ pax_open_kernel();
40648+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40649+ pax_close_kernel();
40650+
40651+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40652+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40653+#else
40654 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40655 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40656+#endif
40657+
40658 printk(KERN_INFO "uvesafb: protected mode interface info at "
40659 "%04x:%04x\n",
40660 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40661@@ -1821,6 +1844,11 @@ out:
40662 if (par->vbe_modes)
40663 kfree(par->vbe_modes);
40664
40665+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40666+ if (par->pmi_code)
40667+ module_free_exec(NULL, par->pmi_code);
40668+#endif
40669+
40670 framebuffer_release(info);
40671 return err;
40672 }
40673@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
40674 kfree(par->vbe_state_orig);
40675 if (par->vbe_state_saved)
40676 kfree(par->vbe_state_saved);
40677+
40678+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40679+ if (par->pmi_code)
40680+ module_free_exec(NULL, par->pmi_code);
40681+#endif
40682+
40683 }
40684
40685 framebuffer_release(info);
40686diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
40687index 501b340..86bd4cf 100644
40688--- a/drivers/video/vesafb.c
40689+++ b/drivers/video/vesafb.c
40690@@ -9,6 +9,7 @@
40691 */
40692
40693 #include <linux/module.h>
40694+#include <linux/moduleloader.h>
40695 #include <linux/kernel.h>
40696 #include <linux/errno.h>
40697 #include <linux/string.h>
40698@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
40699 static int vram_total __initdata; /* Set total amount of memory */
40700 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40701 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40702-static void (*pmi_start)(void) __read_mostly;
40703-static void (*pmi_pal) (void) __read_mostly;
40704+static void (*pmi_start)(void) __read_only;
40705+static void (*pmi_pal) (void) __read_only;
40706 static int depth __read_mostly;
40707 static int vga_compat __read_mostly;
40708 /* --------------------------------------------------------------------- */
40709@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
40710 unsigned int size_vmode;
40711 unsigned int size_remap;
40712 unsigned int size_total;
40713+ void *pmi_code = NULL;
40714
40715 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40716 return -ENODEV;
40717@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
40718 size_remap = size_total;
40719 vesafb_fix.smem_len = size_remap;
40720
40721-#ifndef __i386__
40722- screen_info.vesapm_seg = 0;
40723-#endif
40724-
40725 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40726 printk(KERN_WARNING
40727 "vesafb: cannot reserve video memory at 0x%lx\n",
40728@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
40729 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40730 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40731
40732+#ifdef __i386__
40733+
40734+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40735+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
40736+ if (!pmi_code)
40737+#elif !defined(CONFIG_PAX_KERNEXEC)
40738+ if (0)
40739+#endif
40740+
40741+#endif
40742+ screen_info.vesapm_seg = 0;
40743+
40744 if (screen_info.vesapm_seg) {
40745- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40746- screen_info.vesapm_seg,screen_info.vesapm_off);
40747+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40748+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40749 }
40750
40751 if (screen_info.vesapm_seg < 0xc000)
40752@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
40753
40754 if (ypan || pmi_setpal) {
40755 unsigned short *pmi_base;
40756+
40757 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40758- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40759- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40760+
40761+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40762+ pax_open_kernel();
40763+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
40764+#else
40765+ pmi_code = pmi_base;
40766+#endif
40767+
40768+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
40769+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
40770+
40771+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40772+ pmi_start = ktva_ktla(pmi_start);
40773+ pmi_pal = ktva_ktla(pmi_pal);
40774+ pax_close_kernel();
40775+#endif
40776+
40777 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
40778 if (pmi_base[3]) {
40779 printk(KERN_INFO "vesafb: pmi: ports = ");
40780@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
40781 info->node, info->fix.id);
40782 return 0;
40783 err:
40784+
40785+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40786+ module_free_exec(NULL, pmi_code);
40787+#endif
40788+
40789 if (info->screen_base)
40790 iounmap(info->screen_base);
40791 framebuffer_release(info);
40792diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
40793index 88714ae..16c2e11 100644
40794--- a/drivers/video/via/via_clock.h
40795+++ b/drivers/video/via/via_clock.h
40796@@ -56,7 +56,7 @@ struct via_clock {
40797
40798 void (*set_engine_pll_state)(u8 state);
40799 void (*set_engine_pll)(struct via_pll_config config);
40800-};
40801+} __no_const;
40802
40803
40804 static inline u32 get_pll_internal_frequency(u32 ref_freq,
40805diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
40806index e56c934..fc22f4b 100644
40807--- a/drivers/xen/xen-pciback/conf_space.h
40808+++ b/drivers/xen/xen-pciback/conf_space.h
40809@@ -44,15 +44,15 @@ struct config_field {
40810 struct {
40811 conf_dword_write write;
40812 conf_dword_read read;
40813- } dw;
40814+ } __no_const dw;
40815 struct {
40816 conf_word_write write;
40817 conf_word_read read;
40818- } w;
40819+ } __no_const w;
40820 struct {
40821 conf_byte_write write;
40822 conf_byte_read read;
40823- } b;
40824+ } __no_const b;
40825 } u;
40826 struct list_head list;
40827 };
40828diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
40829index 014c8dd..6f3dfe6 100644
40830--- a/fs/9p/vfs_inode.c
40831+++ b/fs/9p/vfs_inode.c
40832@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
40833 void
40834 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40835 {
40836- char *s = nd_get_link(nd);
40837+ const char *s = nd_get_link(nd);
40838
40839 p9_debug(P9_DEBUG_VFS, " %s %s\n",
40840 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
40841diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
40842index e95d1b6..3454244 100644
40843--- a/fs/Kconfig.binfmt
40844+++ b/fs/Kconfig.binfmt
40845@@ -89,7 +89,7 @@ config HAVE_AOUT
40846
40847 config BINFMT_AOUT
40848 tristate "Kernel support for a.out and ECOFF binaries"
40849- depends on HAVE_AOUT
40850+ depends on HAVE_AOUT && BROKEN
40851 ---help---
40852 A.out (Assembler.OUTput) is a set of formats for libraries and
40853 executables used in the earliest versions of UNIX. Linux used
40854diff --git a/fs/aio.c b/fs/aio.c
40855index b9d64d8..86cb1d5 100644
40856--- a/fs/aio.c
40857+++ b/fs/aio.c
40858@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
40859 size += sizeof(struct io_event) * nr_events;
40860 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
40861
40862- if (nr_pages < 0)
40863+ if (nr_pages <= 0)
40864 return -EINVAL;
40865
40866 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
40867@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
40868 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
40869 {
40870 ssize_t ret;
40871+ struct iovec iovstack;
40872
40873 #ifdef CONFIG_COMPAT
40874 if (compat)
40875 ret = compat_rw_copy_check_uvector(type,
40876 (struct compat_iovec __user *)kiocb->ki_buf,
40877- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40878+ kiocb->ki_nbytes, 1, &iovstack,
40879 &kiocb->ki_iovec, 1);
40880 else
40881 #endif
40882 ret = rw_copy_check_uvector(type,
40883 (struct iovec __user *)kiocb->ki_buf,
40884- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40885+ kiocb->ki_nbytes, 1, &iovstack,
40886 &kiocb->ki_iovec, 1);
40887 if (ret < 0)
40888 goto out;
40889
40890+ if (kiocb->ki_iovec == &iovstack) {
40891+ kiocb->ki_inline_vec = iovstack;
40892+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
40893+ }
40894 kiocb->ki_nr_segs = kiocb->ki_nbytes;
40895 kiocb->ki_cur_seg = 0;
40896 /* ki_nbytes/left now reflect bytes instead of segs */
40897diff --git a/fs/attr.c b/fs/attr.c
40898index 95053ad..2cc93ca 100644
40899--- a/fs/attr.c
40900+++ b/fs/attr.c
40901@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
40902 unsigned long limit;
40903
40904 limit = rlimit(RLIMIT_FSIZE);
40905+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
40906 if (limit != RLIM_INFINITY && offset > limit)
40907 goto out_sig;
40908 if (offset > inode->i_sb->s_maxbytes)
40909diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
40910index 9c098db..c755da5 100644
40911--- a/fs/autofs4/waitq.c
40912+++ b/fs/autofs4/waitq.c
40913@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
40914 {
40915 unsigned long sigpipe, flags;
40916 mm_segment_t fs;
40917- const char *data = (const char *)addr;
40918+ const char __user *data = (const char __force_user *)addr;
40919 ssize_t wr = 0;
40920
40921 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
40922diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
40923index 6e6d536..457113a 100644
40924--- a/fs/befs/linuxvfs.c
40925+++ b/fs/befs/linuxvfs.c
40926@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40927 {
40928 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
40929 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
40930- char *link = nd_get_link(nd);
40931+ const char *link = nd_get_link(nd);
40932 if (!IS_ERR(link))
40933 kfree(link);
40934 }
40935diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
40936index 1ff9405..f1e376a 100644
40937--- a/fs/binfmt_aout.c
40938+++ b/fs/binfmt_aout.c
40939@@ -16,6 +16,7 @@
40940 #include <linux/string.h>
40941 #include <linux/fs.h>
40942 #include <linux/file.h>
40943+#include <linux/security.h>
40944 #include <linux/stat.h>
40945 #include <linux/fcntl.h>
40946 #include <linux/ptrace.h>
40947@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
40948 #endif
40949 # define START_STACK(u) ((void __user *)u.start_stack)
40950
40951+ memset(&dump, 0, sizeof(dump));
40952+
40953 fs = get_fs();
40954 set_fs(KERNEL_DS);
40955 has_dumped = 1;
40956@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
40957
40958 /* If the size of the dump file exceeds the rlimit, then see what would happen
40959 if we wrote the stack, but not the data area. */
40960+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
40961 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
40962 dump.u_dsize = 0;
40963
40964 /* Make sure we have enough room to write the stack and data areas. */
40965+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
40966 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
40967 dump.u_ssize = 0;
40968
40969@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
40970 rlim = rlimit(RLIMIT_DATA);
40971 if (rlim >= RLIM_INFINITY)
40972 rlim = ~0;
40973+
40974+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
40975 if (ex.a_data + ex.a_bss > rlim)
40976 return -ENOMEM;
40977
40978@@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
40979 install_exec_creds(bprm);
40980 current->flags &= ~PF_FORKNOEXEC;
40981
40982+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40983+ current->mm->pax_flags = 0UL;
40984+#endif
40985+
40986+#ifdef CONFIG_PAX_PAGEEXEC
40987+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
40988+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
40989+
40990+#ifdef CONFIG_PAX_EMUTRAMP
40991+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
40992+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
40993+#endif
40994+
40995+#ifdef CONFIG_PAX_MPROTECT
40996+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
40997+ current->mm->pax_flags |= MF_PAX_MPROTECT;
40998+#endif
40999+
41000+ }
41001+#endif
41002+
41003 if (N_MAGIC(ex) == OMAGIC) {
41004 unsigned long text_addr, map_size;
41005 loff_t pos;
41006@@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41007
41008 down_write(&current->mm->mmap_sem);
41009 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41010- PROT_READ | PROT_WRITE | PROT_EXEC,
41011+ PROT_READ | PROT_WRITE,
41012 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41013 fd_offset + ex.a_text);
41014 up_write(&current->mm->mmap_sem);
41015diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41016index 07d096c..1ee8d2b 100644
41017--- a/fs/binfmt_elf.c
41018+++ b/fs/binfmt_elf.c
41019@@ -32,6 +32,7 @@
41020 #include <linux/elf.h>
41021 #include <linux/utsname.h>
41022 #include <linux/coredump.h>
41023+#include <linux/xattr.h>
41024 #include <asm/uaccess.h>
41025 #include <asm/param.h>
41026 #include <asm/page.h>
41027@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41028 #define elf_core_dump NULL
41029 #endif
41030
41031+#ifdef CONFIG_PAX_MPROTECT
41032+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41033+#endif
41034+
41035 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41036 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41037 #else
41038@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41039 .load_binary = load_elf_binary,
41040 .load_shlib = load_elf_library,
41041 .core_dump = elf_core_dump,
41042+
41043+#ifdef CONFIG_PAX_MPROTECT
41044+ .handle_mprotect= elf_handle_mprotect,
41045+#endif
41046+
41047 .min_coredump = ELF_EXEC_PAGESIZE,
41048 };
41049
41050@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41051
41052 static int set_brk(unsigned long start, unsigned long end)
41053 {
41054+ unsigned long e = end;
41055+
41056 start = ELF_PAGEALIGN(start);
41057 end = ELF_PAGEALIGN(end);
41058 if (end > start) {
41059@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41060 if (BAD_ADDR(addr))
41061 return addr;
41062 }
41063- current->mm->start_brk = current->mm->brk = end;
41064+ current->mm->start_brk = current->mm->brk = e;
41065 return 0;
41066 }
41067
41068@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41069 elf_addr_t __user *u_rand_bytes;
41070 const char *k_platform = ELF_PLATFORM;
41071 const char *k_base_platform = ELF_BASE_PLATFORM;
41072- unsigned char k_rand_bytes[16];
41073+ u32 k_rand_bytes[4];
41074 int items;
41075 elf_addr_t *elf_info;
41076 int ei_index = 0;
41077 const struct cred *cred = current_cred();
41078 struct vm_area_struct *vma;
41079+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41080
41081 /*
41082 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41083@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41084 * Generate 16 random bytes for userspace PRNG seeding.
41085 */
41086 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41087- u_rand_bytes = (elf_addr_t __user *)
41088- STACK_ALLOC(p, sizeof(k_rand_bytes));
41089+ srandom32(k_rand_bytes[0] ^ random32());
41090+ srandom32(k_rand_bytes[1] ^ random32());
41091+ srandom32(k_rand_bytes[2] ^ random32());
41092+ srandom32(k_rand_bytes[3] ^ random32());
41093+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41094+ u_rand_bytes = (elf_addr_t __user *) p;
41095 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41096 return -EFAULT;
41097
41098@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41099 return -EFAULT;
41100 current->mm->env_end = p;
41101
41102+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41103+
41104 /* Put the elf_info on the stack in the right place. */
41105 sp = (elf_addr_t __user *)envp + 1;
41106- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41107+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41108 return -EFAULT;
41109 return 0;
41110 }
41111@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41112 {
41113 struct elf_phdr *elf_phdata;
41114 struct elf_phdr *eppnt;
41115- unsigned long load_addr = 0;
41116+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41117 int load_addr_set = 0;
41118 unsigned long last_bss = 0, elf_bss = 0;
41119- unsigned long error = ~0UL;
41120+ unsigned long error = -EINVAL;
41121 unsigned long total_size;
41122 int retval, i, size;
41123
41124@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41125 goto out_close;
41126 }
41127
41128+#ifdef CONFIG_PAX_SEGMEXEC
41129+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41130+ pax_task_size = SEGMEXEC_TASK_SIZE;
41131+#endif
41132+
41133 eppnt = elf_phdata;
41134 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41135 if (eppnt->p_type == PT_LOAD) {
41136@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41137 k = load_addr + eppnt->p_vaddr;
41138 if (BAD_ADDR(k) ||
41139 eppnt->p_filesz > eppnt->p_memsz ||
41140- eppnt->p_memsz > TASK_SIZE ||
41141- TASK_SIZE - eppnt->p_memsz < k) {
41142+ eppnt->p_memsz > pax_task_size ||
41143+ pax_task_size - eppnt->p_memsz < k) {
41144 error = -ENOMEM;
41145 goto out_close;
41146 }
41147@@ -528,6 +552,351 @@ out:
41148 return error;
41149 }
41150
41151+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41152+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41153+{
41154+ unsigned long pax_flags = 0UL;
41155+
41156+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41157+
41158+#ifdef CONFIG_PAX_PAGEEXEC
41159+ if (elf_phdata->p_flags & PF_PAGEEXEC)
41160+ pax_flags |= MF_PAX_PAGEEXEC;
41161+#endif
41162+
41163+#ifdef CONFIG_PAX_SEGMEXEC
41164+ if (elf_phdata->p_flags & PF_SEGMEXEC)
41165+ pax_flags |= MF_PAX_SEGMEXEC;
41166+#endif
41167+
41168+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41169+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41170+ if ((__supported_pte_mask & _PAGE_NX))
41171+ pax_flags &= ~MF_PAX_SEGMEXEC;
41172+ else
41173+ pax_flags &= ~MF_PAX_PAGEEXEC;
41174+ }
41175+#endif
41176+
41177+#ifdef CONFIG_PAX_EMUTRAMP
41178+ if (elf_phdata->p_flags & PF_EMUTRAMP)
41179+ pax_flags |= MF_PAX_EMUTRAMP;
41180+#endif
41181+
41182+#ifdef CONFIG_PAX_MPROTECT
41183+ if (elf_phdata->p_flags & PF_MPROTECT)
41184+ pax_flags |= MF_PAX_MPROTECT;
41185+#endif
41186+
41187+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41188+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41189+ pax_flags |= MF_PAX_RANDMMAP;
41190+#endif
41191+
41192+#endif
41193+
41194+ return pax_flags;
41195+}
41196+
41197+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41198+{
41199+ unsigned long pax_flags = 0UL;
41200+
41201+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41202+
41203+#ifdef CONFIG_PAX_PAGEEXEC
41204+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41205+ pax_flags |= MF_PAX_PAGEEXEC;
41206+#endif
41207+
41208+#ifdef CONFIG_PAX_SEGMEXEC
41209+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41210+ pax_flags |= MF_PAX_SEGMEXEC;
41211+#endif
41212+
41213+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41214+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41215+ if ((__supported_pte_mask & _PAGE_NX))
41216+ pax_flags &= ~MF_PAX_SEGMEXEC;
41217+ else
41218+ pax_flags &= ~MF_PAX_PAGEEXEC;
41219+ }
41220+#endif
41221+
41222+#ifdef CONFIG_PAX_EMUTRAMP
41223+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41224+ pax_flags |= MF_PAX_EMUTRAMP;
41225+#endif
41226+
41227+#ifdef CONFIG_PAX_MPROTECT
41228+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41229+ pax_flags |= MF_PAX_MPROTECT;
41230+#endif
41231+
41232+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41233+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41234+ pax_flags |= MF_PAX_RANDMMAP;
41235+#endif
41236+
41237+#endif
41238+
41239+ return pax_flags;
41240+}
41241+
41242+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41243+{
41244+ unsigned long pax_flags = 0UL;
41245+
41246+#ifdef CONFIG_PAX_EI_PAX
41247+
41248+#ifdef CONFIG_PAX_PAGEEXEC
41249+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41250+ pax_flags |= MF_PAX_PAGEEXEC;
41251+#endif
41252+
41253+#ifdef CONFIG_PAX_SEGMEXEC
41254+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41255+ pax_flags |= MF_PAX_SEGMEXEC;
41256+#endif
41257+
41258+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41259+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41260+ if ((__supported_pte_mask & _PAGE_NX))
41261+ pax_flags &= ~MF_PAX_SEGMEXEC;
41262+ else
41263+ pax_flags &= ~MF_PAX_PAGEEXEC;
41264+ }
41265+#endif
41266+
41267+#ifdef CONFIG_PAX_EMUTRAMP
41268+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41269+ pax_flags |= MF_PAX_EMUTRAMP;
41270+#endif
41271+
41272+#ifdef CONFIG_PAX_MPROTECT
41273+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41274+ pax_flags |= MF_PAX_MPROTECT;
41275+#endif
41276+
41277+#ifdef CONFIG_PAX_ASLR
41278+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41279+ pax_flags |= MF_PAX_RANDMMAP;
41280+#endif
41281+
41282+#else
41283+
41284+#ifdef CONFIG_PAX_PAGEEXEC
41285+ pax_flags |= MF_PAX_PAGEEXEC;
41286+#endif
41287+
41288+#ifdef CONFIG_PAX_MPROTECT
41289+ pax_flags |= MF_PAX_MPROTECT;
41290+#endif
41291+
41292+#ifdef CONFIG_PAX_RANDMMAP
41293+ pax_flags |= MF_PAX_RANDMMAP;
41294+#endif
41295+
41296+#ifdef CONFIG_PAX_SEGMEXEC
41297+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41298+ pax_flags &= ~MF_PAX_PAGEEXEC;
41299+ pax_flags |= MF_PAX_SEGMEXEC;
41300+ }
41301+#endif
41302+
41303+#endif
41304+
41305+ return pax_flags;
41306+}
41307+
41308+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41309+{
41310+
41311+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41312+ unsigned long i;
41313+
41314+ for (i = 0UL; i < elf_ex->e_phnum; i++)
41315+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41316+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41317+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41318+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41319+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41320+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41321+ return ~0UL;
41322+
41323+#ifdef CONFIG_PAX_SOFTMODE
41324+ if (pax_softmode)
41325+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41326+ else
41327+#endif
41328+
41329+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41330+ break;
41331+ }
41332+#endif
41333+
41334+ return ~0UL;
41335+}
41336+
41337+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41338+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41339+{
41340+ unsigned long pax_flags = 0UL;
41341+
41342+#ifdef CONFIG_PAX_PAGEEXEC
41343+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41344+ pax_flags |= MF_PAX_PAGEEXEC;
41345+#endif
41346+
41347+#ifdef CONFIG_PAX_SEGMEXEC
41348+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41349+ pax_flags |= MF_PAX_SEGMEXEC;
41350+#endif
41351+
41352+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41353+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41354+ if ((__supported_pte_mask & _PAGE_NX))
41355+ pax_flags &= ~MF_PAX_SEGMEXEC;
41356+ else
41357+ pax_flags &= ~MF_PAX_PAGEEXEC;
41358+ }
41359+#endif
41360+
41361+#ifdef CONFIG_PAX_EMUTRAMP
41362+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41363+ pax_flags |= MF_PAX_EMUTRAMP;
41364+#endif
41365+
41366+#ifdef CONFIG_PAX_MPROTECT
41367+ if (pax_flags_softmode & MF_PAX_MPROTECT)
41368+ pax_flags |= MF_PAX_MPROTECT;
41369+#endif
41370+
41371+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41372+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41373+ pax_flags |= MF_PAX_RANDMMAP;
41374+#endif
41375+
41376+ return pax_flags;
41377+}
41378+
41379+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41380+{
41381+ unsigned long pax_flags = 0UL;
41382+
41383+#ifdef CONFIG_PAX_PAGEEXEC
41384+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41385+ pax_flags |= MF_PAX_PAGEEXEC;
41386+#endif
41387+
41388+#ifdef CONFIG_PAX_SEGMEXEC
41389+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41390+ pax_flags |= MF_PAX_SEGMEXEC;
41391+#endif
41392+
41393+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41394+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41395+ if ((__supported_pte_mask & _PAGE_NX))
41396+ pax_flags &= ~MF_PAX_SEGMEXEC;
41397+ else
41398+ pax_flags &= ~MF_PAX_PAGEEXEC;
41399+ }
41400+#endif
41401+
41402+#ifdef CONFIG_PAX_EMUTRAMP
41403+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41404+ pax_flags |= MF_PAX_EMUTRAMP;
41405+#endif
41406+
41407+#ifdef CONFIG_PAX_MPROTECT
41408+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41409+ pax_flags |= MF_PAX_MPROTECT;
41410+#endif
41411+
41412+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41413+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41414+ pax_flags |= MF_PAX_RANDMMAP;
41415+#endif
41416+
41417+ return pax_flags;
41418+}
41419+#endif
41420+
41421+static unsigned long pax_parse_xattr_pax(struct file * const file)
41422+{
41423+
41424+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41425+ ssize_t xattr_size, i;
41426+ unsigned char xattr_value[5];
41427+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41428+
41429+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41430+ if (xattr_size <= 0)
41431+ return ~0UL;
41432+
41433+ for (i = 0; i < xattr_size; i++)
41434+ switch (xattr_value[i]) {
41435+ default:
41436+ return ~0UL;
41437+
41438+#define parse_flag(option1, option2, flag) \
41439+ case option1: \
41440+ pax_flags_hardmode |= MF_PAX_##flag; \
41441+ break; \
41442+ case option2: \
41443+ pax_flags_softmode |= MF_PAX_##flag; \
41444+ break;
41445+
41446+ parse_flag('p', 'P', PAGEEXEC);
41447+ parse_flag('e', 'E', EMUTRAMP);
41448+ parse_flag('m', 'M', MPROTECT);
41449+ parse_flag('r', 'R', RANDMMAP);
41450+ parse_flag('s', 'S', SEGMEXEC);
41451+
41452+#undef parse_flag
41453+ }
41454+
41455+ if (pax_flags_hardmode & pax_flags_softmode)
41456+ return ~0UL;
41457+
41458+#ifdef CONFIG_PAX_SOFTMODE
41459+ if (pax_softmode)
41460+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41461+ else
41462+#endif
41463+
41464+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41465+#else
41466+ return ~0UL;
41467+#endif
41468+
41469+}
41470+
41471+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41472+{
41473+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41474+
41475+ pax_flags = pax_parse_ei_pax(elf_ex);
41476+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41477+ xattr_pax_flags = pax_parse_xattr_pax(file);
41478+
41479+ if (pt_pax_flags == ~0UL)
41480+ pt_pax_flags = xattr_pax_flags;
41481+ else if (xattr_pax_flags == ~0UL)
41482+ xattr_pax_flags = pt_pax_flags;
41483+ if (pt_pax_flags != xattr_pax_flags)
41484+ return -EINVAL;
41485+ if (pt_pax_flags != ~0UL)
41486+ pax_flags = pt_pax_flags;
41487+
41488+ if (0 > pax_check_flags(&pax_flags))
41489+ return -EINVAL;
41490+
41491+ current->mm->pax_flags = pax_flags;
41492+ return 0;
41493+}
41494+#endif
41495+
41496 /*
41497 * These are the functions used to load ELF style executables and shared
41498 * libraries. There is no binary dependent code anywhere else.
41499@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41500 {
41501 unsigned int random_variable = 0;
41502
41503+#ifdef CONFIG_PAX_RANDUSTACK
41504+ if (randomize_va_space)
41505+ return stack_top - current->mm->delta_stack;
41506+#endif
41507+
41508 if ((current->flags & PF_RANDOMIZE) &&
41509 !(current->personality & ADDR_NO_RANDOMIZE)) {
41510 random_variable = get_random_int() & STACK_RND_MASK;
41511@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41512 unsigned long load_addr = 0, load_bias = 0;
41513 int load_addr_set = 0;
41514 char * elf_interpreter = NULL;
41515- unsigned long error;
41516+ unsigned long error = 0;
41517 struct elf_phdr *elf_ppnt, *elf_phdata;
41518 unsigned long elf_bss, elf_brk;
41519 int retval, i;
41520@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41521 unsigned long start_code, end_code, start_data, end_data;
41522 unsigned long reloc_func_desc __maybe_unused = 0;
41523 int executable_stack = EXSTACK_DEFAULT;
41524- unsigned long def_flags = 0;
41525 struct {
41526 struct elfhdr elf_ex;
41527 struct elfhdr interp_elf_ex;
41528 } *loc;
41529+ unsigned long pax_task_size = TASK_SIZE;
41530
41531 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41532 if (!loc) {
41533@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41534
41535 /* OK, This is the point of no return */
41536 current->flags &= ~PF_FORKNOEXEC;
41537- current->mm->def_flags = def_flags;
41538+
41539+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41540+ current->mm->pax_flags = 0UL;
41541+#endif
41542+
41543+#ifdef CONFIG_PAX_DLRESOLVE
41544+ current->mm->call_dl_resolve = 0UL;
41545+#endif
41546+
41547+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41548+ current->mm->call_syscall = 0UL;
41549+#endif
41550+
41551+#ifdef CONFIG_PAX_ASLR
41552+ current->mm->delta_mmap = 0UL;
41553+ current->mm->delta_stack = 0UL;
41554+#endif
41555+
41556+ current->mm->def_flags = 0;
41557+
41558+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41559+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41560+ send_sig(SIGKILL, current, 0);
41561+ goto out_free_dentry;
41562+ }
41563+#endif
41564+
41565+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41566+ pax_set_initial_flags(bprm);
41567+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41568+ if (pax_set_initial_flags_func)
41569+ (pax_set_initial_flags_func)(bprm);
41570+#endif
41571+
41572+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41573+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41574+ current->mm->context.user_cs_limit = PAGE_SIZE;
41575+ current->mm->def_flags |= VM_PAGEEXEC;
41576+ }
41577+#endif
41578+
41579+#ifdef CONFIG_PAX_SEGMEXEC
41580+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41581+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41582+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41583+ pax_task_size = SEGMEXEC_TASK_SIZE;
41584+ current->mm->def_flags |= VM_NOHUGEPAGE;
41585+ }
41586+#endif
41587+
41588+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41589+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41590+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41591+ put_cpu();
41592+ }
41593+#endif
41594
41595 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41596 may depend on the personality. */
41597 SET_PERSONALITY(loc->elf_ex);
41598+
41599+#ifdef CONFIG_PAX_ASLR
41600+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41601+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41602+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41603+ }
41604+#endif
41605+
41606+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41607+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41608+ executable_stack = EXSTACK_DISABLE_X;
41609+ current->personality &= ~READ_IMPLIES_EXEC;
41610+ } else
41611+#endif
41612+
41613 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41614 current->personality |= READ_IMPLIES_EXEC;
41615
41616@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41617 #else
41618 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41619 #endif
41620+
41621+#ifdef CONFIG_PAX_RANDMMAP
41622+ /* PaX: randomize base address at the default exe base if requested */
41623+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41624+#ifdef CONFIG_SPARC64
41625+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41626+#else
41627+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41628+#endif
41629+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41630+ elf_flags |= MAP_FIXED;
41631+ }
41632+#endif
41633+
41634 }
41635
41636 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41637@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41638 * allowed task size. Note that p_filesz must always be
41639 * <= p_memsz so it is only necessary to check p_memsz.
41640 */
41641- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41642- elf_ppnt->p_memsz > TASK_SIZE ||
41643- TASK_SIZE - elf_ppnt->p_memsz < k) {
41644+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41645+ elf_ppnt->p_memsz > pax_task_size ||
41646+ pax_task_size - elf_ppnt->p_memsz < k) {
41647 /* set_brk can never work. Avoid overflows. */
41648 send_sig(SIGKILL, current, 0);
41649 retval = -EINVAL;
41650@@ -881,11 +1339,35 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41651 goto out_free_dentry;
41652 }
41653 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41654- send_sig(SIGSEGV, current, 0);
41655- retval = -EFAULT; /* Nobody gets to see this, but.. */
41656- goto out_free_dentry;
41657+ /*
41658+ * This bss-zeroing can fail if the ELF
41659+ * file specifies odd protections. So
41660+ * we don't check the return value
41661+ */
41662 }
41663
41664+#ifdef CONFIG_PAX_RANDMMAP
41665+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41666+ unsigned long start, size;
41667+
41668+ start = ELF_PAGEALIGN(elf_brk);
41669+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
41670+ down_write(&current->mm->mmap_sem);
41671+ retval = -ENOMEM;
41672+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
41673+ retval = do_mmap(NULL, start, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE, 0);
41674+ if (retval >= 0)
41675+ retval = do_mmap(NULL, ELF_PAGEALIGN(start + size), PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
41676+ }
41677+ up_write(&current->mm->mmap_sem);
41678+ if (retval < 0) {
41679+ send_sig(SIGKILL, current, 0);
41680+ goto out_free_dentry;
41681+ }
41682+ current->mm->start_brk = current->mm->brk = start + size + PAGE_SIZE;
41683+ }
41684+#endif
41685+
41686 if (elf_interpreter) {
41687 unsigned long uninitialized_var(interp_map_addr);
41688
41689@@ -1098,7 +1580,7 @@ out:
41690 * Decide what to dump of a segment, part, all or none.
41691 */
41692 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41693- unsigned long mm_flags)
41694+ unsigned long mm_flags, long signr)
41695 {
41696 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41697
41698@@ -1132,7 +1614,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
41699 if (vma->vm_file == NULL)
41700 return 0;
41701
41702- if (FILTER(MAPPED_PRIVATE))
41703+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41704 goto whole;
41705
41706 /*
41707@@ -1354,9 +1836,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
41708 {
41709 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41710 int i = 0;
41711- do
41712+ do {
41713 i += 2;
41714- while (auxv[i - 2] != AT_NULL);
41715+ } while (auxv[i - 2] != AT_NULL);
41716 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41717 }
41718
41719@@ -1862,14 +2344,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41720 }
41721
41722 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41723- unsigned long mm_flags)
41724+ struct coredump_params *cprm)
41725 {
41726 struct vm_area_struct *vma;
41727 size_t size = 0;
41728
41729 for (vma = first_vma(current, gate_vma); vma != NULL;
41730 vma = next_vma(vma, gate_vma))
41731- size += vma_dump_size(vma, mm_flags);
41732+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41733 return size;
41734 }
41735
41736@@ -1963,7 +2445,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41737
41738 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41739
41740- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41741+ offset += elf_core_vma_data_size(gate_vma, cprm);
41742 offset += elf_core_extra_data_size();
41743 e_shoff = offset;
41744
41745@@ -1977,10 +2459,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41746 offset = dataoff;
41747
41748 size += sizeof(*elf);
41749+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41750 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
41751 goto end_coredump;
41752
41753 size += sizeof(*phdr4note);
41754+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41755 if (size > cprm->limit
41756 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
41757 goto end_coredump;
41758@@ -1994,7 +2478,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41759 phdr.p_offset = offset;
41760 phdr.p_vaddr = vma->vm_start;
41761 phdr.p_paddr = 0;
41762- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
41763+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41764 phdr.p_memsz = vma->vm_end - vma->vm_start;
41765 offset += phdr.p_filesz;
41766 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
41767@@ -2005,6 +2489,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41768 phdr.p_align = ELF_EXEC_PAGESIZE;
41769
41770 size += sizeof(phdr);
41771+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41772 if (size > cprm->limit
41773 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
41774 goto end_coredump;
41775@@ -2029,7 +2514,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41776 unsigned long addr;
41777 unsigned long end;
41778
41779- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
41780+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41781
41782 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
41783 struct page *page;
41784@@ -2038,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41785 page = get_dump_page(addr);
41786 if (page) {
41787 void *kaddr = kmap(page);
41788+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
41789 stop = ((size += PAGE_SIZE) > cprm->limit) ||
41790 !dump_write(cprm->file, kaddr,
41791 PAGE_SIZE);
41792@@ -2055,6 +2541,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41793
41794 if (e_phnum == PN_XNUM) {
41795 size += sizeof(*shdr4extnum);
41796+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41797 if (size > cprm->limit
41798 || !dump_write(cprm->file, shdr4extnum,
41799 sizeof(*shdr4extnum)))
41800@@ -2075,6 +2562,97 @@ out:
41801
41802 #endif /* CONFIG_ELF_CORE */
41803
41804+#ifdef CONFIG_PAX_MPROTECT
41805+/* PaX: non-PIC ELF libraries need relocations on their executable segments
41806+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
41807+ * we'll remove VM_MAYWRITE for good on RELRO segments.
41808+ *
41809+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
41810+ * basis because we want to allow the common case and not the special ones.
41811+ */
41812+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
41813+{
41814+ struct elfhdr elf_h;
41815+ struct elf_phdr elf_p;
41816+ unsigned long i;
41817+ unsigned long oldflags;
41818+ bool is_textrel_rw, is_textrel_rx, is_relro;
41819+
41820+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
41821+ return;
41822+
41823+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
41824+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
41825+
41826+#ifdef CONFIG_PAX_ELFRELOCS
41827+ /* possible TEXTREL */
41828+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
41829+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
41830+#else
41831+ is_textrel_rw = false;
41832+ is_textrel_rx = false;
41833+#endif
41834+
41835+ /* possible RELRO */
41836+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
41837+
41838+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
41839+ return;
41840+
41841+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
41842+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
41843+
41844+#ifdef CONFIG_PAX_ETEXECRELOCS
41845+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41846+#else
41847+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
41848+#endif
41849+
41850+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41851+ !elf_check_arch(&elf_h) ||
41852+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
41853+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
41854+ return;
41855+
41856+ for (i = 0UL; i < elf_h.e_phnum; i++) {
41857+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
41858+ return;
41859+ switch (elf_p.p_type) {
41860+ case PT_DYNAMIC:
41861+ if (!is_textrel_rw && !is_textrel_rx)
41862+ continue;
41863+ i = 0UL;
41864+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
41865+ elf_dyn dyn;
41866+
41867+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
41868+ return;
41869+ if (dyn.d_tag == DT_NULL)
41870+ return;
41871+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
41872+ gr_log_textrel(vma);
41873+ if (is_textrel_rw)
41874+ vma->vm_flags |= VM_MAYWRITE;
41875+ else
41876+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
41877+ vma->vm_flags &= ~VM_MAYWRITE;
41878+ return;
41879+ }
41880+ i++;
41881+ }
41882+ return;
41883+
41884+ case PT_GNU_RELRO:
41885+ if (!is_relro)
41886+ continue;
41887+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
41888+ vma->vm_flags &= ~VM_MAYWRITE;
41889+ return;
41890+ }
41891+ }
41892+}
41893+#endif
41894+
41895 static int __init init_elf_binfmt(void)
41896 {
41897 return register_binfmt(&elf_format);
41898diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
41899index 1bffbe0..c8c283e 100644
41900--- a/fs/binfmt_flat.c
41901+++ b/fs/binfmt_flat.c
41902@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
41903 realdatastart = (unsigned long) -ENOMEM;
41904 printk("Unable to allocate RAM for process data, errno %d\n",
41905 (int)-realdatastart);
41906+ down_write(&current->mm->mmap_sem);
41907 do_munmap(current->mm, textpos, text_len);
41908+ up_write(&current->mm->mmap_sem);
41909 ret = realdatastart;
41910 goto err;
41911 }
41912@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41913 }
41914 if (IS_ERR_VALUE(result)) {
41915 printk("Unable to read data+bss, errno %d\n", (int)-result);
41916+ down_write(&current->mm->mmap_sem);
41917 do_munmap(current->mm, textpos, text_len);
41918 do_munmap(current->mm, realdatastart, len);
41919+ up_write(&current->mm->mmap_sem);
41920 ret = result;
41921 goto err;
41922 }
41923@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41924 }
41925 if (IS_ERR_VALUE(result)) {
41926 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
41927+ down_write(&current->mm->mmap_sem);
41928 do_munmap(current->mm, textpos, text_len + data_len + extra +
41929 MAX_SHARED_LIBS * sizeof(unsigned long));
41930+ up_write(&current->mm->mmap_sem);
41931 ret = result;
41932 goto err;
41933 }
41934diff --git a/fs/bio.c b/fs/bio.c
41935index b980ecd..74800bf 100644
41936--- a/fs/bio.c
41937+++ b/fs/bio.c
41938@@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
41939 /*
41940 * Overflow, abort
41941 */
41942- if (end < start)
41943+ if (end < start || end - start > INT_MAX - nr_pages)
41944 return ERR_PTR(-EINVAL);
41945
41946 nr_pages += end - start;
41947@@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
41948 const int read = bio_data_dir(bio) == READ;
41949 struct bio_map_data *bmd = bio->bi_private;
41950 int i;
41951- char *p = bmd->sgvecs[0].iov_base;
41952+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
41953
41954 __bio_for_each_segment(bvec, bio, i, 0) {
41955 char *addr = page_address(bvec->bv_page);
41956diff --git a/fs/block_dev.c b/fs/block_dev.c
41957index 5e9f198..6bf9b1c 100644
41958--- a/fs/block_dev.c
41959+++ b/fs/block_dev.c
41960@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
41961 else if (bdev->bd_contains == bdev)
41962 return true; /* is a whole device which isn't held */
41963
41964- else if (whole->bd_holder == bd_may_claim)
41965+ else if (whole->bd_holder == (void *)bd_may_claim)
41966 return true; /* is a partition of a device that is being partitioned */
41967 else if (whole->bd_holder != NULL)
41968 return false; /* is a partition of a held device */
41969diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
41970index d986824..af1befd 100644
41971--- a/fs/btrfs/check-integrity.c
41972+++ b/fs/btrfs/check-integrity.c
41973@@ -157,7 +157,7 @@ struct btrfsic_block {
41974 union {
41975 bio_end_io_t *bio;
41976 bh_end_io_t *bh;
41977- } orig_bio_bh_end_io;
41978+ } __no_const orig_bio_bh_end_io;
41979 int submit_bio_bh_rw;
41980 u64 flush_gen; /* only valid if !never_written */
41981 };
41982diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
41983index 0639a55..7d9e07f 100644
41984--- a/fs/btrfs/ctree.c
41985+++ b/fs/btrfs/ctree.c
41986@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
41987 free_extent_buffer(buf);
41988 add_root_to_dirty_list(root);
41989 } else {
41990- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
41991- parent_start = parent->start;
41992- else
41993+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
41994+ if (parent)
41995+ parent_start = parent->start;
41996+ else
41997+ parent_start = 0;
41998+ } else
41999 parent_start = 0;
42000
42001 WARN_ON(trans->transid != btrfs_header_generation(parent));
42002diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42003index 892b347..b3db246 100644
42004--- a/fs/btrfs/inode.c
42005+++ b/fs/btrfs/inode.c
42006@@ -6930,7 +6930,7 @@ fail:
42007 return -ENOMEM;
42008 }
42009
42010-static int btrfs_getattr(struct vfsmount *mnt,
42011+int btrfs_getattr(struct vfsmount *mnt,
42012 struct dentry *dentry, struct kstat *stat)
42013 {
42014 struct inode *inode = dentry->d_inode;
42015@@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42016 return 0;
42017 }
42018
42019+EXPORT_SYMBOL(btrfs_getattr);
42020+
42021+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42022+{
42023+ return BTRFS_I(inode)->root->anon_dev;
42024+}
42025+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42026+
42027 /*
42028 * If a file is moved, it will inherit the cow and compression flags of the new
42029 * directory.
42030diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42031index d8b5471..e5463d7 100644
42032--- a/fs/btrfs/ioctl.c
42033+++ b/fs/btrfs/ioctl.c
42034@@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42035 for (i = 0; i < num_types; i++) {
42036 struct btrfs_space_info *tmp;
42037
42038+ /* Don't copy in more than we allocated */
42039 if (!slot_count)
42040 break;
42041
42042+ slot_count--;
42043+
42044 info = NULL;
42045 rcu_read_lock();
42046 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42047@@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42048 memcpy(dest, &space, sizeof(space));
42049 dest++;
42050 space_args.total_spaces++;
42051- slot_count--;
42052 }
42053- if (!slot_count)
42054- break;
42055 }
42056 up_read(&info->groups_sem);
42057 }
42058
42059- user_dest = (struct btrfs_ioctl_space_info *)
42060+ user_dest = (struct btrfs_ioctl_space_info __user *)
42061 (arg + sizeof(struct btrfs_ioctl_space_args));
42062
42063 if (copy_to_user(user_dest, dest_orig, alloc_size))
42064diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42065index 8c1aae2..1e46446 100644
42066--- a/fs/btrfs/relocation.c
42067+++ b/fs/btrfs/relocation.c
42068@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42069 }
42070 spin_unlock(&rc->reloc_root_tree.lock);
42071
42072- BUG_ON((struct btrfs_root *)node->data != root);
42073+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42074
42075 if (!del) {
42076 spin_lock(&rc->reloc_root_tree.lock);
42077diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42078index 622f469..e8d2d55 100644
42079--- a/fs/cachefiles/bind.c
42080+++ b/fs/cachefiles/bind.c
42081@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42082 args);
42083
42084 /* start by checking things over */
42085- ASSERT(cache->fstop_percent >= 0 &&
42086- cache->fstop_percent < cache->fcull_percent &&
42087+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42088 cache->fcull_percent < cache->frun_percent &&
42089 cache->frun_percent < 100);
42090
42091- ASSERT(cache->bstop_percent >= 0 &&
42092- cache->bstop_percent < cache->bcull_percent &&
42093+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42094 cache->bcull_percent < cache->brun_percent &&
42095 cache->brun_percent < 100);
42096
42097diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42098index 0a1467b..6a53245 100644
42099--- a/fs/cachefiles/daemon.c
42100+++ b/fs/cachefiles/daemon.c
42101@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42102 if (n > buflen)
42103 return -EMSGSIZE;
42104
42105- if (copy_to_user(_buffer, buffer, n) != 0)
42106+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42107 return -EFAULT;
42108
42109 return n;
42110@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42111 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42112 return -EIO;
42113
42114- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42115+ if (datalen > PAGE_SIZE - 1)
42116 return -EOPNOTSUPP;
42117
42118 /* drag the command string into the kernel so we can parse it */
42119@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42120 if (args[0] != '%' || args[1] != '\0')
42121 return -EINVAL;
42122
42123- if (fstop < 0 || fstop >= cache->fcull_percent)
42124+ if (fstop >= cache->fcull_percent)
42125 return cachefiles_daemon_range_error(cache, args);
42126
42127 cache->fstop_percent = fstop;
42128@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42129 if (args[0] != '%' || args[1] != '\0')
42130 return -EINVAL;
42131
42132- if (bstop < 0 || bstop >= cache->bcull_percent)
42133+ if (bstop >= cache->bcull_percent)
42134 return cachefiles_daemon_range_error(cache, args);
42135
42136 cache->bstop_percent = bstop;
42137diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42138index bd6bc1b..b627b53 100644
42139--- a/fs/cachefiles/internal.h
42140+++ b/fs/cachefiles/internal.h
42141@@ -57,7 +57,7 @@ struct cachefiles_cache {
42142 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42143 struct rb_root active_nodes; /* active nodes (can't be culled) */
42144 rwlock_t active_lock; /* lock for active_nodes */
42145- atomic_t gravecounter; /* graveyard uniquifier */
42146+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42147 unsigned frun_percent; /* when to stop culling (% files) */
42148 unsigned fcull_percent; /* when to start culling (% files) */
42149 unsigned fstop_percent; /* when to stop allocating (% files) */
42150@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42151 * proc.c
42152 */
42153 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42154-extern atomic_t cachefiles_lookup_histogram[HZ];
42155-extern atomic_t cachefiles_mkdir_histogram[HZ];
42156-extern atomic_t cachefiles_create_histogram[HZ];
42157+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42158+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42159+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42160
42161 extern int __init cachefiles_proc_init(void);
42162 extern void cachefiles_proc_cleanup(void);
42163 static inline
42164-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42165+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42166 {
42167 unsigned long jif = jiffies - start_jif;
42168 if (jif >= HZ)
42169 jif = HZ - 1;
42170- atomic_inc(&histogram[jif]);
42171+ atomic_inc_unchecked(&histogram[jif]);
42172 }
42173
42174 #else
42175diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42176index a0358c2..d6137f2 100644
42177--- a/fs/cachefiles/namei.c
42178+++ b/fs/cachefiles/namei.c
42179@@ -318,7 +318,7 @@ try_again:
42180 /* first step is to make up a grave dentry in the graveyard */
42181 sprintf(nbuffer, "%08x%08x",
42182 (uint32_t) get_seconds(),
42183- (uint32_t) atomic_inc_return(&cache->gravecounter));
42184+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42185
42186 /* do the multiway lock magic */
42187 trap = lock_rename(cache->graveyard, dir);
42188diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42189index eccd339..4c1d995 100644
42190--- a/fs/cachefiles/proc.c
42191+++ b/fs/cachefiles/proc.c
42192@@ -14,9 +14,9 @@
42193 #include <linux/seq_file.h>
42194 #include "internal.h"
42195
42196-atomic_t cachefiles_lookup_histogram[HZ];
42197-atomic_t cachefiles_mkdir_histogram[HZ];
42198-atomic_t cachefiles_create_histogram[HZ];
42199+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42200+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42201+atomic_unchecked_t cachefiles_create_histogram[HZ];
42202
42203 /*
42204 * display the latency histogram
42205@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42206 return 0;
42207 default:
42208 index = (unsigned long) v - 3;
42209- x = atomic_read(&cachefiles_lookup_histogram[index]);
42210- y = atomic_read(&cachefiles_mkdir_histogram[index]);
42211- z = atomic_read(&cachefiles_create_histogram[index]);
42212+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42213+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42214+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42215 if (x == 0 && y == 0 && z == 0)
42216 return 0;
42217
42218diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42219index 0e3c092..818480e 100644
42220--- a/fs/cachefiles/rdwr.c
42221+++ b/fs/cachefiles/rdwr.c
42222@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42223 old_fs = get_fs();
42224 set_fs(KERNEL_DS);
42225 ret = file->f_op->write(
42226- file, (const void __user *) data, len, &pos);
42227+ file, (const void __force_user *) data, len, &pos);
42228 set_fs(old_fs);
42229 kunmap(page);
42230 if (ret != len)
42231diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42232index 3e8094b..cb3ff3d 100644
42233--- a/fs/ceph/dir.c
42234+++ b/fs/ceph/dir.c
42235@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42236 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42237 struct ceph_mds_client *mdsc = fsc->mdsc;
42238 unsigned frag = fpos_frag(filp->f_pos);
42239- int off = fpos_off(filp->f_pos);
42240+ unsigned int off = fpos_off(filp->f_pos);
42241 int err;
42242 u32 ftype;
42243 struct ceph_mds_reply_info_parsed *rinfo;
42244@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42245 if (nd &&
42246 (nd->flags & LOOKUP_OPEN) &&
42247 !(nd->intent.open.flags & O_CREAT)) {
42248- int mode = nd->intent.open.create_mode & ~current->fs->umask;
42249+ int mode = nd->intent.open.create_mode & ~current_umask();
42250 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42251 }
42252
42253diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42254index 24b3dfc..3cd5454 100644
42255--- a/fs/cifs/cifs_debug.c
42256+++ b/fs/cifs/cifs_debug.c
42257@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42258
42259 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42260 #ifdef CONFIG_CIFS_STATS2
42261- atomic_set(&totBufAllocCount, 0);
42262- atomic_set(&totSmBufAllocCount, 0);
42263+ atomic_set_unchecked(&totBufAllocCount, 0);
42264+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42265 #endif /* CONFIG_CIFS_STATS2 */
42266 spin_lock(&cifs_tcp_ses_lock);
42267 list_for_each(tmp1, &cifs_tcp_ses_list) {
42268@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42269 tcon = list_entry(tmp3,
42270 struct cifs_tcon,
42271 tcon_list);
42272- atomic_set(&tcon->num_smbs_sent, 0);
42273- atomic_set(&tcon->num_writes, 0);
42274- atomic_set(&tcon->num_reads, 0);
42275- atomic_set(&tcon->num_oplock_brks, 0);
42276- atomic_set(&tcon->num_opens, 0);
42277- atomic_set(&tcon->num_posixopens, 0);
42278- atomic_set(&tcon->num_posixmkdirs, 0);
42279- atomic_set(&tcon->num_closes, 0);
42280- atomic_set(&tcon->num_deletes, 0);
42281- atomic_set(&tcon->num_mkdirs, 0);
42282- atomic_set(&tcon->num_rmdirs, 0);
42283- atomic_set(&tcon->num_renames, 0);
42284- atomic_set(&tcon->num_t2renames, 0);
42285- atomic_set(&tcon->num_ffirst, 0);
42286- atomic_set(&tcon->num_fnext, 0);
42287- atomic_set(&tcon->num_fclose, 0);
42288- atomic_set(&tcon->num_hardlinks, 0);
42289- atomic_set(&tcon->num_symlinks, 0);
42290- atomic_set(&tcon->num_locks, 0);
42291+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42292+ atomic_set_unchecked(&tcon->num_writes, 0);
42293+ atomic_set_unchecked(&tcon->num_reads, 0);
42294+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42295+ atomic_set_unchecked(&tcon->num_opens, 0);
42296+ atomic_set_unchecked(&tcon->num_posixopens, 0);
42297+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42298+ atomic_set_unchecked(&tcon->num_closes, 0);
42299+ atomic_set_unchecked(&tcon->num_deletes, 0);
42300+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
42301+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
42302+ atomic_set_unchecked(&tcon->num_renames, 0);
42303+ atomic_set_unchecked(&tcon->num_t2renames, 0);
42304+ atomic_set_unchecked(&tcon->num_ffirst, 0);
42305+ atomic_set_unchecked(&tcon->num_fnext, 0);
42306+ atomic_set_unchecked(&tcon->num_fclose, 0);
42307+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
42308+ atomic_set_unchecked(&tcon->num_symlinks, 0);
42309+ atomic_set_unchecked(&tcon->num_locks, 0);
42310 }
42311 }
42312 }
42313@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42314 smBufAllocCount.counter, cifs_min_small);
42315 #ifdef CONFIG_CIFS_STATS2
42316 seq_printf(m, "Total Large %d Small %d Allocations\n",
42317- atomic_read(&totBufAllocCount),
42318- atomic_read(&totSmBufAllocCount));
42319+ atomic_read_unchecked(&totBufAllocCount),
42320+ atomic_read_unchecked(&totSmBufAllocCount));
42321 #endif /* CONFIG_CIFS_STATS2 */
42322
42323 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42324@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42325 if (tcon->need_reconnect)
42326 seq_puts(m, "\tDISCONNECTED ");
42327 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42328- atomic_read(&tcon->num_smbs_sent),
42329- atomic_read(&tcon->num_oplock_brks));
42330+ atomic_read_unchecked(&tcon->num_smbs_sent),
42331+ atomic_read_unchecked(&tcon->num_oplock_brks));
42332 seq_printf(m, "\nReads: %d Bytes: %lld",
42333- atomic_read(&tcon->num_reads),
42334+ atomic_read_unchecked(&tcon->num_reads),
42335 (long long)(tcon->bytes_read));
42336 seq_printf(m, "\nWrites: %d Bytes: %lld",
42337- atomic_read(&tcon->num_writes),
42338+ atomic_read_unchecked(&tcon->num_writes),
42339 (long long)(tcon->bytes_written));
42340 seq_printf(m, "\nFlushes: %d",
42341- atomic_read(&tcon->num_flushes));
42342+ atomic_read_unchecked(&tcon->num_flushes));
42343 seq_printf(m, "\nLocks: %d HardLinks: %d "
42344 "Symlinks: %d",
42345- atomic_read(&tcon->num_locks),
42346- atomic_read(&tcon->num_hardlinks),
42347- atomic_read(&tcon->num_symlinks));
42348+ atomic_read_unchecked(&tcon->num_locks),
42349+ atomic_read_unchecked(&tcon->num_hardlinks),
42350+ atomic_read_unchecked(&tcon->num_symlinks));
42351 seq_printf(m, "\nOpens: %d Closes: %d "
42352 "Deletes: %d",
42353- atomic_read(&tcon->num_opens),
42354- atomic_read(&tcon->num_closes),
42355- atomic_read(&tcon->num_deletes));
42356+ atomic_read_unchecked(&tcon->num_opens),
42357+ atomic_read_unchecked(&tcon->num_closes),
42358+ atomic_read_unchecked(&tcon->num_deletes));
42359 seq_printf(m, "\nPosix Opens: %d "
42360 "Posix Mkdirs: %d",
42361- atomic_read(&tcon->num_posixopens),
42362- atomic_read(&tcon->num_posixmkdirs));
42363+ atomic_read_unchecked(&tcon->num_posixopens),
42364+ atomic_read_unchecked(&tcon->num_posixmkdirs));
42365 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42366- atomic_read(&tcon->num_mkdirs),
42367- atomic_read(&tcon->num_rmdirs));
42368+ atomic_read_unchecked(&tcon->num_mkdirs),
42369+ atomic_read_unchecked(&tcon->num_rmdirs));
42370 seq_printf(m, "\nRenames: %d T2 Renames %d",
42371- atomic_read(&tcon->num_renames),
42372- atomic_read(&tcon->num_t2renames));
42373+ atomic_read_unchecked(&tcon->num_renames),
42374+ atomic_read_unchecked(&tcon->num_t2renames));
42375 seq_printf(m, "\nFindFirst: %d FNext %d "
42376 "FClose %d",
42377- atomic_read(&tcon->num_ffirst),
42378- atomic_read(&tcon->num_fnext),
42379- atomic_read(&tcon->num_fclose));
42380+ atomic_read_unchecked(&tcon->num_ffirst),
42381+ atomic_read_unchecked(&tcon->num_fnext),
42382+ atomic_read_unchecked(&tcon->num_fclose));
42383 }
42384 }
42385 }
42386diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42387index b1fd382..df45435 100644
42388--- a/fs/cifs/cifsfs.c
42389+++ b/fs/cifs/cifsfs.c
42390@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
42391 cifs_req_cachep = kmem_cache_create("cifs_request",
42392 CIFSMaxBufSize +
42393 MAX_CIFS_HDR_SIZE, 0,
42394- SLAB_HWCACHE_ALIGN, NULL);
42395+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42396 if (cifs_req_cachep == NULL)
42397 return -ENOMEM;
42398
42399@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
42400 efficient to alloc 1 per page off the slab compared to 17K (5page)
42401 alloc of large cifs buffers even when page debugging is on */
42402 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42403- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42404+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42405 NULL);
42406 if (cifs_sm_req_cachep == NULL) {
42407 mempool_destroy(cifs_req_poolp);
42408@@ -1101,8 +1101,8 @@ init_cifs(void)
42409 atomic_set(&bufAllocCount, 0);
42410 atomic_set(&smBufAllocCount, 0);
42411 #ifdef CONFIG_CIFS_STATS2
42412- atomic_set(&totBufAllocCount, 0);
42413- atomic_set(&totSmBufAllocCount, 0);
42414+ atomic_set_unchecked(&totBufAllocCount, 0);
42415+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42416 #endif /* CONFIG_CIFS_STATS2 */
42417
42418 atomic_set(&midCount, 0);
42419diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42420index 76e7d8b..4814992 100644
42421--- a/fs/cifs/cifsglob.h
42422+++ b/fs/cifs/cifsglob.h
42423@@ -392,28 +392,28 @@ struct cifs_tcon {
42424 __u16 Flags; /* optional support bits */
42425 enum statusEnum tidStatus;
42426 #ifdef CONFIG_CIFS_STATS
42427- atomic_t num_smbs_sent;
42428- atomic_t num_writes;
42429- atomic_t num_reads;
42430- atomic_t num_flushes;
42431- atomic_t num_oplock_brks;
42432- atomic_t num_opens;
42433- atomic_t num_closes;
42434- atomic_t num_deletes;
42435- atomic_t num_mkdirs;
42436- atomic_t num_posixopens;
42437- atomic_t num_posixmkdirs;
42438- atomic_t num_rmdirs;
42439- atomic_t num_renames;
42440- atomic_t num_t2renames;
42441- atomic_t num_ffirst;
42442- atomic_t num_fnext;
42443- atomic_t num_fclose;
42444- atomic_t num_hardlinks;
42445- atomic_t num_symlinks;
42446- atomic_t num_locks;
42447- atomic_t num_acl_get;
42448- atomic_t num_acl_set;
42449+ atomic_unchecked_t num_smbs_sent;
42450+ atomic_unchecked_t num_writes;
42451+ atomic_unchecked_t num_reads;
42452+ atomic_unchecked_t num_flushes;
42453+ atomic_unchecked_t num_oplock_brks;
42454+ atomic_unchecked_t num_opens;
42455+ atomic_unchecked_t num_closes;
42456+ atomic_unchecked_t num_deletes;
42457+ atomic_unchecked_t num_mkdirs;
42458+ atomic_unchecked_t num_posixopens;
42459+ atomic_unchecked_t num_posixmkdirs;
42460+ atomic_unchecked_t num_rmdirs;
42461+ atomic_unchecked_t num_renames;
42462+ atomic_unchecked_t num_t2renames;
42463+ atomic_unchecked_t num_ffirst;
42464+ atomic_unchecked_t num_fnext;
42465+ atomic_unchecked_t num_fclose;
42466+ atomic_unchecked_t num_hardlinks;
42467+ atomic_unchecked_t num_symlinks;
42468+ atomic_unchecked_t num_locks;
42469+ atomic_unchecked_t num_acl_get;
42470+ atomic_unchecked_t num_acl_set;
42471 #ifdef CONFIG_CIFS_STATS2
42472 unsigned long long time_writes;
42473 unsigned long long time_reads;
42474@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
42475 }
42476
42477 #ifdef CONFIG_CIFS_STATS
42478-#define cifs_stats_inc atomic_inc
42479+#define cifs_stats_inc atomic_inc_unchecked
42480
42481 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42482 unsigned int bytes)
42483@@ -987,8 +987,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42484 /* Various Debug counters */
42485 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42486 #ifdef CONFIG_CIFS_STATS2
42487-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42488-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42489+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42490+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42491 #endif
42492 GLOBAL_EXTERN atomic_t smBufAllocCount;
42493 GLOBAL_EXTERN atomic_t midCount;
42494diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42495index 6b0e064..94e6c3c 100644
42496--- a/fs/cifs/link.c
42497+++ b/fs/cifs/link.c
42498@@ -600,7 +600,7 @@ symlink_exit:
42499
42500 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42501 {
42502- char *p = nd_get_link(nd);
42503+ const char *p = nd_get_link(nd);
42504 if (!IS_ERR(p))
42505 kfree(p);
42506 }
42507diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42508index 703ef5c..2a44ed5 100644
42509--- a/fs/cifs/misc.c
42510+++ b/fs/cifs/misc.c
42511@@ -156,7 +156,7 @@ cifs_buf_get(void)
42512 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42513 atomic_inc(&bufAllocCount);
42514 #ifdef CONFIG_CIFS_STATS2
42515- atomic_inc(&totBufAllocCount);
42516+ atomic_inc_unchecked(&totBufAllocCount);
42517 #endif /* CONFIG_CIFS_STATS2 */
42518 }
42519
42520@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42521 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42522 atomic_inc(&smBufAllocCount);
42523 #ifdef CONFIG_CIFS_STATS2
42524- atomic_inc(&totSmBufAllocCount);
42525+ atomic_inc_unchecked(&totSmBufAllocCount);
42526 #endif /* CONFIG_CIFS_STATS2 */
42527
42528 }
42529diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42530index 6901578..d402eb5 100644
42531--- a/fs/coda/cache.c
42532+++ b/fs/coda/cache.c
42533@@ -24,7 +24,7 @@
42534 #include "coda_linux.h"
42535 #include "coda_cache.h"
42536
42537-static atomic_t permission_epoch = ATOMIC_INIT(0);
42538+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42539
42540 /* replace or extend an acl cache hit */
42541 void coda_cache_enter(struct inode *inode, int mask)
42542@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42543 struct coda_inode_info *cii = ITOC(inode);
42544
42545 spin_lock(&cii->c_lock);
42546- cii->c_cached_epoch = atomic_read(&permission_epoch);
42547+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42548 if (cii->c_uid != current_fsuid()) {
42549 cii->c_uid = current_fsuid();
42550 cii->c_cached_perm = mask;
42551@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42552 {
42553 struct coda_inode_info *cii = ITOC(inode);
42554 spin_lock(&cii->c_lock);
42555- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42556+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
42557 spin_unlock(&cii->c_lock);
42558 }
42559
42560 /* remove all acl caches */
42561 void coda_cache_clear_all(struct super_block *sb)
42562 {
42563- atomic_inc(&permission_epoch);
42564+ atomic_inc_unchecked(&permission_epoch);
42565 }
42566
42567
42568@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
42569 spin_lock(&cii->c_lock);
42570 hit = (mask & cii->c_cached_perm) == mask &&
42571 cii->c_uid == current_fsuid() &&
42572- cii->c_cached_epoch == atomic_read(&permission_epoch);
42573+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
42574 spin_unlock(&cii->c_lock);
42575
42576 return hit;
42577diff --git a/fs/compat.c b/fs/compat.c
42578index 07880ba..3fb2862 100644
42579--- a/fs/compat.c
42580+++ b/fs/compat.c
42581@@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
42582
42583 set_fs(KERNEL_DS);
42584 /* The __user pointer cast is valid because of the set_fs() */
42585- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
42586+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
42587 set_fs(oldfs);
42588 /* truncating is ok because it's a user address */
42589 if (!ret)
42590@@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
42591 goto out;
42592
42593 ret = -EINVAL;
42594- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42595+ if (nr_segs > UIO_MAXIOV)
42596 goto out;
42597 if (nr_segs > fast_segs) {
42598 ret = -ENOMEM;
42599@@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
42600
42601 struct compat_readdir_callback {
42602 struct compat_old_linux_dirent __user *dirent;
42603+ struct file * file;
42604 int result;
42605 };
42606
42607@@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
42608 buf->result = -EOVERFLOW;
42609 return -EOVERFLOW;
42610 }
42611+
42612+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42613+ return 0;
42614+
42615 buf->result++;
42616 dirent = buf->dirent;
42617 if (!access_ok(VERIFY_WRITE, dirent,
42618@@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
42619
42620 buf.result = 0;
42621 buf.dirent = dirent;
42622+ buf.file = file;
42623
42624 error = vfs_readdir(file, compat_fillonedir, &buf);
42625 if (buf.result)
42626@@ -901,6 +907,7 @@ struct compat_linux_dirent {
42627 struct compat_getdents_callback {
42628 struct compat_linux_dirent __user *current_dir;
42629 struct compat_linux_dirent __user *previous;
42630+ struct file * file;
42631 int count;
42632 int error;
42633 };
42634@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
42635 buf->error = -EOVERFLOW;
42636 return -EOVERFLOW;
42637 }
42638+
42639+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42640+ return 0;
42641+
42642 dirent = buf->previous;
42643 if (dirent) {
42644 if (__put_user(offset, &dirent->d_off))
42645@@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
42646 buf.previous = NULL;
42647 buf.count = count;
42648 buf.error = 0;
42649+ buf.file = file;
42650
42651 error = vfs_readdir(file, compat_filldir, &buf);
42652 if (error >= 0)
42653@@ -990,6 +1002,7 @@ out:
42654 struct compat_getdents_callback64 {
42655 struct linux_dirent64 __user *current_dir;
42656 struct linux_dirent64 __user *previous;
42657+ struct file * file;
42658 int count;
42659 int error;
42660 };
42661@@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
42662 buf->error = -EINVAL; /* only used if we fail.. */
42663 if (reclen > buf->count)
42664 return -EINVAL;
42665+
42666+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42667+ return 0;
42668+
42669 dirent = buf->previous;
42670
42671 if (dirent) {
42672@@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
42673 buf.previous = NULL;
42674 buf.count = count;
42675 buf.error = 0;
42676+ buf.file = file;
42677
42678 error = vfs_readdir(file, compat_filldir64, &buf);
42679 if (error >= 0)
42680 error = buf.error;
42681 lastdirent = buf.previous;
42682 if (lastdirent) {
42683- typeof(lastdirent->d_off) d_off = file->f_pos;
42684+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42685 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42686 error = -EFAULT;
42687 else
42688diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
42689index 112e45a..b59845b 100644
42690--- a/fs/compat_binfmt_elf.c
42691+++ b/fs/compat_binfmt_elf.c
42692@@ -30,11 +30,13 @@
42693 #undef elf_phdr
42694 #undef elf_shdr
42695 #undef elf_note
42696+#undef elf_dyn
42697 #undef elf_addr_t
42698 #define elfhdr elf32_hdr
42699 #define elf_phdr elf32_phdr
42700 #define elf_shdr elf32_shdr
42701 #define elf_note elf32_note
42702+#define elf_dyn Elf32_Dyn
42703 #define elf_addr_t Elf32_Addr
42704
42705 /*
42706diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
42707index a26bea1..ae23e72 100644
42708--- a/fs/compat_ioctl.c
42709+++ b/fs/compat_ioctl.c
42710@@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
42711
42712 err = get_user(palp, &up->palette);
42713 err |= get_user(length, &up->length);
42714+ if (err)
42715+ return -EFAULT;
42716
42717 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42718 err = put_user(compat_ptr(palp), &up_native->palette);
42719@@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
42720 return -EFAULT;
42721 if (__get_user(udata, &ss32->iomem_base))
42722 return -EFAULT;
42723- ss.iomem_base = compat_ptr(udata);
42724+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42725 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42726 __get_user(ss.port_high, &ss32->port_high))
42727 return -EFAULT;
42728@@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
42729 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42730 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42731 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42732- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42733+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42734 return -EFAULT;
42735
42736 return ioctl_preallocate(file, p);
42737@@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
42738 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42739 {
42740 unsigned int a, b;
42741- a = *(unsigned int *)p;
42742- b = *(unsigned int *)q;
42743+ a = *(const unsigned int *)p;
42744+ b = *(const unsigned int *)q;
42745 if (a > b)
42746 return 1;
42747 if (a < b)
42748diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
42749index 5ddd7eb..c18bf04 100644
42750--- a/fs/configfs/dir.c
42751+++ b/fs/configfs/dir.c
42752@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42753 }
42754 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42755 struct configfs_dirent *next;
42756- const char * name;
42757+ const unsigned char * name;
42758+ char d_name[sizeof(next->s_dentry->d_iname)];
42759 int len;
42760 struct inode *inode = NULL;
42761
42762@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42763 continue;
42764
42765 name = configfs_get_name(next);
42766- len = strlen(name);
42767+ if (next->s_dentry && name == next->s_dentry->d_iname) {
42768+ len = next->s_dentry->d_name.len;
42769+ memcpy(d_name, name, len);
42770+ name = d_name;
42771+ } else
42772+ len = strlen(name);
42773
42774 /*
42775 * We'll have a dentry and an inode for
42776diff --git a/fs/dcache.c b/fs/dcache.c
42777index bcbdb33..88da6e9 100644
42778--- a/fs/dcache.c
42779+++ b/fs/dcache.c
42780@@ -3066,7 +3066,7 @@ void __init vfs_caches_init(unsigned long mempages)
42781 mempages -= reserve;
42782
42783 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
42784- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
42785+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
42786
42787 dcache_init();
42788 inode_init();
42789diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
42790index 956d5dd..e755e04 100644
42791--- a/fs/debugfs/inode.c
42792+++ b/fs/debugfs/inode.c
42793@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
42794 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
42795 {
42796 return debugfs_create_file(name,
42797+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
42798+ S_IFDIR | S_IRWXU,
42799+#else
42800 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
42801+#endif
42802 parent, NULL, NULL);
42803 }
42804 EXPORT_SYMBOL_GPL(debugfs_create_dir);
42805diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
42806index ab35b11..b30af66 100644
42807--- a/fs/ecryptfs/inode.c
42808+++ b/fs/ecryptfs/inode.c
42809@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
42810 old_fs = get_fs();
42811 set_fs(get_ds());
42812 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
42813- (char __user *)lower_buf,
42814+ (char __force_user *)lower_buf,
42815 lower_bufsiz);
42816 set_fs(old_fs);
42817 if (rc < 0)
42818@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42819 }
42820 old_fs = get_fs();
42821 set_fs(get_ds());
42822- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
42823+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
42824 set_fs(old_fs);
42825 if (rc < 0) {
42826 kfree(buf);
42827@@ -733,7 +733,7 @@ out:
42828 static void
42829 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
42830 {
42831- char *buf = nd_get_link(nd);
42832+ const char *buf = nd_get_link(nd);
42833 if (!IS_ERR(buf)) {
42834 /* Free the char* */
42835 kfree(buf);
42836diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
42837index 3a06f40..f7af544 100644
42838--- a/fs/ecryptfs/miscdev.c
42839+++ b/fs/ecryptfs/miscdev.c
42840@@ -345,7 +345,7 @@ check_list:
42841 goto out_unlock_msg_ctx;
42842 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
42843 if (msg_ctx->msg) {
42844- if (copy_to_user(&buf[i], packet_length, packet_length_size))
42845+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
42846 goto out_unlock_msg_ctx;
42847 i += packet_length_size;
42848 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
42849diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
42850index b2a34a1..162fa69 100644
42851--- a/fs/ecryptfs/read_write.c
42852+++ b/fs/ecryptfs/read_write.c
42853@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
42854 return -EIO;
42855 fs_save = get_fs();
42856 set_fs(get_ds());
42857- rc = vfs_write(lower_file, data, size, &offset);
42858+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
42859 set_fs(fs_save);
42860 mark_inode_dirty_sync(ecryptfs_inode);
42861 return rc;
42862@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
42863 return -EIO;
42864 fs_save = get_fs();
42865 set_fs(get_ds());
42866- rc = vfs_read(lower_file, data, size, &offset);
42867+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
42868 set_fs(fs_save);
42869 return rc;
42870 }
42871diff --git a/fs/exec.c b/fs/exec.c
42872index 153dee1..8ee97ba 100644
42873--- a/fs/exec.c
42874+++ b/fs/exec.c
42875@@ -55,6 +55,13 @@
42876 #include <linux/pipe_fs_i.h>
42877 #include <linux/oom.h>
42878 #include <linux/compat.h>
42879+#include <linux/random.h>
42880+#include <linux/seq_file.h>
42881+
42882+#ifdef CONFIG_PAX_REFCOUNT
42883+#include <linux/kallsyms.h>
42884+#include <linux/kdebug.h>
42885+#endif
42886
42887 #include <asm/uaccess.h>
42888 #include <asm/mmu_context.h>
42889@@ -63,6 +70,15 @@
42890 #include <trace/events/task.h>
42891 #include "internal.h"
42892
42893+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
42894+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
42895+#endif
42896+
42897+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
42898+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
42899+EXPORT_SYMBOL(pax_set_initial_flags_func);
42900+#endif
42901+
42902 int core_uses_pid;
42903 char core_pattern[CORENAME_MAX_SIZE] = "core";
42904 unsigned int core_pipe_limit;
42905@@ -72,7 +88,7 @@ struct core_name {
42906 char *corename;
42907 int used, size;
42908 };
42909-static atomic_t call_count = ATOMIC_INIT(1);
42910+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
42911
42912 /* The maximal length of core_pattern is also specified in sysctl.c */
42913
42914@@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42915 int write)
42916 {
42917 struct page *page;
42918- int ret;
42919
42920-#ifdef CONFIG_STACK_GROWSUP
42921- if (write) {
42922- ret = expand_downwards(bprm->vma, pos);
42923- if (ret < 0)
42924- return NULL;
42925- }
42926-#endif
42927- ret = get_user_pages(current, bprm->mm, pos,
42928- 1, write, 1, &page, NULL);
42929- if (ret <= 0)
42930+ if (0 > expand_downwards(bprm->vma, pos))
42931+ return NULL;
42932+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
42933 return NULL;
42934
42935 if (write) {
42936@@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42937 if (size <= ARG_MAX)
42938 return page;
42939
42940+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42941+ // only allow 512KB for argv+env on suid/sgid binaries
42942+ // to prevent easy ASLR exhaustion
42943+ if (((bprm->cred->euid != current_euid()) ||
42944+ (bprm->cred->egid != current_egid())) &&
42945+ (size > (512 * 1024))) {
42946+ put_page(page);
42947+ return NULL;
42948+ }
42949+#endif
42950+
42951 /*
42952 * Limit to 1/4-th the stack size for the argv+env strings.
42953 * This ensures that:
42954@@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
42955 vma->vm_end = STACK_TOP_MAX;
42956 vma->vm_start = vma->vm_end - PAGE_SIZE;
42957 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
42958+
42959+#ifdef CONFIG_PAX_SEGMEXEC
42960+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
42961+#endif
42962+
42963 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
42964 INIT_LIST_HEAD(&vma->anon_vma_chain);
42965
42966@@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
42967 mm->stack_vm = mm->total_vm = 1;
42968 up_write(&mm->mmap_sem);
42969 bprm->p = vma->vm_end - sizeof(void *);
42970+
42971+#ifdef CONFIG_PAX_RANDUSTACK
42972+ if (randomize_va_space)
42973+ bprm->p ^= random32() & ~PAGE_MASK;
42974+#endif
42975+
42976 return 0;
42977 err:
42978 up_write(&mm->mmap_sem);
42979@@ -398,19 +428,7 @@ err:
42980 return err;
42981 }
42982
42983-struct user_arg_ptr {
42984-#ifdef CONFIG_COMPAT
42985- bool is_compat;
42986-#endif
42987- union {
42988- const char __user *const __user *native;
42989-#ifdef CONFIG_COMPAT
42990- compat_uptr_t __user *compat;
42991-#endif
42992- } ptr;
42993-};
42994-
42995-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
42996+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
42997 {
42998 const char __user *native;
42999
43000@@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43001 compat_uptr_t compat;
43002
43003 if (get_user(compat, argv.ptr.compat + nr))
43004- return ERR_PTR(-EFAULT);
43005+ return (const char __force_user *)ERR_PTR(-EFAULT);
43006
43007 return compat_ptr(compat);
43008 }
43009 #endif
43010
43011 if (get_user(native, argv.ptr.native + nr))
43012- return ERR_PTR(-EFAULT);
43013+ return (const char __force_user *)ERR_PTR(-EFAULT);
43014
43015 return native;
43016 }
43017@@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
43018 if (!p)
43019 break;
43020
43021- if (IS_ERR(p))
43022+ if (IS_ERR((const char __force_kernel *)p))
43023 return -EFAULT;
43024
43025 if (i++ >= max)
43026@@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43027
43028 ret = -EFAULT;
43029 str = get_user_arg_ptr(argv, argc);
43030- if (IS_ERR(str))
43031+ if (IS_ERR((const char __force_kernel *)str))
43032 goto out;
43033
43034 len = strnlen_user(str, MAX_ARG_STRLEN);
43035@@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43036 int r;
43037 mm_segment_t oldfs = get_fs();
43038 struct user_arg_ptr argv = {
43039- .ptr.native = (const char __user *const __user *)__argv,
43040+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43041 };
43042
43043 set_fs(KERNEL_DS);
43044@@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43045 unsigned long new_end = old_end - shift;
43046 struct mmu_gather tlb;
43047
43048- BUG_ON(new_start > new_end);
43049+ if (new_start >= new_end || new_start < mmap_min_addr)
43050+ return -ENOMEM;
43051
43052 /*
43053 * ensure there are no vmas between where we want to go
43054@@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43055 if (vma != find_vma(mm, new_start))
43056 return -EFAULT;
43057
43058+#ifdef CONFIG_PAX_SEGMEXEC
43059+ BUG_ON(pax_find_mirror_vma(vma));
43060+#endif
43061+
43062 /*
43063 * cover the whole range: [new_start, old_end)
43064 */
43065@@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43066 stack_top = arch_align_stack(stack_top);
43067 stack_top = PAGE_ALIGN(stack_top);
43068
43069- if (unlikely(stack_top < mmap_min_addr) ||
43070- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43071- return -ENOMEM;
43072-
43073 stack_shift = vma->vm_end - stack_top;
43074
43075 bprm->p -= stack_shift;
43076@@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43077 bprm->exec -= stack_shift;
43078
43079 down_write(&mm->mmap_sem);
43080+
43081+ /* Move stack pages down in memory. */
43082+ if (stack_shift) {
43083+ ret = shift_arg_pages(vma, stack_shift);
43084+ if (ret)
43085+ goto out_unlock;
43086+ }
43087+
43088 vm_flags = VM_STACK_FLAGS;
43089
43090+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43091+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43092+ vm_flags &= ~VM_EXEC;
43093+
43094+#ifdef CONFIG_PAX_MPROTECT
43095+ if (mm->pax_flags & MF_PAX_MPROTECT)
43096+ vm_flags &= ~VM_MAYEXEC;
43097+#endif
43098+
43099+ }
43100+#endif
43101+
43102 /*
43103 * Adjust stack execute permissions; explicitly enable for
43104 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43105@@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43106 goto out_unlock;
43107 BUG_ON(prev != vma);
43108
43109- /* Move stack pages down in memory. */
43110- if (stack_shift) {
43111- ret = shift_arg_pages(vma, stack_shift);
43112- if (ret)
43113- goto out_unlock;
43114- }
43115-
43116 /* mprotect_fixup is overkill to remove the temporary stack flags */
43117 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43118
43119@@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
43120 old_fs = get_fs();
43121 set_fs(get_ds());
43122 /* The cast to a user pointer is valid due to the set_fs() */
43123- result = vfs_read(file, (void __user *)addr, count, &pos);
43124+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43125 set_fs(old_fs);
43126 return result;
43127 }
43128@@ -1252,7 +1284,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43129 }
43130 rcu_read_unlock();
43131
43132- if (p->fs->users > n_fs) {
43133+ if (atomic_read(&p->fs->users) > n_fs) {
43134 bprm->unsafe |= LSM_UNSAFE_SHARE;
43135 } else {
43136 res = -EAGAIN;
43137@@ -1447,6 +1479,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43138
43139 EXPORT_SYMBOL(search_binary_handler);
43140
43141+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43142+static DEFINE_PER_CPU(u64, exec_counter);
43143+static int __init init_exec_counters(void)
43144+{
43145+ unsigned int cpu;
43146+
43147+ for_each_possible_cpu(cpu) {
43148+ per_cpu(exec_counter, cpu) = (u64)cpu;
43149+ }
43150+
43151+ return 0;
43152+}
43153+early_initcall(init_exec_counters);
43154+static inline void increment_exec_counter(void)
43155+{
43156+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
43157+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43158+}
43159+#else
43160+static inline void increment_exec_counter(void) {}
43161+#endif
43162+
43163 /*
43164 * sys_execve() executes a new program.
43165 */
43166@@ -1455,6 +1509,11 @@ static int do_execve_common(const char *filename,
43167 struct user_arg_ptr envp,
43168 struct pt_regs *regs)
43169 {
43170+#ifdef CONFIG_GRKERNSEC
43171+ struct file *old_exec_file;
43172+ struct acl_subject_label *old_acl;
43173+ struct rlimit old_rlim[RLIM_NLIMITS];
43174+#endif
43175 struct linux_binprm *bprm;
43176 struct file *file;
43177 struct files_struct *displaced;
43178@@ -1462,6 +1521,8 @@ static int do_execve_common(const char *filename,
43179 int retval;
43180 const struct cred *cred = current_cred();
43181
43182+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43183+
43184 /*
43185 * We move the actual failure in case of RLIMIT_NPROC excess from
43186 * set*uid() to execve() because too many poorly written programs
43187@@ -1502,12 +1563,27 @@ static int do_execve_common(const char *filename,
43188 if (IS_ERR(file))
43189 goto out_unmark;
43190
43191+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
43192+ retval = -EPERM;
43193+ goto out_file;
43194+ }
43195+
43196 sched_exec();
43197
43198 bprm->file = file;
43199 bprm->filename = filename;
43200 bprm->interp = filename;
43201
43202+ if (gr_process_user_ban()) {
43203+ retval = -EPERM;
43204+ goto out_file;
43205+ }
43206+
43207+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43208+ retval = -EACCES;
43209+ goto out_file;
43210+ }
43211+
43212 retval = bprm_mm_init(bprm);
43213 if (retval)
43214 goto out_file;
43215@@ -1524,24 +1600,65 @@ static int do_execve_common(const char *filename,
43216 if (retval < 0)
43217 goto out;
43218
43219+#ifdef CONFIG_GRKERNSEC
43220+ old_acl = current->acl;
43221+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43222+ old_exec_file = current->exec_file;
43223+ get_file(file);
43224+ current->exec_file = file;
43225+#endif
43226+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43227+ /* limit suid stack to 8MB
43228+ we saved the old limits above and will restore them if this exec fails
43229+ */
43230+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43231+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43232+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43233+#endif
43234+
43235+ if (!gr_tpe_allow(file)) {
43236+ retval = -EACCES;
43237+ goto out_fail;
43238+ }
43239+
43240+ if (gr_check_crash_exec(file)) {
43241+ retval = -EACCES;
43242+ goto out_fail;
43243+ }
43244+
43245+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43246+ bprm->unsafe);
43247+ if (retval < 0)
43248+ goto out_fail;
43249+
43250 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43251 if (retval < 0)
43252- goto out;
43253+ goto out_fail;
43254
43255 bprm->exec = bprm->p;
43256 retval = copy_strings(bprm->envc, envp, bprm);
43257 if (retval < 0)
43258- goto out;
43259+ goto out_fail;
43260
43261 retval = copy_strings(bprm->argc, argv, bprm);
43262 if (retval < 0)
43263- goto out;
43264+ goto out_fail;
43265+
43266+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43267+
43268+ gr_handle_exec_args(bprm, argv);
43269
43270 retval = search_binary_handler(bprm,regs);
43271 if (retval < 0)
43272- goto out;
43273+ goto out_fail;
43274+#ifdef CONFIG_GRKERNSEC
43275+ if (old_exec_file)
43276+ fput(old_exec_file);
43277+#endif
43278
43279 /* execve succeeded */
43280+
43281+ increment_exec_counter();
43282 current->fs->in_exec = 0;
43283 current->in_execve = 0;
43284 acct_update_integrals(current);
43285@@ -1550,6 +1667,14 @@ static int do_execve_common(const char *filename,
43286 put_files_struct(displaced);
43287 return retval;
43288
43289+out_fail:
43290+#ifdef CONFIG_GRKERNSEC
43291+ current->acl = old_acl;
43292+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43293+ fput(current->exec_file);
43294+ current->exec_file = old_exec_file;
43295+#endif
43296+
43297 out:
43298 if (bprm->mm) {
43299 acct_arg_size(bprm, 0);
43300@@ -1623,7 +1748,7 @@ static int expand_corename(struct core_name *cn)
43301 {
43302 char *old_corename = cn->corename;
43303
43304- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43305+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43306 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43307
43308 if (!cn->corename) {
43309@@ -1720,7 +1845,7 @@ static int format_corename(struct core_name *cn, long signr)
43310 int pid_in_pattern = 0;
43311 int err = 0;
43312
43313- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43314+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43315 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43316 cn->used = 0;
43317
43318@@ -1817,6 +1942,218 @@ out:
43319 return ispipe;
43320 }
43321
43322+int pax_check_flags(unsigned long *flags)
43323+{
43324+ int retval = 0;
43325+
43326+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43327+ if (*flags & MF_PAX_SEGMEXEC)
43328+ {
43329+ *flags &= ~MF_PAX_SEGMEXEC;
43330+ retval = -EINVAL;
43331+ }
43332+#endif
43333+
43334+ if ((*flags & MF_PAX_PAGEEXEC)
43335+
43336+#ifdef CONFIG_PAX_PAGEEXEC
43337+ && (*flags & MF_PAX_SEGMEXEC)
43338+#endif
43339+
43340+ )
43341+ {
43342+ *flags &= ~MF_PAX_PAGEEXEC;
43343+ retval = -EINVAL;
43344+ }
43345+
43346+ if ((*flags & MF_PAX_MPROTECT)
43347+
43348+#ifdef CONFIG_PAX_MPROTECT
43349+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43350+#endif
43351+
43352+ )
43353+ {
43354+ *flags &= ~MF_PAX_MPROTECT;
43355+ retval = -EINVAL;
43356+ }
43357+
43358+ if ((*flags & MF_PAX_EMUTRAMP)
43359+
43360+#ifdef CONFIG_PAX_EMUTRAMP
43361+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43362+#endif
43363+
43364+ )
43365+ {
43366+ *flags &= ~MF_PAX_EMUTRAMP;
43367+ retval = -EINVAL;
43368+ }
43369+
43370+ return retval;
43371+}
43372+
43373+EXPORT_SYMBOL(pax_check_flags);
43374+
43375+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43376+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43377+{
43378+ struct task_struct *tsk = current;
43379+ struct mm_struct *mm = current->mm;
43380+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43381+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43382+ char *path_exec = NULL;
43383+ char *path_fault = NULL;
43384+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
43385+
43386+ if (buffer_exec && buffer_fault) {
43387+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43388+
43389+ down_read(&mm->mmap_sem);
43390+ vma = mm->mmap;
43391+ while (vma && (!vma_exec || !vma_fault)) {
43392+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43393+ vma_exec = vma;
43394+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43395+ vma_fault = vma;
43396+ vma = vma->vm_next;
43397+ }
43398+ if (vma_exec) {
43399+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43400+ if (IS_ERR(path_exec))
43401+ path_exec = "<path too long>";
43402+ else {
43403+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43404+ if (path_exec) {
43405+ *path_exec = 0;
43406+ path_exec = buffer_exec;
43407+ } else
43408+ path_exec = "<path too long>";
43409+ }
43410+ }
43411+ if (vma_fault) {
43412+ start = vma_fault->vm_start;
43413+ end = vma_fault->vm_end;
43414+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43415+ if (vma_fault->vm_file) {
43416+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43417+ if (IS_ERR(path_fault))
43418+ path_fault = "<path too long>";
43419+ else {
43420+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43421+ if (path_fault) {
43422+ *path_fault = 0;
43423+ path_fault = buffer_fault;
43424+ } else
43425+ path_fault = "<path too long>";
43426+ }
43427+ } else
43428+ path_fault = "<anonymous mapping>";
43429+ }
43430+ up_read(&mm->mmap_sem);
43431+ }
43432+ if (tsk->signal->curr_ip)
43433+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43434+ else
43435+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43436+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43437+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43438+ task_uid(tsk), task_euid(tsk), pc, sp);
43439+ free_page((unsigned long)buffer_exec);
43440+ free_page((unsigned long)buffer_fault);
43441+ pax_report_insns(regs, pc, sp);
43442+ do_coredump(SIGKILL, SIGKILL, regs);
43443+}
43444+#endif
43445+
43446+#ifdef CONFIG_PAX_REFCOUNT
43447+void pax_report_refcount_overflow(struct pt_regs *regs)
43448+{
43449+ if (current->signal->curr_ip)
43450+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43451+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43452+ else
43453+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43454+ current->comm, task_pid_nr(current), current_uid(), current_euid());
43455+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43456+ show_regs(regs);
43457+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43458+}
43459+#endif
43460+
43461+#ifdef CONFIG_PAX_USERCOPY
43462+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43463+int object_is_on_stack(const void *obj, unsigned long len)
43464+{
43465+ const void * const stack = task_stack_page(current);
43466+ const void * const stackend = stack + THREAD_SIZE;
43467+
43468+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43469+ const void *frame = NULL;
43470+ const void *oldframe;
43471+#endif
43472+
43473+ if (obj + len < obj)
43474+ return -1;
43475+
43476+ if (obj + len <= stack || stackend <= obj)
43477+ return 0;
43478+
43479+ if (obj < stack || stackend < obj + len)
43480+ return -1;
43481+
43482+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43483+ oldframe = __builtin_frame_address(1);
43484+ if (oldframe)
43485+ frame = __builtin_frame_address(2);
43486+ /*
43487+ low ----------------------------------------------> high
43488+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
43489+ ^----------------^
43490+ allow copies only within here
43491+ */
43492+ while (stack <= frame && frame < stackend) {
43493+ /* if obj + len extends past the last frame, this
43494+ check won't pass and the next frame will be 0,
43495+ causing us to bail out and correctly report
43496+ the copy as invalid
43497+ */
43498+ if (obj + len <= frame)
43499+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43500+ oldframe = frame;
43501+ frame = *(const void * const *)frame;
43502+ }
43503+ return -1;
43504+#else
43505+ return 1;
43506+#endif
43507+}
43508+
43509+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43510+{
43511+ if (current->signal->curr_ip)
43512+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43513+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43514+ else
43515+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43516+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43517+ dump_stack();
43518+ gr_handle_kernel_exploit();
43519+ do_group_exit(SIGKILL);
43520+}
43521+#endif
43522+
43523+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43524+void pax_track_stack(void)
43525+{
43526+ unsigned long sp = (unsigned long)&sp;
43527+ if (sp < current_thread_info()->lowest_stack &&
43528+ sp > (unsigned long)task_stack_page(current))
43529+ current_thread_info()->lowest_stack = sp;
43530+}
43531+EXPORT_SYMBOL(pax_track_stack);
43532+#endif
43533+
43534 static int zap_process(struct task_struct *start, int exit_code)
43535 {
43536 struct task_struct *t;
43537@@ -2014,17 +2351,17 @@ static void wait_for_dump_helpers(struct file *file)
43538 pipe = file->f_path.dentry->d_inode->i_pipe;
43539
43540 pipe_lock(pipe);
43541- pipe->readers++;
43542- pipe->writers--;
43543+ atomic_inc(&pipe->readers);
43544+ atomic_dec(&pipe->writers);
43545
43546- while ((pipe->readers > 1) && (!signal_pending(current))) {
43547+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43548 wake_up_interruptible_sync(&pipe->wait);
43549 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43550 pipe_wait(pipe);
43551 }
43552
43553- pipe->readers--;
43554- pipe->writers++;
43555+ atomic_dec(&pipe->readers);
43556+ atomic_inc(&pipe->writers);
43557 pipe_unlock(pipe);
43558
43559 }
43560@@ -2085,7 +2422,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43561 int retval = 0;
43562 int flag = 0;
43563 int ispipe;
43564- static atomic_t core_dump_count = ATOMIC_INIT(0);
43565+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43566 struct coredump_params cprm = {
43567 .signr = signr,
43568 .regs = regs,
43569@@ -2100,6 +2437,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43570
43571 audit_core_dumps(signr);
43572
43573+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43574+ gr_handle_brute_attach(current, cprm.mm_flags);
43575+
43576 binfmt = mm->binfmt;
43577 if (!binfmt || !binfmt->core_dump)
43578 goto fail;
43579@@ -2167,7 +2507,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43580 }
43581 cprm.limit = RLIM_INFINITY;
43582
43583- dump_count = atomic_inc_return(&core_dump_count);
43584+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
43585 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43586 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43587 task_tgid_vnr(current), current->comm);
43588@@ -2194,6 +2534,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43589 } else {
43590 struct inode *inode;
43591
43592+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43593+
43594 if (cprm.limit < binfmt->min_coredump)
43595 goto fail_unlock;
43596
43597@@ -2237,7 +2579,7 @@ close_fail:
43598 filp_close(cprm.file, NULL);
43599 fail_dropcount:
43600 if (ispipe)
43601- atomic_dec(&core_dump_count);
43602+ atomic_dec_unchecked(&core_dump_count);
43603 fail_unlock:
43604 kfree(cn.corename);
43605 fail_corename:
43606@@ -2256,7 +2598,7 @@ fail:
43607 */
43608 int dump_write(struct file *file, const void *addr, int nr)
43609 {
43610- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
43611+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
43612 }
43613 EXPORT_SYMBOL(dump_write);
43614
43615diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
43616index a8cbe1b..fed04cb 100644
43617--- a/fs/ext2/balloc.c
43618+++ b/fs/ext2/balloc.c
43619@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
43620
43621 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43622 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43623- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43624+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
43625 sbi->s_resuid != current_fsuid() &&
43626 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43627 return 0;
43628diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
43629index a203892..4e64db5 100644
43630--- a/fs/ext3/balloc.c
43631+++ b/fs/ext3/balloc.c
43632@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
43633
43634 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43635 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43636- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43637+ if (free_blocks < root_blocks + 1 &&
43638 !use_reservation && sbi->s_resuid != current_fsuid() &&
43639- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43640+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
43641+ !capable_nolog(CAP_SYS_RESOURCE)) {
43642 return 0;
43643 }
43644 return 1;
43645diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
43646index f9e2cd8..bfdc476 100644
43647--- a/fs/ext4/balloc.c
43648+++ b/fs/ext4/balloc.c
43649@@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
43650 /* Hm, nope. Are (enough) root reserved clusters available? */
43651 if (sbi->s_resuid == current_fsuid() ||
43652 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
43653- capable(CAP_SYS_RESOURCE) ||
43654- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
43655+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
43656+ capable_nolog(CAP_SYS_RESOURCE)) {
43657
43658 if (free_clusters >= (nclusters + dirty_clusters))
43659 return 1;
43660diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
43661index 513004f..2591a6b 100644
43662--- a/fs/ext4/ext4.h
43663+++ b/fs/ext4/ext4.h
43664@@ -1218,19 +1218,19 @@ struct ext4_sb_info {
43665 unsigned long s_mb_last_start;
43666
43667 /* stats for buddy allocator */
43668- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
43669- atomic_t s_bal_success; /* we found long enough chunks */
43670- atomic_t s_bal_allocated; /* in blocks */
43671- atomic_t s_bal_ex_scanned; /* total extents scanned */
43672- atomic_t s_bal_goals; /* goal hits */
43673- atomic_t s_bal_breaks; /* too long searches */
43674- atomic_t s_bal_2orders; /* 2^order hits */
43675+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
43676+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
43677+ atomic_unchecked_t s_bal_allocated; /* in blocks */
43678+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
43679+ atomic_unchecked_t s_bal_goals; /* goal hits */
43680+ atomic_unchecked_t s_bal_breaks; /* too long searches */
43681+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
43682 spinlock_t s_bal_lock;
43683 unsigned long s_mb_buddies_generated;
43684 unsigned long long s_mb_generation_time;
43685- atomic_t s_mb_lost_chunks;
43686- atomic_t s_mb_preallocated;
43687- atomic_t s_mb_discarded;
43688+ atomic_unchecked_t s_mb_lost_chunks;
43689+ atomic_unchecked_t s_mb_preallocated;
43690+ atomic_unchecked_t s_mb_discarded;
43691 atomic_t s_lock_busy;
43692
43693 /* locality groups */
43694diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
43695index cb990b2..4820141 100644
43696--- a/fs/ext4/mballoc.c
43697+++ b/fs/ext4/mballoc.c
43698@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
43699 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43700
43701 if (EXT4_SB(sb)->s_mb_stats)
43702- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43703+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43704
43705 break;
43706 }
43707@@ -2088,7 +2088,7 @@ repeat:
43708 ac->ac_status = AC_STATUS_CONTINUE;
43709 ac->ac_flags |= EXT4_MB_HINT_FIRST;
43710 cr = 3;
43711- atomic_inc(&sbi->s_mb_lost_chunks);
43712+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
43713 goto repeat;
43714 }
43715 }
43716@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
43717 if (sbi->s_mb_stats) {
43718 ext4_msg(sb, KERN_INFO,
43719 "mballoc: %u blocks %u reqs (%u success)",
43720- atomic_read(&sbi->s_bal_allocated),
43721- atomic_read(&sbi->s_bal_reqs),
43722- atomic_read(&sbi->s_bal_success));
43723+ atomic_read_unchecked(&sbi->s_bal_allocated),
43724+ atomic_read_unchecked(&sbi->s_bal_reqs),
43725+ atomic_read_unchecked(&sbi->s_bal_success));
43726 ext4_msg(sb, KERN_INFO,
43727 "mballoc: %u extents scanned, %u goal hits, "
43728 "%u 2^N hits, %u breaks, %u lost",
43729- atomic_read(&sbi->s_bal_ex_scanned),
43730- atomic_read(&sbi->s_bal_goals),
43731- atomic_read(&sbi->s_bal_2orders),
43732- atomic_read(&sbi->s_bal_breaks),
43733- atomic_read(&sbi->s_mb_lost_chunks));
43734+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
43735+ atomic_read_unchecked(&sbi->s_bal_goals),
43736+ atomic_read_unchecked(&sbi->s_bal_2orders),
43737+ atomic_read_unchecked(&sbi->s_bal_breaks),
43738+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
43739 ext4_msg(sb, KERN_INFO,
43740 "mballoc: %lu generated and it took %Lu",
43741 sbi->s_mb_buddies_generated,
43742 sbi->s_mb_generation_time);
43743 ext4_msg(sb, KERN_INFO,
43744 "mballoc: %u preallocated, %u discarded",
43745- atomic_read(&sbi->s_mb_preallocated),
43746- atomic_read(&sbi->s_mb_discarded));
43747+ atomic_read_unchecked(&sbi->s_mb_preallocated),
43748+ atomic_read_unchecked(&sbi->s_mb_discarded));
43749 }
43750
43751 free_percpu(sbi->s_locality_groups);
43752@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
43753 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
43754
43755 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
43756- atomic_inc(&sbi->s_bal_reqs);
43757- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43758+ atomic_inc_unchecked(&sbi->s_bal_reqs);
43759+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43760 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
43761- atomic_inc(&sbi->s_bal_success);
43762- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
43763+ atomic_inc_unchecked(&sbi->s_bal_success);
43764+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
43765 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
43766 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
43767- atomic_inc(&sbi->s_bal_goals);
43768+ atomic_inc_unchecked(&sbi->s_bal_goals);
43769 if (ac->ac_found > sbi->s_mb_max_to_scan)
43770- atomic_inc(&sbi->s_bal_breaks);
43771+ atomic_inc_unchecked(&sbi->s_bal_breaks);
43772 }
43773
43774 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
43775@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
43776 trace_ext4_mb_new_inode_pa(ac, pa);
43777
43778 ext4_mb_use_inode_pa(ac, pa);
43779- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
43780+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
43781
43782 ei = EXT4_I(ac->ac_inode);
43783 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43784@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
43785 trace_ext4_mb_new_group_pa(ac, pa);
43786
43787 ext4_mb_use_group_pa(ac, pa);
43788- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43789+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43790
43791 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43792 lg = ac->ac_lg;
43793@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
43794 * from the bitmap and continue.
43795 */
43796 }
43797- atomic_add(free, &sbi->s_mb_discarded);
43798+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
43799
43800 return err;
43801 }
43802@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
43803 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
43804 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
43805 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
43806- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43807+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43808 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
43809
43810 return 0;
43811diff --git a/fs/fcntl.c b/fs/fcntl.c
43812index 22764c7..86372c9 100644
43813--- a/fs/fcntl.c
43814+++ b/fs/fcntl.c
43815@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
43816 if (err)
43817 return err;
43818
43819+ if (gr_handle_chroot_fowner(pid, type))
43820+ return -ENOENT;
43821+ if (gr_check_protected_task_fowner(pid, type))
43822+ return -EACCES;
43823+
43824 f_modown(filp, pid, type, force);
43825 return 0;
43826 }
43827@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
43828
43829 static int f_setown_ex(struct file *filp, unsigned long arg)
43830 {
43831- struct f_owner_ex * __user owner_p = (void * __user)arg;
43832+ struct f_owner_ex __user *owner_p = (void __user *)arg;
43833 struct f_owner_ex owner;
43834 struct pid *pid;
43835 int type;
43836@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
43837
43838 static int f_getown_ex(struct file *filp, unsigned long arg)
43839 {
43840- struct f_owner_ex * __user owner_p = (void * __user)arg;
43841+ struct f_owner_ex __user *owner_p = (void __user *)arg;
43842 struct f_owner_ex owner;
43843 int ret = 0;
43844
43845@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
43846 switch (cmd) {
43847 case F_DUPFD:
43848 case F_DUPFD_CLOEXEC:
43849+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
43850 if (arg >= rlimit(RLIMIT_NOFILE))
43851 break;
43852 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
43853diff --git a/fs/fifo.c b/fs/fifo.c
43854index b1a524d..4ee270e 100644
43855--- a/fs/fifo.c
43856+++ b/fs/fifo.c
43857@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
43858 */
43859 filp->f_op = &read_pipefifo_fops;
43860 pipe->r_counter++;
43861- if (pipe->readers++ == 0)
43862+ if (atomic_inc_return(&pipe->readers) == 1)
43863 wake_up_partner(inode);
43864
43865- if (!pipe->writers) {
43866+ if (!atomic_read(&pipe->writers)) {
43867 if ((filp->f_flags & O_NONBLOCK)) {
43868 /* suppress POLLHUP until we have
43869 * seen a writer */
43870@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
43871 * errno=ENXIO when there is no process reading the FIFO.
43872 */
43873 ret = -ENXIO;
43874- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
43875+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
43876 goto err;
43877
43878 filp->f_op = &write_pipefifo_fops;
43879 pipe->w_counter++;
43880- if (!pipe->writers++)
43881+ if (atomic_inc_return(&pipe->writers) == 1)
43882 wake_up_partner(inode);
43883
43884- if (!pipe->readers) {
43885+ if (!atomic_read(&pipe->readers)) {
43886 wait_for_partner(inode, &pipe->r_counter);
43887 if (signal_pending(current))
43888 goto err_wr;
43889@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
43890 */
43891 filp->f_op = &rdwr_pipefifo_fops;
43892
43893- pipe->readers++;
43894- pipe->writers++;
43895+ atomic_inc(&pipe->readers);
43896+ atomic_inc(&pipe->writers);
43897 pipe->r_counter++;
43898 pipe->w_counter++;
43899- if (pipe->readers == 1 || pipe->writers == 1)
43900+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
43901 wake_up_partner(inode);
43902 break;
43903
43904@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
43905 return 0;
43906
43907 err_rd:
43908- if (!--pipe->readers)
43909+ if (atomic_dec_and_test(&pipe->readers))
43910 wake_up_interruptible(&pipe->wait);
43911 ret = -ERESTARTSYS;
43912 goto err;
43913
43914 err_wr:
43915- if (!--pipe->writers)
43916+ if (atomic_dec_and_test(&pipe->writers))
43917 wake_up_interruptible(&pipe->wait);
43918 ret = -ERESTARTSYS;
43919 goto err;
43920
43921 err:
43922- if (!pipe->readers && !pipe->writers)
43923+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
43924 free_pipe_info(inode);
43925
43926 err_nocleanup:
43927diff --git a/fs/file.c b/fs/file.c
43928index 4c6992d..104cdea 100644
43929--- a/fs/file.c
43930+++ b/fs/file.c
43931@@ -15,6 +15,7 @@
43932 #include <linux/slab.h>
43933 #include <linux/vmalloc.h>
43934 #include <linux/file.h>
43935+#include <linux/security.h>
43936 #include <linux/fdtable.h>
43937 #include <linux/bitops.h>
43938 #include <linux/interrupt.h>
43939@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
43940 * N.B. For clone tasks sharing a files structure, this test
43941 * will limit the total number of files that can be opened.
43942 */
43943+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
43944 if (nr >= rlimit(RLIMIT_NOFILE))
43945 return -EMFILE;
43946
43947diff --git a/fs/filesystems.c b/fs/filesystems.c
43948index 96f2428..f5eeb8e 100644
43949--- a/fs/filesystems.c
43950+++ b/fs/filesystems.c
43951@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
43952 int len = dot ? dot - name : strlen(name);
43953
43954 fs = __get_fs_type(name, len);
43955+
43956+#ifdef CONFIG_GRKERNSEC_MODHARDEN
43957+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
43958+#else
43959 if (!fs && (request_module("%.*s", len, name) == 0))
43960+#endif
43961 fs = __get_fs_type(name, len);
43962
43963 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
43964diff --git a/fs/fs_struct.c b/fs/fs_struct.c
43965index 78b519c..a8b4979 100644
43966--- a/fs/fs_struct.c
43967+++ b/fs/fs_struct.c
43968@@ -4,6 +4,7 @@
43969 #include <linux/path.h>
43970 #include <linux/slab.h>
43971 #include <linux/fs_struct.h>
43972+#include <linux/grsecurity.h>
43973 #include "internal.h"
43974
43975 static inline void path_get_longterm(struct path *path)
43976@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
43977 old_root = fs->root;
43978 fs->root = *path;
43979 path_get_longterm(path);
43980+ gr_set_chroot_entries(current, path);
43981 write_seqcount_end(&fs->seq);
43982 spin_unlock(&fs->lock);
43983 if (old_root.dentry)
43984@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
43985 && fs->root.mnt == old_root->mnt) {
43986 path_get_longterm(new_root);
43987 fs->root = *new_root;
43988+ gr_set_chroot_entries(p, new_root);
43989 count++;
43990 }
43991 if (fs->pwd.dentry == old_root->dentry
43992@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43993 spin_lock(&fs->lock);
43994 write_seqcount_begin(&fs->seq);
43995 tsk->fs = NULL;
43996- kill = !--fs->users;
43997+ gr_clear_chroot_entries(tsk);
43998+ kill = !atomic_dec_return(&fs->users);
43999 write_seqcount_end(&fs->seq);
44000 spin_unlock(&fs->lock);
44001 task_unlock(tsk);
44002@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44003 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44004 /* We don't need to lock fs - think why ;-) */
44005 if (fs) {
44006- fs->users = 1;
44007+ atomic_set(&fs->users, 1);
44008 fs->in_exec = 0;
44009 spin_lock_init(&fs->lock);
44010 seqcount_init(&fs->seq);
44011@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44012 spin_lock(&old->lock);
44013 fs->root = old->root;
44014 path_get_longterm(&fs->root);
44015+ /* instead of calling gr_set_chroot_entries here,
44016+ we call it from every caller of this function
44017+ */
44018 fs->pwd = old->pwd;
44019 path_get_longterm(&fs->pwd);
44020 spin_unlock(&old->lock);
44021@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44022
44023 task_lock(current);
44024 spin_lock(&fs->lock);
44025- kill = !--fs->users;
44026+ kill = !atomic_dec_return(&fs->users);
44027 current->fs = new_fs;
44028+ gr_set_chroot_entries(current, &new_fs->root);
44029 spin_unlock(&fs->lock);
44030 task_unlock(current);
44031
44032@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44033
44034 int current_umask(void)
44035 {
44036- return current->fs->umask;
44037+ return current->fs->umask | gr_acl_umask();
44038 }
44039 EXPORT_SYMBOL(current_umask);
44040
44041 /* to be mentioned only in INIT_TASK */
44042 struct fs_struct init_fs = {
44043- .users = 1,
44044+ .users = ATOMIC_INIT(1),
44045 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44046 .seq = SEQCNT_ZERO,
44047 .umask = 0022,
44048@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44049 task_lock(current);
44050
44051 spin_lock(&init_fs.lock);
44052- init_fs.users++;
44053+ atomic_inc(&init_fs.users);
44054 spin_unlock(&init_fs.lock);
44055
44056 spin_lock(&fs->lock);
44057 current->fs = &init_fs;
44058- kill = !--fs->users;
44059+ gr_set_chroot_entries(current, &current->fs->root);
44060+ kill = !atomic_dec_return(&fs->users);
44061 spin_unlock(&fs->lock);
44062
44063 task_unlock(current);
44064diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44065index 9905350..02eaec4 100644
44066--- a/fs/fscache/cookie.c
44067+++ b/fs/fscache/cookie.c
44068@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44069 parent ? (char *) parent->def->name : "<no-parent>",
44070 def->name, netfs_data);
44071
44072- fscache_stat(&fscache_n_acquires);
44073+ fscache_stat_unchecked(&fscache_n_acquires);
44074
44075 /* if there's no parent cookie, then we don't create one here either */
44076 if (!parent) {
44077- fscache_stat(&fscache_n_acquires_null);
44078+ fscache_stat_unchecked(&fscache_n_acquires_null);
44079 _leave(" [no parent]");
44080 return NULL;
44081 }
44082@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44083 /* allocate and initialise a cookie */
44084 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44085 if (!cookie) {
44086- fscache_stat(&fscache_n_acquires_oom);
44087+ fscache_stat_unchecked(&fscache_n_acquires_oom);
44088 _leave(" [ENOMEM]");
44089 return NULL;
44090 }
44091@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44092
44093 switch (cookie->def->type) {
44094 case FSCACHE_COOKIE_TYPE_INDEX:
44095- fscache_stat(&fscache_n_cookie_index);
44096+ fscache_stat_unchecked(&fscache_n_cookie_index);
44097 break;
44098 case FSCACHE_COOKIE_TYPE_DATAFILE:
44099- fscache_stat(&fscache_n_cookie_data);
44100+ fscache_stat_unchecked(&fscache_n_cookie_data);
44101 break;
44102 default:
44103- fscache_stat(&fscache_n_cookie_special);
44104+ fscache_stat_unchecked(&fscache_n_cookie_special);
44105 break;
44106 }
44107
44108@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44109 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44110 atomic_dec(&parent->n_children);
44111 __fscache_cookie_put(cookie);
44112- fscache_stat(&fscache_n_acquires_nobufs);
44113+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44114 _leave(" = NULL");
44115 return NULL;
44116 }
44117 }
44118
44119- fscache_stat(&fscache_n_acquires_ok);
44120+ fscache_stat_unchecked(&fscache_n_acquires_ok);
44121 _leave(" = %p", cookie);
44122 return cookie;
44123 }
44124@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44125 cache = fscache_select_cache_for_object(cookie->parent);
44126 if (!cache) {
44127 up_read(&fscache_addremove_sem);
44128- fscache_stat(&fscache_n_acquires_no_cache);
44129+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44130 _leave(" = -ENOMEDIUM [no cache]");
44131 return -ENOMEDIUM;
44132 }
44133@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44134 object = cache->ops->alloc_object(cache, cookie);
44135 fscache_stat_d(&fscache_n_cop_alloc_object);
44136 if (IS_ERR(object)) {
44137- fscache_stat(&fscache_n_object_no_alloc);
44138+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
44139 ret = PTR_ERR(object);
44140 goto error;
44141 }
44142
44143- fscache_stat(&fscache_n_object_alloc);
44144+ fscache_stat_unchecked(&fscache_n_object_alloc);
44145
44146 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44147
44148@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44149 struct fscache_object *object;
44150 struct hlist_node *_p;
44151
44152- fscache_stat(&fscache_n_updates);
44153+ fscache_stat_unchecked(&fscache_n_updates);
44154
44155 if (!cookie) {
44156- fscache_stat(&fscache_n_updates_null);
44157+ fscache_stat_unchecked(&fscache_n_updates_null);
44158 _leave(" [no cookie]");
44159 return;
44160 }
44161@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44162 struct fscache_object *object;
44163 unsigned long event;
44164
44165- fscache_stat(&fscache_n_relinquishes);
44166+ fscache_stat_unchecked(&fscache_n_relinquishes);
44167 if (retire)
44168- fscache_stat(&fscache_n_relinquishes_retire);
44169+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44170
44171 if (!cookie) {
44172- fscache_stat(&fscache_n_relinquishes_null);
44173+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
44174 _leave(" [no cookie]");
44175 return;
44176 }
44177@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44178
44179 /* wait for the cookie to finish being instantiated (or to fail) */
44180 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44181- fscache_stat(&fscache_n_relinquishes_waitcrt);
44182+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44183 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44184 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44185 }
44186diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44187index f6aad48..88dcf26 100644
44188--- a/fs/fscache/internal.h
44189+++ b/fs/fscache/internal.h
44190@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44191 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44192 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44193
44194-extern atomic_t fscache_n_op_pend;
44195-extern atomic_t fscache_n_op_run;
44196-extern atomic_t fscache_n_op_enqueue;
44197-extern atomic_t fscache_n_op_deferred_release;
44198-extern atomic_t fscache_n_op_release;
44199-extern atomic_t fscache_n_op_gc;
44200-extern atomic_t fscache_n_op_cancelled;
44201-extern atomic_t fscache_n_op_rejected;
44202+extern atomic_unchecked_t fscache_n_op_pend;
44203+extern atomic_unchecked_t fscache_n_op_run;
44204+extern atomic_unchecked_t fscache_n_op_enqueue;
44205+extern atomic_unchecked_t fscache_n_op_deferred_release;
44206+extern atomic_unchecked_t fscache_n_op_release;
44207+extern atomic_unchecked_t fscache_n_op_gc;
44208+extern atomic_unchecked_t fscache_n_op_cancelled;
44209+extern atomic_unchecked_t fscache_n_op_rejected;
44210
44211-extern atomic_t fscache_n_attr_changed;
44212-extern atomic_t fscache_n_attr_changed_ok;
44213-extern atomic_t fscache_n_attr_changed_nobufs;
44214-extern atomic_t fscache_n_attr_changed_nomem;
44215-extern atomic_t fscache_n_attr_changed_calls;
44216+extern atomic_unchecked_t fscache_n_attr_changed;
44217+extern atomic_unchecked_t fscache_n_attr_changed_ok;
44218+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44219+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44220+extern atomic_unchecked_t fscache_n_attr_changed_calls;
44221
44222-extern atomic_t fscache_n_allocs;
44223-extern atomic_t fscache_n_allocs_ok;
44224-extern atomic_t fscache_n_allocs_wait;
44225-extern atomic_t fscache_n_allocs_nobufs;
44226-extern atomic_t fscache_n_allocs_intr;
44227-extern atomic_t fscache_n_allocs_object_dead;
44228-extern atomic_t fscache_n_alloc_ops;
44229-extern atomic_t fscache_n_alloc_op_waits;
44230+extern atomic_unchecked_t fscache_n_allocs;
44231+extern atomic_unchecked_t fscache_n_allocs_ok;
44232+extern atomic_unchecked_t fscache_n_allocs_wait;
44233+extern atomic_unchecked_t fscache_n_allocs_nobufs;
44234+extern atomic_unchecked_t fscache_n_allocs_intr;
44235+extern atomic_unchecked_t fscache_n_allocs_object_dead;
44236+extern atomic_unchecked_t fscache_n_alloc_ops;
44237+extern atomic_unchecked_t fscache_n_alloc_op_waits;
44238
44239-extern atomic_t fscache_n_retrievals;
44240-extern atomic_t fscache_n_retrievals_ok;
44241-extern atomic_t fscache_n_retrievals_wait;
44242-extern atomic_t fscache_n_retrievals_nodata;
44243-extern atomic_t fscache_n_retrievals_nobufs;
44244-extern atomic_t fscache_n_retrievals_intr;
44245-extern atomic_t fscache_n_retrievals_nomem;
44246-extern atomic_t fscache_n_retrievals_object_dead;
44247-extern atomic_t fscache_n_retrieval_ops;
44248-extern atomic_t fscache_n_retrieval_op_waits;
44249+extern atomic_unchecked_t fscache_n_retrievals;
44250+extern atomic_unchecked_t fscache_n_retrievals_ok;
44251+extern atomic_unchecked_t fscache_n_retrievals_wait;
44252+extern atomic_unchecked_t fscache_n_retrievals_nodata;
44253+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44254+extern atomic_unchecked_t fscache_n_retrievals_intr;
44255+extern atomic_unchecked_t fscache_n_retrievals_nomem;
44256+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44257+extern atomic_unchecked_t fscache_n_retrieval_ops;
44258+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44259
44260-extern atomic_t fscache_n_stores;
44261-extern atomic_t fscache_n_stores_ok;
44262-extern atomic_t fscache_n_stores_again;
44263-extern atomic_t fscache_n_stores_nobufs;
44264-extern atomic_t fscache_n_stores_oom;
44265-extern atomic_t fscache_n_store_ops;
44266-extern atomic_t fscache_n_store_calls;
44267-extern atomic_t fscache_n_store_pages;
44268-extern atomic_t fscache_n_store_radix_deletes;
44269-extern atomic_t fscache_n_store_pages_over_limit;
44270+extern atomic_unchecked_t fscache_n_stores;
44271+extern atomic_unchecked_t fscache_n_stores_ok;
44272+extern atomic_unchecked_t fscache_n_stores_again;
44273+extern atomic_unchecked_t fscache_n_stores_nobufs;
44274+extern atomic_unchecked_t fscache_n_stores_oom;
44275+extern atomic_unchecked_t fscache_n_store_ops;
44276+extern atomic_unchecked_t fscache_n_store_calls;
44277+extern atomic_unchecked_t fscache_n_store_pages;
44278+extern atomic_unchecked_t fscache_n_store_radix_deletes;
44279+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44280
44281-extern atomic_t fscache_n_store_vmscan_not_storing;
44282-extern atomic_t fscache_n_store_vmscan_gone;
44283-extern atomic_t fscache_n_store_vmscan_busy;
44284-extern atomic_t fscache_n_store_vmscan_cancelled;
44285+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44286+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44287+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44288+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44289
44290-extern atomic_t fscache_n_marks;
44291-extern atomic_t fscache_n_uncaches;
44292+extern atomic_unchecked_t fscache_n_marks;
44293+extern atomic_unchecked_t fscache_n_uncaches;
44294
44295-extern atomic_t fscache_n_acquires;
44296-extern atomic_t fscache_n_acquires_null;
44297-extern atomic_t fscache_n_acquires_no_cache;
44298-extern atomic_t fscache_n_acquires_ok;
44299-extern atomic_t fscache_n_acquires_nobufs;
44300-extern atomic_t fscache_n_acquires_oom;
44301+extern atomic_unchecked_t fscache_n_acquires;
44302+extern atomic_unchecked_t fscache_n_acquires_null;
44303+extern atomic_unchecked_t fscache_n_acquires_no_cache;
44304+extern atomic_unchecked_t fscache_n_acquires_ok;
44305+extern atomic_unchecked_t fscache_n_acquires_nobufs;
44306+extern atomic_unchecked_t fscache_n_acquires_oom;
44307
44308-extern atomic_t fscache_n_updates;
44309-extern atomic_t fscache_n_updates_null;
44310-extern atomic_t fscache_n_updates_run;
44311+extern atomic_unchecked_t fscache_n_updates;
44312+extern atomic_unchecked_t fscache_n_updates_null;
44313+extern atomic_unchecked_t fscache_n_updates_run;
44314
44315-extern atomic_t fscache_n_relinquishes;
44316-extern atomic_t fscache_n_relinquishes_null;
44317-extern atomic_t fscache_n_relinquishes_waitcrt;
44318-extern atomic_t fscache_n_relinquishes_retire;
44319+extern atomic_unchecked_t fscache_n_relinquishes;
44320+extern atomic_unchecked_t fscache_n_relinquishes_null;
44321+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44322+extern atomic_unchecked_t fscache_n_relinquishes_retire;
44323
44324-extern atomic_t fscache_n_cookie_index;
44325-extern atomic_t fscache_n_cookie_data;
44326-extern atomic_t fscache_n_cookie_special;
44327+extern atomic_unchecked_t fscache_n_cookie_index;
44328+extern atomic_unchecked_t fscache_n_cookie_data;
44329+extern atomic_unchecked_t fscache_n_cookie_special;
44330
44331-extern atomic_t fscache_n_object_alloc;
44332-extern atomic_t fscache_n_object_no_alloc;
44333-extern atomic_t fscache_n_object_lookups;
44334-extern atomic_t fscache_n_object_lookups_negative;
44335-extern atomic_t fscache_n_object_lookups_positive;
44336-extern atomic_t fscache_n_object_lookups_timed_out;
44337-extern atomic_t fscache_n_object_created;
44338-extern atomic_t fscache_n_object_avail;
44339-extern atomic_t fscache_n_object_dead;
44340+extern atomic_unchecked_t fscache_n_object_alloc;
44341+extern atomic_unchecked_t fscache_n_object_no_alloc;
44342+extern atomic_unchecked_t fscache_n_object_lookups;
44343+extern atomic_unchecked_t fscache_n_object_lookups_negative;
44344+extern atomic_unchecked_t fscache_n_object_lookups_positive;
44345+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44346+extern atomic_unchecked_t fscache_n_object_created;
44347+extern atomic_unchecked_t fscache_n_object_avail;
44348+extern atomic_unchecked_t fscache_n_object_dead;
44349
44350-extern atomic_t fscache_n_checkaux_none;
44351-extern atomic_t fscache_n_checkaux_okay;
44352-extern atomic_t fscache_n_checkaux_update;
44353-extern atomic_t fscache_n_checkaux_obsolete;
44354+extern atomic_unchecked_t fscache_n_checkaux_none;
44355+extern atomic_unchecked_t fscache_n_checkaux_okay;
44356+extern atomic_unchecked_t fscache_n_checkaux_update;
44357+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44358
44359 extern atomic_t fscache_n_cop_alloc_object;
44360 extern atomic_t fscache_n_cop_lookup_object;
44361@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44362 atomic_inc(stat);
44363 }
44364
44365+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44366+{
44367+ atomic_inc_unchecked(stat);
44368+}
44369+
44370 static inline void fscache_stat_d(atomic_t *stat)
44371 {
44372 atomic_dec(stat);
44373@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44374
44375 #define __fscache_stat(stat) (NULL)
44376 #define fscache_stat(stat) do {} while (0)
44377+#define fscache_stat_unchecked(stat) do {} while (0)
44378 #define fscache_stat_d(stat) do {} while (0)
44379 #endif
44380
44381diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44382index b6b897c..0ffff9c 100644
44383--- a/fs/fscache/object.c
44384+++ b/fs/fscache/object.c
44385@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44386 /* update the object metadata on disk */
44387 case FSCACHE_OBJECT_UPDATING:
44388 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44389- fscache_stat(&fscache_n_updates_run);
44390+ fscache_stat_unchecked(&fscache_n_updates_run);
44391 fscache_stat(&fscache_n_cop_update_object);
44392 object->cache->ops->update_object(object);
44393 fscache_stat_d(&fscache_n_cop_update_object);
44394@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44395 spin_lock(&object->lock);
44396 object->state = FSCACHE_OBJECT_DEAD;
44397 spin_unlock(&object->lock);
44398- fscache_stat(&fscache_n_object_dead);
44399+ fscache_stat_unchecked(&fscache_n_object_dead);
44400 goto terminal_transit;
44401
44402 /* handle the parent cache of this object being withdrawn from
44403@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44404 spin_lock(&object->lock);
44405 object->state = FSCACHE_OBJECT_DEAD;
44406 spin_unlock(&object->lock);
44407- fscache_stat(&fscache_n_object_dead);
44408+ fscache_stat_unchecked(&fscache_n_object_dead);
44409 goto terminal_transit;
44410
44411 /* complain about the object being woken up once it is
44412@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44413 parent->cookie->def->name, cookie->def->name,
44414 object->cache->tag->name);
44415
44416- fscache_stat(&fscache_n_object_lookups);
44417+ fscache_stat_unchecked(&fscache_n_object_lookups);
44418 fscache_stat(&fscache_n_cop_lookup_object);
44419 ret = object->cache->ops->lookup_object(object);
44420 fscache_stat_d(&fscache_n_cop_lookup_object);
44421@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44422 if (ret == -ETIMEDOUT) {
44423 /* probably stuck behind another object, so move this one to
44424 * the back of the queue */
44425- fscache_stat(&fscache_n_object_lookups_timed_out);
44426+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44427 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44428 }
44429
44430@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44431
44432 spin_lock(&object->lock);
44433 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44434- fscache_stat(&fscache_n_object_lookups_negative);
44435+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44436
44437 /* transit here to allow write requests to begin stacking up
44438 * and read requests to begin returning ENODATA */
44439@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44440 * result, in which case there may be data available */
44441 spin_lock(&object->lock);
44442 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44443- fscache_stat(&fscache_n_object_lookups_positive);
44444+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44445
44446 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44447
44448@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44449 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44450 } else {
44451 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44452- fscache_stat(&fscache_n_object_created);
44453+ fscache_stat_unchecked(&fscache_n_object_created);
44454
44455 object->state = FSCACHE_OBJECT_AVAILABLE;
44456 spin_unlock(&object->lock);
44457@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44458 fscache_enqueue_dependents(object);
44459
44460 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44461- fscache_stat(&fscache_n_object_avail);
44462+ fscache_stat_unchecked(&fscache_n_object_avail);
44463
44464 _leave("");
44465 }
44466@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44467 enum fscache_checkaux result;
44468
44469 if (!object->cookie->def->check_aux) {
44470- fscache_stat(&fscache_n_checkaux_none);
44471+ fscache_stat_unchecked(&fscache_n_checkaux_none);
44472 return FSCACHE_CHECKAUX_OKAY;
44473 }
44474
44475@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44476 switch (result) {
44477 /* entry okay as is */
44478 case FSCACHE_CHECKAUX_OKAY:
44479- fscache_stat(&fscache_n_checkaux_okay);
44480+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
44481 break;
44482
44483 /* entry requires update */
44484 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44485- fscache_stat(&fscache_n_checkaux_update);
44486+ fscache_stat_unchecked(&fscache_n_checkaux_update);
44487 break;
44488
44489 /* entry requires deletion */
44490 case FSCACHE_CHECKAUX_OBSOLETE:
44491- fscache_stat(&fscache_n_checkaux_obsolete);
44492+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44493 break;
44494
44495 default:
44496diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44497index 30afdfa..2256596 100644
44498--- a/fs/fscache/operation.c
44499+++ b/fs/fscache/operation.c
44500@@ -17,7 +17,7 @@
44501 #include <linux/slab.h>
44502 #include "internal.h"
44503
44504-atomic_t fscache_op_debug_id;
44505+atomic_unchecked_t fscache_op_debug_id;
44506 EXPORT_SYMBOL(fscache_op_debug_id);
44507
44508 /**
44509@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44510 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44511 ASSERTCMP(atomic_read(&op->usage), >, 0);
44512
44513- fscache_stat(&fscache_n_op_enqueue);
44514+ fscache_stat_unchecked(&fscache_n_op_enqueue);
44515 switch (op->flags & FSCACHE_OP_TYPE) {
44516 case FSCACHE_OP_ASYNC:
44517 _debug("queue async");
44518@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44519 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44520 if (op->processor)
44521 fscache_enqueue_operation(op);
44522- fscache_stat(&fscache_n_op_run);
44523+ fscache_stat_unchecked(&fscache_n_op_run);
44524 }
44525
44526 /*
44527@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44528 if (object->n_ops > 1) {
44529 atomic_inc(&op->usage);
44530 list_add_tail(&op->pend_link, &object->pending_ops);
44531- fscache_stat(&fscache_n_op_pend);
44532+ fscache_stat_unchecked(&fscache_n_op_pend);
44533 } else if (!list_empty(&object->pending_ops)) {
44534 atomic_inc(&op->usage);
44535 list_add_tail(&op->pend_link, &object->pending_ops);
44536- fscache_stat(&fscache_n_op_pend);
44537+ fscache_stat_unchecked(&fscache_n_op_pend);
44538 fscache_start_operations(object);
44539 } else {
44540 ASSERTCMP(object->n_in_progress, ==, 0);
44541@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44542 object->n_exclusive++; /* reads and writes must wait */
44543 atomic_inc(&op->usage);
44544 list_add_tail(&op->pend_link, &object->pending_ops);
44545- fscache_stat(&fscache_n_op_pend);
44546+ fscache_stat_unchecked(&fscache_n_op_pend);
44547 ret = 0;
44548 } else {
44549 /* not allowed to submit ops in any other state */
44550@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
44551 if (object->n_exclusive > 0) {
44552 atomic_inc(&op->usage);
44553 list_add_tail(&op->pend_link, &object->pending_ops);
44554- fscache_stat(&fscache_n_op_pend);
44555+ fscache_stat_unchecked(&fscache_n_op_pend);
44556 } else if (!list_empty(&object->pending_ops)) {
44557 atomic_inc(&op->usage);
44558 list_add_tail(&op->pend_link, &object->pending_ops);
44559- fscache_stat(&fscache_n_op_pend);
44560+ fscache_stat_unchecked(&fscache_n_op_pend);
44561 fscache_start_operations(object);
44562 } else {
44563 ASSERTCMP(object->n_exclusive, ==, 0);
44564@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
44565 object->n_ops++;
44566 atomic_inc(&op->usage);
44567 list_add_tail(&op->pend_link, &object->pending_ops);
44568- fscache_stat(&fscache_n_op_pend);
44569+ fscache_stat_unchecked(&fscache_n_op_pend);
44570 ret = 0;
44571 } else if (object->state == FSCACHE_OBJECT_DYING ||
44572 object->state == FSCACHE_OBJECT_LC_DYING ||
44573 object->state == FSCACHE_OBJECT_WITHDRAWING) {
44574- fscache_stat(&fscache_n_op_rejected);
44575+ fscache_stat_unchecked(&fscache_n_op_rejected);
44576 ret = -ENOBUFS;
44577 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
44578 fscache_report_unexpected_submission(object, op, ostate);
44579@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
44580
44581 ret = -EBUSY;
44582 if (!list_empty(&op->pend_link)) {
44583- fscache_stat(&fscache_n_op_cancelled);
44584+ fscache_stat_unchecked(&fscache_n_op_cancelled);
44585 list_del_init(&op->pend_link);
44586 object->n_ops--;
44587 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
44588@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
44589 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
44590 BUG();
44591
44592- fscache_stat(&fscache_n_op_release);
44593+ fscache_stat_unchecked(&fscache_n_op_release);
44594
44595 if (op->release) {
44596 op->release(op);
44597@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
44598 * lock, and defer it otherwise */
44599 if (!spin_trylock(&object->lock)) {
44600 _debug("defer put");
44601- fscache_stat(&fscache_n_op_deferred_release);
44602+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
44603
44604 cache = object->cache;
44605 spin_lock(&cache->op_gc_list_lock);
44606@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
44607
44608 _debug("GC DEFERRED REL OBJ%x OP%x",
44609 object->debug_id, op->debug_id);
44610- fscache_stat(&fscache_n_op_gc);
44611+ fscache_stat_unchecked(&fscache_n_op_gc);
44612
44613 ASSERTCMP(atomic_read(&op->usage), ==, 0);
44614
44615diff --git a/fs/fscache/page.c b/fs/fscache/page.c
44616index 3f7a59b..cf196cc 100644
44617--- a/fs/fscache/page.c
44618+++ b/fs/fscache/page.c
44619@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44620 val = radix_tree_lookup(&cookie->stores, page->index);
44621 if (!val) {
44622 rcu_read_unlock();
44623- fscache_stat(&fscache_n_store_vmscan_not_storing);
44624+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
44625 __fscache_uncache_page(cookie, page);
44626 return true;
44627 }
44628@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44629 spin_unlock(&cookie->stores_lock);
44630
44631 if (xpage) {
44632- fscache_stat(&fscache_n_store_vmscan_cancelled);
44633- fscache_stat(&fscache_n_store_radix_deletes);
44634+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
44635+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44636 ASSERTCMP(xpage, ==, page);
44637 } else {
44638- fscache_stat(&fscache_n_store_vmscan_gone);
44639+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
44640 }
44641
44642 wake_up_bit(&cookie->flags, 0);
44643@@ -107,7 +107,7 @@ page_busy:
44644 /* we might want to wait here, but that could deadlock the allocator as
44645 * the work threads writing to the cache may all end up sleeping
44646 * on memory allocation */
44647- fscache_stat(&fscache_n_store_vmscan_busy);
44648+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
44649 return false;
44650 }
44651 EXPORT_SYMBOL(__fscache_maybe_release_page);
44652@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
44653 FSCACHE_COOKIE_STORING_TAG);
44654 if (!radix_tree_tag_get(&cookie->stores, page->index,
44655 FSCACHE_COOKIE_PENDING_TAG)) {
44656- fscache_stat(&fscache_n_store_radix_deletes);
44657+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44658 xpage = radix_tree_delete(&cookie->stores, page->index);
44659 }
44660 spin_unlock(&cookie->stores_lock);
44661@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
44662
44663 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
44664
44665- fscache_stat(&fscache_n_attr_changed_calls);
44666+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
44667
44668 if (fscache_object_is_active(object)) {
44669 fscache_stat(&fscache_n_cop_attr_changed);
44670@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44671
44672 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44673
44674- fscache_stat(&fscache_n_attr_changed);
44675+ fscache_stat_unchecked(&fscache_n_attr_changed);
44676
44677 op = kzalloc(sizeof(*op), GFP_KERNEL);
44678 if (!op) {
44679- fscache_stat(&fscache_n_attr_changed_nomem);
44680+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
44681 _leave(" = -ENOMEM");
44682 return -ENOMEM;
44683 }
44684@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44685 if (fscache_submit_exclusive_op(object, op) < 0)
44686 goto nobufs;
44687 spin_unlock(&cookie->lock);
44688- fscache_stat(&fscache_n_attr_changed_ok);
44689+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
44690 fscache_put_operation(op);
44691 _leave(" = 0");
44692 return 0;
44693@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44694 nobufs:
44695 spin_unlock(&cookie->lock);
44696 kfree(op);
44697- fscache_stat(&fscache_n_attr_changed_nobufs);
44698+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
44699 _leave(" = %d", -ENOBUFS);
44700 return -ENOBUFS;
44701 }
44702@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
44703 /* allocate a retrieval operation and attempt to submit it */
44704 op = kzalloc(sizeof(*op), GFP_NOIO);
44705 if (!op) {
44706- fscache_stat(&fscache_n_retrievals_nomem);
44707+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44708 return NULL;
44709 }
44710
44711@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44712 return 0;
44713 }
44714
44715- fscache_stat(&fscache_n_retrievals_wait);
44716+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
44717
44718 jif = jiffies;
44719 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
44720 fscache_wait_bit_interruptible,
44721 TASK_INTERRUPTIBLE) != 0) {
44722- fscache_stat(&fscache_n_retrievals_intr);
44723+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
44724 _leave(" = -ERESTARTSYS");
44725 return -ERESTARTSYS;
44726 }
44727@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44728 */
44729 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44730 struct fscache_retrieval *op,
44731- atomic_t *stat_op_waits,
44732- atomic_t *stat_object_dead)
44733+ atomic_unchecked_t *stat_op_waits,
44734+ atomic_unchecked_t *stat_object_dead)
44735 {
44736 int ret;
44737
44738@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44739 goto check_if_dead;
44740
44741 _debug(">>> WT");
44742- fscache_stat(stat_op_waits);
44743+ fscache_stat_unchecked(stat_op_waits);
44744 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
44745 fscache_wait_bit_interruptible,
44746 TASK_INTERRUPTIBLE) < 0) {
44747@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44748
44749 check_if_dead:
44750 if (unlikely(fscache_object_is_dead(object))) {
44751- fscache_stat(stat_object_dead);
44752+ fscache_stat_unchecked(stat_object_dead);
44753 return -ENOBUFS;
44754 }
44755 return 0;
44756@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44757
44758 _enter("%p,%p,,,", cookie, page);
44759
44760- fscache_stat(&fscache_n_retrievals);
44761+ fscache_stat_unchecked(&fscache_n_retrievals);
44762
44763 if (hlist_empty(&cookie->backing_objects))
44764 goto nobufs;
44765@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44766 goto nobufs_unlock;
44767 spin_unlock(&cookie->lock);
44768
44769- fscache_stat(&fscache_n_retrieval_ops);
44770+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
44771
44772 /* pin the netfs read context in case we need to do the actual netfs
44773 * read because we've encountered a cache read failure */
44774@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44775
44776 error:
44777 if (ret == -ENOMEM)
44778- fscache_stat(&fscache_n_retrievals_nomem);
44779+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44780 else if (ret == -ERESTARTSYS)
44781- fscache_stat(&fscache_n_retrievals_intr);
44782+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
44783 else if (ret == -ENODATA)
44784- fscache_stat(&fscache_n_retrievals_nodata);
44785+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44786 else if (ret < 0)
44787- fscache_stat(&fscache_n_retrievals_nobufs);
44788+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44789 else
44790- fscache_stat(&fscache_n_retrievals_ok);
44791+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
44792
44793 fscache_put_retrieval(op);
44794 _leave(" = %d", ret);
44795@@ -429,7 +429,7 @@ nobufs_unlock:
44796 spin_unlock(&cookie->lock);
44797 kfree(op);
44798 nobufs:
44799- fscache_stat(&fscache_n_retrievals_nobufs);
44800+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44801 _leave(" = -ENOBUFS");
44802 return -ENOBUFS;
44803 }
44804@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44805
44806 _enter("%p,,%d,,,", cookie, *nr_pages);
44807
44808- fscache_stat(&fscache_n_retrievals);
44809+ fscache_stat_unchecked(&fscache_n_retrievals);
44810
44811 if (hlist_empty(&cookie->backing_objects))
44812 goto nobufs;
44813@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44814 goto nobufs_unlock;
44815 spin_unlock(&cookie->lock);
44816
44817- fscache_stat(&fscache_n_retrieval_ops);
44818+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
44819
44820 /* pin the netfs read context in case we need to do the actual netfs
44821 * read because we've encountered a cache read failure */
44822@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44823
44824 error:
44825 if (ret == -ENOMEM)
44826- fscache_stat(&fscache_n_retrievals_nomem);
44827+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44828 else if (ret == -ERESTARTSYS)
44829- fscache_stat(&fscache_n_retrievals_intr);
44830+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
44831 else if (ret == -ENODATA)
44832- fscache_stat(&fscache_n_retrievals_nodata);
44833+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44834 else if (ret < 0)
44835- fscache_stat(&fscache_n_retrievals_nobufs);
44836+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44837 else
44838- fscache_stat(&fscache_n_retrievals_ok);
44839+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
44840
44841 fscache_put_retrieval(op);
44842 _leave(" = %d", ret);
44843@@ -545,7 +545,7 @@ nobufs_unlock:
44844 spin_unlock(&cookie->lock);
44845 kfree(op);
44846 nobufs:
44847- fscache_stat(&fscache_n_retrievals_nobufs);
44848+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44849 _leave(" = -ENOBUFS");
44850 return -ENOBUFS;
44851 }
44852@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44853
44854 _enter("%p,%p,,,", cookie, page);
44855
44856- fscache_stat(&fscache_n_allocs);
44857+ fscache_stat_unchecked(&fscache_n_allocs);
44858
44859 if (hlist_empty(&cookie->backing_objects))
44860 goto nobufs;
44861@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44862 goto nobufs_unlock;
44863 spin_unlock(&cookie->lock);
44864
44865- fscache_stat(&fscache_n_alloc_ops);
44866+ fscache_stat_unchecked(&fscache_n_alloc_ops);
44867
44868 ret = fscache_wait_for_retrieval_activation(
44869 object, op,
44870@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44871
44872 error:
44873 if (ret == -ERESTARTSYS)
44874- fscache_stat(&fscache_n_allocs_intr);
44875+ fscache_stat_unchecked(&fscache_n_allocs_intr);
44876 else if (ret < 0)
44877- fscache_stat(&fscache_n_allocs_nobufs);
44878+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44879 else
44880- fscache_stat(&fscache_n_allocs_ok);
44881+ fscache_stat_unchecked(&fscache_n_allocs_ok);
44882
44883 fscache_put_retrieval(op);
44884 _leave(" = %d", ret);
44885@@ -625,7 +625,7 @@ nobufs_unlock:
44886 spin_unlock(&cookie->lock);
44887 kfree(op);
44888 nobufs:
44889- fscache_stat(&fscache_n_allocs_nobufs);
44890+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44891 _leave(" = -ENOBUFS");
44892 return -ENOBUFS;
44893 }
44894@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44895
44896 spin_lock(&cookie->stores_lock);
44897
44898- fscache_stat(&fscache_n_store_calls);
44899+ fscache_stat_unchecked(&fscache_n_store_calls);
44900
44901 /* find a page to store */
44902 page = NULL;
44903@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44904 page = results[0];
44905 _debug("gang %d [%lx]", n, page->index);
44906 if (page->index > op->store_limit) {
44907- fscache_stat(&fscache_n_store_pages_over_limit);
44908+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
44909 goto superseded;
44910 }
44911
44912@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44913 spin_unlock(&cookie->stores_lock);
44914 spin_unlock(&object->lock);
44915
44916- fscache_stat(&fscache_n_store_pages);
44917+ fscache_stat_unchecked(&fscache_n_store_pages);
44918 fscache_stat(&fscache_n_cop_write_page);
44919 ret = object->cache->ops->write_page(op, page);
44920 fscache_stat_d(&fscache_n_cop_write_page);
44921@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44922 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44923 ASSERT(PageFsCache(page));
44924
44925- fscache_stat(&fscache_n_stores);
44926+ fscache_stat_unchecked(&fscache_n_stores);
44927
44928 op = kzalloc(sizeof(*op), GFP_NOIO);
44929 if (!op)
44930@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44931 spin_unlock(&cookie->stores_lock);
44932 spin_unlock(&object->lock);
44933
44934- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
44935+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
44936 op->store_limit = object->store_limit;
44937
44938 if (fscache_submit_op(object, &op->op) < 0)
44939@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44940
44941 spin_unlock(&cookie->lock);
44942 radix_tree_preload_end();
44943- fscache_stat(&fscache_n_store_ops);
44944- fscache_stat(&fscache_n_stores_ok);
44945+ fscache_stat_unchecked(&fscache_n_store_ops);
44946+ fscache_stat_unchecked(&fscache_n_stores_ok);
44947
44948 /* the work queue now carries its own ref on the object */
44949 fscache_put_operation(&op->op);
44950@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44951 return 0;
44952
44953 already_queued:
44954- fscache_stat(&fscache_n_stores_again);
44955+ fscache_stat_unchecked(&fscache_n_stores_again);
44956 already_pending:
44957 spin_unlock(&cookie->stores_lock);
44958 spin_unlock(&object->lock);
44959 spin_unlock(&cookie->lock);
44960 radix_tree_preload_end();
44961 kfree(op);
44962- fscache_stat(&fscache_n_stores_ok);
44963+ fscache_stat_unchecked(&fscache_n_stores_ok);
44964 _leave(" = 0");
44965 return 0;
44966
44967@@ -851,14 +851,14 @@ nobufs:
44968 spin_unlock(&cookie->lock);
44969 radix_tree_preload_end();
44970 kfree(op);
44971- fscache_stat(&fscache_n_stores_nobufs);
44972+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
44973 _leave(" = -ENOBUFS");
44974 return -ENOBUFS;
44975
44976 nomem_free:
44977 kfree(op);
44978 nomem:
44979- fscache_stat(&fscache_n_stores_oom);
44980+ fscache_stat_unchecked(&fscache_n_stores_oom);
44981 _leave(" = -ENOMEM");
44982 return -ENOMEM;
44983 }
44984@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
44985 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44986 ASSERTCMP(page, !=, NULL);
44987
44988- fscache_stat(&fscache_n_uncaches);
44989+ fscache_stat_unchecked(&fscache_n_uncaches);
44990
44991 /* cache withdrawal may beat us to it */
44992 if (!PageFsCache(page))
44993@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
44994 unsigned long loop;
44995
44996 #ifdef CONFIG_FSCACHE_STATS
44997- atomic_add(pagevec->nr, &fscache_n_marks);
44998+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
44999 #endif
45000
45001 for (loop = 0; loop < pagevec->nr; loop++) {
45002diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45003index 4765190..2a067f2 100644
45004--- a/fs/fscache/stats.c
45005+++ b/fs/fscache/stats.c
45006@@ -18,95 +18,95 @@
45007 /*
45008 * operation counters
45009 */
45010-atomic_t fscache_n_op_pend;
45011-atomic_t fscache_n_op_run;
45012-atomic_t fscache_n_op_enqueue;
45013-atomic_t fscache_n_op_requeue;
45014-atomic_t fscache_n_op_deferred_release;
45015-atomic_t fscache_n_op_release;
45016-atomic_t fscache_n_op_gc;
45017-atomic_t fscache_n_op_cancelled;
45018-atomic_t fscache_n_op_rejected;
45019+atomic_unchecked_t fscache_n_op_pend;
45020+atomic_unchecked_t fscache_n_op_run;
45021+atomic_unchecked_t fscache_n_op_enqueue;
45022+atomic_unchecked_t fscache_n_op_requeue;
45023+atomic_unchecked_t fscache_n_op_deferred_release;
45024+atomic_unchecked_t fscache_n_op_release;
45025+atomic_unchecked_t fscache_n_op_gc;
45026+atomic_unchecked_t fscache_n_op_cancelled;
45027+atomic_unchecked_t fscache_n_op_rejected;
45028
45029-atomic_t fscache_n_attr_changed;
45030-atomic_t fscache_n_attr_changed_ok;
45031-atomic_t fscache_n_attr_changed_nobufs;
45032-atomic_t fscache_n_attr_changed_nomem;
45033-atomic_t fscache_n_attr_changed_calls;
45034+atomic_unchecked_t fscache_n_attr_changed;
45035+atomic_unchecked_t fscache_n_attr_changed_ok;
45036+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45037+atomic_unchecked_t fscache_n_attr_changed_nomem;
45038+atomic_unchecked_t fscache_n_attr_changed_calls;
45039
45040-atomic_t fscache_n_allocs;
45041-atomic_t fscache_n_allocs_ok;
45042-atomic_t fscache_n_allocs_wait;
45043-atomic_t fscache_n_allocs_nobufs;
45044-atomic_t fscache_n_allocs_intr;
45045-atomic_t fscache_n_allocs_object_dead;
45046-atomic_t fscache_n_alloc_ops;
45047-atomic_t fscache_n_alloc_op_waits;
45048+atomic_unchecked_t fscache_n_allocs;
45049+atomic_unchecked_t fscache_n_allocs_ok;
45050+atomic_unchecked_t fscache_n_allocs_wait;
45051+atomic_unchecked_t fscache_n_allocs_nobufs;
45052+atomic_unchecked_t fscache_n_allocs_intr;
45053+atomic_unchecked_t fscache_n_allocs_object_dead;
45054+atomic_unchecked_t fscache_n_alloc_ops;
45055+atomic_unchecked_t fscache_n_alloc_op_waits;
45056
45057-atomic_t fscache_n_retrievals;
45058-atomic_t fscache_n_retrievals_ok;
45059-atomic_t fscache_n_retrievals_wait;
45060-atomic_t fscache_n_retrievals_nodata;
45061-atomic_t fscache_n_retrievals_nobufs;
45062-atomic_t fscache_n_retrievals_intr;
45063-atomic_t fscache_n_retrievals_nomem;
45064-atomic_t fscache_n_retrievals_object_dead;
45065-atomic_t fscache_n_retrieval_ops;
45066-atomic_t fscache_n_retrieval_op_waits;
45067+atomic_unchecked_t fscache_n_retrievals;
45068+atomic_unchecked_t fscache_n_retrievals_ok;
45069+atomic_unchecked_t fscache_n_retrievals_wait;
45070+atomic_unchecked_t fscache_n_retrievals_nodata;
45071+atomic_unchecked_t fscache_n_retrievals_nobufs;
45072+atomic_unchecked_t fscache_n_retrievals_intr;
45073+atomic_unchecked_t fscache_n_retrievals_nomem;
45074+atomic_unchecked_t fscache_n_retrievals_object_dead;
45075+atomic_unchecked_t fscache_n_retrieval_ops;
45076+atomic_unchecked_t fscache_n_retrieval_op_waits;
45077
45078-atomic_t fscache_n_stores;
45079-atomic_t fscache_n_stores_ok;
45080-atomic_t fscache_n_stores_again;
45081-atomic_t fscache_n_stores_nobufs;
45082-atomic_t fscache_n_stores_oom;
45083-atomic_t fscache_n_store_ops;
45084-atomic_t fscache_n_store_calls;
45085-atomic_t fscache_n_store_pages;
45086-atomic_t fscache_n_store_radix_deletes;
45087-atomic_t fscache_n_store_pages_over_limit;
45088+atomic_unchecked_t fscache_n_stores;
45089+atomic_unchecked_t fscache_n_stores_ok;
45090+atomic_unchecked_t fscache_n_stores_again;
45091+atomic_unchecked_t fscache_n_stores_nobufs;
45092+atomic_unchecked_t fscache_n_stores_oom;
45093+atomic_unchecked_t fscache_n_store_ops;
45094+atomic_unchecked_t fscache_n_store_calls;
45095+atomic_unchecked_t fscache_n_store_pages;
45096+atomic_unchecked_t fscache_n_store_radix_deletes;
45097+atomic_unchecked_t fscache_n_store_pages_over_limit;
45098
45099-atomic_t fscache_n_store_vmscan_not_storing;
45100-atomic_t fscache_n_store_vmscan_gone;
45101-atomic_t fscache_n_store_vmscan_busy;
45102-atomic_t fscache_n_store_vmscan_cancelled;
45103+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45104+atomic_unchecked_t fscache_n_store_vmscan_gone;
45105+atomic_unchecked_t fscache_n_store_vmscan_busy;
45106+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45107
45108-atomic_t fscache_n_marks;
45109-atomic_t fscache_n_uncaches;
45110+atomic_unchecked_t fscache_n_marks;
45111+atomic_unchecked_t fscache_n_uncaches;
45112
45113-atomic_t fscache_n_acquires;
45114-atomic_t fscache_n_acquires_null;
45115-atomic_t fscache_n_acquires_no_cache;
45116-atomic_t fscache_n_acquires_ok;
45117-atomic_t fscache_n_acquires_nobufs;
45118-atomic_t fscache_n_acquires_oom;
45119+atomic_unchecked_t fscache_n_acquires;
45120+atomic_unchecked_t fscache_n_acquires_null;
45121+atomic_unchecked_t fscache_n_acquires_no_cache;
45122+atomic_unchecked_t fscache_n_acquires_ok;
45123+atomic_unchecked_t fscache_n_acquires_nobufs;
45124+atomic_unchecked_t fscache_n_acquires_oom;
45125
45126-atomic_t fscache_n_updates;
45127-atomic_t fscache_n_updates_null;
45128-atomic_t fscache_n_updates_run;
45129+atomic_unchecked_t fscache_n_updates;
45130+atomic_unchecked_t fscache_n_updates_null;
45131+atomic_unchecked_t fscache_n_updates_run;
45132
45133-atomic_t fscache_n_relinquishes;
45134-atomic_t fscache_n_relinquishes_null;
45135-atomic_t fscache_n_relinquishes_waitcrt;
45136-atomic_t fscache_n_relinquishes_retire;
45137+atomic_unchecked_t fscache_n_relinquishes;
45138+atomic_unchecked_t fscache_n_relinquishes_null;
45139+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45140+atomic_unchecked_t fscache_n_relinquishes_retire;
45141
45142-atomic_t fscache_n_cookie_index;
45143-atomic_t fscache_n_cookie_data;
45144-atomic_t fscache_n_cookie_special;
45145+atomic_unchecked_t fscache_n_cookie_index;
45146+atomic_unchecked_t fscache_n_cookie_data;
45147+atomic_unchecked_t fscache_n_cookie_special;
45148
45149-atomic_t fscache_n_object_alloc;
45150-atomic_t fscache_n_object_no_alloc;
45151-atomic_t fscache_n_object_lookups;
45152-atomic_t fscache_n_object_lookups_negative;
45153-atomic_t fscache_n_object_lookups_positive;
45154-atomic_t fscache_n_object_lookups_timed_out;
45155-atomic_t fscache_n_object_created;
45156-atomic_t fscache_n_object_avail;
45157-atomic_t fscache_n_object_dead;
45158+atomic_unchecked_t fscache_n_object_alloc;
45159+atomic_unchecked_t fscache_n_object_no_alloc;
45160+atomic_unchecked_t fscache_n_object_lookups;
45161+atomic_unchecked_t fscache_n_object_lookups_negative;
45162+atomic_unchecked_t fscache_n_object_lookups_positive;
45163+atomic_unchecked_t fscache_n_object_lookups_timed_out;
45164+atomic_unchecked_t fscache_n_object_created;
45165+atomic_unchecked_t fscache_n_object_avail;
45166+atomic_unchecked_t fscache_n_object_dead;
45167
45168-atomic_t fscache_n_checkaux_none;
45169-atomic_t fscache_n_checkaux_okay;
45170-atomic_t fscache_n_checkaux_update;
45171-atomic_t fscache_n_checkaux_obsolete;
45172+atomic_unchecked_t fscache_n_checkaux_none;
45173+atomic_unchecked_t fscache_n_checkaux_okay;
45174+atomic_unchecked_t fscache_n_checkaux_update;
45175+atomic_unchecked_t fscache_n_checkaux_obsolete;
45176
45177 atomic_t fscache_n_cop_alloc_object;
45178 atomic_t fscache_n_cop_lookup_object;
45179@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45180 seq_puts(m, "FS-Cache statistics\n");
45181
45182 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45183- atomic_read(&fscache_n_cookie_index),
45184- atomic_read(&fscache_n_cookie_data),
45185- atomic_read(&fscache_n_cookie_special));
45186+ atomic_read_unchecked(&fscache_n_cookie_index),
45187+ atomic_read_unchecked(&fscache_n_cookie_data),
45188+ atomic_read_unchecked(&fscache_n_cookie_special));
45189
45190 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45191- atomic_read(&fscache_n_object_alloc),
45192- atomic_read(&fscache_n_object_no_alloc),
45193- atomic_read(&fscache_n_object_avail),
45194- atomic_read(&fscache_n_object_dead));
45195+ atomic_read_unchecked(&fscache_n_object_alloc),
45196+ atomic_read_unchecked(&fscache_n_object_no_alloc),
45197+ atomic_read_unchecked(&fscache_n_object_avail),
45198+ atomic_read_unchecked(&fscache_n_object_dead));
45199 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45200- atomic_read(&fscache_n_checkaux_none),
45201- atomic_read(&fscache_n_checkaux_okay),
45202- atomic_read(&fscache_n_checkaux_update),
45203- atomic_read(&fscache_n_checkaux_obsolete));
45204+ atomic_read_unchecked(&fscache_n_checkaux_none),
45205+ atomic_read_unchecked(&fscache_n_checkaux_okay),
45206+ atomic_read_unchecked(&fscache_n_checkaux_update),
45207+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45208
45209 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45210- atomic_read(&fscache_n_marks),
45211- atomic_read(&fscache_n_uncaches));
45212+ atomic_read_unchecked(&fscache_n_marks),
45213+ atomic_read_unchecked(&fscache_n_uncaches));
45214
45215 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45216 " oom=%u\n",
45217- atomic_read(&fscache_n_acquires),
45218- atomic_read(&fscache_n_acquires_null),
45219- atomic_read(&fscache_n_acquires_no_cache),
45220- atomic_read(&fscache_n_acquires_ok),
45221- atomic_read(&fscache_n_acquires_nobufs),
45222- atomic_read(&fscache_n_acquires_oom));
45223+ atomic_read_unchecked(&fscache_n_acquires),
45224+ atomic_read_unchecked(&fscache_n_acquires_null),
45225+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
45226+ atomic_read_unchecked(&fscache_n_acquires_ok),
45227+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
45228+ atomic_read_unchecked(&fscache_n_acquires_oom));
45229
45230 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45231- atomic_read(&fscache_n_object_lookups),
45232- atomic_read(&fscache_n_object_lookups_negative),
45233- atomic_read(&fscache_n_object_lookups_positive),
45234- atomic_read(&fscache_n_object_created),
45235- atomic_read(&fscache_n_object_lookups_timed_out));
45236+ atomic_read_unchecked(&fscache_n_object_lookups),
45237+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
45238+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
45239+ atomic_read_unchecked(&fscache_n_object_created),
45240+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45241
45242 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45243- atomic_read(&fscache_n_updates),
45244- atomic_read(&fscache_n_updates_null),
45245- atomic_read(&fscache_n_updates_run));
45246+ atomic_read_unchecked(&fscache_n_updates),
45247+ atomic_read_unchecked(&fscache_n_updates_null),
45248+ atomic_read_unchecked(&fscache_n_updates_run));
45249
45250 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45251- atomic_read(&fscache_n_relinquishes),
45252- atomic_read(&fscache_n_relinquishes_null),
45253- atomic_read(&fscache_n_relinquishes_waitcrt),
45254- atomic_read(&fscache_n_relinquishes_retire));
45255+ atomic_read_unchecked(&fscache_n_relinquishes),
45256+ atomic_read_unchecked(&fscache_n_relinquishes_null),
45257+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45258+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
45259
45260 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45261- atomic_read(&fscache_n_attr_changed),
45262- atomic_read(&fscache_n_attr_changed_ok),
45263- atomic_read(&fscache_n_attr_changed_nobufs),
45264- atomic_read(&fscache_n_attr_changed_nomem),
45265- atomic_read(&fscache_n_attr_changed_calls));
45266+ atomic_read_unchecked(&fscache_n_attr_changed),
45267+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
45268+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45269+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45270+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
45271
45272 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45273- atomic_read(&fscache_n_allocs),
45274- atomic_read(&fscache_n_allocs_ok),
45275- atomic_read(&fscache_n_allocs_wait),
45276- atomic_read(&fscache_n_allocs_nobufs),
45277- atomic_read(&fscache_n_allocs_intr));
45278+ atomic_read_unchecked(&fscache_n_allocs),
45279+ atomic_read_unchecked(&fscache_n_allocs_ok),
45280+ atomic_read_unchecked(&fscache_n_allocs_wait),
45281+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
45282+ atomic_read_unchecked(&fscache_n_allocs_intr));
45283 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45284- atomic_read(&fscache_n_alloc_ops),
45285- atomic_read(&fscache_n_alloc_op_waits),
45286- atomic_read(&fscache_n_allocs_object_dead));
45287+ atomic_read_unchecked(&fscache_n_alloc_ops),
45288+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
45289+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
45290
45291 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45292 " int=%u oom=%u\n",
45293- atomic_read(&fscache_n_retrievals),
45294- atomic_read(&fscache_n_retrievals_ok),
45295- atomic_read(&fscache_n_retrievals_wait),
45296- atomic_read(&fscache_n_retrievals_nodata),
45297- atomic_read(&fscache_n_retrievals_nobufs),
45298- atomic_read(&fscache_n_retrievals_intr),
45299- atomic_read(&fscache_n_retrievals_nomem));
45300+ atomic_read_unchecked(&fscache_n_retrievals),
45301+ atomic_read_unchecked(&fscache_n_retrievals_ok),
45302+ atomic_read_unchecked(&fscache_n_retrievals_wait),
45303+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
45304+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45305+ atomic_read_unchecked(&fscache_n_retrievals_intr),
45306+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
45307 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45308- atomic_read(&fscache_n_retrieval_ops),
45309- atomic_read(&fscache_n_retrieval_op_waits),
45310- atomic_read(&fscache_n_retrievals_object_dead));
45311+ atomic_read_unchecked(&fscache_n_retrieval_ops),
45312+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45313+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45314
45315 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45316- atomic_read(&fscache_n_stores),
45317- atomic_read(&fscache_n_stores_ok),
45318- atomic_read(&fscache_n_stores_again),
45319- atomic_read(&fscache_n_stores_nobufs),
45320- atomic_read(&fscache_n_stores_oom));
45321+ atomic_read_unchecked(&fscache_n_stores),
45322+ atomic_read_unchecked(&fscache_n_stores_ok),
45323+ atomic_read_unchecked(&fscache_n_stores_again),
45324+ atomic_read_unchecked(&fscache_n_stores_nobufs),
45325+ atomic_read_unchecked(&fscache_n_stores_oom));
45326 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45327- atomic_read(&fscache_n_store_ops),
45328- atomic_read(&fscache_n_store_calls),
45329- atomic_read(&fscache_n_store_pages),
45330- atomic_read(&fscache_n_store_radix_deletes),
45331- atomic_read(&fscache_n_store_pages_over_limit));
45332+ atomic_read_unchecked(&fscache_n_store_ops),
45333+ atomic_read_unchecked(&fscache_n_store_calls),
45334+ atomic_read_unchecked(&fscache_n_store_pages),
45335+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
45336+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45337
45338 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45339- atomic_read(&fscache_n_store_vmscan_not_storing),
45340- atomic_read(&fscache_n_store_vmscan_gone),
45341- atomic_read(&fscache_n_store_vmscan_busy),
45342- atomic_read(&fscache_n_store_vmscan_cancelled));
45343+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45344+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45345+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45346+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45347
45348 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45349- atomic_read(&fscache_n_op_pend),
45350- atomic_read(&fscache_n_op_run),
45351- atomic_read(&fscache_n_op_enqueue),
45352- atomic_read(&fscache_n_op_cancelled),
45353- atomic_read(&fscache_n_op_rejected));
45354+ atomic_read_unchecked(&fscache_n_op_pend),
45355+ atomic_read_unchecked(&fscache_n_op_run),
45356+ atomic_read_unchecked(&fscache_n_op_enqueue),
45357+ atomic_read_unchecked(&fscache_n_op_cancelled),
45358+ atomic_read_unchecked(&fscache_n_op_rejected));
45359 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45360- atomic_read(&fscache_n_op_deferred_release),
45361- atomic_read(&fscache_n_op_release),
45362- atomic_read(&fscache_n_op_gc));
45363+ atomic_read_unchecked(&fscache_n_op_deferred_release),
45364+ atomic_read_unchecked(&fscache_n_op_release),
45365+ atomic_read_unchecked(&fscache_n_op_gc));
45366
45367 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45368 atomic_read(&fscache_n_cop_alloc_object),
45369diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45370index 3426521..3b75162 100644
45371--- a/fs/fuse/cuse.c
45372+++ b/fs/fuse/cuse.c
45373@@ -587,10 +587,12 @@ static int __init cuse_init(void)
45374 INIT_LIST_HEAD(&cuse_conntbl[i]);
45375
45376 /* inherit and extend fuse_dev_operations */
45377- cuse_channel_fops = fuse_dev_operations;
45378- cuse_channel_fops.owner = THIS_MODULE;
45379- cuse_channel_fops.open = cuse_channel_open;
45380- cuse_channel_fops.release = cuse_channel_release;
45381+ pax_open_kernel();
45382+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45383+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45384+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
45385+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
45386+ pax_close_kernel();
45387
45388 cuse_class = class_create(THIS_MODULE, "cuse");
45389 if (IS_ERR(cuse_class))
45390diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45391index 5f3368a..8306426 100644
45392--- a/fs/fuse/dev.c
45393+++ b/fs/fuse/dev.c
45394@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45395 ret = 0;
45396 pipe_lock(pipe);
45397
45398- if (!pipe->readers) {
45399+ if (!atomic_read(&pipe->readers)) {
45400 send_sig(SIGPIPE, current, 0);
45401 if (!ret)
45402 ret = -EPIPE;
45403diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45404index 2066328..f5add3b 100644
45405--- a/fs/fuse/dir.c
45406+++ b/fs/fuse/dir.c
45407@@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
45408 return link;
45409 }
45410
45411-static void free_link(char *link)
45412+static void free_link(const char *link)
45413 {
45414 if (!IS_ERR(link))
45415 free_page((unsigned long) link);
45416diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45417index 5698746..6086012 100644
45418--- a/fs/gfs2/inode.c
45419+++ b/fs/gfs2/inode.c
45420@@ -1487,7 +1487,7 @@ out:
45421
45422 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45423 {
45424- char *s = nd_get_link(nd);
45425+ const char *s = nd_get_link(nd);
45426 if (!IS_ERR(s))
45427 kfree(s);
45428 }
45429diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45430index 1e85a7a..eb4218a 100644
45431--- a/fs/hugetlbfs/inode.c
45432+++ b/fs/hugetlbfs/inode.c
45433@@ -921,7 +921,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45434 .kill_sb = kill_litter_super,
45435 };
45436
45437-static struct vfsmount *hugetlbfs_vfsmount;
45438+struct vfsmount *hugetlbfs_vfsmount;
45439
45440 static int can_do_hugetlb_shm(void)
45441 {
45442diff --git a/fs/inode.c b/fs/inode.c
45443index 83ab215..8842101 100644
45444--- a/fs/inode.c
45445+++ b/fs/inode.c
45446@@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
45447
45448 #ifdef CONFIG_SMP
45449 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45450- static atomic_t shared_last_ino;
45451- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45452+ static atomic_unchecked_t shared_last_ino;
45453+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45454
45455 res = next - LAST_INO_BATCH;
45456 }
45457diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45458index eafb8d3..f423d37 100644
45459--- a/fs/jffs2/erase.c
45460+++ b/fs/jffs2/erase.c
45461@@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45462 struct jffs2_unknown_node marker = {
45463 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45464 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45465- .totlen = cpu_to_je32(c->cleanmarker_size)
45466+ .totlen = cpu_to_je32(c->cleanmarker_size),
45467+ .hdr_crc = cpu_to_je32(0)
45468 };
45469
45470 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45471diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45472index 30e8f47..21f600c 100644
45473--- a/fs/jffs2/wbuf.c
45474+++ b/fs/jffs2/wbuf.c
45475@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45476 {
45477 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45478 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45479- .totlen = constant_cpu_to_je32(8)
45480+ .totlen = constant_cpu_to_je32(8),
45481+ .hdr_crc = constant_cpu_to_je32(0)
45482 };
45483
45484 /*
45485diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45486index 682bca6..86b8e6e 100644
45487--- a/fs/jfs/super.c
45488+++ b/fs/jfs/super.c
45489@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
45490
45491 jfs_inode_cachep =
45492 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45493- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45494+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45495 init_once);
45496 if (jfs_inode_cachep == NULL)
45497 return -ENOMEM;
45498diff --git a/fs/libfs.c b/fs/libfs.c
45499index 5b2dbb3..7442d54 100644
45500--- a/fs/libfs.c
45501+++ b/fs/libfs.c
45502@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45503
45504 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45505 struct dentry *next;
45506+ char d_name[sizeof(next->d_iname)];
45507+ const unsigned char *name;
45508+
45509 next = list_entry(p, struct dentry, d_u.d_child);
45510 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45511 if (!simple_positive(next)) {
45512@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45513
45514 spin_unlock(&next->d_lock);
45515 spin_unlock(&dentry->d_lock);
45516- if (filldir(dirent, next->d_name.name,
45517+ name = next->d_name.name;
45518+ if (name == next->d_iname) {
45519+ memcpy(d_name, name, next->d_name.len);
45520+ name = d_name;
45521+ }
45522+ if (filldir(dirent, name,
45523 next->d_name.len, filp->f_pos,
45524 next->d_inode->i_ino,
45525 dt_type(next->d_inode)) < 0)
45526diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
45527index 8392cb8..80d6193 100644
45528--- a/fs/lockd/clntproc.c
45529+++ b/fs/lockd/clntproc.c
45530@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
45531 /*
45532 * Cookie counter for NLM requests
45533 */
45534-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45535+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45536
45537 void nlmclnt_next_cookie(struct nlm_cookie *c)
45538 {
45539- u32 cookie = atomic_inc_return(&nlm_cookie);
45540+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45541
45542 memcpy(c->data, &cookie, 4);
45543 c->len=4;
45544diff --git a/fs/locks.c b/fs/locks.c
45545index 637694b..f84a121 100644
45546--- a/fs/locks.c
45547+++ b/fs/locks.c
45548@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
45549 return;
45550
45551 if (filp->f_op && filp->f_op->flock) {
45552- struct file_lock fl = {
45553+ struct file_lock flock = {
45554 .fl_pid = current->tgid,
45555 .fl_file = filp,
45556 .fl_flags = FL_FLOCK,
45557 .fl_type = F_UNLCK,
45558 .fl_end = OFFSET_MAX,
45559 };
45560- filp->f_op->flock(filp, F_SETLKW, &fl);
45561- if (fl.fl_ops && fl.fl_ops->fl_release_private)
45562- fl.fl_ops->fl_release_private(&fl);
45563+ filp->f_op->flock(filp, F_SETLKW, &flock);
45564+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
45565+ flock.fl_ops->fl_release_private(&flock);
45566 }
45567
45568 lock_flocks();
45569diff --git a/fs/namei.c b/fs/namei.c
45570index 46ea9cc..c7cf3a3 100644
45571--- a/fs/namei.c
45572+++ b/fs/namei.c
45573@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
45574 if (ret != -EACCES)
45575 return ret;
45576
45577+#ifdef CONFIG_GRKERNSEC
45578+ /* we'll block if we have to log due to a denied capability use */
45579+ if (mask & MAY_NOT_BLOCK)
45580+ return -ECHILD;
45581+#endif
45582+
45583 if (S_ISDIR(inode->i_mode)) {
45584 /* DACs are overridable for directories */
45585- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45586- return 0;
45587 if (!(mask & MAY_WRITE))
45588- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45589+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45590+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45591 return 0;
45592+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45593+ return 0;
45594 return -EACCES;
45595 }
45596 /*
45597+ * Searching includes executable on directories, else just read.
45598+ */
45599+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45600+ if (mask == MAY_READ)
45601+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45602+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45603+ return 0;
45604+
45605+ /*
45606 * Read/write DACs are always overridable.
45607 * Executable DACs are overridable when there is
45608 * at least one exec bit set.
45609@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
45610 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45611 return 0;
45612
45613- /*
45614- * Searching includes executable on directories, else just read.
45615- */
45616- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45617- if (mask == MAY_READ)
45618- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45619- return 0;
45620-
45621 return -EACCES;
45622 }
45623
45624@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
45625 return error;
45626 }
45627
45628+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
45629+ dentry->d_inode, dentry, nd->path.mnt)) {
45630+ error = -EACCES;
45631+ *p = ERR_PTR(error); /* no ->put_link(), please */
45632+ path_put(&nd->path);
45633+ return error;
45634+ }
45635+
45636 nd->last_type = LAST_BIND;
45637 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45638 error = PTR_ERR(*p);
45639 if (!IS_ERR(*p)) {
45640- char *s = nd_get_link(nd);
45641+ const char *s = nd_get_link(nd);
45642 error = 0;
45643 if (s)
45644 error = __vfs_follow_link(nd, s);
45645@@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
45646 if (!err)
45647 err = complete_walk(nd);
45648
45649+ if (!(nd->flags & LOOKUP_PARENT)) {
45650+#ifdef CONFIG_GRKERNSEC
45651+ if (flags & LOOKUP_RCU) {
45652+ if (!err)
45653+ path_put(&nd->path);
45654+ err = -ECHILD;
45655+ } else
45656+#endif
45657+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45658+ if (!err)
45659+ path_put(&nd->path);
45660+ err = -ENOENT;
45661+ }
45662+ }
45663+
45664 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45665 if (!nd->inode->i_op->lookup) {
45666 path_put(&nd->path);
45667@@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
45668 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45669
45670 if (likely(!retval)) {
45671+ if (*name != '/' && nd->path.dentry && nd->inode) {
45672+#ifdef CONFIG_GRKERNSEC
45673+ if (flags & LOOKUP_RCU)
45674+ return -ECHILD;
45675+#endif
45676+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45677+ return -ENOENT;
45678+ }
45679+
45680 if (unlikely(!audit_dummy_context())) {
45681 if (nd->path.dentry && nd->inode)
45682 audit_inode(name, nd->path.dentry);
45683@@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
45684 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
45685 return -EPERM;
45686
45687+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
45688+ return -EPERM;
45689+ if (gr_handle_rawio(inode))
45690+ return -EPERM;
45691+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
45692+ return -EACCES;
45693+
45694 return 0;
45695 }
45696
45697@@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45698 error = complete_walk(nd);
45699 if (error)
45700 return ERR_PTR(error);
45701+#ifdef CONFIG_GRKERNSEC
45702+ if (nd->flags & LOOKUP_RCU) {
45703+ error = -ECHILD;
45704+ goto exit;
45705+ }
45706+#endif
45707+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45708+ error = -ENOENT;
45709+ goto exit;
45710+ }
45711 audit_inode(pathname, nd->path.dentry);
45712 if (open_flag & O_CREAT) {
45713 error = -EISDIR;
45714@@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45715 error = complete_walk(nd);
45716 if (error)
45717 return ERR_PTR(error);
45718+#ifdef CONFIG_GRKERNSEC
45719+ if (nd->flags & LOOKUP_RCU) {
45720+ error = -ECHILD;
45721+ goto exit;
45722+ }
45723+#endif
45724+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
45725+ error = -ENOENT;
45726+ goto exit;
45727+ }
45728 audit_inode(pathname, dir);
45729 goto ok;
45730 }
45731@@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45732 error = complete_walk(nd);
45733 if (error)
45734 return ERR_PTR(error);
45735+#ifdef CONFIG_GRKERNSEC
45736+ if (nd->flags & LOOKUP_RCU) {
45737+ error = -ECHILD;
45738+ goto exit;
45739+ }
45740+#endif
45741+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45742+ error = -ENOENT;
45743+ goto exit;
45744+ }
45745
45746 error = -ENOTDIR;
45747 if (nd->flags & LOOKUP_DIRECTORY) {
45748@@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45749 /* Negative dentry, just create the file */
45750 if (!dentry->d_inode) {
45751 umode_t mode = op->mode;
45752+
45753+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
45754+ error = -EACCES;
45755+ goto exit_mutex_unlock;
45756+ }
45757+
45758 if (!IS_POSIXACL(dir->d_inode))
45759 mode &= ~current_umask();
45760 /*
45761@@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45762 error = vfs_create(dir->d_inode, dentry, mode, nd);
45763 if (error)
45764 goto exit_mutex_unlock;
45765+ else
45766+ gr_handle_create(path->dentry, path->mnt);
45767 mutex_unlock(&dir->d_inode->i_mutex);
45768 dput(nd->path.dentry);
45769 nd->path.dentry = dentry;
45770@@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45771 /*
45772 * It already exists.
45773 */
45774+
45775+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
45776+ error = -ENOENT;
45777+ goto exit_mutex_unlock;
45778+ }
45779+
45780+ /* only check if O_CREAT is specified, all other checks need to go
45781+ into may_open */
45782+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
45783+ error = -EACCES;
45784+ goto exit_mutex_unlock;
45785+ }
45786+
45787 mutex_unlock(&dir->d_inode->i_mutex);
45788 audit_inode(pathname, path->dentry);
45789
45790@@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
45791 *path = nd.path;
45792 return dentry;
45793 eexist:
45794+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
45795+ dput(dentry);
45796+ dentry = ERR_PTR(-ENOENT);
45797+ goto fail;
45798+ }
45799 dput(dentry);
45800 dentry = ERR_PTR(-EEXIST);
45801 fail:
45802@@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
45803 }
45804 EXPORT_SYMBOL(user_path_create);
45805
45806+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
45807+{
45808+ char *tmp = getname(pathname);
45809+ struct dentry *res;
45810+ if (IS_ERR(tmp))
45811+ return ERR_CAST(tmp);
45812+ res = kern_path_create(dfd, tmp, path, is_dir);
45813+ if (IS_ERR(res))
45814+ putname(tmp);
45815+ else
45816+ *to = tmp;
45817+ return res;
45818+}
45819+
45820 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
45821 {
45822 int error = may_create(dir, dentry);
45823@@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45824 error = mnt_want_write(path.mnt);
45825 if (error)
45826 goto out_dput;
45827+
45828+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
45829+ error = -EPERM;
45830+ goto out_drop_write;
45831+ }
45832+
45833+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
45834+ error = -EACCES;
45835+ goto out_drop_write;
45836+ }
45837+
45838 error = security_path_mknod(&path, dentry, mode, dev);
45839 if (error)
45840 goto out_drop_write;
45841@@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45842 }
45843 out_drop_write:
45844 mnt_drop_write(path.mnt);
45845+
45846+ if (!error)
45847+ gr_handle_create(dentry, path.mnt);
45848 out_dput:
45849 dput(dentry);
45850 mutex_unlock(&path.dentry->d_inode->i_mutex);
45851@@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
45852 error = mnt_want_write(path.mnt);
45853 if (error)
45854 goto out_dput;
45855+
45856+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
45857+ error = -EACCES;
45858+ goto out_drop_write;
45859+ }
45860+
45861 error = security_path_mkdir(&path, dentry, mode);
45862 if (error)
45863 goto out_drop_write;
45864 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
45865 out_drop_write:
45866 mnt_drop_write(path.mnt);
45867+
45868+ if (!error)
45869+ gr_handle_create(dentry, path.mnt);
45870 out_dput:
45871 dput(dentry);
45872 mutex_unlock(&path.dentry->d_inode->i_mutex);
45873@@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45874 char * name;
45875 struct dentry *dentry;
45876 struct nameidata nd;
45877+ ino_t saved_ino = 0;
45878+ dev_t saved_dev = 0;
45879
45880 error = user_path_parent(dfd, pathname, &nd, &name);
45881 if (error)
45882@@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
45883 error = -ENOENT;
45884 goto exit3;
45885 }
45886+
45887+ saved_ino = dentry->d_inode->i_ino;
45888+ saved_dev = gr_get_dev_from_dentry(dentry);
45889+
45890+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
45891+ error = -EACCES;
45892+ goto exit3;
45893+ }
45894+
45895 error = mnt_want_write(nd.path.mnt);
45896 if (error)
45897 goto exit3;
45898@@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45899 if (error)
45900 goto exit4;
45901 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
45902+ if (!error && (saved_dev || saved_ino))
45903+ gr_handle_delete(saved_ino, saved_dev);
45904 exit4:
45905 mnt_drop_write(nd.path.mnt);
45906 exit3:
45907@@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45908 struct dentry *dentry;
45909 struct nameidata nd;
45910 struct inode *inode = NULL;
45911+ ino_t saved_ino = 0;
45912+ dev_t saved_dev = 0;
45913
45914 error = user_path_parent(dfd, pathname, &nd, &name);
45915 if (error)
45916@@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45917 if (!inode)
45918 goto slashes;
45919 ihold(inode);
45920+
45921+ if (inode->i_nlink <= 1) {
45922+ saved_ino = inode->i_ino;
45923+ saved_dev = gr_get_dev_from_dentry(dentry);
45924+ }
45925+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
45926+ error = -EACCES;
45927+ goto exit2;
45928+ }
45929+
45930 error = mnt_want_write(nd.path.mnt);
45931 if (error)
45932 goto exit2;
45933@@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45934 if (error)
45935 goto exit3;
45936 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
45937+ if (!error && (saved_ino || saved_dev))
45938+ gr_handle_delete(saved_ino, saved_dev);
45939 exit3:
45940 mnt_drop_write(nd.path.mnt);
45941 exit2:
45942@@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
45943 error = mnt_want_write(path.mnt);
45944 if (error)
45945 goto out_dput;
45946+
45947+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
45948+ error = -EACCES;
45949+ goto out_drop_write;
45950+ }
45951+
45952 error = security_path_symlink(&path, dentry, from);
45953 if (error)
45954 goto out_drop_write;
45955 error = vfs_symlink(path.dentry->d_inode, dentry, from);
45956+ if (!error)
45957+ gr_handle_create(dentry, path.mnt);
45958 out_drop_write:
45959 mnt_drop_write(path.mnt);
45960 out_dput:
45961@@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
45962 {
45963 struct dentry *new_dentry;
45964 struct path old_path, new_path;
45965+ char *to = NULL;
45966 int how = 0;
45967 int error;
45968
45969@@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
45970 if (error)
45971 return error;
45972
45973- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
45974+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
45975 error = PTR_ERR(new_dentry);
45976 if (IS_ERR(new_dentry))
45977 goto out;
45978@@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
45979 error = mnt_want_write(new_path.mnt);
45980 if (error)
45981 goto out_dput;
45982+
45983+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
45984+ old_path.dentry->d_inode,
45985+ old_path.dentry->d_inode->i_mode, to)) {
45986+ error = -EACCES;
45987+ goto out_drop_write;
45988+ }
45989+
45990+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
45991+ old_path.dentry, old_path.mnt, to)) {
45992+ error = -EACCES;
45993+ goto out_drop_write;
45994+ }
45995+
45996 error = security_path_link(old_path.dentry, &new_path, new_dentry);
45997 if (error)
45998 goto out_drop_write;
45999 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46000+ if (!error)
46001+ gr_handle_create(new_dentry, new_path.mnt);
46002 out_drop_write:
46003 mnt_drop_write(new_path.mnt);
46004 out_dput:
46005+ putname(to);
46006 dput(new_dentry);
46007 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46008 path_put(&new_path);
46009@@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46010 if (new_dentry == trap)
46011 goto exit5;
46012
46013+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46014+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
46015+ to);
46016+ if (error)
46017+ goto exit5;
46018+
46019 error = mnt_want_write(oldnd.path.mnt);
46020 if (error)
46021 goto exit5;
46022@@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46023 goto exit6;
46024 error = vfs_rename(old_dir->d_inode, old_dentry,
46025 new_dir->d_inode, new_dentry);
46026+ if (!error)
46027+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46028+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46029 exit6:
46030 mnt_drop_write(oldnd.path.mnt);
46031 exit5:
46032@@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46033
46034 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46035 {
46036+ char tmpbuf[64];
46037+ const char *newlink;
46038 int len;
46039
46040 len = PTR_ERR(link);
46041@@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46042 len = strlen(link);
46043 if (len > (unsigned) buflen)
46044 len = buflen;
46045- if (copy_to_user(buffer, link, len))
46046+
46047+ if (len < sizeof(tmpbuf)) {
46048+ memcpy(tmpbuf, link, len);
46049+ newlink = tmpbuf;
46050+ } else
46051+ newlink = link;
46052+
46053+ if (copy_to_user(buffer, newlink, len))
46054 len = -EFAULT;
46055 out:
46056 return len;
46057diff --git a/fs/namespace.c b/fs/namespace.c
46058index e608199..9609cb9 100644
46059--- a/fs/namespace.c
46060+++ b/fs/namespace.c
46061@@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
46062 if (!(sb->s_flags & MS_RDONLY))
46063 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46064 up_write(&sb->s_umount);
46065+
46066+ gr_log_remount(mnt->mnt_devname, retval);
46067+
46068 return retval;
46069 }
46070
46071@@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
46072 br_write_unlock(vfsmount_lock);
46073 up_write(&namespace_sem);
46074 release_mounts(&umount_list);
46075+
46076+ gr_log_unmount(mnt->mnt_devname, retval);
46077+
46078 return retval;
46079 }
46080
46081@@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46082 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46083 MS_STRICTATIME);
46084
46085+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46086+ retval = -EPERM;
46087+ goto dput_out;
46088+ }
46089+
46090+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46091+ retval = -EPERM;
46092+ goto dput_out;
46093+ }
46094+
46095 if (flags & MS_REMOUNT)
46096 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46097 data_page);
46098@@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46099 dev_name, data_page);
46100 dput_out:
46101 path_put(&path);
46102+
46103+ gr_log_mount(dev_name, dir_name, retval);
46104+
46105 return retval;
46106 }
46107
46108@@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46109 if (error)
46110 goto out2;
46111
46112+ if (gr_handle_chroot_pivot()) {
46113+ error = -EPERM;
46114+ goto out2;
46115+ }
46116+
46117 get_fs_root(current->fs, &root);
46118 error = lock_mount(&old);
46119 if (error)
46120diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46121index f649fba..236bf92 100644
46122--- a/fs/nfs/inode.c
46123+++ b/fs/nfs/inode.c
46124@@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46125 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46126 nfsi->attrtimeo_timestamp = jiffies;
46127
46128- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46129+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46130 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46131 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46132 else
46133@@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46134 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46135 }
46136
46137-static atomic_long_t nfs_attr_generation_counter;
46138+static atomic_long_unchecked_t nfs_attr_generation_counter;
46139
46140 static unsigned long nfs_read_attr_generation_counter(void)
46141 {
46142- return atomic_long_read(&nfs_attr_generation_counter);
46143+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46144 }
46145
46146 unsigned long nfs_inc_attr_generation_counter(void)
46147 {
46148- return atomic_long_inc_return(&nfs_attr_generation_counter);
46149+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46150 }
46151
46152 void nfs_fattr_init(struct nfs_fattr *fattr)
46153diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46154index edf6d3e..bdd1da7 100644
46155--- a/fs/nfsd/vfs.c
46156+++ b/fs/nfsd/vfs.c
46157@@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46158 } else {
46159 oldfs = get_fs();
46160 set_fs(KERNEL_DS);
46161- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46162+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46163 set_fs(oldfs);
46164 }
46165
46166@@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46167
46168 /* Write the data. */
46169 oldfs = get_fs(); set_fs(KERNEL_DS);
46170- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46171+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46172 set_fs(oldfs);
46173 if (host_err < 0)
46174 goto out_nfserr;
46175@@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46176 */
46177
46178 oldfs = get_fs(); set_fs(KERNEL_DS);
46179- host_err = inode->i_op->readlink(dentry, buf, *lenp);
46180+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
46181 set_fs(oldfs);
46182
46183 if (host_err < 0)
46184diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46185index 3568c8a..e0240d8 100644
46186--- a/fs/notify/fanotify/fanotify_user.c
46187+++ b/fs/notify/fanotify/fanotify_user.c
46188@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46189 goto out_close_fd;
46190
46191 ret = -EFAULT;
46192- if (copy_to_user(buf, &fanotify_event_metadata,
46193+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46194+ copy_to_user(buf, &fanotify_event_metadata,
46195 fanotify_event_metadata.event_len))
46196 goto out_kill_access_response;
46197
46198diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46199index ee18815..7aa5d01 100644
46200--- a/fs/notify/notification.c
46201+++ b/fs/notify/notification.c
46202@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46203 * get set to 0 so it will never get 'freed'
46204 */
46205 static struct fsnotify_event *q_overflow_event;
46206-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46207+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46208
46209 /**
46210 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46211@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46212 */
46213 u32 fsnotify_get_cookie(void)
46214 {
46215- return atomic_inc_return(&fsnotify_sync_cookie);
46216+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46217 }
46218 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46219
46220diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46221index 99e3610..02c1068 100644
46222--- a/fs/ntfs/dir.c
46223+++ b/fs/ntfs/dir.c
46224@@ -1329,7 +1329,7 @@ find_next_index_buffer:
46225 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46226 ~(s64)(ndir->itype.index.block_size - 1)));
46227 /* Bounds checks. */
46228- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46229+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46230 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46231 "inode 0x%lx or driver bug.", vdir->i_ino);
46232 goto err_out;
46233diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46234index c587e2d..3641eaa 100644
46235--- a/fs/ntfs/file.c
46236+++ b/fs/ntfs/file.c
46237@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46238 #endif /* NTFS_RW */
46239 };
46240
46241-const struct file_operations ntfs_empty_file_ops = {};
46242+const struct file_operations ntfs_empty_file_ops __read_only;
46243
46244-const struct inode_operations ntfs_empty_inode_ops = {};
46245+const struct inode_operations ntfs_empty_inode_ops __read_only;
46246diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46247index 210c352..a174f83 100644
46248--- a/fs/ocfs2/localalloc.c
46249+++ b/fs/ocfs2/localalloc.c
46250@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46251 goto bail;
46252 }
46253
46254- atomic_inc(&osb->alloc_stats.moves);
46255+ atomic_inc_unchecked(&osb->alloc_stats.moves);
46256
46257 bail:
46258 if (handle)
46259diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46260index d355e6e..578d905 100644
46261--- a/fs/ocfs2/ocfs2.h
46262+++ b/fs/ocfs2/ocfs2.h
46263@@ -235,11 +235,11 @@ enum ocfs2_vol_state
46264
46265 struct ocfs2_alloc_stats
46266 {
46267- atomic_t moves;
46268- atomic_t local_data;
46269- atomic_t bitmap_data;
46270- atomic_t bg_allocs;
46271- atomic_t bg_extends;
46272+ atomic_unchecked_t moves;
46273+ atomic_unchecked_t local_data;
46274+ atomic_unchecked_t bitmap_data;
46275+ atomic_unchecked_t bg_allocs;
46276+ atomic_unchecked_t bg_extends;
46277 };
46278
46279 enum ocfs2_local_alloc_state
46280diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46281index ba5d97e..c77db25 100644
46282--- a/fs/ocfs2/suballoc.c
46283+++ b/fs/ocfs2/suballoc.c
46284@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46285 mlog_errno(status);
46286 goto bail;
46287 }
46288- atomic_inc(&osb->alloc_stats.bg_extends);
46289+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46290
46291 /* You should never ask for this much metadata */
46292 BUG_ON(bits_wanted >
46293@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46294 mlog_errno(status);
46295 goto bail;
46296 }
46297- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46298+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46299
46300 *suballoc_loc = res.sr_bg_blkno;
46301 *suballoc_bit_start = res.sr_bit_offset;
46302@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46303 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46304 res->sr_bits);
46305
46306- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46307+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46308
46309 BUG_ON(res->sr_bits != 1);
46310
46311@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46312 mlog_errno(status);
46313 goto bail;
46314 }
46315- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46316+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46317
46318 BUG_ON(res.sr_bits != 1);
46319
46320@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46321 cluster_start,
46322 num_clusters);
46323 if (!status)
46324- atomic_inc(&osb->alloc_stats.local_data);
46325+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
46326 } else {
46327 if (min_clusters > (osb->bitmap_cpg - 1)) {
46328 /* The only paths asking for contiguousness
46329@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46330 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46331 res.sr_bg_blkno,
46332 res.sr_bit_offset);
46333- atomic_inc(&osb->alloc_stats.bitmap_data);
46334+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46335 *num_clusters = res.sr_bits;
46336 }
46337 }
46338diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46339index 604e12c..8426483 100644
46340--- a/fs/ocfs2/super.c
46341+++ b/fs/ocfs2/super.c
46342@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46343 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46344 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46345 "Stats",
46346- atomic_read(&osb->alloc_stats.bitmap_data),
46347- atomic_read(&osb->alloc_stats.local_data),
46348- atomic_read(&osb->alloc_stats.bg_allocs),
46349- atomic_read(&osb->alloc_stats.moves),
46350- atomic_read(&osb->alloc_stats.bg_extends));
46351+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46352+ atomic_read_unchecked(&osb->alloc_stats.local_data),
46353+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46354+ atomic_read_unchecked(&osb->alloc_stats.moves),
46355+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46356
46357 out += snprintf(buf + out, len - out,
46358 "%10s => State: %u Descriptor: %llu Size: %u bits "
46359@@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46360 spin_lock_init(&osb->osb_xattr_lock);
46361 ocfs2_init_steal_slots(osb);
46362
46363- atomic_set(&osb->alloc_stats.moves, 0);
46364- atomic_set(&osb->alloc_stats.local_data, 0);
46365- atomic_set(&osb->alloc_stats.bitmap_data, 0);
46366- atomic_set(&osb->alloc_stats.bg_allocs, 0);
46367- atomic_set(&osb->alloc_stats.bg_extends, 0);
46368+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46369+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46370+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46371+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46372+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46373
46374 /* Copy the blockcheck stats from the superblock probe */
46375 osb->osb_ecc_stats = *stats;
46376diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46377index 5d22872..523db20 100644
46378--- a/fs/ocfs2/symlink.c
46379+++ b/fs/ocfs2/symlink.c
46380@@ -142,7 +142,7 @@ bail:
46381
46382 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46383 {
46384- char *link = nd_get_link(nd);
46385+ const char *link = nd_get_link(nd);
46386 if (!IS_ERR(link))
46387 kfree(link);
46388 }
46389diff --git a/fs/open.c b/fs/open.c
46390index 77becc0..aad7bd9 100644
46391--- a/fs/open.c
46392+++ b/fs/open.c
46393@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46394 error = locks_verify_truncate(inode, NULL, length);
46395 if (!error)
46396 error = security_path_truncate(&path);
46397+
46398+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46399+ error = -EACCES;
46400+
46401 if (!error)
46402 error = do_truncate(path.dentry, length, 0, NULL);
46403
46404@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46405 if (__mnt_is_readonly(path.mnt))
46406 res = -EROFS;
46407
46408+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46409+ res = -EACCES;
46410+
46411 out_path_release:
46412 path_put(&path);
46413 out:
46414@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46415 if (error)
46416 goto dput_and_out;
46417
46418+ gr_log_chdir(path.dentry, path.mnt);
46419+
46420 set_fs_pwd(current->fs, &path);
46421
46422 dput_and_out:
46423@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46424 goto out_putf;
46425
46426 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46427+
46428+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46429+ error = -EPERM;
46430+
46431+ if (!error)
46432+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46433+
46434 if (!error)
46435 set_fs_pwd(current->fs, &file->f_path);
46436 out_putf:
46437@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46438 if (error)
46439 goto dput_and_out;
46440
46441+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46442+ goto dput_and_out;
46443+
46444 set_fs_root(current->fs, &path);
46445+
46446+ gr_handle_chroot_chdir(&path);
46447+
46448 error = 0;
46449 dput_and_out:
46450 path_put(&path);
46451@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46452 if (error)
46453 return error;
46454 mutex_lock(&inode->i_mutex);
46455+
46456+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46457+ error = -EACCES;
46458+ goto out_unlock;
46459+ }
46460+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46461+ error = -EACCES;
46462+ goto out_unlock;
46463+ }
46464+
46465 error = security_path_chmod(path, mode);
46466 if (error)
46467 goto out_unlock;
46468@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46469 int error;
46470 struct iattr newattrs;
46471
46472+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
46473+ return -EACCES;
46474+
46475 newattrs.ia_valid = ATTR_CTIME;
46476 if (user != (uid_t) -1) {
46477 newattrs.ia_valid |= ATTR_UID;
46478diff --git a/fs/pipe.c b/fs/pipe.c
46479index a932ced..6495412 100644
46480--- a/fs/pipe.c
46481+++ b/fs/pipe.c
46482@@ -420,9 +420,9 @@ redo:
46483 }
46484 if (bufs) /* More to do? */
46485 continue;
46486- if (!pipe->writers)
46487+ if (!atomic_read(&pipe->writers))
46488 break;
46489- if (!pipe->waiting_writers) {
46490+ if (!atomic_read(&pipe->waiting_writers)) {
46491 /* syscall merging: Usually we must not sleep
46492 * if O_NONBLOCK is set, or if we got some data.
46493 * But if a writer sleeps in kernel space, then
46494@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46495 mutex_lock(&inode->i_mutex);
46496 pipe = inode->i_pipe;
46497
46498- if (!pipe->readers) {
46499+ if (!atomic_read(&pipe->readers)) {
46500 send_sig(SIGPIPE, current, 0);
46501 ret = -EPIPE;
46502 goto out;
46503@@ -530,7 +530,7 @@ redo1:
46504 for (;;) {
46505 int bufs;
46506
46507- if (!pipe->readers) {
46508+ if (!atomic_read(&pipe->readers)) {
46509 send_sig(SIGPIPE, current, 0);
46510 if (!ret)
46511 ret = -EPIPE;
46512@@ -616,9 +616,9 @@ redo2:
46513 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46514 do_wakeup = 0;
46515 }
46516- pipe->waiting_writers++;
46517+ atomic_inc(&pipe->waiting_writers);
46518 pipe_wait(pipe);
46519- pipe->waiting_writers--;
46520+ atomic_dec(&pipe->waiting_writers);
46521 }
46522 out:
46523 mutex_unlock(&inode->i_mutex);
46524@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46525 mask = 0;
46526 if (filp->f_mode & FMODE_READ) {
46527 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46528- if (!pipe->writers && filp->f_version != pipe->w_counter)
46529+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46530 mask |= POLLHUP;
46531 }
46532
46533@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46534 * Most Unices do not set POLLERR for FIFOs but on Linux they
46535 * behave exactly like pipes for poll().
46536 */
46537- if (!pipe->readers)
46538+ if (!atomic_read(&pipe->readers))
46539 mask |= POLLERR;
46540 }
46541
46542@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
46543
46544 mutex_lock(&inode->i_mutex);
46545 pipe = inode->i_pipe;
46546- pipe->readers -= decr;
46547- pipe->writers -= decw;
46548+ atomic_sub(decr, &pipe->readers);
46549+ atomic_sub(decw, &pipe->writers);
46550
46551- if (!pipe->readers && !pipe->writers) {
46552+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46553 free_pipe_info(inode);
46554 } else {
46555 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46556@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
46557
46558 if (inode->i_pipe) {
46559 ret = 0;
46560- inode->i_pipe->readers++;
46561+ atomic_inc(&inode->i_pipe->readers);
46562 }
46563
46564 mutex_unlock(&inode->i_mutex);
46565@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
46566
46567 if (inode->i_pipe) {
46568 ret = 0;
46569- inode->i_pipe->writers++;
46570+ atomic_inc(&inode->i_pipe->writers);
46571 }
46572
46573 mutex_unlock(&inode->i_mutex);
46574@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
46575 if (inode->i_pipe) {
46576 ret = 0;
46577 if (filp->f_mode & FMODE_READ)
46578- inode->i_pipe->readers++;
46579+ atomic_inc(&inode->i_pipe->readers);
46580 if (filp->f_mode & FMODE_WRITE)
46581- inode->i_pipe->writers++;
46582+ atomic_inc(&inode->i_pipe->writers);
46583 }
46584
46585 mutex_unlock(&inode->i_mutex);
46586@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46587 inode->i_pipe = NULL;
46588 }
46589
46590-static struct vfsmount *pipe_mnt __read_mostly;
46591+struct vfsmount *pipe_mnt __read_mostly;
46592
46593 /*
46594 * pipefs_dname() is called from d_path().
46595@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
46596 goto fail_iput;
46597 inode->i_pipe = pipe;
46598
46599- pipe->readers = pipe->writers = 1;
46600+ atomic_set(&pipe->readers, 1);
46601+ atomic_set(&pipe->writers, 1);
46602 inode->i_fop = &rdwr_pipefifo_fops;
46603
46604 /*
46605diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
46606index 15af622..0e9f4467 100644
46607--- a/fs/proc/Kconfig
46608+++ b/fs/proc/Kconfig
46609@@ -30,12 +30,12 @@ config PROC_FS
46610
46611 config PROC_KCORE
46612 bool "/proc/kcore support" if !ARM
46613- depends on PROC_FS && MMU
46614+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46615
46616 config PROC_VMCORE
46617 bool "/proc/vmcore support"
46618- depends on PROC_FS && CRASH_DUMP
46619- default y
46620+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46621+ default n
46622 help
46623 Exports the dump image of crashed kernel in ELF format.
46624
46625@@ -59,8 +59,8 @@ config PROC_SYSCTL
46626 limited in memory.
46627
46628 config PROC_PAGE_MONITOR
46629- default y
46630- depends on PROC_FS && MMU
46631+ default n
46632+ depends on PROC_FS && MMU && !GRKERNSEC
46633 bool "Enable /proc page monitoring" if EXPERT
46634 help
46635 Various /proc files exist to monitor process memory utilization:
46636diff --git a/fs/proc/array.c b/fs/proc/array.c
46637index c602b8d..a7de642 100644
46638--- a/fs/proc/array.c
46639+++ b/fs/proc/array.c
46640@@ -60,6 +60,7 @@
46641 #include <linux/tty.h>
46642 #include <linux/string.h>
46643 #include <linux/mman.h>
46644+#include <linux/grsecurity.h>
46645 #include <linux/proc_fs.h>
46646 #include <linux/ioport.h>
46647 #include <linux/uaccess.h>
46648@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
46649 seq_putc(m, '\n');
46650 }
46651
46652+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46653+static inline void task_pax(struct seq_file *m, struct task_struct *p)
46654+{
46655+ if (p->mm)
46656+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46657+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46658+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
46659+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
46660+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
46661+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
46662+ else
46663+ seq_printf(m, "PaX:\t-----\n");
46664+}
46665+#endif
46666+
46667 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46668 struct pid *pid, struct task_struct *task)
46669 {
46670@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46671 task_cpus_allowed(m, task);
46672 cpuset_task_status_allowed(m, task);
46673 task_context_switch_counts(m, task);
46674+
46675+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46676+ task_pax(m, task);
46677+#endif
46678+
46679+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
46680+ task_grsec_rbac(m, task);
46681+#endif
46682+
46683 return 0;
46684 }
46685
46686+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46687+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46688+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46689+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46690+#endif
46691+
46692 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46693 struct pid *pid, struct task_struct *task, int whole)
46694 {
46695@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46696 char tcomm[sizeof(task->comm)];
46697 unsigned long flags;
46698
46699+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46700+ if (current->exec_id != m->exec_id) {
46701+ gr_log_badprocpid("stat");
46702+ return 0;
46703+ }
46704+#endif
46705+
46706 state = *get_task_state(task);
46707 vsize = eip = esp = 0;
46708 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46709@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46710 gtime = task->gtime;
46711 }
46712
46713+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46714+ if (PAX_RAND_FLAGS(mm)) {
46715+ eip = 0;
46716+ esp = 0;
46717+ wchan = 0;
46718+ }
46719+#endif
46720+#ifdef CONFIG_GRKERNSEC_HIDESYM
46721+ wchan = 0;
46722+ eip =0;
46723+ esp =0;
46724+#endif
46725+
46726 /* scale priority and nice values from timeslices to -20..20 */
46727 /* to make it look like a "normal" Unix priority/nice value */
46728 priority = task_prio(task);
46729@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46730 vsize,
46731 mm ? get_mm_rss(mm) : 0,
46732 rsslim,
46733+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46734+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
46735+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
46736+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
46737+#else
46738 mm ? (permitted ? mm->start_code : 1) : 0,
46739 mm ? (permitted ? mm->end_code : 1) : 0,
46740 (permitted && mm) ? mm->start_stack : 0,
46741+#endif
46742 esp,
46743 eip,
46744 /* The signal information here is obsolete.
46745@@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46746 struct pid *pid, struct task_struct *task)
46747 {
46748 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
46749- struct mm_struct *mm = get_task_mm(task);
46750+ struct mm_struct *mm;
46751
46752+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46753+ if (current->exec_id != m->exec_id) {
46754+ gr_log_badprocpid("statm");
46755+ return 0;
46756+ }
46757+#endif
46758+ mm = get_task_mm(task);
46759 if (mm) {
46760 size = task_statm(mm, &shared, &text, &data, &resident);
46761 mmput(mm);
46762@@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46763
46764 return 0;
46765 }
46766+
46767+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46768+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
46769+{
46770+ u32 curr_ip = 0;
46771+ unsigned long flags;
46772+
46773+ if (lock_task_sighand(task, &flags)) {
46774+ curr_ip = task->signal->curr_ip;
46775+ unlock_task_sighand(task, &flags);
46776+ }
46777+
46778+ return sprintf(buffer, "%pI4\n", &curr_ip);
46779+}
46780+#endif
46781diff --git a/fs/proc/base.c b/fs/proc/base.c
46782index d4548dd..d101f84 100644
46783--- a/fs/proc/base.c
46784+++ b/fs/proc/base.c
46785@@ -109,6 +109,14 @@ struct pid_entry {
46786 union proc_op op;
46787 };
46788
46789+struct getdents_callback {
46790+ struct linux_dirent __user * current_dir;
46791+ struct linux_dirent __user * previous;
46792+ struct file * file;
46793+ int count;
46794+ int error;
46795+};
46796+
46797 #define NOD(NAME, MODE, IOP, FOP, OP) { \
46798 .name = (NAME), \
46799 .len = sizeof(NAME) - 1, \
46800@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
46801 if (!mm->arg_end)
46802 goto out_mm; /* Shh! No looking before we're done */
46803
46804+ if (gr_acl_handle_procpidmem(task))
46805+ goto out_mm;
46806+
46807 len = mm->arg_end - mm->arg_start;
46808
46809 if (len > PAGE_SIZE)
46810@@ -240,12 +251,28 @@ out:
46811 return res;
46812 }
46813
46814+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46815+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46816+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46817+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46818+#endif
46819+
46820 static int proc_pid_auxv(struct task_struct *task, char *buffer)
46821 {
46822 struct mm_struct *mm = mm_for_maps(task);
46823 int res = PTR_ERR(mm);
46824 if (mm && !IS_ERR(mm)) {
46825 unsigned int nwords = 0;
46826+
46827+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46828+ /* allow if we're currently ptracing this task */
46829+ if (PAX_RAND_FLAGS(mm) &&
46830+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
46831+ mmput(mm);
46832+ return 0;
46833+ }
46834+#endif
46835+
46836 do {
46837 nwords += 2;
46838 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
46839@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
46840 }
46841
46842
46843-#ifdef CONFIG_KALLSYMS
46844+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46845 /*
46846 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
46847 * Returns the resolved symbol. If that fails, simply return the address.
46848@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
46849 mutex_unlock(&task->signal->cred_guard_mutex);
46850 }
46851
46852-#ifdef CONFIG_STACKTRACE
46853+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46854
46855 #define MAX_STACK_TRACE_DEPTH 64
46856
46857@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
46858 return count;
46859 }
46860
46861-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46862+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46863 static int proc_pid_syscall(struct task_struct *task, char *buffer)
46864 {
46865 long nr;
46866@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
46867 /************************************************************************/
46868
46869 /* permission checks */
46870-static int proc_fd_access_allowed(struct inode *inode)
46871+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
46872 {
46873 struct task_struct *task;
46874 int allowed = 0;
46875@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
46876 */
46877 task = get_proc_task(inode);
46878 if (task) {
46879- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46880+ if (log)
46881+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46882+ else
46883+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46884 put_task_struct(task);
46885 }
46886 return allowed;
46887@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
46888 struct task_struct *task,
46889 int hide_pid_min)
46890 {
46891+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46892+ return false;
46893+
46894+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46895+ rcu_read_lock();
46896+ {
46897+ const struct cred *tmpcred = current_cred();
46898+ const struct cred *cred = __task_cred(task);
46899+
46900+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
46901+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46902+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46903+#endif
46904+ ) {
46905+ rcu_read_unlock();
46906+ return true;
46907+ }
46908+ }
46909+ rcu_read_unlock();
46910+
46911+ if (!pid->hide_pid)
46912+ return false;
46913+#endif
46914+
46915 if (pid->hide_pid < hide_pid_min)
46916 return true;
46917 if (in_group_p(pid->pid_gid))
46918 return true;
46919+
46920 return ptrace_may_access(task, PTRACE_MODE_READ);
46921 }
46922
46923@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
46924 put_task_struct(task);
46925
46926 if (!has_perms) {
46927+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46928+ {
46929+#else
46930 if (pid->hide_pid == 2) {
46931+#endif
46932 /*
46933 * Let's make getdents(), stat(), and open()
46934 * consistent with each other. If a process
46935@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
46936 file->f_mode |= FMODE_UNSIGNED_OFFSET;
46937 file->private_data = mm;
46938
46939+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46940+ file->f_version = current->exec_id;
46941+#endif
46942+
46943 return 0;
46944 }
46945
46946@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
46947 ssize_t copied;
46948 char *page;
46949
46950+#ifdef CONFIG_GRKERNSEC
46951+ if (write)
46952+ return -EPERM;
46953+#endif
46954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46955+ if (file->f_version != current->exec_id) {
46956+ gr_log_badprocpid("mem");
46957+ return 0;
46958+ }
46959+#endif
46960+
46961 if (!mm)
46962 return 0;
46963
46964@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
46965 if (!task)
46966 goto out_no_task;
46967
46968+ if (gr_acl_handle_procpidmem(task))
46969+ goto out;
46970+
46971 ret = -ENOMEM;
46972 page = (char *)__get_free_page(GFP_TEMPORARY);
46973 if (!page)
46974@@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
46975 path_put(&nd->path);
46976
46977 /* Are we allowed to snoop on the tasks file descriptors? */
46978- if (!proc_fd_access_allowed(inode))
46979+ if (!proc_fd_access_allowed(inode, 0))
46980 goto out;
46981
46982 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
46983@@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
46984 struct path path;
46985
46986 /* Are we allowed to snoop on the tasks file descriptors? */
46987- if (!proc_fd_access_allowed(inode))
46988- goto out;
46989+ /* logging this is needed for learning on chromium to work properly,
46990+ but we don't want to flood the logs from 'ps' which does a readlink
46991+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
46992+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
46993+ */
46994+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
46995+ if (!proc_fd_access_allowed(inode,0))
46996+ goto out;
46997+ } else {
46998+ if (!proc_fd_access_allowed(inode,1))
46999+ goto out;
47000+ }
47001
47002 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47003 if (error)
47004@@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47005 rcu_read_lock();
47006 cred = __task_cred(task);
47007 inode->i_uid = cred->euid;
47008+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47009+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47010+#else
47011 inode->i_gid = cred->egid;
47012+#endif
47013 rcu_read_unlock();
47014 }
47015 security_task_to_inode(task, inode);
47016@@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47017 return -ENOENT;
47018 }
47019 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47020+#ifdef CONFIG_GRKERNSEC_PROC_USER
47021+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47022+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47023+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47024+#endif
47025 task_dumpable(task)) {
47026 cred = __task_cred(task);
47027 stat->uid = cred->euid;
47028+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47029+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47030+#else
47031 stat->gid = cred->egid;
47032+#endif
47033 }
47034 }
47035 rcu_read_unlock();
47036@@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47037
47038 if (task) {
47039 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47040+#ifdef CONFIG_GRKERNSEC_PROC_USER
47041+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47042+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47043+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47044+#endif
47045 task_dumpable(task)) {
47046 rcu_read_lock();
47047 cred = __task_cred(task);
47048 inode->i_uid = cred->euid;
47049+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47050+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47051+#else
47052 inode->i_gid = cred->egid;
47053+#endif
47054 rcu_read_unlock();
47055 } else {
47056 inode->i_uid = 0;
47057@@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47058 int fd = proc_fd(inode);
47059
47060 if (task) {
47061- files = get_files_struct(task);
47062+ if (!gr_acl_handle_procpidmem(task))
47063+ files = get_files_struct(task);
47064 put_task_struct(task);
47065 }
47066 if (files) {
47067@@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
47068 */
47069 static int proc_fd_permission(struct inode *inode, int mask)
47070 {
47071+ struct task_struct *task;
47072 int rv = generic_permission(inode, mask);
47073- if (rv == 0)
47074- return 0;
47075+
47076 if (task_pid(current) == proc_pid(inode))
47077 rv = 0;
47078+
47079+ task = get_proc_task(inode);
47080+ if (task == NULL)
47081+ return rv;
47082+
47083+ if (gr_acl_handle_procpidmem(task))
47084+ rv = -EACCES;
47085+
47086+ put_task_struct(task);
47087+
47088 return rv;
47089 }
47090
47091@@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47092 if (!task)
47093 goto out_no_task;
47094
47095+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47096+ goto out;
47097+
47098 /*
47099 * Yes, it does not scale. And it should not. Don't add
47100 * new entries into /proc/<tgid>/ without very good reasons.
47101@@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
47102 if (!task)
47103 goto out_no_task;
47104
47105+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47106+ goto out;
47107+
47108 ret = 0;
47109 i = filp->f_pos;
47110 switch (i) {
47111@@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47112 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47113 void *cookie)
47114 {
47115- char *s = nd_get_link(nd);
47116+ const char *s = nd_get_link(nd);
47117 if (!IS_ERR(s))
47118 __putname(s);
47119 }
47120@@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47121 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47122 #endif
47123 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47124-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47125+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47126 INF("syscall", S_IRUGO, proc_pid_syscall),
47127 #endif
47128 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47129@@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47130 #ifdef CONFIG_SECURITY
47131 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47132 #endif
47133-#ifdef CONFIG_KALLSYMS
47134+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47135 INF("wchan", S_IRUGO, proc_pid_wchan),
47136 #endif
47137-#ifdef CONFIG_STACKTRACE
47138+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47139 ONE("stack", S_IRUGO, proc_pid_stack),
47140 #endif
47141 #ifdef CONFIG_SCHEDSTATS
47142@@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47143 #ifdef CONFIG_HARDWALL
47144 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47145 #endif
47146+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47147+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47148+#endif
47149 };
47150
47151 static int proc_tgid_base_readdir(struct file * filp,
47152@@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47153 if (!inode)
47154 goto out;
47155
47156+#ifdef CONFIG_GRKERNSEC_PROC_USER
47157+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47158+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47159+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47160+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47161+#else
47162 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47163+#endif
47164 inode->i_op = &proc_tgid_base_inode_operations;
47165 inode->i_fop = &proc_tgid_base_operations;
47166 inode->i_flags|=S_IMMUTABLE;
47167@@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47168 if (!task)
47169 goto out;
47170
47171+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47172+ goto out_put_task;
47173+
47174 result = proc_pid_instantiate(dir, dentry, task, NULL);
47175+out_put_task:
47176 put_task_struct(task);
47177 out:
47178 return result;
47179@@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47180 static int fake_filldir(void *buf, const char *name, int namelen,
47181 loff_t offset, u64 ino, unsigned d_type)
47182 {
47183+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
47184+ __buf->error = -EINVAL;
47185 return 0;
47186 }
47187
47188@@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
47189 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47190 #endif
47191 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47192-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47193+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47194 INF("syscall", S_IRUGO, proc_pid_syscall),
47195 #endif
47196 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47197@@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
47198 #ifdef CONFIG_SECURITY
47199 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47200 #endif
47201-#ifdef CONFIG_KALLSYMS
47202+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47203 INF("wchan", S_IRUGO, proc_pid_wchan),
47204 #endif
47205-#ifdef CONFIG_STACKTRACE
47206+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47207 ONE("stack", S_IRUGO, proc_pid_stack),
47208 #endif
47209 #ifdef CONFIG_SCHEDSTATS
47210diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47211index 82676e3..5f8518a 100644
47212--- a/fs/proc/cmdline.c
47213+++ b/fs/proc/cmdline.c
47214@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47215
47216 static int __init proc_cmdline_init(void)
47217 {
47218+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47219+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47220+#else
47221 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47222+#endif
47223 return 0;
47224 }
47225 module_init(proc_cmdline_init);
47226diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47227index b143471..bb105e5 100644
47228--- a/fs/proc/devices.c
47229+++ b/fs/proc/devices.c
47230@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47231
47232 static int __init proc_devices_init(void)
47233 {
47234+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47235+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47236+#else
47237 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47238+#endif
47239 return 0;
47240 }
47241 module_init(proc_devices_init);
47242diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47243index 84fd323..f698a32 100644
47244--- a/fs/proc/inode.c
47245+++ b/fs/proc/inode.c
47246@@ -21,12 +21,18 @@
47247 #include <linux/seq_file.h>
47248 #include <linux/slab.h>
47249 #include <linux/mount.h>
47250+#include <linux/grsecurity.h>
47251
47252 #include <asm/system.h>
47253 #include <asm/uaccess.h>
47254
47255 #include "internal.h"
47256
47257+#ifdef CONFIG_PROC_SYSCTL
47258+extern const struct inode_operations proc_sys_inode_operations;
47259+extern const struct inode_operations proc_sys_dir_operations;
47260+#endif
47261+
47262 static void proc_evict_inode(struct inode *inode)
47263 {
47264 struct proc_dir_entry *de;
47265@@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
47266 ns_ops = PROC_I(inode)->ns_ops;
47267 if (ns_ops && ns_ops->put)
47268 ns_ops->put(PROC_I(inode)->ns);
47269+
47270+#ifdef CONFIG_PROC_SYSCTL
47271+ if (inode->i_op == &proc_sys_inode_operations ||
47272+ inode->i_op == &proc_sys_dir_operations)
47273+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47274+#endif
47275+
47276 }
47277
47278 static struct kmem_cache * proc_inode_cachep;
47279@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47280 if (de->mode) {
47281 inode->i_mode = de->mode;
47282 inode->i_uid = de->uid;
47283+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47284+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47285+#else
47286 inode->i_gid = de->gid;
47287+#endif
47288 }
47289 if (de->size)
47290 inode->i_size = de->size;
47291diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47292index 2925775..4f08fae 100644
47293--- a/fs/proc/internal.h
47294+++ b/fs/proc/internal.h
47295@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47296 struct pid *pid, struct task_struct *task);
47297 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47298 struct pid *pid, struct task_struct *task);
47299+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47300+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47301+#endif
47302 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47303
47304 extern const struct file_operations proc_maps_operations;
47305diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47306index d245cb2..f4e8498 100644
47307--- a/fs/proc/kcore.c
47308+++ b/fs/proc/kcore.c
47309@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47310 * the addresses in the elf_phdr on our list.
47311 */
47312 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47313- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47314+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47315+ if (tsz > buflen)
47316 tsz = buflen;
47317-
47318+
47319 while (buflen) {
47320 struct kcore_list *m;
47321
47322@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47323 kfree(elf_buf);
47324 } else {
47325 if (kern_addr_valid(start)) {
47326- unsigned long n;
47327+ char *elf_buf;
47328+ mm_segment_t oldfs;
47329
47330- n = copy_to_user(buffer, (char *)start, tsz);
47331- /*
47332- * We cannot distingush between fault on source
47333- * and fault on destination. When this happens
47334- * we clear too and hope it will trigger the
47335- * EFAULT again.
47336- */
47337- if (n) {
47338- if (clear_user(buffer + tsz - n,
47339- n))
47340+ elf_buf = kmalloc(tsz, GFP_KERNEL);
47341+ if (!elf_buf)
47342+ return -ENOMEM;
47343+ oldfs = get_fs();
47344+ set_fs(KERNEL_DS);
47345+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47346+ set_fs(oldfs);
47347+ if (copy_to_user(buffer, elf_buf, tsz)) {
47348+ kfree(elf_buf);
47349 return -EFAULT;
47350+ }
47351 }
47352+ set_fs(oldfs);
47353+ kfree(elf_buf);
47354 } else {
47355 if (clear_user(buffer, tsz))
47356 return -EFAULT;
47357@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47358
47359 static int open_kcore(struct inode *inode, struct file *filp)
47360 {
47361+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47362+ return -EPERM;
47363+#endif
47364 if (!capable(CAP_SYS_RAWIO))
47365 return -EPERM;
47366 if (kcore_need_update)
47367diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47368index 80e4645..53e5fcf 100644
47369--- a/fs/proc/meminfo.c
47370+++ b/fs/proc/meminfo.c
47371@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47372 vmi.used >> 10,
47373 vmi.largest_chunk >> 10
47374 #ifdef CONFIG_MEMORY_FAILURE
47375- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47376+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47377 #endif
47378 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47379 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47380diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47381index b1822dd..df622cb 100644
47382--- a/fs/proc/nommu.c
47383+++ b/fs/proc/nommu.c
47384@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47385 if (len < 1)
47386 len = 1;
47387 seq_printf(m, "%*c", len, ' ');
47388- seq_path(m, &file->f_path, "");
47389+ seq_path(m, &file->f_path, "\n\\");
47390 }
47391
47392 seq_putc(m, '\n');
47393diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47394index 06e1cc1..177cd98 100644
47395--- a/fs/proc/proc_net.c
47396+++ b/fs/proc/proc_net.c
47397@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47398 struct task_struct *task;
47399 struct nsproxy *ns;
47400 struct net *net = NULL;
47401+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47402+ const struct cred *cred = current_cred();
47403+#endif
47404+
47405+#ifdef CONFIG_GRKERNSEC_PROC_USER
47406+ if (cred->fsuid)
47407+ return net;
47408+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47409+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47410+ return net;
47411+#endif
47412
47413 rcu_read_lock();
47414 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47415diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47416index a6b6217..1e0579d 100644
47417--- a/fs/proc/proc_sysctl.c
47418+++ b/fs/proc/proc_sysctl.c
47419@@ -9,11 +9,13 @@
47420 #include <linux/namei.h>
47421 #include "internal.h"
47422
47423+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47424+
47425 static const struct dentry_operations proc_sys_dentry_operations;
47426 static const struct file_operations proc_sys_file_operations;
47427-static const struct inode_operations proc_sys_inode_operations;
47428+const struct inode_operations proc_sys_inode_operations;
47429 static const struct file_operations proc_sys_dir_file_operations;
47430-static const struct inode_operations proc_sys_dir_operations;
47431+const struct inode_operations proc_sys_dir_operations;
47432
47433 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47434 {
47435@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47436
47437 err = NULL;
47438 d_set_d_op(dentry, &proc_sys_dentry_operations);
47439+
47440+ gr_handle_proc_create(dentry, inode);
47441+
47442 d_add(dentry, inode);
47443
47444+ if (gr_handle_sysctl(p, MAY_EXEC))
47445+ err = ERR_PTR(-ENOENT);
47446+
47447 out:
47448 sysctl_head_finish(head);
47449 return err;
47450@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47451 if (!table->proc_handler)
47452 goto out;
47453
47454+#ifdef CONFIG_GRKERNSEC
47455+ error = -EPERM;
47456+ if (write && !capable(CAP_SYS_ADMIN))
47457+ goto out;
47458+#endif
47459+
47460 /* careful: calling conventions are nasty here */
47461 res = count;
47462 error = table->proc_handler(table, write, buf, &res, ppos);
47463@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
47464 return -ENOMEM;
47465 } else {
47466 d_set_d_op(child, &proc_sys_dentry_operations);
47467+
47468+ gr_handle_proc_create(child, inode);
47469+
47470 d_add(child, inode);
47471 }
47472 } else {
47473@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
47474 if (*pos < file->f_pos)
47475 continue;
47476
47477+ if (gr_handle_sysctl(table, 0))
47478+ continue;
47479+
47480 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47481 if (res)
47482 return res;
47483@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
47484 if (IS_ERR(head))
47485 return PTR_ERR(head);
47486
47487+ if (table && gr_handle_sysctl(table, MAY_EXEC))
47488+ return -ENOENT;
47489+
47490 generic_fillattr(inode, stat);
47491 if (table)
47492 stat->mode = (stat->mode & S_IFMT) | table->mode;
47493@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
47494 .llseek = generic_file_llseek,
47495 };
47496
47497-static const struct inode_operations proc_sys_inode_operations = {
47498+const struct inode_operations proc_sys_inode_operations = {
47499 .permission = proc_sys_permission,
47500 .setattr = proc_sys_setattr,
47501 .getattr = proc_sys_getattr,
47502 };
47503
47504-static const struct inode_operations proc_sys_dir_operations = {
47505+const struct inode_operations proc_sys_dir_operations = {
47506 .lookup = proc_sys_lookup,
47507 .permission = proc_sys_permission,
47508 .setattr = proc_sys_setattr,
47509diff --git a/fs/proc/root.c b/fs/proc/root.c
47510index 46a15d8..335631a 100644
47511--- a/fs/proc/root.c
47512+++ b/fs/proc/root.c
47513@@ -187,7 +187,15 @@ void __init proc_root_init(void)
47514 #ifdef CONFIG_PROC_DEVICETREE
47515 proc_device_tree_init();
47516 #endif
47517+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47518+#ifdef CONFIG_GRKERNSEC_PROC_USER
47519+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47520+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47521+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47522+#endif
47523+#else
47524 proc_mkdir("bus", NULL);
47525+#endif
47526 proc_sys_init();
47527 }
47528
47529diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
47530index 7dcd2a2..b2f410e 100644
47531--- a/fs/proc/task_mmu.c
47532+++ b/fs/proc/task_mmu.c
47533@@ -11,6 +11,7 @@
47534 #include <linux/rmap.h>
47535 #include <linux/swap.h>
47536 #include <linux/swapops.h>
47537+#include <linux/grsecurity.h>
47538
47539 #include <asm/elf.h>
47540 #include <asm/uaccess.h>
47541@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47542 "VmExe:\t%8lu kB\n"
47543 "VmLib:\t%8lu kB\n"
47544 "VmPTE:\t%8lu kB\n"
47545- "VmSwap:\t%8lu kB\n",
47546- hiwater_vm << (PAGE_SHIFT-10),
47547+ "VmSwap:\t%8lu kB\n"
47548+
47549+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47550+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47551+#endif
47552+
47553+ ,hiwater_vm << (PAGE_SHIFT-10),
47554 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47555 mm->locked_vm << (PAGE_SHIFT-10),
47556 mm->pinned_vm << (PAGE_SHIFT-10),
47557@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47558 data << (PAGE_SHIFT-10),
47559 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47560 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47561- swap << (PAGE_SHIFT-10));
47562+ swap << (PAGE_SHIFT-10)
47563+
47564+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47565+ , mm->context.user_cs_base, mm->context.user_cs_limit
47566+#endif
47567+
47568+ );
47569 }
47570
47571 unsigned long task_vsize(struct mm_struct *mm)
47572@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
47573 return ret;
47574 }
47575
47576+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47577+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47578+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47579+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47580+#endif
47581+
47582 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47583 {
47584 struct mm_struct *mm = vma->vm_mm;
47585@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47586 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47587 }
47588
47589- /* We don't show the stack guard page in /proc/maps */
47590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47591+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47592+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47593+#else
47594 start = vma->vm_start;
47595- if (stack_guard_page_start(vma, start))
47596- start += PAGE_SIZE;
47597 end = vma->vm_end;
47598- if (stack_guard_page_end(vma, end))
47599- end -= PAGE_SIZE;
47600+#endif
47601
47602 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47603 start,
47604@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47605 flags & VM_WRITE ? 'w' : '-',
47606 flags & VM_EXEC ? 'x' : '-',
47607 flags & VM_MAYSHARE ? 's' : 'p',
47608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47609+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
47610+#else
47611 pgoff,
47612+#endif
47613 MAJOR(dev), MINOR(dev), ino, &len);
47614
47615 /*
47616@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47617 */
47618 if (file) {
47619 pad_len_spaces(m, len);
47620- seq_path(m, &file->f_path, "\n");
47621+ seq_path(m, &file->f_path, "\n\\");
47622 } else {
47623 const char *name = arch_vma_name(vma);
47624 if (!name) {
47625@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47626 if (vma->vm_start <= mm->brk &&
47627 vma->vm_end >= mm->start_brk) {
47628 name = "[heap]";
47629- } else if (vma->vm_start <= mm->start_stack &&
47630- vma->vm_end >= mm->start_stack) {
47631+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
47632+ (vma->vm_start <= mm->start_stack &&
47633+ vma->vm_end >= mm->start_stack)) {
47634 name = "[stack]";
47635 }
47636 } else {
47637@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
47638 struct proc_maps_private *priv = m->private;
47639 struct task_struct *task = priv->task;
47640
47641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47642+ if (current->exec_id != m->exec_id) {
47643+ gr_log_badprocpid("maps");
47644+ return 0;
47645+ }
47646+#endif
47647+
47648 show_map_vma(m, vma);
47649
47650 if (m->count < m->size) /* vma is copied successfully */
47651@@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
47652 .private = &mss,
47653 };
47654
47655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47656+ if (current->exec_id != m->exec_id) {
47657+ gr_log_badprocpid("smaps");
47658+ return 0;
47659+ }
47660+#endif
47661 memset(&mss, 0, sizeof mss);
47662- mss.vma = vma;
47663- /* mmap_sem is held in m_start */
47664- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47665- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47666-
47667+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47668+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
47669+#endif
47670+ mss.vma = vma;
47671+ /* mmap_sem is held in m_start */
47672+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47673+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47675+ }
47676+#endif
47677 show_map_vma(m, vma);
47678
47679 seq_printf(m,
47680@@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
47681 "KernelPageSize: %8lu kB\n"
47682 "MMUPageSize: %8lu kB\n"
47683 "Locked: %8lu kB\n",
47684+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47685+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
47686+#else
47687 (vma->vm_end - vma->vm_start) >> 10,
47688+#endif
47689 mss.resident >> 10,
47690 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
47691 mss.shared_clean >> 10,
47692@@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
47693 int n;
47694 char buffer[50];
47695
47696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47697+ if (current->exec_id != m->exec_id) {
47698+ gr_log_badprocpid("numa_maps");
47699+ return 0;
47700+ }
47701+#endif
47702+
47703 if (!mm)
47704 return 0;
47705
47706@@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
47707 mpol_to_str(buffer, sizeof(buffer), pol, 0);
47708 mpol_cond_put(pol);
47709
47710+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47711+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
47712+#else
47713 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
47714+#endif
47715
47716 if (file) {
47717 seq_printf(m, " file=");
47718- seq_path(m, &file->f_path, "\n\t= ");
47719+ seq_path(m, &file->f_path, "\n\t\\= ");
47720 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
47721 seq_printf(m, " heap");
47722 } else if (vma->vm_start <= mm->start_stack &&
47723diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
47724index 980de54..2a4db5f 100644
47725--- a/fs/proc/task_nommu.c
47726+++ b/fs/proc/task_nommu.c
47727@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47728 else
47729 bytes += kobjsize(mm);
47730
47731- if (current->fs && current->fs->users > 1)
47732+ if (current->fs && atomic_read(&current->fs->users) > 1)
47733 sbytes += kobjsize(current->fs);
47734 else
47735 bytes += kobjsize(current->fs);
47736@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
47737
47738 if (file) {
47739 pad_len_spaces(m, len);
47740- seq_path(m, &file->f_path, "");
47741+ seq_path(m, &file->f_path, "\n\\");
47742 } else if (mm) {
47743 if (vma->vm_start <= mm->start_stack &&
47744 vma->vm_end >= mm->start_stack) {
47745diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
47746index d67908b..d13f6a6 100644
47747--- a/fs/quota/netlink.c
47748+++ b/fs/quota/netlink.c
47749@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
47750 void quota_send_warning(short type, unsigned int id, dev_t dev,
47751 const char warntype)
47752 {
47753- static atomic_t seq;
47754+ static atomic_unchecked_t seq;
47755 struct sk_buff *skb;
47756 void *msg_head;
47757 int ret;
47758@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
47759 "VFS: Not enough memory to send quota warning.\n");
47760 return;
47761 }
47762- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
47763+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
47764 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
47765 if (!msg_head) {
47766 printk(KERN_ERR
47767diff --git a/fs/readdir.c b/fs/readdir.c
47768index 356f715..c918d38 100644
47769--- a/fs/readdir.c
47770+++ b/fs/readdir.c
47771@@ -17,6 +17,7 @@
47772 #include <linux/security.h>
47773 #include <linux/syscalls.h>
47774 #include <linux/unistd.h>
47775+#include <linux/namei.h>
47776
47777 #include <asm/uaccess.h>
47778
47779@@ -67,6 +68,7 @@ struct old_linux_dirent {
47780
47781 struct readdir_callback {
47782 struct old_linux_dirent __user * dirent;
47783+ struct file * file;
47784 int result;
47785 };
47786
47787@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
47788 buf->result = -EOVERFLOW;
47789 return -EOVERFLOW;
47790 }
47791+
47792+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47793+ return 0;
47794+
47795 buf->result++;
47796 dirent = buf->dirent;
47797 if (!access_ok(VERIFY_WRITE, dirent,
47798@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
47799
47800 buf.result = 0;
47801 buf.dirent = dirent;
47802+ buf.file = file;
47803
47804 error = vfs_readdir(file, fillonedir, &buf);
47805 if (buf.result)
47806@@ -142,6 +149,7 @@ struct linux_dirent {
47807 struct getdents_callback {
47808 struct linux_dirent __user * current_dir;
47809 struct linux_dirent __user * previous;
47810+ struct file * file;
47811 int count;
47812 int error;
47813 };
47814@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
47815 buf->error = -EOVERFLOW;
47816 return -EOVERFLOW;
47817 }
47818+
47819+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47820+ return 0;
47821+
47822 dirent = buf->previous;
47823 if (dirent) {
47824 if (__put_user(offset, &dirent->d_off))
47825@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
47826 buf.previous = NULL;
47827 buf.count = count;
47828 buf.error = 0;
47829+ buf.file = file;
47830
47831 error = vfs_readdir(file, filldir, &buf);
47832 if (error >= 0)
47833@@ -229,6 +242,7 @@ out:
47834 struct getdents_callback64 {
47835 struct linux_dirent64 __user * current_dir;
47836 struct linux_dirent64 __user * previous;
47837+ struct file *file;
47838 int count;
47839 int error;
47840 };
47841@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
47842 buf->error = -EINVAL; /* only used if we fail.. */
47843 if (reclen > buf->count)
47844 return -EINVAL;
47845+
47846+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47847+ return 0;
47848+
47849 dirent = buf->previous;
47850 if (dirent) {
47851 if (__put_user(offset, &dirent->d_off))
47852@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47853
47854 buf.current_dir = dirent;
47855 buf.previous = NULL;
47856+ buf.file = file;
47857 buf.count = count;
47858 buf.error = 0;
47859
47860@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47861 error = buf.error;
47862 lastdirent = buf.previous;
47863 if (lastdirent) {
47864- typeof(lastdirent->d_off) d_off = file->f_pos;
47865+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47866 if (__put_user(d_off, &lastdirent->d_off))
47867 error = -EFAULT;
47868 else
47869diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
47870index 60c0804..d814f98 100644
47871--- a/fs/reiserfs/do_balan.c
47872+++ b/fs/reiserfs/do_balan.c
47873@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
47874 return;
47875 }
47876
47877- atomic_inc(&(fs_generation(tb->tb_sb)));
47878+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
47879 do_balance_starts(tb);
47880
47881 /* balance leaf returns 0 except if combining L R and S into
47882diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
47883index 7a99811..a7c96c4 100644
47884--- a/fs/reiserfs/procfs.c
47885+++ b/fs/reiserfs/procfs.c
47886@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
47887 "SMALL_TAILS " : "NO_TAILS ",
47888 replay_only(sb) ? "REPLAY_ONLY " : "",
47889 convert_reiserfs(sb) ? "CONV " : "",
47890- atomic_read(&r->s_generation_counter),
47891+ atomic_read_unchecked(&r->s_generation_counter),
47892 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
47893 SF(s_do_balance), SF(s_unneeded_left_neighbor),
47894 SF(s_good_search_by_key_reada), SF(s_bmaps),
47895diff --git a/fs/select.c b/fs/select.c
47896index e782258..3b4b44c 100644
47897--- a/fs/select.c
47898+++ b/fs/select.c
47899@@ -20,6 +20,7 @@
47900 #include <linux/module.h>
47901 #include <linux/slab.h>
47902 #include <linux/poll.h>
47903+#include <linux/security.h>
47904 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
47905 #include <linux/file.h>
47906 #include <linux/fdtable.h>
47907@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
47908 struct poll_list *walk = head;
47909 unsigned long todo = nfds;
47910
47911+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
47912 if (nfds > rlimit(RLIMIT_NOFILE))
47913 return -EINVAL;
47914
47915diff --git a/fs/seq_file.c b/fs/seq_file.c
47916index 4023d6b..53b39c5 100644
47917--- a/fs/seq_file.c
47918+++ b/fs/seq_file.c
47919@@ -9,6 +9,7 @@
47920 #include <linux/module.h>
47921 #include <linux/seq_file.h>
47922 #include <linux/slab.h>
47923+#include <linux/sched.h>
47924
47925 #include <asm/uaccess.h>
47926 #include <asm/page.h>
47927@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
47928 memset(p, 0, sizeof(*p));
47929 mutex_init(&p->lock);
47930 p->op = op;
47931+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47932+ p->exec_id = current->exec_id;
47933+#endif
47934
47935 /*
47936 * Wrappers around seq_open(e.g. swaps_open) need to be
47937@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
47938 return 0;
47939 }
47940 if (!m->buf) {
47941- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
47942+ m->size = PAGE_SIZE;
47943+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
47944 if (!m->buf)
47945 return -ENOMEM;
47946 }
47947@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
47948 Eoverflow:
47949 m->op->stop(m, p);
47950 kfree(m->buf);
47951- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
47952+ m->size <<= 1;
47953+ m->buf = kmalloc(m->size, GFP_KERNEL);
47954 return !m->buf ? -ENOMEM : -EAGAIN;
47955 }
47956
47957@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
47958 m->version = file->f_version;
47959 /* grab buffer if we didn't have one */
47960 if (!m->buf) {
47961- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
47962+ m->size = PAGE_SIZE;
47963+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
47964 if (!m->buf)
47965 goto Enomem;
47966 }
47967@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
47968 goto Fill;
47969 m->op->stop(m, p);
47970 kfree(m->buf);
47971- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
47972+ m->size <<= 1;
47973+ m->buf = kmalloc(m->size, GFP_KERNEL);
47974 if (!m->buf)
47975 goto Enomem;
47976 m->count = 0;
47977@@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
47978 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
47979 void *data)
47980 {
47981- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
47982+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
47983 int res = -ENOMEM;
47984
47985 if (op) {
47986diff --git a/fs/splice.c b/fs/splice.c
47987index 1ec0493..d6ab5c2 100644
47988--- a/fs/splice.c
47989+++ b/fs/splice.c
47990@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
47991 pipe_lock(pipe);
47992
47993 for (;;) {
47994- if (!pipe->readers) {
47995+ if (!atomic_read(&pipe->readers)) {
47996 send_sig(SIGPIPE, current, 0);
47997 if (!ret)
47998 ret = -EPIPE;
47999@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48000 do_wakeup = 0;
48001 }
48002
48003- pipe->waiting_writers++;
48004+ atomic_inc(&pipe->waiting_writers);
48005 pipe_wait(pipe);
48006- pipe->waiting_writers--;
48007+ atomic_dec(&pipe->waiting_writers);
48008 }
48009
48010 pipe_unlock(pipe);
48011@@ -559,7 +559,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48012 old_fs = get_fs();
48013 set_fs(get_ds());
48014 /* The cast to a user pointer is valid due to the set_fs() */
48015- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48016+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48017 set_fs(old_fs);
48018
48019 return res;
48020@@ -574,7 +574,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48021 old_fs = get_fs();
48022 set_fs(get_ds());
48023 /* The cast to a user pointer is valid due to the set_fs() */
48024- res = vfs_write(file, (const char __user *)buf, count, &pos);
48025+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48026 set_fs(old_fs);
48027
48028 return res;
48029@@ -625,7 +625,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48030 goto err;
48031
48032 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48033- vec[i].iov_base = (void __user *) page_address(page);
48034+ vec[i].iov_base = (void __force_user *) page_address(page);
48035 vec[i].iov_len = this_len;
48036 spd.pages[i] = page;
48037 spd.nr_pages++;
48038@@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48039 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48040 {
48041 while (!pipe->nrbufs) {
48042- if (!pipe->writers)
48043+ if (!atomic_read(&pipe->writers))
48044 return 0;
48045
48046- if (!pipe->waiting_writers && sd->num_spliced)
48047+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48048 return 0;
48049
48050 if (sd->flags & SPLICE_F_NONBLOCK)
48051@@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48052 * out of the pipe right after the splice_to_pipe(). So set
48053 * PIPE_READERS appropriately.
48054 */
48055- pipe->readers = 1;
48056+ atomic_set(&pipe->readers, 1);
48057
48058 current->splice_pipe = pipe;
48059 }
48060@@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48061 ret = -ERESTARTSYS;
48062 break;
48063 }
48064- if (!pipe->writers)
48065+ if (!atomic_read(&pipe->writers))
48066 break;
48067- if (!pipe->waiting_writers) {
48068+ if (!atomic_read(&pipe->waiting_writers)) {
48069 if (flags & SPLICE_F_NONBLOCK) {
48070 ret = -EAGAIN;
48071 break;
48072@@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48073 pipe_lock(pipe);
48074
48075 while (pipe->nrbufs >= pipe->buffers) {
48076- if (!pipe->readers) {
48077+ if (!atomic_read(&pipe->readers)) {
48078 send_sig(SIGPIPE, current, 0);
48079 ret = -EPIPE;
48080 break;
48081@@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48082 ret = -ERESTARTSYS;
48083 break;
48084 }
48085- pipe->waiting_writers++;
48086+ atomic_inc(&pipe->waiting_writers);
48087 pipe_wait(pipe);
48088- pipe->waiting_writers--;
48089+ atomic_dec(&pipe->waiting_writers);
48090 }
48091
48092 pipe_unlock(pipe);
48093@@ -1818,14 +1818,14 @@ retry:
48094 pipe_double_lock(ipipe, opipe);
48095
48096 do {
48097- if (!opipe->readers) {
48098+ if (!atomic_read(&opipe->readers)) {
48099 send_sig(SIGPIPE, current, 0);
48100 if (!ret)
48101 ret = -EPIPE;
48102 break;
48103 }
48104
48105- if (!ipipe->nrbufs && !ipipe->writers)
48106+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48107 break;
48108
48109 /*
48110@@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48111 pipe_double_lock(ipipe, opipe);
48112
48113 do {
48114- if (!opipe->readers) {
48115+ if (!atomic_read(&opipe->readers)) {
48116 send_sig(SIGPIPE, current, 0);
48117 if (!ret)
48118 ret = -EPIPE;
48119@@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48120 * return EAGAIN if we have the potential of some data in the
48121 * future, otherwise just return 0
48122 */
48123- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48124+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48125 ret = -EAGAIN;
48126
48127 pipe_unlock(ipipe);
48128diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48129index 7fdf6a7..e6cd8ad 100644
48130--- a/fs/sysfs/dir.c
48131+++ b/fs/sysfs/dir.c
48132@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48133 struct sysfs_dirent *sd;
48134 int rc;
48135
48136+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48137+ const char *parent_name = parent_sd->s_name;
48138+
48139+ mode = S_IFDIR | S_IRWXU;
48140+
48141+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48142+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48143+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48144+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48145+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48146+#endif
48147+
48148 /* allocate */
48149 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48150 if (!sd)
48151diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48152index 00012e3..8392349 100644
48153--- a/fs/sysfs/file.c
48154+++ b/fs/sysfs/file.c
48155@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48156
48157 struct sysfs_open_dirent {
48158 atomic_t refcnt;
48159- atomic_t event;
48160+ atomic_unchecked_t event;
48161 wait_queue_head_t poll;
48162 struct list_head buffers; /* goes through sysfs_buffer.list */
48163 };
48164@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48165 if (!sysfs_get_active(attr_sd))
48166 return -ENODEV;
48167
48168- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48169+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48170 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48171
48172 sysfs_put_active(attr_sd);
48173@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48174 return -ENOMEM;
48175
48176 atomic_set(&new_od->refcnt, 0);
48177- atomic_set(&new_od->event, 1);
48178+ atomic_set_unchecked(&new_od->event, 1);
48179 init_waitqueue_head(&new_od->poll);
48180 INIT_LIST_HEAD(&new_od->buffers);
48181 goto retry;
48182@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48183
48184 sysfs_put_active(attr_sd);
48185
48186- if (buffer->event != atomic_read(&od->event))
48187+ if (buffer->event != atomic_read_unchecked(&od->event))
48188 goto trigger;
48189
48190 return DEFAULT_POLLMASK;
48191@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48192
48193 od = sd->s_attr.open;
48194 if (od) {
48195- atomic_inc(&od->event);
48196+ atomic_inc_unchecked(&od->event);
48197 wake_up_interruptible(&od->poll);
48198 }
48199
48200diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48201index a7ac78f..02158e1 100644
48202--- a/fs/sysfs/symlink.c
48203+++ b/fs/sysfs/symlink.c
48204@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48205
48206 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48207 {
48208- char *page = nd_get_link(nd);
48209+ const char *page = nd_get_link(nd);
48210 if (!IS_ERR(page))
48211 free_page((unsigned long)page);
48212 }
48213diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48214index c175b4d..8f36a16 100644
48215--- a/fs/udf/misc.c
48216+++ b/fs/udf/misc.c
48217@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48218
48219 u8 udf_tag_checksum(const struct tag *t)
48220 {
48221- u8 *data = (u8 *)t;
48222+ const u8 *data = (const u8 *)t;
48223 u8 checksum = 0;
48224 int i;
48225 for (i = 0; i < sizeof(struct tag); ++i)
48226diff --git a/fs/utimes.c b/fs/utimes.c
48227index ba653f3..06ea4b1 100644
48228--- a/fs/utimes.c
48229+++ b/fs/utimes.c
48230@@ -1,6 +1,7 @@
48231 #include <linux/compiler.h>
48232 #include <linux/file.h>
48233 #include <linux/fs.h>
48234+#include <linux/security.h>
48235 #include <linux/linkage.h>
48236 #include <linux/mount.h>
48237 #include <linux/namei.h>
48238@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48239 goto mnt_drop_write_and_out;
48240 }
48241 }
48242+
48243+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48244+ error = -EACCES;
48245+ goto mnt_drop_write_and_out;
48246+ }
48247+
48248 mutex_lock(&inode->i_mutex);
48249 error = notify_change(path->dentry, &newattrs);
48250 mutex_unlock(&inode->i_mutex);
48251diff --git a/fs/xattr.c b/fs/xattr.c
48252index 82f4337..236473c 100644
48253--- a/fs/xattr.c
48254+++ b/fs/xattr.c
48255@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48256 * Extended attribute SET operations
48257 */
48258 static long
48259-setxattr(struct dentry *d, const char __user *name, const void __user *value,
48260+setxattr(struct path *path, const char __user *name, const void __user *value,
48261 size_t size, int flags)
48262 {
48263 int error;
48264@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48265 return PTR_ERR(kvalue);
48266 }
48267
48268- error = vfs_setxattr(d, kname, kvalue, size, flags);
48269+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48270+ error = -EACCES;
48271+ goto out;
48272+ }
48273+
48274+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48275+out:
48276 kfree(kvalue);
48277 return error;
48278 }
48279@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48280 return error;
48281 error = mnt_want_write(path.mnt);
48282 if (!error) {
48283- error = setxattr(path.dentry, name, value, size, flags);
48284+ error = setxattr(&path, name, value, size, flags);
48285 mnt_drop_write(path.mnt);
48286 }
48287 path_put(&path);
48288@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48289 return error;
48290 error = mnt_want_write(path.mnt);
48291 if (!error) {
48292- error = setxattr(path.dentry, name, value, size, flags);
48293+ error = setxattr(&path, name, value, size, flags);
48294 mnt_drop_write(path.mnt);
48295 }
48296 path_put(&path);
48297@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48298 const void __user *,value, size_t, size, int, flags)
48299 {
48300 struct file *f;
48301- struct dentry *dentry;
48302 int error = -EBADF;
48303
48304 f = fget(fd);
48305 if (!f)
48306 return error;
48307- dentry = f->f_path.dentry;
48308- audit_inode(NULL, dentry);
48309+ audit_inode(NULL, f->f_path.dentry);
48310 error = mnt_want_write_file(f);
48311 if (!error) {
48312- error = setxattr(dentry, name, value, size, flags);
48313+ error = setxattr(&f->f_path, name, value, size, flags);
48314 mnt_drop_write_file(f);
48315 }
48316 fput(f);
48317diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48318index 8d5a506..7f62712 100644
48319--- a/fs/xattr_acl.c
48320+++ b/fs/xattr_acl.c
48321@@ -17,8 +17,8 @@
48322 struct posix_acl *
48323 posix_acl_from_xattr(const void *value, size_t size)
48324 {
48325- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48326- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48327+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48328+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48329 int count;
48330 struct posix_acl *acl;
48331 struct posix_acl_entry *acl_e;
48332diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48333index 188ef2f..adcf864 100644
48334--- a/fs/xfs/xfs_bmap.c
48335+++ b/fs/xfs/xfs_bmap.c
48336@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48337 int nmap,
48338 int ret_nmap);
48339 #else
48340-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48341+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48342 #endif /* DEBUG */
48343
48344 STATIC int
48345diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48346index 79d05e8..e3e5861 100644
48347--- a/fs/xfs/xfs_dir2_sf.c
48348+++ b/fs/xfs/xfs_dir2_sf.c
48349@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48350 }
48351
48352 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48353- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48354+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48355+ char name[sfep->namelen];
48356+ memcpy(name, sfep->name, sfep->namelen);
48357+ if (filldir(dirent, name, sfep->namelen,
48358+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
48359+ *offset = off & 0x7fffffff;
48360+ return 0;
48361+ }
48362+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48363 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48364 *offset = off & 0x7fffffff;
48365 return 0;
48366diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48367index 76f3ca5..f57f712 100644
48368--- a/fs/xfs/xfs_ioctl.c
48369+++ b/fs/xfs/xfs_ioctl.c
48370@@ -128,7 +128,7 @@ xfs_find_handle(
48371 }
48372
48373 error = -EFAULT;
48374- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48375+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48376 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48377 goto out_put;
48378
48379diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48380index ab30253..4d86958 100644
48381--- a/fs/xfs/xfs_iops.c
48382+++ b/fs/xfs/xfs_iops.c
48383@@ -447,7 +447,7 @@ xfs_vn_put_link(
48384 struct nameidata *nd,
48385 void *p)
48386 {
48387- char *s = nd_get_link(nd);
48388+ const char *s = nd_get_link(nd);
48389
48390 if (!IS_ERR(s))
48391 kfree(s);
48392diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48393new file mode 100644
48394index 0000000..4089e05
48395--- /dev/null
48396+++ b/grsecurity/Kconfig
48397@@ -0,0 +1,1078 @@
48398+#
48399+# grecurity configuration
48400+#
48401+
48402+menu "Grsecurity"
48403+
48404+config GRKERNSEC
48405+ bool "Grsecurity"
48406+ select CRYPTO
48407+ select CRYPTO_SHA256
48408+ help
48409+ If you say Y here, you will be able to configure many features
48410+ that will enhance the security of your system. It is highly
48411+ recommended that you say Y here and read through the help
48412+ for each option so that you fully understand the features and
48413+ can evaluate their usefulness for your machine.
48414+
48415+choice
48416+ prompt "Security Level"
48417+ depends on GRKERNSEC
48418+ default GRKERNSEC_CUSTOM
48419+
48420+config GRKERNSEC_LOW
48421+ bool "Low"
48422+ select GRKERNSEC_LINK
48423+ select GRKERNSEC_FIFO
48424+ select GRKERNSEC_RANDNET
48425+ select GRKERNSEC_DMESG
48426+ select GRKERNSEC_CHROOT
48427+ select GRKERNSEC_CHROOT_CHDIR
48428+
48429+ help
48430+ If you choose this option, several of the grsecurity options will
48431+ be enabled that will give you greater protection against a number
48432+ of attacks, while assuring that none of your software will have any
48433+ conflicts with the additional security measures. If you run a lot
48434+ of unusual software, or you are having problems with the higher
48435+ security levels, you should say Y here. With this option, the
48436+ following features are enabled:
48437+
48438+ - Linking restrictions
48439+ - FIFO restrictions
48440+ - Restricted dmesg
48441+ - Enforced chdir("/") on chroot
48442+ - Runtime module disabling
48443+
48444+config GRKERNSEC_MEDIUM
48445+ bool "Medium"
48446+ select PAX
48447+ select PAX_EI_PAX
48448+ select PAX_PT_PAX_FLAGS
48449+ select PAX_HAVE_ACL_FLAGS
48450+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48451+ select GRKERNSEC_CHROOT
48452+ select GRKERNSEC_CHROOT_SYSCTL
48453+ select GRKERNSEC_LINK
48454+ select GRKERNSEC_FIFO
48455+ select GRKERNSEC_DMESG
48456+ select GRKERNSEC_RANDNET
48457+ select GRKERNSEC_FORKFAIL
48458+ select GRKERNSEC_TIME
48459+ select GRKERNSEC_SIGNAL
48460+ select GRKERNSEC_CHROOT
48461+ select GRKERNSEC_CHROOT_UNIX
48462+ select GRKERNSEC_CHROOT_MOUNT
48463+ select GRKERNSEC_CHROOT_PIVOT
48464+ select GRKERNSEC_CHROOT_DOUBLE
48465+ select GRKERNSEC_CHROOT_CHDIR
48466+ select GRKERNSEC_CHROOT_MKNOD
48467+ select GRKERNSEC_PROC
48468+ select GRKERNSEC_PROC_USERGROUP
48469+ select PAX_RANDUSTACK
48470+ select PAX_ASLR
48471+ select PAX_RANDMMAP
48472+ select PAX_REFCOUNT if (X86 || SPARC64)
48473+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
48474+
48475+ help
48476+ If you say Y here, several features in addition to those included
48477+ in the low additional security level will be enabled. These
48478+ features provide even more security to your system, though in rare
48479+ cases they may be incompatible with very old or poorly written
48480+ software. If you enable this option, make sure that your auth
48481+ service (identd) is running as gid 1001. With this option,
48482+ the following features (in addition to those provided in the
48483+ low additional security level) will be enabled:
48484+
48485+ - Failed fork logging
48486+ - Time change logging
48487+ - Signal logging
48488+ - Deny mounts in chroot
48489+ - Deny double chrooting
48490+ - Deny sysctl writes in chroot
48491+ - Deny mknod in chroot
48492+ - Deny access to abstract AF_UNIX sockets out of chroot
48493+ - Deny pivot_root in chroot
48494+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
48495+ - /proc restrictions with special GID set to 10 (usually wheel)
48496+ - Address Space Layout Randomization (ASLR)
48497+ - Prevent exploitation of most refcount overflows
48498+ - Bounds checking of copying between the kernel and userland
48499+
48500+config GRKERNSEC_HIGH
48501+ bool "High"
48502+ select GRKERNSEC_LINK
48503+ select GRKERNSEC_FIFO
48504+ select GRKERNSEC_DMESG
48505+ select GRKERNSEC_FORKFAIL
48506+ select GRKERNSEC_TIME
48507+ select GRKERNSEC_SIGNAL
48508+ select GRKERNSEC_CHROOT
48509+ select GRKERNSEC_CHROOT_SHMAT
48510+ select GRKERNSEC_CHROOT_UNIX
48511+ select GRKERNSEC_CHROOT_MOUNT
48512+ select GRKERNSEC_CHROOT_FCHDIR
48513+ select GRKERNSEC_CHROOT_PIVOT
48514+ select GRKERNSEC_CHROOT_DOUBLE
48515+ select GRKERNSEC_CHROOT_CHDIR
48516+ select GRKERNSEC_CHROOT_MKNOD
48517+ select GRKERNSEC_CHROOT_CAPS
48518+ select GRKERNSEC_CHROOT_SYSCTL
48519+ select GRKERNSEC_CHROOT_FINDTASK
48520+ select GRKERNSEC_SYSFS_RESTRICT
48521+ select GRKERNSEC_PROC
48522+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48523+ select GRKERNSEC_HIDESYM
48524+ select GRKERNSEC_BRUTE
48525+ select GRKERNSEC_PROC_USERGROUP
48526+ select GRKERNSEC_KMEM
48527+ select GRKERNSEC_RESLOG
48528+ select GRKERNSEC_RANDNET
48529+ select GRKERNSEC_PROC_ADD
48530+ select GRKERNSEC_CHROOT_CHMOD
48531+ select GRKERNSEC_CHROOT_NICE
48532+ select GRKERNSEC_SETXID
48533+ select GRKERNSEC_AUDIT_MOUNT
48534+ select GRKERNSEC_MODHARDEN if (MODULES)
48535+ select GRKERNSEC_HARDEN_PTRACE
48536+ select GRKERNSEC_PTRACE_READEXEC
48537+ select GRKERNSEC_VM86 if (X86_32)
48538+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
48539+ select PAX
48540+ select PAX_RANDUSTACK
48541+ select PAX_ASLR
48542+ select PAX_RANDMMAP
48543+ select PAX_NOEXEC
48544+ select PAX_MPROTECT
48545+ select PAX_EI_PAX
48546+ select PAX_PT_PAX_FLAGS
48547+ select PAX_HAVE_ACL_FLAGS
48548+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
48549+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
48550+ select PAX_RANDKSTACK if (X86_TSC && X86)
48551+ select PAX_SEGMEXEC if (X86_32)
48552+ select PAX_PAGEEXEC
48553+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
48554+ select PAX_EMUTRAMP if (PARISC)
48555+ select PAX_EMUSIGRT if (PARISC)
48556+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
48557+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
48558+ select PAX_REFCOUNT if (X86 || SPARC64)
48559+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
48560+ help
48561+ If you say Y here, many of the features of grsecurity will be
48562+ enabled, which will protect you against many kinds of attacks
48563+ against your system. The heightened security comes at a cost
48564+ of an increased chance of incompatibilities with rare software
48565+ on your machine. Since this security level enables PaX, you should
48566+ view <http://pax.grsecurity.net> and read about the PaX
48567+ project. While you are there, download chpax and run it on
48568+ binaries that cause problems with PaX. Also remember that
48569+ since the /proc restrictions are enabled, you must run your
48570+ identd as gid 1001. This security level enables the following
48571+ features in addition to those listed in the low and medium
48572+ security levels:
48573+
48574+ - Additional /proc restrictions
48575+ - Chmod restrictions in chroot
48576+ - No signals, ptrace, or viewing of processes outside of chroot
48577+ - Capability restrictions in chroot
48578+ - Deny fchdir out of chroot
48579+ - Priority restrictions in chroot
48580+ - Segmentation-based implementation of PaX
48581+ - Mprotect restrictions
48582+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
48583+ - Kernel stack randomization
48584+ - Mount/unmount/remount logging
48585+ - Kernel symbol hiding
48586+ - Hardening of module auto-loading
48587+ - Ptrace restrictions
48588+ - Restricted vm86 mode
48589+ - Restricted sysfs/debugfs
48590+ - Active kernel exploit response
48591+
48592+config GRKERNSEC_CUSTOM
48593+ bool "Custom"
48594+ help
48595+ If you say Y here, you will be able to configure every grsecurity
48596+ option, which allows you to enable many more features that aren't
48597+ covered in the basic security levels. These additional features
48598+ include TPE, socket restrictions, and the sysctl system for
48599+ grsecurity. It is advised that you read through the help for
48600+ each option to determine its usefulness in your situation.
48601+
48602+endchoice
48603+
48604+menu "Memory Protections"
48605+depends on GRKERNSEC
48606+
48607+config GRKERNSEC_KMEM
48608+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
48609+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
48610+ help
48611+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
48612+ be written to or read from to modify or leak the contents of the running
48613+ kernel. /dev/port will also not be allowed to be opened. If you have module
48614+ support disabled, enabling this will close up four ways that are
48615+ currently used to insert malicious code into the running kernel.
48616+ Even with all these features enabled, we still highly recommend that
48617+ you use the RBAC system, as it is still possible for an attacker to
48618+ modify the running kernel through privileged I/O granted by ioperm/iopl.
48619+ If you are not using XFree86, you may be able to stop this additional
48620+ case by enabling the 'Disable privileged I/O' option. Though nothing
48621+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
48622+ but only to video memory, which is the only writing we allow in this
48623+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
48624+ not be allowed to mprotect it with PROT_WRITE later.
48625+ It is highly recommended that you say Y here if you meet all the
48626+ conditions above.
48627+
48628+config GRKERNSEC_VM86
48629+ bool "Restrict VM86 mode"
48630+ depends on X86_32
48631+
48632+ help
48633+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
48634+ make use of a special execution mode on 32bit x86 processors called
48635+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
48636+ video cards and will still work with this option enabled. The purpose
48637+ of the option is to prevent exploitation of emulation errors in
48638+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
48639+ Nearly all users should be able to enable this option.
48640+
48641+config GRKERNSEC_IO
48642+ bool "Disable privileged I/O"
48643+ depends on X86
48644+ select RTC_CLASS
48645+ select RTC_INTF_DEV
48646+ select RTC_DRV_CMOS
48647+
48648+ help
48649+ If you say Y here, all ioperm and iopl calls will return an error.
48650+ Ioperm and iopl can be used to modify the running kernel.
48651+ Unfortunately, some programs need this access to operate properly,
48652+ the most notable of which are XFree86 and hwclock. hwclock can be
48653+ remedied by having RTC support in the kernel, so real-time
48654+ clock support is enabled if this option is enabled, to ensure
48655+ that hwclock operates correctly. XFree86 still will not
48656+ operate correctly with this option enabled, so DO NOT CHOOSE Y
48657+ IF YOU USE XFree86. If you use XFree86 and you still want to
48658+ protect your kernel against modification, use the RBAC system.
48659+
48660+config GRKERNSEC_PROC_MEMMAP
48661+ bool "Harden ASLR against information leaks and entropy reduction"
48662+ default y if (PAX_NOEXEC || PAX_ASLR)
48663+ depends on PAX_NOEXEC || PAX_ASLR
48664+ help
48665+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
48666+ give no information about the addresses of its mappings if
48667+ PaX features that rely on random addresses are enabled on the task.
48668+ In addition to sanitizing this information and disabling other
48669+ dangerous sources of information, this option causes reads of sensitive
48670+ /proc/<pid> entries where the file descriptor was opened in a different
48671+ task than the one performing the read. Such attempts are logged.
48672+ This option also limits argv/env strings for suid/sgid binaries
48673+ to 512KB to prevent a complete exhaustion of the stack entropy provided
48674+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
48675+ binaries to prevent alternative mmap layouts from being abused.
48676+
48677+ If you use PaX it is essential that you say Y here as it closes up
48678+ several holes that make full ASLR useless locally.
48679+
48680+config GRKERNSEC_BRUTE
48681+ bool "Deter exploit bruteforcing"
48682+ help
48683+ If you say Y here, attempts to bruteforce exploits against forking
48684+ daemons such as apache or sshd, as well as against suid/sgid binaries
48685+ will be deterred. When a child of a forking daemon is killed by PaX
48686+ or crashes due to an illegal instruction or other suspicious signal,
48687+ the parent process will be delayed 30 seconds upon every subsequent
48688+ fork until the administrator is able to assess the situation and
48689+ restart the daemon.
48690+ In the suid/sgid case, the attempt is logged, the user has all their
48691+ processes terminated, and they are prevented from executing any further
48692+ processes for 15 minutes.
48693+ It is recommended that you also enable signal logging in the auditing
48694+ section so that logs are generated when a process triggers a suspicious
48695+ signal.
48696+ If the sysctl option is enabled, a sysctl option with name
48697+ "deter_bruteforce" is created.
48698+
48699+
48700+config GRKERNSEC_MODHARDEN
48701+ bool "Harden module auto-loading"
48702+ depends on MODULES
48703+ help
48704+ If you say Y here, module auto-loading in response to use of some
48705+ feature implemented by an unloaded module will be restricted to
48706+ root users. Enabling this option helps defend against attacks
48707+ by unprivileged users who abuse the auto-loading behavior to
48708+ cause a vulnerable module to load that is then exploited.
48709+
48710+ If this option prevents a legitimate use of auto-loading for a
48711+ non-root user, the administrator can execute modprobe manually
48712+ with the exact name of the module mentioned in the alert log.
48713+ Alternatively, the administrator can add the module to the list
48714+ of modules loaded at boot by modifying init scripts.
48715+
48716+ Modification of init scripts will most likely be needed on
48717+ Ubuntu servers with encrypted home directory support enabled,
48718+ as the first non-root user logging in will cause the ecb(aes),
48719+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
48720+
48721+config GRKERNSEC_HIDESYM
48722+ bool "Hide kernel symbols"
48723+ help
48724+ If you say Y here, getting information on loaded modules, and
48725+ displaying all kernel symbols through a syscall will be restricted
48726+ to users with CAP_SYS_MODULE. For software compatibility reasons,
48727+ /proc/kallsyms will be restricted to the root user. The RBAC
48728+ system can hide that entry even from root.
48729+
48730+ This option also prevents leaking of kernel addresses through
48731+ several /proc entries.
48732+
48733+ Note that this option is only effective provided the following
48734+ conditions are met:
48735+ 1) The kernel using grsecurity is not precompiled by some distribution
48736+ 2) You have also enabled GRKERNSEC_DMESG
48737+ 3) You are using the RBAC system and hiding other files such as your
48738+ kernel image and System.map. Alternatively, enabling this option
48739+ causes the permissions on /boot, /lib/modules, and the kernel
48740+ source directory to change at compile time to prevent
48741+ reading by non-root users.
48742+ If the above conditions are met, this option will aid in providing a
48743+ useful protection against local kernel exploitation of overflows
48744+ and arbitrary read/write vulnerabilities.
48745+
48746+config GRKERNSEC_KERN_LOCKOUT
48747+ bool "Active kernel exploit response"
48748+ depends on X86 || ARM || PPC || SPARC
48749+ help
48750+ If you say Y here, when a PaX alert is triggered due to suspicious
48751+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
48752+ or an OOPs occurs due to bad memory accesses, instead of just
48753+ terminating the offending process (and potentially allowing
48754+ a subsequent exploit from the same user), we will take one of two
48755+ actions:
48756+ If the user was root, we will panic the system
48757+ If the user was non-root, we will log the attempt, terminate
48758+ all processes owned by the user, then prevent them from creating
48759+ any new processes until the system is restarted
48760+ This deters repeated kernel exploitation/bruteforcing attempts
48761+ and is useful for later forensics.
48762+
48763+endmenu
48764+menu "Role Based Access Control Options"
48765+depends on GRKERNSEC
48766+
48767+config GRKERNSEC_RBAC_DEBUG
48768+ bool
48769+
48770+config GRKERNSEC_NO_RBAC
48771+ bool "Disable RBAC system"
48772+ help
48773+ If you say Y here, the /dev/grsec device will be removed from the kernel,
48774+ preventing the RBAC system from being enabled. You should only say Y
48775+ here if you have no intention of using the RBAC system, so as to prevent
48776+ an attacker with root access from misusing the RBAC system to hide files
48777+ and processes when loadable module support and /dev/[k]mem have been
48778+ locked down.
48779+
48780+config GRKERNSEC_ACL_HIDEKERN
48781+ bool "Hide kernel processes"
48782+ help
48783+ If you say Y here, all kernel threads will be hidden to all
48784+ processes but those whose subject has the "view hidden processes"
48785+ flag.
48786+
48787+config GRKERNSEC_ACL_MAXTRIES
48788+ int "Maximum tries before password lockout"
48789+ default 3
48790+ help
48791+ This option enforces the maximum number of times a user can attempt
48792+ to authorize themselves with the grsecurity RBAC system before being
48793+ denied the ability to attempt authorization again for a specified time.
48794+ The lower the number, the harder it will be to brute-force a password.
48795+
48796+config GRKERNSEC_ACL_TIMEOUT
48797+ int "Time to wait after max password tries, in seconds"
48798+ default 30
48799+ help
48800+ This option specifies the time the user must wait after attempting to
48801+ authorize to the RBAC system with the maximum number of invalid
48802+ passwords. The higher the number, the harder it will be to brute-force
48803+ a password.
48804+
48805+endmenu
48806+menu "Filesystem Protections"
48807+depends on GRKERNSEC
48808+
48809+config GRKERNSEC_PROC
48810+ bool "Proc restrictions"
48811+ help
48812+ If you say Y here, the permissions of the /proc filesystem
48813+ will be altered to enhance system security and privacy. You MUST
48814+ choose either a user only restriction or a user and group restriction.
48815+ Depending upon the option you choose, you can either restrict users to
48816+ see only the processes they themselves run, or choose a group that can
48817+ view all processes and files normally restricted to root if you choose
48818+ the "restrict to user only" option. NOTE: If you're running identd or
48819+ ntpd as a non-root user, you will have to run it as the group you
48820+ specify here.
48821+
48822+config GRKERNSEC_PROC_USER
48823+ bool "Restrict /proc to user only"
48824+ depends on GRKERNSEC_PROC
48825+ help
48826+ If you say Y here, non-root users will only be able to view their own
48827+ processes, and restricts them from viewing network-related information,
48828+ and viewing kernel symbol and module information.
48829+
48830+config GRKERNSEC_PROC_USERGROUP
48831+ bool "Allow special group"
48832+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
48833+ help
48834+ If you say Y here, you will be able to select a group that will be
48835+ able to view all processes and network-related information. If you've
48836+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
48837+ remain hidden. This option is useful if you want to run identd as
48838+ a non-root user.
48839+
48840+config GRKERNSEC_PROC_GID
48841+ int "GID for special group"
48842+ depends on GRKERNSEC_PROC_USERGROUP
48843+ default 1001
48844+
48845+config GRKERNSEC_PROC_ADD
48846+ bool "Additional restrictions"
48847+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
48848+ help
48849+ If you say Y here, additional restrictions will be placed on
48850+ /proc that keep normal users from viewing device information and
48851+ slabinfo information that could be useful for exploits.
48852+
48853+config GRKERNSEC_LINK
48854+ bool "Linking restrictions"
48855+ help
48856+ If you say Y here, /tmp race exploits will be prevented, since users
48857+ will no longer be able to follow symlinks owned by other users in
48858+ world-writable +t directories (e.g. /tmp), unless the owner of the
48859+ symlink is the owner of the directory. users will also not be
48860+ able to hardlink to files they do not own. If the sysctl option is
48861+ enabled, a sysctl option with name "linking_restrictions" is created.
48862+
48863+config GRKERNSEC_FIFO
48864+ bool "FIFO restrictions"
48865+ help
48866+ If you say Y here, users will not be able to write to FIFOs they don't
48867+ own in world-writable +t directories (e.g. /tmp), unless the owner of
48868+ the FIFO is the same owner of the directory it's held in. If the sysctl
48869+ option is enabled, a sysctl option with name "fifo_restrictions" is
48870+ created.
48871+
48872+config GRKERNSEC_SYSFS_RESTRICT
48873+ bool "Sysfs/debugfs restriction"
48874+ depends on SYSFS
48875+ help
48876+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
48877+ any filesystem normally mounted under it (e.g. debugfs) will be
48878+ mostly accessible only by root. These filesystems generally provide access
48879+ to hardware and debug information that isn't appropriate for unprivileged
48880+ users of the system. Sysfs and debugfs have also become a large source
48881+ of new vulnerabilities, ranging from infoleaks to local compromise.
48882+ There has been very little oversight with an eye toward security involved
48883+ in adding new exporters of information to these filesystems, so their
48884+ use is discouraged.
48885+ For reasons of compatibility, a few directories have been whitelisted
48886+ for access by non-root users:
48887+ /sys/fs/selinux
48888+ /sys/fs/fuse
48889+ /sys/devices/system/cpu
48890+
48891+config GRKERNSEC_ROFS
48892+ bool "Runtime read-only mount protection"
48893+ help
48894+ If you say Y here, a sysctl option with name "romount_protect" will
48895+ be created. By setting this option to 1 at runtime, filesystems
48896+ will be protected in the following ways:
48897+ * No new writable mounts will be allowed
48898+ * Existing read-only mounts won't be able to be remounted read/write
48899+ * Write operations will be denied on all block devices
48900+ This option acts independently of grsec_lock: once it is set to 1,
48901+ it cannot be turned off. Therefore, please be mindful of the resulting
48902+ behavior if this option is enabled in an init script on a read-only
48903+ filesystem. This feature is mainly intended for secure embedded systems.
48904+
48905+config GRKERNSEC_CHROOT
48906+ bool "Chroot jail restrictions"
48907+ help
48908+ If you say Y here, you will be able to choose several options that will
48909+ make breaking out of a chrooted jail much more difficult. If you
48910+ encounter no software incompatibilities with the following options, it
48911+ is recommended that you enable each one.
48912+
48913+config GRKERNSEC_CHROOT_MOUNT
48914+ bool "Deny mounts"
48915+ depends on GRKERNSEC_CHROOT
48916+ help
48917+ If you say Y here, processes inside a chroot will not be able to
48918+ mount or remount filesystems. If the sysctl option is enabled, a
48919+ sysctl option with name "chroot_deny_mount" is created.
48920+
48921+config GRKERNSEC_CHROOT_DOUBLE
48922+ bool "Deny double-chroots"
48923+ depends on GRKERNSEC_CHROOT
48924+ help
48925+ If you say Y here, processes inside a chroot will not be able to chroot
48926+ again outside the chroot. This is a widely used method of breaking
48927+ out of a chroot jail and should not be allowed. If the sysctl
48928+ option is enabled, a sysctl option with name
48929+ "chroot_deny_chroot" is created.
48930+
48931+config GRKERNSEC_CHROOT_PIVOT
48932+ bool "Deny pivot_root in chroot"
48933+ depends on GRKERNSEC_CHROOT
48934+ help
48935+ If you say Y here, processes inside a chroot will not be able to use
48936+ a function called pivot_root() that was introduced in Linux 2.3.41. It
48937+ works similar to chroot in that it changes the root filesystem. This
48938+ function could be misused in a chrooted process to attempt to break out
48939+ of the chroot, and therefore should not be allowed. If the sysctl
48940+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
48941+ created.
48942+
48943+config GRKERNSEC_CHROOT_CHDIR
48944+ bool "Enforce chdir(\"/\") on all chroots"
48945+ depends on GRKERNSEC_CHROOT
48946+ help
48947+ If you say Y here, the current working directory of all newly-chrooted
48948+ applications will be set to the the root directory of the chroot.
48949+ The man page on chroot(2) states:
48950+ Note that this call does not change the current working
48951+ directory, so that `.' can be outside the tree rooted at
48952+ `/'. In particular, the super-user can escape from a
48953+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
48954+
48955+ It is recommended that you say Y here, since it's not known to break
48956+ any software. If the sysctl option is enabled, a sysctl option with
48957+ name "chroot_enforce_chdir" is created.
48958+
48959+config GRKERNSEC_CHROOT_CHMOD
48960+ bool "Deny (f)chmod +s"
48961+ depends on GRKERNSEC_CHROOT
48962+ help
48963+ If you say Y here, processes inside a chroot will not be able to chmod
48964+ or fchmod files to make them have suid or sgid bits. This protects
48965+ against another published method of breaking a chroot. If the sysctl
48966+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
48967+ created.
48968+
48969+config GRKERNSEC_CHROOT_FCHDIR
48970+ bool "Deny fchdir out of chroot"
48971+ depends on GRKERNSEC_CHROOT
48972+ help
48973+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
48974+ to a file descriptor of the chrooting process that points to a directory
48975+ outside the filesystem will be stopped. If the sysctl option
48976+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
48977+
48978+config GRKERNSEC_CHROOT_MKNOD
48979+ bool "Deny mknod"
48980+ depends on GRKERNSEC_CHROOT
48981+ help
48982+ If you say Y here, processes inside a chroot will not be allowed to
48983+ mknod. The problem with using mknod inside a chroot is that it
48984+ would allow an attacker to create a device entry that is the same
48985+ as one on the physical root of your system, which could range from
48986+ anything from the console device to a device for your harddrive (which
48987+ they could then use to wipe the drive or steal data). It is recommended
48988+ that you say Y here, unless you run into software incompatibilities.
48989+ If the sysctl option is enabled, a sysctl option with name
48990+ "chroot_deny_mknod" is created.
48991+
48992+config GRKERNSEC_CHROOT_SHMAT
48993+ bool "Deny shmat() out of chroot"
48994+ depends on GRKERNSEC_CHROOT
48995+ help
48996+ If you say Y here, processes inside a chroot will not be able to attach
48997+ to shared memory segments that were created outside of the chroot jail.
48998+ It is recommended that you say Y here. If the sysctl option is enabled,
48999+ a sysctl option with name "chroot_deny_shmat" is created.
49000+
49001+config GRKERNSEC_CHROOT_UNIX
49002+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
49003+ depends on GRKERNSEC_CHROOT
49004+ help
49005+ If you say Y here, processes inside a chroot will not be able to
49006+ connect to abstract (meaning not belonging to a filesystem) Unix
49007+ domain sockets that were bound outside of a chroot. It is recommended
49008+ that you say Y here. If the sysctl option is enabled, a sysctl option
49009+ with name "chroot_deny_unix" is created.
49010+
49011+config GRKERNSEC_CHROOT_FINDTASK
49012+ bool "Protect outside processes"
49013+ depends on GRKERNSEC_CHROOT
49014+ help
49015+ If you say Y here, processes inside a chroot will not be able to
49016+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49017+ getsid, or view any process outside of the chroot. If the sysctl
49018+ option is enabled, a sysctl option with name "chroot_findtask" is
49019+ created.
49020+
49021+config GRKERNSEC_CHROOT_NICE
49022+ bool "Restrict priority changes"
49023+ depends on GRKERNSEC_CHROOT
49024+ help
49025+ If you say Y here, processes inside a chroot will not be able to raise
49026+ the priority of processes in the chroot, or alter the priority of
49027+ processes outside the chroot. This provides more security than simply
49028+ removing CAP_SYS_NICE from the process' capability set. If the
49029+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49030+ is created.
49031+
49032+config GRKERNSEC_CHROOT_SYSCTL
49033+ bool "Deny sysctl writes"
49034+ depends on GRKERNSEC_CHROOT
49035+ help
49036+ If you say Y here, an attacker in a chroot will not be able to
49037+ write to sysctl entries, either by sysctl(2) or through a /proc
49038+ interface. It is strongly recommended that you say Y here. If the
49039+ sysctl option is enabled, a sysctl option with name
49040+ "chroot_deny_sysctl" is created.
49041+
49042+config GRKERNSEC_CHROOT_CAPS
49043+ bool "Capability restrictions"
49044+ depends on GRKERNSEC_CHROOT
49045+ help
49046+ If you say Y here, the capabilities on all processes within a
49047+ chroot jail will be lowered to stop module insertion, raw i/o,
49048+ system and net admin tasks, rebooting the system, modifying immutable
49049+ files, modifying IPC owned by another, and changing the system time.
49050+ This is left an option because it can break some apps. Disable this
49051+ if your chrooted apps are having problems performing those kinds of
49052+ tasks. If the sysctl option is enabled, a sysctl option with
49053+ name "chroot_caps" is created.
49054+
49055+endmenu
49056+menu "Kernel Auditing"
49057+depends on GRKERNSEC
49058+
49059+config GRKERNSEC_AUDIT_GROUP
49060+ bool "Single group for auditing"
49061+ help
49062+ If you say Y here, the exec, chdir, and (un)mount logging features
49063+ will only operate on a group you specify. This option is recommended
49064+ if you only want to watch certain users instead of having a large
49065+ amount of logs from the entire system. If the sysctl option is enabled,
49066+ a sysctl option with name "audit_group" is created.
49067+
49068+config GRKERNSEC_AUDIT_GID
49069+ int "GID for auditing"
49070+ depends on GRKERNSEC_AUDIT_GROUP
49071+ default 1007
49072+
49073+config GRKERNSEC_EXECLOG
49074+ bool "Exec logging"
49075+ help
49076+ If you say Y here, all execve() calls will be logged (since the
49077+ other exec*() calls are frontends to execve(), all execution
49078+ will be logged). Useful for shell-servers that like to keep track
49079+ of their users. If the sysctl option is enabled, a sysctl option with
49080+ name "exec_logging" is created.
49081+ WARNING: This option when enabled will produce a LOT of logs, especially
49082+ on an active system.
49083+
49084+config GRKERNSEC_RESLOG
49085+ bool "Resource logging"
49086+ help
49087+ If you say Y here, all attempts to overstep resource limits will
49088+ be logged with the resource name, the requested size, and the current
49089+ limit. It is highly recommended that you say Y here. If the sysctl
49090+ option is enabled, a sysctl option with name "resource_logging" is
49091+ created. If the RBAC system is enabled, the sysctl value is ignored.
49092+
49093+config GRKERNSEC_CHROOT_EXECLOG
49094+ bool "Log execs within chroot"
49095+ help
49096+ If you say Y here, all executions inside a chroot jail will be logged
49097+ to syslog. This can cause a large amount of logs if certain
49098+ applications (eg. djb's daemontools) are installed on the system, and
49099+ is therefore left as an option. If the sysctl option is enabled, a
49100+ sysctl option with name "chroot_execlog" is created.
49101+
49102+config GRKERNSEC_AUDIT_PTRACE
49103+ bool "Ptrace logging"
49104+ help
49105+ If you say Y here, all attempts to attach to a process via ptrace
49106+ will be logged. If the sysctl option is enabled, a sysctl option
49107+ with name "audit_ptrace" is created.
49108+
49109+config GRKERNSEC_AUDIT_CHDIR
49110+ bool "Chdir logging"
49111+ help
49112+ If you say Y here, all chdir() calls will be logged. If the sysctl
49113+ option is enabled, a sysctl option with name "audit_chdir" is created.
49114+
49115+config GRKERNSEC_AUDIT_MOUNT
49116+ bool "(Un)Mount logging"
49117+ help
49118+ If you say Y here, all mounts and unmounts will be logged. If the
49119+ sysctl option is enabled, a sysctl option with name "audit_mount" is
49120+ created.
49121+
49122+config GRKERNSEC_SIGNAL
49123+ bool "Signal logging"
49124+ help
49125+ If you say Y here, certain important signals will be logged, such as
49126+ SIGSEGV, which will as a result inform you of when a error in a program
49127+ occurred, which in some cases could mean a possible exploit attempt.
49128+ If the sysctl option is enabled, a sysctl option with name
49129+ "signal_logging" is created.
49130+
49131+config GRKERNSEC_FORKFAIL
49132+ bool "Fork failure logging"
49133+ help
49134+ If you say Y here, all failed fork() attempts will be logged.
49135+ This could suggest a fork bomb, or someone attempting to overstep
49136+ their process limit. If the sysctl option is enabled, a sysctl option
49137+ with name "forkfail_logging" is created.
49138+
49139+config GRKERNSEC_TIME
49140+ bool "Time change logging"
49141+ help
49142+ If you say Y here, any changes of the system clock will be logged.
49143+ If the sysctl option is enabled, a sysctl option with name
49144+ "timechange_logging" is created.
49145+
49146+config GRKERNSEC_PROC_IPADDR
49147+ bool "/proc/<pid>/ipaddr support"
49148+ help
49149+ If you say Y here, a new entry will be added to each /proc/<pid>
49150+ directory that contains the IP address of the person using the task.
49151+ The IP is carried across local TCP and AF_UNIX stream sockets.
49152+ This information can be useful for IDS/IPSes to perform remote response
49153+ to a local attack. The entry is readable by only the owner of the
49154+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49155+ the RBAC system), and thus does not create privacy concerns.
49156+
49157+config GRKERNSEC_RWXMAP_LOG
49158+ bool 'Denied RWX mmap/mprotect logging'
49159+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49160+ help
49161+ If you say Y here, calls to mmap() and mprotect() with explicit
49162+ usage of PROT_WRITE and PROT_EXEC together will be logged when
49163+ denied by the PAX_MPROTECT feature. If the sysctl option is
49164+ enabled, a sysctl option with name "rwxmap_logging" is created.
49165+
49166+config GRKERNSEC_AUDIT_TEXTREL
49167+ bool 'ELF text relocations logging (READ HELP)'
49168+ depends on PAX_MPROTECT
49169+ help
49170+ If you say Y here, text relocations will be logged with the filename
49171+ of the offending library or binary. The purpose of the feature is
49172+ to help Linux distribution developers get rid of libraries and
49173+ binaries that need text relocations which hinder the future progress
49174+ of PaX. Only Linux distribution developers should say Y here, and
49175+ never on a production machine, as this option creates an information
49176+ leak that could aid an attacker in defeating the randomization of
49177+ a single memory region. If the sysctl option is enabled, a sysctl
49178+ option with name "audit_textrel" is created.
49179+
49180+endmenu
49181+
49182+menu "Executable Protections"
49183+depends on GRKERNSEC
49184+
49185+config GRKERNSEC_DMESG
49186+ bool "Dmesg(8) restriction"
49187+ help
49188+ If you say Y here, non-root users will not be able to use dmesg(8)
49189+ to view up to the last 4kb of messages in the kernel's log buffer.
49190+ The kernel's log buffer often contains kernel addresses and other
49191+ identifying information useful to an attacker in fingerprinting a
49192+ system for a targeted exploit.
49193+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
49194+ created.
49195+
49196+config GRKERNSEC_HARDEN_PTRACE
49197+ bool "Deter ptrace-based process snooping"
49198+ help
49199+ If you say Y here, TTY sniffers and other malicious monitoring
49200+ programs implemented through ptrace will be defeated. If you
49201+ have been using the RBAC system, this option has already been
49202+ enabled for several years for all users, with the ability to make
49203+ fine-grained exceptions.
49204+
49205+ This option only affects the ability of non-root users to ptrace
49206+ processes that are not a descendent of the ptracing process.
49207+ This means that strace ./binary and gdb ./binary will still work,
49208+ but attaching to arbitrary processes will not. If the sysctl
49209+ option is enabled, a sysctl option with name "harden_ptrace" is
49210+ created.
49211+
49212+config GRKERNSEC_PTRACE_READEXEC
49213+ bool "Require read access to ptrace sensitive binaries"
49214+ help
49215+ If you say Y here, unprivileged users will not be able to ptrace unreadable
49216+ binaries. This option is useful in environments that
49217+ remove the read bits (e.g. file mode 4711) from suid binaries to
49218+ prevent infoleaking of their contents. This option adds
49219+ consistency to the use of that file mode, as the binary could normally
49220+ be read out when run without privileges while ptracing.
49221+
49222+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49223+ is created.
49224+
49225+config GRKERNSEC_SETXID
49226+ bool "Enforce consistent multithreaded privileges"
49227+ help
49228+ If you say Y here, a change from a root uid to a non-root uid
49229+ in a multithreaded application will cause the resulting uids,
49230+ gids, supplementary groups, and capabilities in that thread
49231+ to be propagated to the other threads of the process. In most
49232+ cases this is unnecessary, as glibc will emulate this behavior
49233+ on behalf of the application. Other libcs do not act in the
49234+ same way, allowing the other threads of the process to continue
49235+ running with root privileges. If the sysctl option is enabled,
49236+ a sysctl option with name "consistent_setxid" is created.
49237+
49238+config GRKERNSEC_TPE
49239+ bool "Trusted Path Execution (TPE)"
49240+ help
49241+ If you say Y here, you will be able to choose a gid to add to the
49242+ supplementary groups of users you want to mark as "untrusted."
49243+ These users will not be able to execute any files that are not in
49244+ root-owned directories writable only by root. If the sysctl option
49245+ is enabled, a sysctl option with name "tpe" is created.
49246+
49247+config GRKERNSEC_TPE_ALL
49248+ bool "Partially restrict all non-root users"
49249+ depends on GRKERNSEC_TPE
49250+ help
49251+ If you say Y here, all non-root users will be covered under
49252+ a weaker TPE restriction. This is separate from, and in addition to,
49253+ the main TPE options that you have selected elsewhere. Thus, if a
49254+ "trusted" GID is chosen, this restriction applies to even that GID.
49255+ Under this restriction, all non-root users will only be allowed to
49256+ execute files in directories they own that are not group or
49257+ world-writable, or in directories owned by root and writable only by
49258+ root. If the sysctl option is enabled, a sysctl option with name
49259+ "tpe_restrict_all" is created.
49260+
49261+config GRKERNSEC_TPE_INVERT
49262+ bool "Invert GID option"
49263+ depends on GRKERNSEC_TPE
49264+ help
49265+ If you say Y here, the group you specify in the TPE configuration will
49266+ decide what group TPE restrictions will be *disabled* for. This
49267+ option is useful if you want TPE restrictions to be applied to most
49268+ users on the system. If the sysctl option is enabled, a sysctl option
49269+ with name "tpe_invert" is created. Unlike other sysctl options, this
49270+ entry will default to on for backward-compatibility.
49271+
49272+config GRKERNSEC_TPE_GID
49273+ int "GID for untrusted users"
49274+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49275+ default 1005
49276+ help
49277+ Setting this GID determines what group TPE restrictions will be
49278+ *enabled* for. If the sysctl option is enabled, a sysctl option
49279+ with name "tpe_gid" is created.
49280+
49281+config GRKERNSEC_TPE_GID
49282+ int "GID for trusted users"
49283+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49284+ default 1005
49285+ help
49286+ Setting this GID determines what group TPE restrictions will be
49287+ *disabled* for. If the sysctl option is enabled, a sysctl option
49288+ with name "tpe_gid" is created.
49289+
49290+endmenu
49291+menu "Network Protections"
49292+depends on GRKERNSEC
49293+
49294+config GRKERNSEC_RANDNET
49295+ bool "Larger entropy pools"
49296+ help
49297+ If you say Y here, the entropy pools used for many features of Linux
49298+ and grsecurity will be doubled in size. Since several grsecurity
49299+ features use additional randomness, it is recommended that you say Y
49300+ here. Saying Y here has a similar effect as modifying
49301+ /proc/sys/kernel/random/poolsize.
49302+
49303+config GRKERNSEC_BLACKHOLE
49304+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49305+ depends on NET
49306+ help
49307+ If you say Y here, neither TCP resets nor ICMP
49308+ destination-unreachable packets will be sent in response to packets
49309+ sent to ports for which no associated listening process exists.
49310+ This feature supports both IPV4 and IPV6 and exempts the
49311+ loopback interface from blackholing. Enabling this feature
49312+ makes a host more resilient to DoS attacks and reduces network
49313+ visibility against scanners.
49314+
49315+ The blackhole feature as-implemented is equivalent to the FreeBSD
49316+ blackhole feature, as it prevents RST responses to all packets, not
49317+ just SYNs. Under most application behavior this causes no
49318+ problems, but applications (like haproxy) may not close certain
49319+ connections in a way that cleanly terminates them on the remote
49320+ end, leaving the remote host in LAST_ACK state. Because of this
49321+ side-effect and to prevent intentional LAST_ACK DoSes, this
49322+ feature also adds automatic mitigation against such attacks.
49323+ The mitigation drastically reduces the amount of time a socket
49324+ can spend in LAST_ACK state. If you're using haproxy and not
49325+ all servers it connects to have this option enabled, consider
49326+ disabling this feature on the haproxy host.
49327+
49328+ If the sysctl option is enabled, two sysctl options with names
49329+ "ip_blackhole" and "lastack_retries" will be created.
49330+ While "ip_blackhole" takes the standard zero/non-zero on/off
49331+ toggle, "lastack_retries" uses the same kinds of values as
49332+ "tcp_retries1" and "tcp_retries2". The default value of 4
49333+ prevents a socket from lasting more than 45 seconds in LAST_ACK
49334+ state.
49335+
49336+config GRKERNSEC_SOCKET
49337+ bool "Socket restrictions"
49338+ depends on NET
49339+ help
49340+ If you say Y here, you will be able to choose from several options.
49341+ If you assign a GID on your system and add it to the supplementary
49342+ groups of users you want to restrict socket access to, this patch
49343+ will perform up to three things, based on the option(s) you choose.
49344+
49345+config GRKERNSEC_SOCKET_ALL
49346+ bool "Deny any sockets to group"
49347+ depends on GRKERNSEC_SOCKET
49348+ help
49349+ If you say Y here, you will be able to choose a GID of whose users will
49350+ be unable to connect to other hosts from your machine or run server
49351+ applications from your machine. If the sysctl option is enabled, a
49352+ sysctl option with name "socket_all" is created.
49353+
49354+config GRKERNSEC_SOCKET_ALL_GID
49355+ int "GID to deny all sockets for"
49356+ depends on GRKERNSEC_SOCKET_ALL
49357+ default 1004
49358+ help
49359+ Here you can choose the GID to disable socket access for. Remember to
49360+ add the users you want socket access disabled for to the GID
49361+ specified here. If the sysctl option is enabled, a sysctl option
49362+ with name "socket_all_gid" is created.
49363+
49364+config GRKERNSEC_SOCKET_CLIENT
49365+ bool "Deny client sockets to group"
49366+ depends on GRKERNSEC_SOCKET
49367+ help
49368+ If you say Y here, you will be able to choose a GID of whose users will
49369+ be unable to connect to other hosts from your machine, but will be
49370+ able to run servers. If this option is enabled, all users in the group
49371+ you specify will have to use passive mode when initiating ftp transfers
49372+ from the shell on your machine. If the sysctl option is enabled, a
49373+ sysctl option with name "socket_client" is created.
49374+
49375+config GRKERNSEC_SOCKET_CLIENT_GID
49376+ int "GID to deny client sockets for"
49377+ depends on GRKERNSEC_SOCKET_CLIENT
49378+ default 1003
49379+ help
49380+ Here you can choose the GID to disable client socket access for.
49381+ Remember to add the users you want client socket access disabled for to
49382+ the GID specified here. If the sysctl option is enabled, a sysctl
49383+ option with name "socket_client_gid" is created.
49384+
49385+config GRKERNSEC_SOCKET_SERVER
49386+ bool "Deny server sockets to group"
49387+ depends on GRKERNSEC_SOCKET
49388+ help
49389+ If you say Y here, you will be able to choose a GID of whose users will
49390+ be unable to run server applications from your machine. If the sysctl
49391+ option is enabled, a sysctl option with name "socket_server" is created.
49392+
49393+config GRKERNSEC_SOCKET_SERVER_GID
49394+ int "GID to deny server sockets for"
49395+ depends on GRKERNSEC_SOCKET_SERVER
49396+ default 1002
49397+ help
49398+ Here you can choose the GID to disable server socket access for.
49399+ Remember to add the users you want server socket access disabled for to
49400+ the GID specified here. If the sysctl option is enabled, a sysctl
49401+ option with name "socket_server_gid" is created.
49402+
49403+endmenu
49404+menu "Sysctl support"
49405+depends on GRKERNSEC && SYSCTL
49406+
49407+config GRKERNSEC_SYSCTL
49408+ bool "Sysctl support"
49409+ help
49410+ If you say Y here, you will be able to change the options that
49411+ grsecurity runs with at bootup, without having to recompile your
49412+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49413+ to enable (1) or disable (0) various features. All the sysctl entries
49414+ are mutable until the "grsec_lock" entry is set to a non-zero value.
49415+ All features enabled in the kernel configuration are disabled at boot
49416+ if you do not say Y to the "Turn on features by default" option.
49417+ All options should be set at startup, and the grsec_lock entry should
49418+ be set to a non-zero value after all the options are set.
49419+ *THIS IS EXTREMELY IMPORTANT*
49420+
49421+config GRKERNSEC_SYSCTL_DISTRO
49422+ bool "Extra sysctl support for distro makers (READ HELP)"
49423+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49424+ help
49425+ If you say Y here, additional sysctl options will be created
49426+ for features that affect processes running as root. Therefore,
49427+ it is critical when using this option that the grsec_lock entry be
49428+ enabled after boot. Only distros with prebuilt kernel packages
49429+ with this option enabled that can ensure grsec_lock is enabled
49430+ after boot should use this option.
49431+ *Failure to set grsec_lock after boot makes all grsec features
49432+ this option covers useless*
49433+
49434+ Currently this option creates the following sysctl entries:
49435+ "Disable Privileged I/O": "disable_priv_io"
49436+
49437+config GRKERNSEC_SYSCTL_ON
49438+ bool "Turn on features by default"
49439+ depends on GRKERNSEC_SYSCTL
49440+ help
49441+ If you say Y here, instead of having all features enabled in the
49442+ kernel configuration disabled at boot time, the features will be
49443+ enabled at boot time. It is recommended you say Y here unless
49444+ there is some reason you would want all sysctl-tunable features to
49445+ be disabled by default. As mentioned elsewhere, it is important
49446+ to enable the grsec_lock entry once you have finished modifying
49447+ the sysctl entries.
49448+
49449+endmenu
49450+menu "Logging Options"
49451+depends on GRKERNSEC
49452+
49453+config GRKERNSEC_FLOODTIME
49454+ int "Seconds in between log messages (minimum)"
49455+ default 10
49456+ help
49457+ This option allows you to enforce the number of seconds between
49458+ grsecurity log messages. The default should be suitable for most
49459+ people, however, if you choose to change it, choose a value small enough
49460+ to allow informative logs to be produced, but large enough to
49461+ prevent flooding.
49462+
49463+config GRKERNSEC_FLOODBURST
49464+ int "Number of messages in a burst (maximum)"
49465+ default 6
49466+ help
49467+ This option allows you to choose the maximum number of messages allowed
49468+ within the flood time interval you chose in a separate option. The
49469+ default should be suitable for most people, however if you find that
49470+ many of your logs are being interpreted as flooding, you may want to
49471+ raise this value.
49472+
49473+endmenu
49474+
49475+endmenu
49476diff --git a/grsecurity/Makefile b/grsecurity/Makefile
49477new file mode 100644
49478index 0000000..1b9afa9
49479--- /dev/null
49480+++ b/grsecurity/Makefile
49481@@ -0,0 +1,38 @@
49482+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
49483+# during 2001-2009 it has been completely redesigned by Brad Spengler
49484+# into an RBAC system
49485+#
49486+# All code in this directory and various hooks inserted throughout the kernel
49487+# are copyright Brad Spengler - Open Source Security, Inc., and released
49488+# under the GPL v2 or higher
49489+
49490+KBUILD_CFLAGS += -Werror
49491+
49492+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
49493+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
49494+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
49495+
49496+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
49497+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
49498+ gracl_learn.o grsec_log.o
49499+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
49500+
49501+ifdef CONFIG_NET
49502+obj-y += grsec_sock.o
49503+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
49504+endif
49505+
49506+ifndef CONFIG_GRKERNSEC
49507+obj-y += grsec_disabled.o
49508+endif
49509+
49510+ifdef CONFIG_GRKERNSEC_HIDESYM
49511+extra-y := grsec_hidesym.o
49512+$(obj)/grsec_hidesym.o:
49513+ @-chmod -f 500 /boot
49514+ @-chmod -f 500 /lib/modules
49515+ @-chmod -f 500 /lib64/modules
49516+ @-chmod -f 500 /lib32/modules
49517+ @-chmod -f 700 .
49518+ @echo ' grsec: protected kernel image paths'
49519+endif
49520diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
49521new file mode 100644
49522index 0000000..e8c5d41
49523--- /dev/null
49524+++ b/grsecurity/gracl.c
49525@@ -0,0 +1,4179 @@
49526+#include <linux/kernel.h>
49527+#include <linux/module.h>
49528+#include <linux/sched.h>
49529+#include <linux/mm.h>
49530+#include <linux/file.h>
49531+#include <linux/fs.h>
49532+#include <linux/namei.h>
49533+#include <linux/mount.h>
49534+#include <linux/tty.h>
49535+#include <linux/proc_fs.h>
49536+#include <linux/lglock.h>
49537+#include <linux/slab.h>
49538+#include <linux/vmalloc.h>
49539+#include <linux/types.h>
49540+#include <linux/sysctl.h>
49541+#include <linux/netdevice.h>
49542+#include <linux/ptrace.h>
49543+#include <linux/gracl.h>
49544+#include <linux/gralloc.h>
49545+#include <linux/security.h>
49546+#include <linux/grinternal.h>
49547+#include <linux/pid_namespace.h>
49548+#include <linux/fdtable.h>
49549+#include <linux/percpu.h>
49550+#include "../fs/mount.h"
49551+
49552+#include <asm/uaccess.h>
49553+#include <asm/errno.h>
49554+#include <asm/mman.h>
49555+
49556+static struct acl_role_db acl_role_set;
49557+static struct name_db name_set;
49558+static struct inodev_db inodev_set;
49559+
49560+/* for keeping track of userspace pointers used for subjects, so we
49561+ can share references in the kernel as well
49562+*/
49563+
49564+static struct path real_root;
49565+
49566+static struct acl_subj_map_db subj_map_set;
49567+
49568+static struct acl_role_label *default_role;
49569+
49570+static struct acl_role_label *role_list;
49571+
49572+static u16 acl_sp_role_value;
49573+
49574+extern char *gr_shared_page[4];
49575+static DEFINE_MUTEX(gr_dev_mutex);
49576+DEFINE_RWLOCK(gr_inode_lock);
49577+
49578+struct gr_arg *gr_usermode;
49579+
49580+static unsigned int gr_status __read_only = GR_STATUS_INIT;
49581+
49582+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
49583+extern void gr_clear_learn_entries(void);
49584+
49585+#ifdef CONFIG_GRKERNSEC_RESLOG
49586+extern void gr_log_resource(const struct task_struct *task,
49587+ const int res, const unsigned long wanted, const int gt);
49588+#endif
49589+
49590+unsigned char *gr_system_salt;
49591+unsigned char *gr_system_sum;
49592+
49593+static struct sprole_pw **acl_special_roles = NULL;
49594+static __u16 num_sprole_pws = 0;
49595+
49596+static struct acl_role_label *kernel_role = NULL;
49597+
49598+static unsigned int gr_auth_attempts = 0;
49599+static unsigned long gr_auth_expires = 0UL;
49600+
49601+#ifdef CONFIG_NET
49602+extern struct vfsmount *sock_mnt;
49603+#endif
49604+
49605+extern struct vfsmount *pipe_mnt;
49606+extern struct vfsmount *shm_mnt;
49607+#ifdef CONFIG_HUGETLBFS
49608+extern struct vfsmount *hugetlbfs_vfsmount;
49609+#endif
49610+
49611+static struct acl_object_label *fakefs_obj_rw;
49612+static struct acl_object_label *fakefs_obj_rwx;
49613+
49614+extern int gr_init_uidset(void);
49615+extern void gr_free_uidset(void);
49616+extern void gr_remove_uid(uid_t uid);
49617+extern int gr_find_uid(uid_t uid);
49618+
49619+DECLARE_BRLOCK(vfsmount_lock);
49620+
49621+__inline__ int
49622+gr_acl_is_enabled(void)
49623+{
49624+ return (gr_status & GR_READY);
49625+}
49626+
49627+#ifdef CONFIG_BTRFS_FS
49628+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49629+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49630+#endif
49631+
49632+static inline dev_t __get_dev(const struct dentry *dentry)
49633+{
49634+#ifdef CONFIG_BTRFS_FS
49635+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49636+ return get_btrfs_dev_from_inode(dentry->d_inode);
49637+ else
49638+#endif
49639+ return dentry->d_inode->i_sb->s_dev;
49640+}
49641+
49642+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
49643+{
49644+ return __get_dev(dentry);
49645+}
49646+
49647+static char gr_task_roletype_to_char(struct task_struct *task)
49648+{
49649+ switch (task->role->roletype &
49650+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
49651+ GR_ROLE_SPECIAL)) {
49652+ case GR_ROLE_DEFAULT:
49653+ return 'D';
49654+ case GR_ROLE_USER:
49655+ return 'U';
49656+ case GR_ROLE_GROUP:
49657+ return 'G';
49658+ case GR_ROLE_SPECIAL:
49659+ return 'S';
49660+ }
49661+
49662+ return 'X';
49663+}
49664+
49665+char gr_roletype_to_char(void)
49666+{
49667+ return gr_task_roletype_to_char(current);
49668+}
49669+
49670+__inline__ int
49671+gr_acl_tpe_check(void)
49672+{
49673+ if (unlikely(!(gr_status & GR_READY)))
49674+ return 0;
49675+ if (current->role->roletype & GR_ROLE_TPE)
49676+ return 1;
49677+ else
49678+ return 0;
49679+}
49680+
49681+int
49682+gr_handle_rawio(const struct inode *inode)
49683+{
49684+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49685+ if (inode && S_ISBLK(inode->i_mode) &&
49686+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49687+ !capable(CAP_SYS_RAWIO))
49688+ return 1;
49689+#endif
49690+ return 0;
49691+}
49692+
49693+static int
49694+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
49695+{
49696+ if (likely(lena != lenb))
49697+ return 0;
49698+
49699+ return !memcmp(a, b, lena);
49700+}
49701+
49702+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
49703+{
49704+ *buflen -= namelen;
49705+ if (*buflen < 0)
49706+ return -ENAMETOOLONG;
49707+ *buffer -= namelen;
49708+ memcpy(*buffer, str, namelen);
49709+ return 0;
49710+}
49711+
49712+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
49713+{
49714+ return prepend(buffer, buflen, name->name, name->len);
49715+}
49716+
49717+static int prepend_path(const struct path *path, struct path *root,
49718+ char **buffer, int *buflen)
49719+{
49720+ struct dentry *dentry = path->dentry;
49721+ struct vfsmount *vfsmnt = path->mnt;
49722+ struct mount *mnt = real_mount(vfsmnt);
49723+ bool slash = false;
49724+ int error = 0;
49725+
49726+ while (dentry != root->dentry || vfsmnt != root->mnt) {
49727+ struct dentry * parent;
49728+
49729+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
49730+ /* Global root? */
49731+ if (!mnt_has_parent(mnt)) {
49732+ goto out;
49733+ }
49734+ dentry = mnt->mnt_mountpoint;
49735+ mnt = mnt->mnt_parent;
49736+ vfsmnt = &mnt->mnt;
49737+ continue;
49738+ }
49739+ parent = dentry->d_parent;
49740+ prefetch(parent);
49741+ spin_lock(&dentry->d_lock);
49742+ error = prepend_name(buffer, buflen, &dentry->d_name);
49743+ spin_unlock(&dentry->d_lock);
49744+ if (!error)
49745+ error = prepend(buffer, buflen, "/", 1);
49746+ if (error)
49747+ break;
49748+
49749+ slash = true;
49750+ dentry = parent;
49751+ }
49752+
49753+out:
49754+ if (!error && !slash)
49755+ error = prepend(buffer, buflen, "/", 1);
49756+
49757+ return error;
49758+}
49759+
49760+/* this must be called with vfsmount_lock and rename_lock held */
49761+
49762+static char *__our_d_path(const struct path *path, struct path *root,
49763+ char *buf, int buflen)
49764+{
49765+ char *res = buf + buflen;
49766+ int error;
49767+
49768+ prepend(&res, &buflen, "\0", 1);
49769+ error = prepend_path(path, root, &res, &buflen);
49770+ if (error)
49771+ return ERR_PTR(error);
49772+
49773+ return res;
49774+}
49775+
49776+static char *
49777+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
49778+{
49779+ char *retval;
49780+
49781+ retval = __our_d_path(path, root, buf, buflen);
49782+ if (unlikely(IS_ERR(retval)))
49783+ retval = strcpy(buf, "<path too long>");
49784+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
49785+ retval[1] = '\0';
49786+
49787+ return retval;
49788+}
49789+
49790+static char *
49791+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49792+ char *buf, int buflen)
49793+{
49794+ struct path path;
49795+ char *res;
49796+
49797+ path.dentry = (struct dentry *)dentry;
49798+ path.mnt = (struct vfsmount *)vfsmnt;
49799+
49800+ /* we can use real_root.dentry, real_root.mnt, because this is only called
49801+ by the RBAC system */
49802+ res = gen_full_path(&path, &real_root, buf, buflen);
49803+
49804+ return res;
49805+}
49806+
49807+static char *
49808+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49809+ char *buf, int buflen)
49810+{
49811+ char *res;
49812+ struct path path;
49813+ struct path root;
49814+ struct task_struct *reaper = &init_task;
49815+
49816+ path.dentry = (struct dentry *)dentry;
49817+ path.mnt = (struct vfsmount *)vfsmnt;
49818+
49819+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
49820+ get_fs_root(reaper->fs, &root);
49821+
49822+ write_seqlock(&rename_lock);
49823+ br_read_lock(vfsmount_lock);
49824+ res = gen_full_path(&path, &root, buf, buflen);
49825+ br_read_unlock(vfsmount_lock);
49826+ write_sequnlock(&rename_lock);
49827+
49828+ path_put(&root);
49829+ return res;
49830+}
49831+
49832+static char *
49833+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49834+{
49835+ char *ret;
49836+ write_seqlock(&rename_lock);
49837+ br_read_lock(vfsmount_lock);
49838+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49839+ PAGE_SIZE);
49840+ br_read_unlock(vfsmount_lock);
49841+ write_sequnlock(&rename_lock);
49842+ return ret;
49843+}
49844+
49845+static char *
49846+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49847+{
49848+ char *ret;
49849+ char *buf;
49850+ int buflen;
49851+
49852+ write_seqlock(&rename_lock);
49853+ br_read_lock(vfsmount_lock);
49854+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
49855+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
49856+ buflen = (int)(ret - buf);
49857+ if (buflen >= 5)
49858+ prepend(&ret, &buflen, "/proc", 5);
49859+ else
49860+ ret = strcpy(buf, "<path too long>");
49861+ br_read_unlock(vfsmount_lock);
49862+ write_sequnlock(&rename_lock);
49863+ return ret;
49864+}
49865+
49866+char *
49867+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
49868+{
49869+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49870+ PAGE_SIZE);
49871+}
49872+
49873+char *
49874+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
49875+{
49876+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
49877+ PAGE_SIZE);
49878+}
49879+
49880+char *
49881+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
49882+{
49883+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
49884+ PAGE_SIZE);
49885+}
49886+
49887+char *
49888+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
49889+{
49890+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
49891+ PAGE_SIZE);
49892+}
49893+
49894+char *
49895+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
49896+{
49897+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
49898+ PAGE_SIZE);
49899+}
49900+
49901+__inline__ __u32
49902+to_gr_audit(const __u32 reqmode)
49903+{
49904+ /* masks off auditable permission flags, then shifts them to create
49905+ auditing flags, and adds the special case of append auditing if
49906+ we're requesting write */
49907+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
49908+}
49909+
49910+struct acl_subject_label *
49911+lookup_subject_map(const struct acl_subject_label *userp)
49912+{
49913+ unsigned int index = shash(userp, subj_map_set.s_size);
49914+ struct subject_map *match;
49915+
49916+ match = subj_map_set.s_hash[index];
49917+
49918+ while (match && match->user != userp)
49919+ match = match->next;
49920+
49921+ if (match != NULL)
49922+ return match->kernel;
49923+ else
49924+ return NULL;
49925+}
49926+
49927+static void
49928+insert_subj_map_entry(struct subject_map *subjmap)
49929+{
49930+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
49931+ struct subject_map **curr;
49932+
49933+ subjmap->prev = NULL;
49934+
49935+ curr = &subj_map_set.s_hash[index];
49936+ if (*curr != NULL)
49937+ (*curr)->prev = subjmap;
49938+
49939+ subjmap->next = *curr;
49940+ *curr = subjmap;
49941+
49942+ return;
49943+}
49944+
49945+static struct acl_role_label *
49946+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
49947+ const gid_t gid)
49948+{
49949+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
49950+ struct acl_role_label *match;
49951+ struct role_allowed_ip *ipp;
49952+ unsigned int x;
49953+ u32 curr_ip = task->signal->curr_ip;
49954+
49955+ task->signal->saved_ip = curr_ip;
49956+
49957+ match = acl_role_set.r_hash[index];
49958+
49959+ while (match) {
49960+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
49961+ for (x = 0; x < match->domain_child_num; x++) {
49962+ if (match->domain_children[x] == uid)
49963+ goto found;
49964+ }
49965+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
49966+ break;
49967+ match = match->next;
49968+ }
49969+found:
49970+ if (match == NULL) {
49971+ try_group:
49972+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
49973+ match = acl_role_set.r_hash[index];
49974+
49975+ while (match) {
49976+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
49977+ for (x = 0; x < match->domain_child_num; x++) {
49978+ if (match->domain_children[x] == gid)
49979+ goto found2;
49980+ }
49981+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
49982+ break;
49983+ match = match->next;
49984+ }
49985+found2:
49986+ if (match == NULL)
49987+ match = default_role;
49988+ if (match->allowed_ips == NULL)
49989+ return match;
49990+ else {
49991+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
49992+ if (likely
49993+ ((ntohl(curr_ip) & ipp->netmask) ==
49994+ (ntohl(ipp->addr) & ipp->netmask)))
49995+ return match;
49996+ }
49997+ match = default_role;
49998+ }
49999+ } else if (match->allowed_ips == NULL) {
50000+ return match;
50001+ } else {
50002+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50003+ if (likely
50004+ ((ntohl(curr_ip) & ipp->netmask) ==
50005+ (ntohl(ipp->addr) & ipp->netmask)))
50006+ return match;
50007+ }
50008+ goto try_group;
50009+ }
50010+
50011+ return match;
50012+}
50013+
50014+struct acl_subject_label *
50015+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50016+ const struct acl_role_label *role)
50017+{
50018+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50019+ struct acl_subject_label *match;
50020+
50021+ match = role->subj_hash[index];
50022+
50023+ while (match && (match->inode != ino || match->device != dev ||
50024+ (match->mode & GR_DELETED))) {
50025+ match = match->next;
50026+ }
50027+
50028+ if (match && !(match->mode & GR_DELETED))
50029+ return match;
50030+ else
50031+ return NULL;
50032+}
50033+
50034+struct acl_subject_label *
50035+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50036+ const struct acl_role_label *role)
50037+{
50038+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50039+ struct acl_subject_label *match;
50040+
50041+ match = role->subj_hash[index];
50042+
50043+ while (match && (match->inode != ino || match->device != dev ||
50044+ !(match->mode & GR_DELETED))) {
50045+ match = match->next;
50046+ }
50047+
50048+ if (match && (match->mode & GR_DELETED))
50049+ return match;
50050+ else
50051+ return NULL;
50052+}
50053+
50054+static struct acl_object_label *
50055+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50056+ const struct acl_subject_label *subj)
50057+{
50058+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50059+ struct acl_object_label *match;
50060+
50061+ match = subj->obj_hash[index];
50062+
50063+ while (match && (match->inode != ino || match->device != dev ||
50064+ (match->mode & GR_DELETED))) {
50065+ match = match->next;
50066+ }
50067+
50068+ if (match && !(match->mode & GR_DELETED))
50069+ return match;
50070+ else
50071+ return NULL;
50072+}
50073+
50074+static struct acl_object_label *
50075+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50076+ const struct acl_subject_label *subj)
50077+{
50078+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50079+ struct acl_object_label *match;
50080+
50081+ match = subj->obj_hash[index];
50082+
50083+ while (match && (match->inode != ino || match->device != dev ||
50084+ !(match->mode & GR_DELETED))) {
50085+ match = match->next;
50086+ }
50087+
50088+ if (match && (match->mode & GR_DELETED))
50089+ return match;
50090+
50091+ match = subj->obj_hash[index];
50092+
50093+ while (match && (match->inode != ino || match->device != dev ||
50094+ (match->mode & GR_DELETED))) {
50095+ match = match->next;
50096+ }
50097+
50098+ if (match && !(match->mode & GR_DELETED))
50099+ return match;
50100+ else
50101+ return NULL;
50102+}
50103+
50104+static struct name_entry *
50105+lookup_name_entry(const char *name)
50106+{
50107+ unsigned int len = strlen(name);
50108+ unsigned int key = full_name_hash(name, len);
50109+ unsigned int index = key % name_set.n_size;
50110+ struct name_entry *match;
50111+
50112+ match = name_set.n_hash[index];
50113+
50114+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50115+ match = match->next;
50116+
50117+ return match;
50118+}
50119+
50120+static struct name_entry *
50121+lookup_name_entry_create(const char *name)
50122+{
50123+ unsigned int len = strlen(name);
50124+ unsigned int key = full_name_hash(name, len);
50125+ unsigned int index = key % name_set.n_size;
50126+ struct name_entry *match;
50127+
50128+ match = name_set.n_hash[index];
50129+
50130+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50131+ !match->deleted))
50132+ match = match->next;
50133+
50134+ if (match && match->deleted)
50135+ return match;
50136+
50137+ match = name_set.n_hash[index];
50138+
50139+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50140+ match->deleted))
50141+ match = match->next;
50142+
50143+ if (match && !match->deleted)
50144+ return match;
50145+ else
50146+ return NULL;
50147+}
50148+
50149+static struct inodev_entry *
50150+lookup_inodev_entry(const ino_t ino, const dev_t dev)
50151+{
50152+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
50153+ struct inodev_entry *match;
50154+
50155+ match = inodev_set.i_hash[index];
50156+
50157+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50158+ match = match->next;
50159+
50160+ return match;
50161+}
50162+
50163+static void
50164+insert_inodev_entry(struct inodev_entry *entry)
50165+{
50166+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50167+ inodev_set.i_size);
50168+ struct inodev_entry **curr;
50169+
50170+ entry->prev = NULL;
50171+
50172+ curr = &inodev_set.i_hash[index];
50173+ if (*curr != NULL)
50174+ (*curr)->prev = entry;
50175+
50176+ entry->next = *curr;
50177+ *curr = entry;
50178+
50179+ return;
50180+}
50181+
50182+static void
50183+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50184+{
50185+ unsigned int index =
50186+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50187+ struct acl_role_label **curr;
50188+ struct acl_role_label *tmp, *tmp2;
50189+
50190+ curr = &acl_role_set.r_hash[index];
50191+
50192+ /* simple case, slot is empty, just set it to our role */
50193+ if (*curr == NULL) {
50194+ *curr = role;
50195+ } else {
50196+ /* example:
50197+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
50198+ 2 -> 3
50199+ */
50200+ /* first check to see if we can already be reached via this slot */
50201+ tmp = *curr;
50202+ while (tmp && tmp != role)
50203+ tmp = tmp->next;
50204+ if (tmp == role) {
50205+ /* we don't need to add ourselves to this slot's chain */
50206+ return;
50207+ }
50208+ /* we need to add ourselves to this chain, two cases */
50209+ if (role->next == NULL) {
50210+ /* simple case, append the current chain to our role */
50211+ role->next = *curr;
50212+ *curr = role;
50213+ } else {
50214+ /* 1 -> 2 -> 3 -> 4
50215+ 2 -> 3 -> 4
50216+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50217+ */
50218+ /* trickier case: walk our role's chain until we find
50219+ the role for the start of the current slot's chain */
50220+ tmp = role;
50221+ tmp2 = *curr;
50222+ while (tmp->next && tmp->next != tmp2)
50223+ tmp = tmp->next;
50224+ if (tmp->next == tmp2) {
50225+ /* from example above, we found 3, so just
50226+ replace this slot's chain with ours */
50227+ *curr = role;
50228+ } else {
50229+ /* we didn't find a subset of our role's chain
50230+ in the current slot's chain, so append their
50231+ chain to ours, and set us as the first role in
50232+ the slot's chain
50233+
50234+ we could fold this case with the case above,
50235+ but making it explicit for clarity
50236+ */
50237+ tmp->next = tmp2;
50238+ *curr = role;
50239+ }
50240+ }
50241+ }
50242+
50243+ return;
50244+}
50245+
50246+static void
50247+insert_acl_role_label(struct acl_role_label *role)
50248+{
50249+ int i;
50250+
50251+ if (role_list == NULL) {
50252+ role_list = role;
50253+ role->prev = NULL;
50254+ } else {
50255+ role->prev = role_list;
50256+ role_list = role;
50257+ }
50258+
50259+ /* used for hash chains */
50260+ role->next = NULL;
50261+
50262+ if (role->roletype & GR_ROLE_DOMAIN) {
50263+ for (i = 0; i < role->domain_child_num; i++)
50264+ __insert_acl_role_label(role, role->domain_children[i]);
50265+ } else
50266+ __insert_acl_role_label(role, role->uidgid);
50267+}
50268+
50269+static int
50270+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50271+{
50272+ struct name_entry **curr, *nentry;
50273+ struct inodev_entry *ientry;
50274+ unsigned int len = strlen(name);
50275+ unsigned int key = full_name_hash(name, len);
50276+ unsigned int index = key % name_set.n_size;
50277+
50278+ curr = &name_set.n_hash[index];
50279+
50280+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50281+ curr = &((*curr)->next);
50282+
50283+ if (*curr != NULL)
50284+ return 1;
50285+
50286+ nentry = acl_alloc(sizeof (struct name_entry));
50287+ if (nentry == NULL)
50288+ return 0;
50289+ ientry = acl_alloc(sizeof (struct inodev_entry));
50290+ if (ientry == NULL)
50291+ return 0;
50292+ ientry->nentry = nentry;
50293+
50294+ nentry->key = key;
50295+ nentry->name = name;
50296+ nentry->inode = inode;
50297+ nentry->device = device;
50298+ nentry->len = len;
50299+ nentry->deleted = deleted;
50300+
50301+ nentry->prev = NULL;
50302+ curr = &name_set.n_hash[index];
50303+ if (*curr != NULL)
50304+ (*curr)->prev = nentry;
50305+ nentry->next = *curr;
50306+ *curr = nentry;
50307+
50308+ /* insert us into the table searchable by inode/dev */
50309+ insert_inodev_entry(ientry);
50310+
50311+ return 1;
50312+}
50313+
50314+static void
50315+insert_acl_obj_label(struct acl_object_label *obj,
50316+ struct acl_subject_label *subj)
50317+{
50318+ unsigned int index =
50319+ fhash(obj->inode, obj->device, subj->obj_hash_size);
50320+ struct acl_object_label **curr;
50321+
50322+
50323+ obj->prev = NULL;
50324+
50325+ curr = &subj->obj_hash[index];
50326+ if (*curr != NULL)
50327+ (*curr)->prev = obj;
50328+
50329+ obj->next = *curr;
50330+ *curr = obj;
50331+
50332+ return;
50333+}
50334+
50335+static void
50336+insert_acl_subj_label(struct acl_subject_label *obj,
50337+ struct acl_role_label *role)
50338+{
50339+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50340+ struct acl_subject_label **curr;
50341+
50342+ obj->prev = NULL;
50343+
50344+ curr = &role->subj_hash[index];
50345+ if (*curr != NULL)
50346+ (*curr)->prev = obj;
50347+
50348+ obj->next = *curr;
50349+ *curr = obj;
50350+
50351+ return;
50352+}
50353+
50354+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50355+
50356+static void *
50357+create_table(__u32 * len, int elementsize)
50358+{
50359+ unsigned int table_sizes[] = {
50360+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50361+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50362+ 4194301, 8388593, 16777213, 33554393, 67108859
50363+ };
50364+ void *newtable = NULL;
50365+ unsigned int pwr = 0;
50366+
50367+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50368+ table_sizes[pwr] <= *len)
50369+ pwr++;
50370+
50371+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50372+ return newtable;
50373+
50374+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50375+ newtable =
50376+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50377+ else
50378+ newtable = vmalloc(table_sizes[pwr] * elementsize);
50379+
50380+ *len = table_sizes[pwr];
50381+
50382+ return newtable;
50383+}
50384+
50385+static int
50386+init_variables(const struct gr_arg *arg)
50387+{
50388+ struct task_struct *reaper = &init_task;
50389+ unsigned int stacksize;
50390+
50391+ subj_map_set.s_size = arg->role_db.num_subjects;
50392+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50393+ name_set.n_size = arg->role_db.num_objects;
50394+ inodev_set.i_size = arg->role_db.num_objects;
50395+
50396+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
50397+ !name_set.n_size || !inodev_set.i_size)
50398+ return 1;
50399+
50400+ if (!gr_init_uidset())
50401+ return 1;
50402+
50403+ /* set up the stack that holds allocation info */
50404+
50405+ stacksize = arg->role_db.num_pointers + 5;
50406+
50407+ if (!acl_alloc_stack_init(stacksize))
50408+ return 1;
50409+
50410+ /* grab reference for the real root dentry and vfsmount */
50411+ get_fs_root(reaper->fs, &real_root);
50412+
50413+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50414+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50415+#endif
50416+
50417+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50418+ if (fakefs_obj_rw == NULL)
50419+ return 1;
50420+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50421+
50422+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50423+ if (fakefs_obj_rwx == NULL)
50424+ return 1;
50425+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50426+
50427+ subj_map_set.s_hash =
50428+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50429+ acl_role_set.r_hash =
50430+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50431+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50432+ inodev_set.i_hash =
50433+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50434+
50435+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50436+ !name_set.n_hash || !inodev_set.i_hash)
50437+ return 1;
50438+
50439+ memset(subj_map_set.s_hash, 0,
50440+ sizeof(struct subject_map *) * subj_map_set.s_size);
50441+ memset(acl_role_set.r_hash, 0,
50442+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
50443+ memset(name_set.n_hash, 0,
50444+ sizeof (struct name_entry *) * name_set.n_size);
50445+ memset(inodev_set.i_hash, 0,
50446+ sizeof (struct inodev_entry *) * inodev_set.i_size);
50447+
50448+ return 0;
50449+}
50450+
50451+/* free information not needed after startup
50452+ currently contains user->kernel pointer mappings for subjects
50453+*/
50454+
50455+static void
50456+free_init_variables(void)
50457+{
50458+ __u32 i;
50459+
50460+ if (subj_map_set.s_hash) {
50461+ for (i = 0; i < subj_map_set.s_size; i++) {
50462+ if (subj_map_set.s_hash[i]) {
50463+ kfree(subj_map_set.s_hash[i]);
50464+ subj_map_set.s_hash[i] = NULL;
50465+ }
50466+ }
50467+
50468+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
50469+ PAGE_SIZE)
50470+ kfree(subj_map_set.s_hash);
50471+ else
50472+ vfree(subj_map_set.s_hash);
50473+ }
50474+
50475+ return;
50476+}
50477+
50478+static void
50479+free_variables(void)
50480+{
50481+ struct acl_subject_label *s;
50482+ struct acl_role_label *r;
50483+ struct task_struct *task, *task2;
50484+ unsigned int x;
50485+
50486+ gr_clear_learn_entries();
50487+
50488+ read_lock(&tasklist_lock);
50489+ do_each_thread(task2, task) {
50490+ task->acl_sp_role = 0;
50491+ task->acl_role_id = 0;
50492+ task->acl = NULL;
50493+ task->role = NULL;
50494+ } while_each_thread(task2, task);
50495+ read_unlock(&tasklist_lock);
50496+
50497+ /* release the reference to the real root dentry and vfsmount */
50498+ path_put(&real_root);
50499+ memset(&real_root, 0, sizeof(real_root));
50500+
50501+ /* free all object hash tables */
50502+
50503+ FOR_EACH_ROLE_START(r)
50504+ if (r->subj_hash == NULL)
50505+ goto next_role;
50506+ FOR_EACH_SUBJECT_START(r, s, x)
50507+ if (s->obj_hash == NULL)
50508+ break;
50509+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50510+ kfree(s->obj_hash);
50511+ else
50512+ vfree(s->obj_hash);
50513+ FOR_EACH_SUBJECT_END(s, x)
50514+ FOR_EACH_NESTED_SUBJECT_START(r, s)
50515+ if (s->obj_hash == NULL)
50516+ break;
50517+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50518+ kfree(s->obj_hash);
50519+ else
50520+ vfree(s->obj_hash);
50521+ FOR_EACH_NESTED_SUBJECT_END(s)
50522+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
50523+ kfree(r->subj_hash);
50524+ else
50525+ vfree(r->subj_hash);
50526+ r->subj_hash = NULL;
50527+next_role:
50528+ FOR_EACH_ROLE_END(r)
50529+
50530+ acl_free_all();
50531+
50532+ if (acl_role_set.r_hash) {
50533+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
50534+ PAGE_SIZE)
50535+ kfree(acl_role_set.r_hash);
50536+ else
50537+ vfree(acl_role_set.r_hash);
50538+ }
50539+ if (name_set.n_hash) {
50540+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
50541+ PAGE_SIZE)
50542+ kfree(name_set.n_hash);
50543+ else
50544+ vfree(name_set.n_hash);
50545+ }
50546+
50547+ if (inodev_set.i_hash) {
50548+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
50549+ PAGE_SIZE)
50550+ kfree(inodev_set.i_hash);
50551+ else
50552+ vfree(inodev_set.i_hash);
50553+ }
50554+
50555+ gr_free_uidset();
50556+
50557+ memset(&name_set, 0, sizeof (struct name_db));
50558+ memset(&inodev_set, 0, sizeof (struct inodev_db));
50559+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
50560+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
50561+
50562+ default_role = NULL;
50563+ kernel_role = NULL;
50564+ role_list = NULL;
50565+
50566+ return;
50567+}
50568+
50569+static __u32
50570+count_user_objs(struct acl_object_label *userp)
50571+{
50572+ struct acl_object_label o_tmp;
50573+ __u32 num = 0;
50574+
50575+ while (userp) {
50576+ if (copy_from_user(&o_tmp, userp,
50577+ sizeof (struct acl_object_label)))
50578+ break;
50579+
50580+ userp = o_tmp.prev;
50581+ num++;
50582+ }
50583+
50584+ return num;
50585+}
50586+
50587+static struct acl_subject_label *
50588+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
50589+
50590+static int
50591+copy_user_glob(struct acl_object_label *obj)
50592+{
50593+ struct acl_object_label *g_tmp, **guser;
50594+ unsigned int len;
50595+ char *tmp;
50596+
50597+ if (obj->globbed == NULL)
50598+ return 0;
50599+
50600+ guser = &obj->globbed;
50601+ while (*guser) {
50602+ g_tmp = (struct acl_object_label *)
50603+ acl_alloc(sizeof (struct acl_object_label));
50604+ if (g_tmp == NULL)
50605+ return -ENOMEM;
50606+
50607+ if (copy_from_user(g_tmp, *guser,
50608+ sizeof (struct acl_object_label)))
50609+ return -EFAULT;
50610+
50611+ len = strnlen_user(g_tmp->filename, PATH_MAX);
50612+
50613+ if (!len || len >= PATH_MAX)
50614+ return -EINVAL;
50615+
50616+ if ((tmp = (char *) acl_alloc(len)) == NULL)
50617+ return -ENOMEM;
50618+
50619+ if (copy_from_user(tmp, g_tmp->filename, len))
50620+ return -EFAULT;
50621+ tmp[len-1] = '\0';
50622+ g_tmp->filename = tmp;
50623+
50624+ *guser = g_tmp;
50625+ guser = &(g_tmp->next);
50626+ }
50627+
50628+ return 0;
50629+}
50630+
50631+static int
50632+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
50633+ struct acl_role_label *role)
50634+{
50635+ struct acl_object_label *o_tmp;
50636+ unsigned int len;
50637+ int ret;
50638+ char *tmp;
50639+
50640+ while (userp) {
50641+ if ((o_tmp = (struct acl_object_label *)
50642+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
50643+ return -ENOMEM;
50644+
50645+ if (copy_from_user(o_tmp, userp,
50646+ sizeof (struct acl_object_label)))
50647+ return -EFAULT;
50648+
50649+ userp = o_tmp->prev;
50650+
50651+ len = strnlen_user(o_tmp->filename, PATH_MAX);
50652+
50653+ if (!len || len >= PATH_MAX)
50654+ return -EINVAL;
50655+
50656+ if ((tmp = (char *) acl_alloc(len)) == NULL)
50657+ return -ENOMEM;
50658+
50659+ if (copy_from_user(tmp, o_tmp->filename, len))
50660+ return -EFAULT;
50661+ tmp[len-1] = '\0';
50662+ o_tmp->filename = tmp;
50663+
50664+ insert_acl_obj_label(o_tmp, subj);
50665+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
50666+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
50667+ return -ENOMEM;
50668+
50669+ ret = copy_user_glob(o_tmp);
50670+ if (ret)
50671+ return ret;
50672+
50673+ if (o_tmp->nested) {
50674+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
50675+ if (IS_ERR(o_tmp->nested))
50676+ return PTR_ERR(o_tmp->nested);
50677+
50678+ /* insert into nested subject list */
50679+ o_tmp->nested->next = role->hash->first;
50680+ role->hash->first = o_tmp->nested;
50681+ }
50682+ }
50683+
50684+ return 0;
50685+}
50686+
50687+static __u32
50688+count_user_subjs(struct acl_subject_label *userp)
50689+{
50690+ struct acl_subject_label s_tmp;
50691+ __u32 num = 0;
50692+
50693+ while (userp) {
50694+ if (copy_from_user(&s_tmp, userp,
50695+ sizeof (struct acl_subject_label)))
50696+ break;
50697+
50698+ userp = s_tmp.prev;
50699+ /* do not count nested subjects against this count, since
50700+ they are not included in the hash table, but are
50701+ attached to objects. We have already counted
50702+ the subjects in userspace for the allocation
50703+ stack
50704+ */
50705+ if (!(s_tmp.mode & GR_NESTED))
50706+ num++;
50707+ }
50708+
50709+ return num;
50710+}
50711+
50712+static int
50713+copy_user_allowedips(struct acl_role_label *rolep)
50714+{
50715+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
50716+
50717+ ruserip = rolep->allowed_ips;
50718+
50719+ while (ruserip) {
50720+ rlast = rtmp;
50721+
50722+ if ((rtmp = (struct role_allowed_ip *)
50723+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
50724+ return -ENOMEM;
50725+
50726+ if (copy_from_user(rtmp, ruserip,
50727+ sizeof (struct role_allowed_ip)))
50728+ return -EFAULT;
50729+
50730+ ruserip = rtmp->prev;
50731+
50732+ if (!rlast) {
50733+ rtmp->prev = NULL;
50734+ rolep->allowed_ips = rtmp;
50735+ } else {
50736+ rlast->next = rtmp;
50737+ rtmp->prev = rlast;
50738+ }
50739+
50740+ if (!ruserip)
50741+ rtmp->next = NULL;
50742+ }
50743+
50744+ return 0;
50745+}
50746+
50747+static int
50748+copy_user_transitions(struct acl_role_label *rolep)
50749+{
50750+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
50751+
50752+ unsigned int len;
50753+ char *tmp;
50754+
50755+ rusertp = rolep->transitions;
50756+
50757+ while (rusertp) {
50758+ rlast = rtmp;
50759+
50760+ if ((rtmp = (struct role_transition *)
50761+ acl_alloc(sizeof (struct role_transition))) == NULL)
50762+ return -ENOMEM;
50763+
50764+ if (copy_from_user(rtmp, rusertp,
50765+ sizeof (struct role_transition)))
50766+ return -EFAULT;
50767+
50768+ rusertp = rtmp->prev;
50769+
50770+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
50771+
50772+ if (!len || len >= GR_SPROLE_LEN)
50773+ return -EINVAL;
50774+
50775+ if ((tmp = (char *) acl_alloc(len)) == NULL)
50776+ return -ENOMEM;
50777+
50778+ if (copy_from_user(tmp, rtmp->rolename, len))
50779+ return -EFAULT;
50780+ tmp[len-1] = '\0';
50781+ rtmp->rolename = tmp;
50782+
50783+ if (!rlast) {
50784+ rtmp->prev = NULL;
50785+ rolep->transitions = rtmp;
50786+ } else {
50787+ rlast->next = rtmp;
50788+ rtmp->prev = rlast;
50789+ }
50790+
50791+ if (!rusertp)
50792+ rtmp->next = NULL;
50793+ }
50794+
50795+ return 0;
50796+}
50797+
50798+static struct acl_subject_label *
50799+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
50800+{
50801+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
50802+ unsigned int len;
50803+ char *tmp;
50804+ __u32 num_objs;
50805+ struct acl_ip_label **i_tmp, *i_utmp2;
50806+ struct gr_hash_struct ghash;
50807+ struct subject_map *subjmap;
50808+ unsigned int i_num;
50809+ int err;
50810+
50811+ s_tmp = lookup_subject_map(userp);
50812+
50813+ /* we've already copied this subject into the kernel, just return
50814+ the reference to it, and don't copy it over again
50815+ */
50816+ if (s_tmp)
50817+ return(s_tmp);
50818+
50819+ if ((s_tmp = (struct acl_subject_label *)
50820+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
50821+ return ERR_PTR(-ENOMEM);
50822+
50823+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
50824+ if (subjmap == NULL)
50825+ return ERR_PTR(-ENOMEM);
50826+
50827+ subjmap->user = userp;
50828+ subjmap->kernel = s_tmp;
50829+ insert_subj_map_entry(subjmap);
50830+
50831+ if (copy_from_user(s_tmp, userp,
50832+ sizeof (struct acl_subject_label)))
50833+ return ERR_PTR(-EFAULT);
50834+
50835+ len = strnlen_user(s_tmp->filename, PATH_MAX);
50836+
50837+ if (!len || len >= PATH_MAX)
50838+ return ERR_PTR(-EINVAL);
50839+
50840+ if ((tmp = (char *) acl_alloc(len)) == NULL)
50841+ return ERR_PTR(-ENOMEM);
50842+
50843+ if (copy_from_user(tmp, s_tmp->filename, len))
50844+ return ERR_PTR(-EFAULT);
50845+ tmp[len-1] = '\0';
50846+ s_tmp->filename = tmp;
50847+
50848+ if (!strcmp(s_tmp->filename, "/"))
50849+ role->root_label = s_tmp;
50850+
50851+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
50852+ return ERR_PTR(-EFAULT);
50853+
50854+ /* copy user and group transition tables */
50855+
50856+ if (s_tmp->user_trans_num) {
50857+ uid_t *uidlist;
50858+
50859+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
50860+ if (uidlist == NULL)
50861+ return ERR_PTR(-ENOMEM);
50862+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
50863+ return ERR_PTR(-EFAULT);
50864+
50865+ s_tmp->user_transitions = uidlist;
50866+ }
50867+
50868+ if (s_tmp->group_trans_num) {
50869+ gid_t *gidlist;
50870+
50871+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
50872+ if (gidlist == NULL)
50873+ return ERR_PTR(-ENOMEM);
50874+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
50875+ return ERR_PTR(-EFAULT);
50876+
50877+ s_tmp->group_transitions = gidlist;
50878+ }
50879+
50880+ /* set up object hash table */
50881+ num_objs = count_user_objs(ghash.first);
50882+
50883+ s_tmp->obj_hash_size = num_objs;
50884+ s_tmp->obj_hash =
50885+ (struct acl_object_label **)
50886+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
50887+
50888+ if (!s_tmp->obj_hash)
50889+ return ERR_PTR(-ENOMEM);
50890+
50891+ memset(s_tmp->obj_hash, 0,
50892+ s_tmp->obj_hash_size *
50893+ sizeof (struct acl_object_label *));
50894+
50895+ /* add in objects */
50896+ err = copy_user_objs(ghash.first, s_tmp, role);
50897+
50898+ if (err)
50899+ return ERR_PTR(err);
50900+
50901+ /* set pointer for parent subject */
50902+ if (s_tmp->parent_subject) {
50903+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
50904+
50905+ if (IS_ERR(s_tmp2))
50906+ return s_tmp2;
50907+
50908+ s_tmp->parent_subject = s_tmp2;
50909+ }
50910+
50911+ /* add in ip acls */
50912+
50913+ if (!s_tmp->ip_num) {
50914+ s_tmp->ips = NULL;
50915+ goto insert;
50916+ }
50917+
50918+ i_tmp =
50919+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
50920+ sizeof (struct acl_ip_label *));
50921+
50922+ if (!i_tmp)
50923+ return ERR_PTR(-ENOMEM);
50924+
50925+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
50926+ *(i_tmp + i_num) =
50927+ (struct acl_ip_label *)
50928+ acl_alloc(sizeof (struct acl_ip_label));
50929+ if (!*(i_tmp + i_num))
50930+ return ERR_PTR(-ENOMEM);
50931+
50932+ if (copy_from_user
50933+ (&i_utmp2, s_tmp->ips + i_num,
50934+ sizeof (struct acl_ip_label *)))
50935+ return ERR_PTR(-EFAULT);
50936+
50937+ if (copy_from_user
50938+ (*(i_tmp + i_num), i_utmp2,
50939+ sizeof (struct acl_ip_label)))
50940+ return ERR_PTR(-EFAULT);
50941+
50942+ if ((*(i_tmp + i_num))->iface == NULL)
50943+ continue;
50944+
50945+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
50946+ if (!len || len >= IFNAMSIZ)
50947+ return ERR_PTR(-EINVAL);
50948+ tmp = acl_alloc(len);
50949+ if (tmp == NULL)
50950+ return ERR_PTR(-ENOMEM);
50951+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
50952+ return ERR_PTR(-EFAULT);
50953+ (*(i_tmp + i_num))->iface = tmp;
50954+ }
50955+
50956+ s_tmp->ips = i_tmp;
50957+
50958+insert:
50959+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
50960+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
50961+ return ERR_PTR(-ENOMEM);
50962+
50963+ return s_tmp;
50964+}
50965+
50966+static int
50967+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
50968+{
50969+ struct acl_subject_label s_pre;
50970+ struct acl_subject_label * ret;
50971+ int err;
50972+
50973+ while (userp) {
50974+ if (copy_from_user(&s_pre, userp,
50975+ sizeof (struct acl_subject_label)))
50976+ return -EFAULT;
50977+
50978+ /* do not add nested subjects here, add
50979+ while parsing objects
50980+ */
50981+
50982+ if (s_pre.mode & GR_NESTED) {
50983+ userp = s_pre.prev;
50984+ continue;
50985+ }
50986+
50987+ ret = do_copy_user_subj(userp, role);
50988+
50989+ err = PTR_ERR(ret);
50990+ if (IS_ERR(ret))
50991+ return err;
50992+
50993+ insert_acl_subj_label(ret, role);
50994+
50995+ userp = s_pre.prev;
50996+ }
50997+
50998+ return 0;
50999+}
51000+
51001+static int
51002+copy_user_acl(struct gr_arg *arg)
51003+{
51004+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51005+ struct sprole_pw *sptmp;
51006+ struct gr_hash_struct *ghash;
51007+ uid_t *domainlist;
51008+ unsigned int r_num;
51009+ unsigned int len;
51010+ char *tmp;
51011+ int err = 0;
51012+ __u16 i;
51013+ __u32 num_subjs;
51014+
51015+ /* we need a default and kernel role */
51016+ if (arg->role_db.num_roles < 2)
51017+ return -EINVAL;
51018+
51019+ /* copy special role authentication info from userspace */
51020+
51021+ num_sprole_pws = arg->num_sprole_pws;
51022+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51023+
51024+ if (!acl_special_roles && num_sprole_pws)
51025+ return -ENOMEM;
51026+
51027+ for (i = 0; i < num_sprole_pws; i++) {
51028+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51029+ if (!sptmp)
51030+ return -ENOMEM;
51031+ if (copy_from_user(sptmp, arg->sprole_pws + i,
51032+ sizeof (struct sprole_pw)))
51033+ return -EFAULT;
51034+
51035+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51036+
51037+ if (!len || len >= GR_SPROLE_LEN)
51038+ return -EINVAL;
51039+
51040+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51041+ return -ENOMEM;
51042+
51043+ if (copy_from_user(tmp, sptmp->rolename, len))
51044+ return -EFAULT;
51045+
51046+ tmp[len-1] = '\0';
51047+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51048+ printk(KERN_ALERT "Copying special role %s\n", tmp);
51049+#endif
51050+ sptmp->rolename = tmp;
51051+ acl_special_roles[i] = sptmp;
51052+ }
51053+
51054+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51055+
51056+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51057+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
51058+
51059+ if (!r_tmp)
51060+ return -ENOMEM;
51061+
51062+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
51063+ sizeof (struct acl_role_label *)))
51064+ return -EFAULT;
51065+
51066+ if (copy_from_user(r_tmp, r_utmp2,
51067+ sizeof (struct acl_role_label)))
51068+ return -EFAULT;
51069+
51070+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51071+
51072+ if (!len || len >= PATH_MAX)
51073+ return -EINVAL;
51074+
51075+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51076+ return -ENOMEM;
51077+
51078+ if (copy_from_user(tmp, r_tmp->rolename, len))
51079+ return -EFAULT;
51080+
51081+ tmp[len-1] = '\0';
51082+ r_tmp->rolename = tmp;
51083+
51084+ if (!strcmp(r_tmp->rolename, "default")
51085+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51086+ default_role = r_tmp;
51087+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51088+ kernel_role = r_tmp;
51089+ }
51090+
51091+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51092+ return -ENOMEM;
51093+
51094+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51095+ return -EFAULT;
51096+
51097+ r_tmp->hash = ghash;
51098+
51099+ num_subjs = count_user_subjs(r_tmp->hash->first);
51100+
51101+ r_tmp->subj_hash_size = num_subjs;
51102+ r_tmp->subj_hash =
51103+ (struct acl_subject_label **)
51104+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51105+
51106+ if (!r_tmp->subj_hash)
51107+ return -ENOMEM;
51108+
51109+ err = copy_user_allowedips(r_tmp);
51110+ if (err)
51111+ return err;
51112+
51113+ /* copy domain info */
51114+ if (r_tmp->domain_children != NULL) {
51115+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51116+ if (domainlist == NULL)
51117+ return -ENOMEM;
51118+
51119+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51120+ return -EFAULT;
51121+
51122+ r_tmp->domain_children = domainlist;
51123+ }
51124+
51125+ err = copy_user_transitions(r_tmp);
51126+ if (err)
51127+ return err;
51128+
51129+ memset(r_tmp->subj_hash, 0,
51130+ r_tmp->subj_hash_size *
51131+ sizeof (struct acl_subject_label *));
51132+
51133+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51134+
51135+ if (err)
51136+ return err;
51137+
51138+ /* set nested subject list to null */
51139+ r_tmp->hash->first = NULL;
51140+
51141+ insert_acl_role_label(r_tmp);
51142+ }
51143+
51144+ if (default_role == NULL || kernel_role == NULL)
51145+ return -EINVAL;
51146+
51147+ return err;
51148+}
51149+
51150+static int
51151+gracl_init(struct gr_arg *args)
51152+{
51153+ int error = 0;
51154+
51155+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51156+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51157+
51158+ if (init_variables(args)) {
51159+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51160+ error = -ENOMEM;
51161+ free_variables();
51162+ goto out;
51163+ }
51164+
51165+ error = copy_user_acl(args);
51166+ free_init_variables();
51167+ if (error) {
51168+ free_variables();
51169+ goto out;
51170+ }
51171+
51172+ if ((error = gr_set_acls(0))) {
51173+ free_variables();
51174+ goto out;
51175+ }
51176+
51177+ pax_open_kernel();
51178+ gr_status |= GR_READY;
51179+ pax_close_kernel();
51180+
51181+ out:
51182+ return error;
51183+}
51184+
51185+/* derived from glibc fnmatch() 0: match, 1: no match*/
51186+
51187+static int
51188+glob_match(const char *p, const char *n)
51189+{
51190+ char c;
51191+
51192+ while ((c = *p++) != '\0') {
51193+ switch (c) {
51194+ case '?':
51195+ if (*n == '\0')
51196+ return 1;
51197+ else if (*n == '/')
51198+ return 1;
51199+ break;
51200+ case '\\':
51201+ if (*n != c)
51202+ return 1;
51203+ break;
51204+ case '*':
51205+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
51206+ if (*n == '/')
51207+ return 1;
51208+ else if (c == '?') {
51209+ if (*n == '\0')
51210+ return 1;
51211+ else
51212+ ++n;
51213+ }
51214+ }
51215+ if (c == '\0') {
51216+ return 0;
51217+ } else {
51218+ const char *endp;
51219+
51220+ if ((endp = strchr(n, '/')) == NULL)
51221+ endp = n + strlen(n);
51222+
51223+ if (c == '[') {
51224+ for (--p; n < endp; ++n)
51225+ if (!glob_match(p, n))
51226+ return 0;
51227+ } else if (c == '/') {
51228+ while (*n != '\0' && *n != '/')
51229+ ++n;
51230+ if (*n == '/' && !glob_match(p, n + 1))
51231+ return 0;
51232+ } else {
51233+ for (--p; n < endp; ++n)
51234+ if (*n == c && !glob_match(p, n))
51235+ return 0;
51236+ }
51237+
51238+ return 1;
51239+ }
51240+ case '[':
51241+ {
51242+ int not;
51243+ char cold;
51244+
51245+ if (*n == '\0' || *n == '/')
51246+ return 1;
51247+
51248+ not = (*p == '!' || *p == '^');
51249+ if (not)
51250+ ++p;
51251+
51252+ c = *p++;
51253+ for (;;) {
51254+ unsigned char fn = (unsigned char)*n;
51255+
51256+ if (c == '\0')
51257+ return 1;
51258+ else {
51259+ if (c == fn)
51260+ goto matched;
51261+ cold = c;
51262+ c = *p++;
51263+
51264+ if (c == '-' && *p != ']') {
51265+ unsigned char cend = *p++;
51266+
51267+ if (cend == '\0')
51268+ return 1;
51269+
51270+ if (cold <= fn && fn <= cend)
51271+ goto matched;
51272+
51273+ c = *p++;
51274+ }
51275+ }
51276+
51277+ if (c == ']')
51278+ break;
51279+ }
51280+ if (!not)
51281+ return 1;
51282+ break;
51283+ matched:
51284+ while (c != ']') {
51285+ if (c == '\0')
51286+ return 1;
51287+
51288+ c = *p++;
51289+ }
51290+ if (not)
51291+ return 1;
51292+ }
51293+ break;
51294+ default:
51295+ if (c != *n)
51296+ return 1;
51297+ }
51298+
51299+ ++n;
51300+ }
51301+
51302+ if (*n == '\0')
51303+ return 0;
51304+
51305+ if (*n == '/')
51306+ return 0;
51307+
51308+ return 1;
51309+}
51310+
51311+static struct acl_object_label *
51312+chk_glob_label(struct acl_object_label *globbed,
51313+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51314+{
51315+ struct acl_object_label *tmp;
51316+
51317+ if (*path == NULL)
51318+ *path = gr_to_filename_nolock(dentry, mnt);
51319+
51320+ tmp = globbed;
51321+
51322+ while (tmp) {
51323+ if (!glob_match(tmp->filename, *path))
51324+ return tmp;
51325+ tmp = tmp->next;
51326+ }
51327+
51328+ return NULL;
51329+}
51330+
51331+static struct acl_object_label *
51332+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51333+ const ino_t curr_ino, const dev_t curr_dev,
51334+ const struct acl_subject_label *subj, char **path, const int checkglob)
51335+{
51336+ struct acl_subject_label *tmpsubj;
51337+ struct acl_object_label *retval;
51338+ struct acl_object_label *retval2;
51339+
51340+ tmpsubj = (struct acl_subject_label *) subj;
51341+ read_lock(&gr_inode_lock);
51342+ do {
51343+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51344+ if (retval) {
51345+ if (checkglob && retval->globbed) {
51346+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51347+ if (retval2)
51348+ retval = retval2;
51349+ }
51350+ break;
51351+ }
51352+ } while ((tmpsubj = tmpsubj->parent_subject));
51353+ read_unlock(&gr_inode_lock);
51354+
51355+ return retval;
51356+}
51357+
51358+static __inline__ struct acl_object_label *
51359+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51360+ struct dentry *curr_dentry,
51361+ const struct acl_subject_label *subj, char **path, const int checkglob)
51362+{
51363+ int newglob = checkglob;
51364+ ino_t inode;
51365+ dev_t device;
51366+
51367+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51368+ as we don't want a / * rule to match instead of the / object
51369+ don't do this for create lookups that call this function though, since they're looking up
51370+ on the parent and thus need globbing checks on all paths
51371+ */
51372+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51373+ newglob = GR_NO_GLOB;
51374+
51375+ spin_lock(&curr_dentry->d_lock);
51376+ inode = curr_dentry->d_inode->i_ino;
51377+ device = __get_dev(curr_dentry);
51378+ spin_unlock(&curr_dentry->d_lock);
51379+
51380+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51381+}
51382+
51383+static struct acl_object_label *
51384+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51385+ const struct acl_subject_label *subj, char *path, const int checkglob)
51386+{
51387+ struct dentry *dentry = (struct dentry *) l_dentry;
51388+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51389+ struct mount *real_mnt = real_mount(mnt);
51390+ struct acl_object_label *retval;
51391+ struct dentry *parent;
51392+
51393+ write_seqlock(&rename_lock);
51394+ br_read_lock(vfsmount_lock);
51395+
51396+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51397+#ifdef CONFIG_NET
51398+ mnt == sock_mnt ||
51399+#endif
51400+#ifdef CONFIG_HUGETLBFS
51401+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51402+#endif
51403+ /* ignore Eric Biederman */
51404+ IS_PRIVATE(l_dentry->d_inode))) {
51405+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51406+ goto out;
51407+ }
51408+
51409+ for (;;) {
51410+ if (dentry == real_root.dentry && mnt == real_root.mnt)
51411+ break;
51412+
51413+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51414+ if (!mnt_has_parent(real_mnt))
51415+ break;
51416+
51417+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51418+ if (retval != NULL)
51419+ goto out;
51420+
51421+ dentry = real_mnt->mnt_mountpoint;
51422+ real_mnt = real_mnt->mnt_parent;
51423+ mnt = &real_mnt->mnt;
51424+ continue;
51425+ }
51426+
51427+ parent = dentry->d_parent;
51428+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51429+ if (retval != NULL)
51430+ goto out;
51431+
51432+ dentry = parent;
51433+ }
51434+
51435+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51436+
51437+ /* real_root is pinned so we don't have to hold a reference */
51438+ if (retval == NULL)
51439+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51440+out:
51441+ br_read_unlock(vfsmount_lock);
51442+ write_sequnlock(&rename_lock);
51443+
51444+ BUG_ON(retval == NULL);
51445+
51446+ return retval;
51447+}
51448+
51449+static __inline__ struct acl_object_label *
51450+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51451+ const struct acl_subject_label *subj)
51452+{
51453+ char *path = NULL;
51454+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51455+}
51456+
51457+static __inline__ struct acl_object_label *
51458+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51459+ const struct acl_subject_label *subj)
51460+{
51461+ char *path = NULL;
51462+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51463+}
51464+
51465+static __inline__ struct acl_object_label *
51466+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51467+ const struct acl_subject_label *subj, char *path)
51468+{
51469+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
51470+}
51471+
51472+static struct acl_subject_label *
51473+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51474+ const struct acl_role_label *role)
51475+{
51476+ struct dentry *dentry = (struct dentry *) l_dentry;
51477+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51478+ struct mount *real_mnt = real_mount(mnt);
51479+ struct acl_subject_label *retval;
51480+ struct dentry *parent;
51481+
51482+ write_seqlock(&rename_lock);
51483+ br_read_lock(vfsmount_lock);
51484+
51485+ for (;;) {
51486+ if (dentry == real_root.dentry && mnt == real_root.mnt)
51487+ break;
51488+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51489+ if (!mnt_has_parent(real_mnt))
51490+ break;
51491+
51492+ spin_lock(&dentry->d_lock);
51493+ read_lock(&gr_inode_lock);
51494+ retval =
51495+ lookup_acl_subj_label(dentry->d_inode->i_ino,
51496+ __get_dev(dentry), role);
51497+ read_unlock(&gr_inode_lock);
51498+ spin_unlock(&dentry->d_lock);
51499+ if (retval != NULL)
51500+ goto out;
51501+
51502+ dentry = real_mnt->mnt_mountpoint;
51503+ real_mnt = real_mnt->mnt_parent;
51504+ mnt = &real_mnt->mnt;
51505+ continue;
51506+ }
51507+
51508+ spin_lock(&dentry->d_lock);
51509+ read_lock(&gr_inode_lock);
51510+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51511+ __get_dev(dentry), role);
51512+ read_unlock(&gr_inode_lock);
51513+ parent = dentry->d_parent;
51514+ spin_unlock(&dentry->d_lock);
51515+
51516+ if (retval != NULL)
51517+ goto out;
51518+
51519+ dentry = parent;
51520+ }
51521+
51522+ spin_lock(&dentry->d_lock);
51523+ read_lock(&gr_inode_lock);
51524+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51525+ __get_dev(dentry), role);
51526+ read_unlock(&gr_inode_lock);
51527+ spin_unlock(&dentry->d_lock);
51528+
51529+ if (unlikely(retval == NULL)) {
51530+ /* real_root is pinned, we don't need to hold a reference */
51531+ read_lock(&gr_inode_lock);
51532+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
51533+ __get_dev(real_root.dentry), role);
51534+ read_unlock(&gr_inode_lock);
51535+ }
51536+out:
51537+ br_read_unlock(vfsmount_lock);
51538+ write_sequnlock(&rename_lock);
51539+
51540+ BUG_ON(retval == NULL);
51541+
51542+ return retval;
51543+}
51544+
51545+static void
51546+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
51547+{
51548+ struct task_struct *task = current;
51549+ const struct cred *cred = current_cred();
51550+
51551+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51552+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51553+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51554+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
51555+
51556+ return;
51557+}
51558+
51559+static void
51560+gr_log_learn_sysctl(const char *path, const __u32 mode)
51561+{
51562+ struct task_struct *task = current;
51563+ const struct cred *cred = current_cred();
51564+
51565+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51566+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51567+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51568+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
51569+
51570+ return;
51571+}
51572+
51573+static void
51574+gr_log_learn_id_change(const char type, const unsigned int real,
51575+ const unsigned int effective, const unsigned int fs)
51576+{
51577+ struct task_struct *task = current;
51578+ const struct cred *cred = current_cred();
51579+
51580+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
51581+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51582+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51583+ type, real, effective, fs, &task->signal->saved_ip);
51584+
51585+ return;
51586+}
51587+
51588+__u32
51589+gr_search_file(const struct dentry * dentry, const __u32 mode,
51590+ const struct vfsmount * mnt)
51591+{
51592+ __u32 retval = mode;
51593+ struct acl_subject_label *curracl;
51594+ struct acl_object_label *currobj;
51595+
51596+ if (unlikely(!(gr_status & GR_READY)))
51597+ return (mode & ~GR_AUDITS);
51598+
51599+ curracl = current->acl;
51600+
51601+ currobj = chk_obj_label(dentry, mnt, curracl);
51602+ retval = currobj->mode & mode;
51603+
51604+ /* if we're opening a specified transfer file for writing
51605+ (e.g. /dev/initctl), then transfer our role to init
51606+ */
51607+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
51608+ current->role->roletype & GR_ROLE_PERSIST)) {
51609+ struct task_struct *task = init_pid_ns.child_reaper;
51610+
51611+ if (task->role != current->role) {
51612+ task->acl_sp_role = 0;
51613+ task->acl_role_id = current->acl_role_id;
51614+ task->role = current->role;
51615+ rcu_read_lock();
51616+ read_lock(&grsec_exec_file_lock);
51617+ gr_apply_subject_to_task(task);
51618+ read_unlock(&grsec_exec_file_lock);
51619+ rcu_read_unlock();
51620+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
51621+ }
51622+ }
51623+
51624+ if (unlikely
51625+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
51626+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
51627+ __u32 new_mode = mode;
51628+
51629+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51630+
51631+ retval = new_mode;
51632+
51633+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
51634+ new_mode |= GR_INHERIT;
51635+
51636+ if (!(mode & GR_NOLEARN))
51637+ gr_log_learn(dentry, mnt, new_mode);
51638+ }
51639+
51640+ return retval;
51641+}
51642+
51643+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
51644+ const struct dentry *parent,
51645+ const struct vfsmount *mnt)
51646+{
51647+ struct name_entry *match;
51648+ struct acl_object_label *matchpo;
51649+ struct acl_subject_label *curracl;
51650+ char *path;
51651+
51652+ if (unlikely(!(gr_status & GR_READY)))
51653+ return NULL;
51654+
51655+ preempt_disable();
51656+ path = gr_to_filename_rbac(new_dentry, mnt);
51657+ match = lookup_name_entry_create(path);
51658+
51659+ curracl = current->acl;
51660+
51661+ if (match) {
51662+ read_lock(&gr_inode_lock);
51663+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
51664+ read_unlock(&gr_inode_lock);
51665+
51666+ if (matchpo) {
51667+ preempt_enable();
51668+ return matchpo;
51669+ }
51670+ }
51671+
51672+ // lookup parent
51673+
51674+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
51675+
51676+ preempt_enable();
51677+ return matchpo;
51678+}
51679+
51680+__u32
51681+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
51682+ const struct vfsmount * mnt, const __u32 mode)
51683+{
51684+ struct acl_object_label *matchpo;
51685+ __u32 retval;
51686+
51687+ if (unlikely(!(gr_status & GR_READY)))
51688+ return (mode & ~GR_AUDITS);
51689+
51690+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
51691+
51692+ retval = matchpo->mode & mode;
51693+
51694+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
51695+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51696+ __u32 new_mode = mode;
51697+
51698+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51699+
51700+ gr_log_learn(new_dentry, mnt, new_mode);
51701+ return new_mode;
51702+ }
51703+
51704+ return retval;
51705+}
51706+
51707+__u32
51708+gr_check_link(const struct dentry * new_dentry,
51709+ const struct dentry * parent_dentry,
51710+ const struct vfsmount * parent_mnt,
51711+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
51712+{
51713+ struct acl_object_label *obj;
51714+ __u32 oldmode, newmode;
51715+ __u32 needmode;
51716+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
51717+ GR_DELETE | GR_INHERIT;
51718+
51719+ if (unlikely(!(gr_status & GR_READY)))
51720+ return (GR_CREATE | GR_LINK);
51721+
51722+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
51723+ oldmode = obj->mode;
51724+
51725+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
51726+ newmode = obj->mode;
51727+
51728+ needmode = newmode & checkmodes;
51729+
51730+ // old name for hardlink must have at least the permissions of the new name
51731+ if ((oldmode & needmode) != needmode)
51732+ goto bad;
51733+
51734+ // if old name had restrictions/auditing, make sure the new name does as well
51735+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
51736+
51737+ // don't allow hardlinking of suid/sgid files without permission
51738+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51739+ needmode |= GR_SETID;
51740+
51741+ if ((newmode & needmode) != needmode)
51742+ goto bad;
51743+
51744+ // enforce minimum permissions
51745+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
51746+ return newmode;
51747+bad:
51748+ needmode = oldmode;
51749+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51750+ needmode |= GR_SETID;
51751+
51752+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
51753+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
51754+ return (GR_CREATE | GR_LINK);
51755+ } else if (newmode & GR_SUPPRESS)
51756+ return GR_SUPPRESS;
51757+ else
51758+ return 0;
51759+}
51760+
51761+int
51762+gr_check_hidden_task(const struct task_struct *task)
51763+{
51764+ if (unlikely(!(gr_status & GR_READY)))
51765+ return 0;
51766+
51767+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
51768+ return 1;
51769+
51770+ return 0;
51771+}
51772+
51773+int
51774+gr_check_protected_task(const struct task_struct *task)
51775+{
51776+ if (unlikely(!(gr_status & GR_READY) || !task))
51777+ return 0;
51778+
51779+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51780+ task->acl != current->acl)
51781+ return 1;
51782+
51783+ return 0;
51784+}
51785+
51786+int
51787+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
51788+{
51789+ struct task_struct *p;
51790+ int ret = 0;
51791+
51792+ if (unlikely(!(gr_status & GR_READY) || !pid))
51793+ return ret;
51794+
51795+ read_lock(&tasklist_lock);
51796+ do_each_pid_task(pid, type, p) {
51797+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51798+ p->acl != current->acl) {
51799+ ret = 1;
51800+ goto out;
51801+ }
51802+ } while_each_pid_task(pid, type, p);
51803+out:
51804+ read_unlock(&tasklist_lock);
51805+
51806+ return ret;
51807+}
51808+
51809+void
51810+gr_copy_label(struct task_struct *tsk)
51811+{
51812+ /* plain copying of fields is already done by dup_task_struct */
51813+ tsk->signal->used_accept = 0;
51814+ tsk->acl_sp_role = 0;
51815+ //tsk->acl_role_id = current->acl_role_id;
51816+ //tsk->acl = current->acl;
51817+ //tsk->role = current->role;
51818+ tsk->signal->curr_ip = current->signal->curr_ip;
51819+ tsk->signal->saved_ip = current->signal->saved_ip;
51820+ if (current->exec_file)
51821+ get_file(current->exec_file);
51822+ //tsk->exec_file = current->exec_file;
51823+ //tsk->is_writable = current->is_writable;
51824+ if (unlikely(current->signal->used_accept)) {
51825+ current->signal->curr_ip = 0;
51826+ current->signal->saved_ip = 0;
51827+ }
51828+
51829+ return;
51830+}
51831+
51832+static void
51833+gr_set_proc_res(struct task_struct *task)
51834+{
51835+ struct acl_subject_label *proc;
51836+ unsigned short i;
51837+
51838+ proc = task->acl;
51839+
51840+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
51841+ return;
51842+
51843+ for (i = 0; i < RLIM_NLIMITS; i++) {
51844+ if (!(proc->resmask & (1 << i)))
51845+ continue;
51846+
51847+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
51848+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
51849+ }
51850+
51851+ return;
51852+}
51853+
51854+extern int __gr_process_user_ban(struct user_struct *user);
51855+
51856+int
51857+gr_check_user_change(int real, int effective, int fs)
51858+{
51859+ unsigned int i;
51860+ __u16 num;
51861+ uid_t *uidlist;
51862+ int curuid;
51863+ int realok = 0;
51864+ int effectiveok = 0;
51865+ int fsok = 0;
51866+
51867+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51868+ struct user_struct *user;
51869+
51870+ if (real == -1)
51871+ goto skipit;
51872+
51873+ user = find_user(real);
51874+ if (user == NULL)
51875+ goto skipit;
51876+
51877+ if (__gr_process_user_ban(user)) {
51878+ /* for find_user */
51879+ free_uid(user);
51880+ return 1;
51881+ }
51882+
51883+ /* for find_user */
51884+ free_uid(user);
51885+
51886+skipit:
51887+#endif
51888+
51889+ if (unlikely(!(gr_status & GR_READY)))
51890+ return 0;
51891+
51892+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51893+ gr_log_learn_id_change('u', real, effective, fs);
51894+
51895+ num = current->acl->user_trans_num;
51896+ uidlist = current->acl->user_transitions;
51897+
51898+ if (uidlist == NULL)
51899+ return 0;
51900+
51901+ if (real == -1)
51902+ realok = 1;
51903+ if (effective == -1)
51904+ effectiveok = 1;
51905+ if (fs == -1)
51906+ fsok = 1;
51907+
51908+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
51909+ for (i = 0; i < num; i++) {
51910+ curuid = (int)uidlist[i];
51911+ if (real == curuid)
51912+ realok = 1;
51913+ if (effective == curuid)
51914+ effectiveok = 1;
51915+ if (fs == curuid)
51916+ fsok = 1;
51917+ }
51918+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
51919+ for (i = 0; i < num; i++) {
51920+ curuid = (int)uidlist[i];
51921+ if (real == curuid)
51922+ break;
51923+ if (effective == curuid)
51924+ break;
51925+ if (fs == curuid)
51926+ break;
51927+ }
51928+ /* not in deny list */
51929+ if (i == num) {
51930+ realok = 1;
51931+ effectiveok = 1;
51932+ fsok = 1;
51933+ }
51934+ }
51935+
51936+ if (realok && effectiveok && fsok)
51937+ return 0;
51938+ else {
51939+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
51940+ return 1;
51941+ }
51942+}
51943+
51944+int
51945+gr_check_group_change(int real, int effective, int fs)
51946+{
51947+ unsigned int i;
51948+ __u16 num;
51949+ gid_t *gidlist;
51950+ int curgid;
51951+ int realok = 0;
51952+ int effectiveok = 0;
51953+ int fsok = 0;
51954+
51955+ if (unlikely(!(gr_status & GR_READY)))
51956+ return 0;
51957+
51958+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51959+ gr_log_learn_id_change('g', real, effective, fs);
51960+
51961+ num = current->acl->group_trans_num;
51962+ gidlist = current->acl->group_transitions;
51963+
51964+ if (gidlist == NULL)
51965+ return 0;
51966+
51967+ if (real == -1)
51968+ realok = 1;
51969+ if (effective == -1)
51970+ effectiveok = 1;
51971+ if (fs == -1)
51972+ fsok = 1;
51973+
51974+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
51975+ for (i = 0; i < num; i++) {
51976+ curgid = (int)gidlist[i];
51977+ if (real == curgid)
51978+ realok = 1;
51979+ if (effective == curgid)
51980+ effectiveok = 1;
51981+ if (fs == curgid)
51982+ fsok = 1;
51983+ }
51984+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
51985+ for (i = 0; i < num; i++) {
51986+ curgid = (int)gidlist[i];
51987+ if (real == curgid)
51988+ break;
51989+ if (effective == curgid)
51990+ break;
51991+ if (fs == curgid)
51992+ break;
51993+ }
51994+ /* not in deny list */
51995+ if (i == num) {
51996+ realok = 1;
51997+ effectiveok = 1;
51998+ fsok = 1;
51999+ }
52000+ }
52001+
52002+ if (realok && effectiveok && fsok)
52003+ return 0;
52004+ else {
52005+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52006+ return 1;
52007+ }
52008+}
52009+
52010+extern int gr_acl_is_capable(const int cap);
52011+
52012+void
52013+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52014+{
52015+ struct acl_role_label *role = task->role;
52016+ struct acl_subject_label *subj = NULL;
52017+ struct acl_object_label *obj;
52018+ struct file *filp;
52019+
52020+ if (unlikely(!(gr_status & GR_READY)))
52021+ return;
52022+
52023+ filp = task->exec_file;
52024+
52025+ /* kernel process, we'll give them the kernel role */
52026+ if (unlikely(!filp)) {
52027+ task->role = kernel_role;
52028+ task->acl = kernel_role->root_label;
52029+ return;
52030+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52031+ role = lookup_acl_role_label(task, uid, gid);
52032+
52033+ /* don't change the role if we're not a privileged process */
52034+ if (role && task->role != role &&
52035+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52036+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52037+ return;
52038+
52039+ /* perform subject lookup in possibly new role
52040+ we can use this result below in the case where role == task->role
52041+ */
52042+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52043+
52044+ /* if we changed uid/gid, but result in the same role
52045+ and are using inheritance, don't lose the inherited subject
52046+ if current subject is other than what normal lookup
52047+ would result in, we arrived via inheritance, don't
52048+ lose subject
52049+ */
52050+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52051+ (subj == task->acl)))
52052+ task->acl = subj;
52053+
52054+ task->role = role;
52055+
52056+ task->is_writable = 0;
52057+
52058+ /* ignore additional mmap checks for processes that are writable
52059+ by the default ACL */
52060+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52061+ if (unlikely(obj->mode & GR_WRITE))
52062+ task->is_writable = 1;
52063+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52064+ if (unlikely(obj->mode & GR_WRITE))
52065+ task->is_writable = 1;
52066+
52067+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52068+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52069+#endif
52070+
52071+ gr_set_proc_res(task);
52072+
52073+ return;
52074+}
52075+
52076+int
52077+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52078+ const int unsafe_flags)
52079+{
52080+ struct task_struct *task = current;
52081+ struct acl_subject_label *newacl;
52082+ struct acl_object_label *obj;
52083+ __u32 retmode;
52084+
52085+ if (unlikely(!(gr_status & GR_READY)))
52086+ return 0;
52087+
52088+ newacl = chk_subj_label(dentry, mnt, task->role);
52089+
52090+ task_lock(task);
52091+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52092+ !(task->role->roletype & GR_ROLE_GOD) &&
52093+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52094+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52095+ task_unlock(task);
52096+ if (unsafe_flags & LSM_UNSAFE_SHARE)
52097+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52098+ else
52099+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52100+ return -EACCES;
52101+ }
52102+ task_unlock(task);
52103+
52104+ obj = chk_obj_label(dentry, mnt, task->acl);
52105+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52106+
52107+ if (!(task->acl->mode & GR_INHERITLEARN) &&
52108+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52109+ if (obj->nested)
52110+ task->acl = obj->nested;
52111+ else
52112+ task->acl = newacl;
52113+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52114+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52115+
52116+ task->is_writable = 0;
52117+
52118+ /* ignore additional mmap checks for processes that are writable
52119+ by the default ACL */
52120+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
52121+ if (unlikely(obj->mode & GR_WRITE))
52122+ task->is_writable = 1;
52123+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
52124+ if (unlikely(obj->mode & GR_WRITE))
52125+ task->is_writable = 1;
52126+
52127+ gr_set_proc_res(task);
52128+
52129+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52130+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52131+#endif
52132+ return 0;
52133+}
52134+
52135+/* always called with valid inodev ptr */
52136+static void
52137+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52138+{
52139+ struct acl_object_label *matchpo;
52140+ struct acl_subject_label *matchps;
52141+ struct acl_subject_label *subj;
52142+ struct acl_role_label *role;
52143+ unsigned int x;
52144+
52145+ FOR_EACH_ROLE_START(role)
52146+ FOR_EACH_SUBJECT_START(role, subj, x)
52147+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52148+ matchpo->mode |= GR_DELETED;
52149+ FOR_EACH_SUBJECT_END(subj,x)
52150+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52151+ if (subj->inode == ino && subj->device == dev)
52152+ subj->mode |= GR_DELETED;
52153+ FOR_EACH_NESTED_SUBJECT_END(subj)
52154+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52155+ matchps->mode |= GR_DELETED;
52156+ FOR_EACH_ROLE_END(role)
52157+
52158+ inodev->nentry->deleted = 1;
52159+
52160+ return;
52161+}
52162+
52163+void
52164+gr_handle_delete(const ino_t ino, const dev_t dev)
52165+{
52166+ struct inodev_entry *inodev;
52167+
52168+ if (unlikely(!(gr_status & GR_READY)))
52169+ return;
52170+
52171+ write_lock(&gr_inode_lock);
52172+ inodev = lookup_inodev_entry(ino, dev);
52173+ if (inodev != NULL)
52174+ do_handle_delete(inodev, ino, dev);
52175+ write_unlock(&gr_inode_lock);
52176+
52177+ return;
52178+}
52179+
52180+static void
52181+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52182+ const ino_t newinode, const dev_t newdevice,
52183+ struct acl_subject_label *subj)
52184+{
52185+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52186+ struct acl_object_label *match;
52187+
52188+ match = subj->obj_hash[index];
52189+
52190+ while (match && (match->inode != oldinode ||
52191+ match->device != olddevice ||
52192+ !(match->mode & GR_DELETED)))
52193+ match = match->next;
52194+
52195+ if (match && (match->inode == oldinode)
52196+ && (match->device == olddevice)
52197+ && (match->mode & GR_DELETED)) {
52198+ if (match->prev == NULL) {
52199+ subj->obj_hash[index] = match->next;
52200+ if (match->next != NULL)
52201+ match->next->prev = NULL;
52202+ } else {
52203+ match->prev->next = match->next;
52204+ if (match->next != NULL)
52205+ match->next->prev = match->prev;
52206+ }
52207+ match->prev = NULL;
52208+ match->next = NULL;
52209+ match->inode = newinode;
52210+ match->device = newdevice;
52211+ match->mode &= ~GR_DELETED;
52212+
52213+ insert_acl_obj_label(match, subj);
52214+ }
52215+
52216+ return;
52217+}
52218+
52219+static void
52220+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52221+ const ino_t newinode, const dev_t newdevice,
52222+ struct acl_role_label *role)
52223+{
52224+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52225+ struct acl_subject_label *match;
52226+
52227+ match = role->subj_hash[index];
52228+
52229+ while (match && (match->inode != oldinode ||
52230+ match->device != olddevice ||
52231+ !(match->mode & GR_DELETED)))
52232+ match = match->next;
52233+
52234+ if (match && (match->inode == oldinode)
52235+ && (match->device == olddevice)
52236+ && (match->mode & GR_DELETED)) {
52237+ if (match->prev == NULL) {
52238+ role->subj_hash[index] = match->next;
52239+ if (match->next != NULL)
52240+ match->next->prev = NULL;
52241+ } else {
52242+ match->prev->next = match->next;
52243+ if (match->next != NULL)
52244+ match->next->prev = match->prev;
52245+ }
52246+ match->prev = NULL;
52247+ match->next = NULL;
52248+ match->inode = newinode;
52249+ match->device = newdevice;
52250+ match->mode &= ~GR_DELETED;
52251+
52252+ insert_acl_subj_label(match, role);
52253+ }
52254+
52255+ return;
52256+}
52257+
52258+static void
52259+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52260+ const ino_t newinode, const dev_t newdevice)
52261+{
52262+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52263+ struct inodev_entry *match;
52264+
52265+ match = inodev_set.i_hash[index];
52266+
52267+ while (match && (match->nentry->inode != oldinode ||
52268+ match->nentry->device != olddevice || !match->nentry->deleted))
52269+ match = match->next;
52270+
52271+ if (match && (match->nentry->inode == oldinode)
52272+ && (match->nentry->device == olddevice) &&
52273+ match->nentry->deleted) {
52274+ if (match->prev == NULL) {
52275+ inodev_set.i_hash[index] = match->next;
52276+ if (match->next != NULL)
52277+ match->next->prev = NULL;
52278+ } else {
52279+ match->prev->next = match->next;
52280+ if (match->next != NULL)
52281+ match->next->prev = match->prev;
52282+ }
52283+ match->prev = NULL;
52284+ match->next = NULL;
52285+ match->nentry->inode = newinode;
52286+ match->nentry->device = newdevice;
52287+ match->nentry->deleted = 0;
52288+
52289+ insert_inodev_entry(match);
52290+ }
52291+
52292+ return;
52293+}
52294+
52295+static void
52296+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52297+{
52298+ struct acl_subject_label *subj;
52299+ struct acl_role_label *role;
52300+ unsigned int x;
52301+
52302+ FOR_EACH_ROLE_START(role)
52303+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52304+
52305+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52306+ if ((subj->inode == ino) && (subj->device == dev)) {
52307+ subj->inode = ino;
52308+ subj->device = dev;
52309+ }
52310+ FOR_EACH_NESTED_SUBJECT_END(subj)
52311+ FOR_EACH_SUBJECT_START(role, subj, x)
52312+ update_acl_obj_label(matchn->inode, matchn->device,
52313+ ino, dev, subj);
52314+ FOR_EACH_SUBJECT_END(subj,x)
52315+ FOR_EACH_ROLE_END(role)
52316+
52317+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52318+
52319+ return;
52320+}
52321+
52322+static void
52323+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52324+ const struct vfsmount *mnt)
52325+{
52326+ ino_t ino = dentry->d_inode->i_ino;
52327+ dev_t dev = __get_dev(dentry);
52328+
52329+ __do_handle_create(matchn, ino, dev);
52330+
52331+ return;
52332+}
52333+
52334+void
52335+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52336+{
52337+ struct name_entry *matchn;
52338+
52339+ if (unlikely(!(gr_status & GR_READY)))
52340+ return;
52341+
52342+ preempt_disable();
52343+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52344+
52345+ if (unlikely((unsigned long)matchn)) {
52346+ write_lock(&gr_inode_lock);
52347+ do_handle_create(matchn, dentry, mnt);
52348+ write_unlock(&gr_inode_lock);
52349+ }
52350+ preempt_enable();
52351+
52352+ return;
52353+}
52354+
52355+void
52356+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52357+{
52358+ struct name_entry *matchn;
52359+
52360+ if (unlikely(!(gr_status & GR_READY)))
52361+ return;
52362+
52363+ preempt_disable();
52364+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52365+
52366+ if (unlikely((unsigned long)matchn)) {
52367+ write_lock(&gr_inode_lock);
52368+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52369+ write_unlock(&gr_inode_lock);
52370+ }
52371+ preempt_enable();
52372+
52373+ return;
52374+}
52375+
52376+void
52377+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52378+ struct dentry *old_dentry,
52379+ struct dentry *new_dentry,
52380+ struct vfsmount *mnt, const __u8 replace)
52381+{
52382+ struct name_entry *matchn;
52383+ struct inodev_entry *inodev;
52384+ struct inode *inode = new_dentry->d_inode;
52385+ ino_t old_ino = old_dentry->d_inode->i_ino;
52386+ dev_t old_dev = __get_dev(old_dentry);
52387+
52388+ /* vfs_rename swaps the name and parent link for old_dentry and
52389+ new_dentry
52390+ at this point, old_dentry has the new name, parent link, and inode
52391+ for the renamed file
52392+ if a file is being replaced by a rename, new_dentry has the inode
52393+ and name for the replaced file
52394+ */
52395+
52396+ if (unlikely(!(gr_status & GR_READY)))
52397+ return;
52398+
52399+ preempt_disable();
52400+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52401+
52402+ /* we wouldn't have to check d_inode if it weren't for
52403+ NFS silly-renaming
52404+ */
52405+
52406+ write_lock(&gr_inode_lock);
52407+ if (unlikely(replace && inode)) {
52408+ ino_t new_ino = inode->i_ino;
52409+ dev_t new_dev = __get_dev(new_dentry);
52410+
52411+ inodev = lookup_inodev_entry(new_ino, new_dev);
52412+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52413+ do_handle_delete(inodev, new_ino, new_dev);
52414+ }
52415+
52416+ inodev = lookup_inodev_entry(old_ino, old_dev);
52417+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52418+ do_handle_delete(inodev, old_ino, old_dev);
52419+
52420+ if (unlikely((unsigned long)matchn))
52421+ do_handle_create(matchn, old_dentry, mnt);
52422+
52423+ write_unlock(&gr_inode_lock);
52424+ preempt_enable();
52425+
52426+ return;
52427+}
52428+
52429+static int
52430+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52431+ unsigned char **sum)
52432+{
52433+ struct acl_role_label *r;
52434+ struct role_allowed_ip *ipp;
52435+ struct role_transition *trans;
52436+ unsigned int i;
52437+ int found = 0;
52438+ u32 curr_ip = current->signal->curr_ip;
52439+
52440+ current->signal->saved_ip = curr_ip;
52441+
52442+ /* check transition table */
52443+
52444+ for (trans = current->role->transitions; trans; trans = trans->next) {
52445+ if (!strcmp(rolename, trans->rolename)) {
52446+ found = 1;
52447+ break;
52448+ }
52449+ }
52450+
52451+ if (!found)
52452+ return 0;
52453+
52454+ /* handle special roles that do not require authentication
52455+ and check ip */
52456+
52457+ FOR_EACH_ROLE_START(r)
52458+ if (!strcmp(rolename, r->rolename) &&
52459+ (r->roletype & GR_ROLE_SPECIAL)) {
52460+ found = 0;
52461+ if (r->allowed_ips != NULL) {
52462+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52463+ if ((ntohl(curr_ip) & ipp->netmask) ==
52464+ (ntohl(ipp->addr) & ipp->netmask))
52465+ found = 1;
52466+ }
52467+ } else
52468+ found = 2;
52469+ if (!found)
52470+ return 0;
52471+
52472+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
52473+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
52474+ *salt = NULL;
52475+ *sum = NULL;
52476+ return 1;
52477+ }
52478+ }
52479+ FOR_EACH_ROLE_END(r)
52480+
52481+ for (i = 0; i < num_sprole_pws; i++) {
52482+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
52483+ *salt = acl_special_roles[i]->salt;
52484+ *sum = acl_special_roles[i]->sum;
52485+ return 1;
52486+ }
52487+ }
52488+
52489+ return 0;
52490+}
52491+
52492+static void
52493+assign_special_role(char *rolename)
52494+{
52495+ struct acl_object_label *obj;
52496+ struct acl_role_label *r;
52497+ struct acl_role_label *assigned = NULL;
52498+ struct task_struct *tsk;
52499+ struct file *filp;
52500+
52501+ FOR_EACH_ROLE_START(r)
52502+ if (!strcmp(rolename, r->rolename) &&
52503+ (r->roletype & GR_ROLE_SPECIAL)) {
52504+ assigned = r;
52505+ break;
52506+ }
52507+ FOR_EACH_ROLE_END(r)
52508+
52509+ if (!assigned)
52510+ return;
52511+
52512+ read_lock(&tasklist_lock);
52513+ read_lock(&grsec_exec_file_lock);
52514+
52515+ tsk = current->real_parent;
52516+ if (tsk == NULL)
52517+ goto out_unlock;
52518+
52519+ filp = tsk->exec_file;
52520+ if (filp == NULL)
52521+ goto out_unlock;
52522+
52523+ tsk->is_writable = 0;
52524+
52525+ tsk->acl_sp_role = 1;
52526+ tsk->acl_role_id = ++acl_sp_role_value;
52527+ tsk->role = assigned;
52528+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
52529+
52530+ /* ignore additional mmap checks for processes that are writable
52531+ by the default ACL */
52532+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52533+ if (unlikely(obj->mode & GR_WRITE))
52534+ tsk->is_writable = 1;
52535+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
52536+ if (unlikely(obj->mode & GR_WRITE))
52537+ tsk->is_writable = 1;
52538+
52539+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52540+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
52541+#endif
52542+
52543+out_unlock:
52544+ read_unlock(&grsec_exec_file_lock);
52545+ read_unlock(&tasklist_lock);
52546+ return;
52547+}
52548+
52549+int gr_check_secure_terminal(struct task_struct *task)
52550+{
52551+ struct task_struct *p, *p2, *p3;
52552+ struct files_struct *files;
52553+ struct fdtable *fdt;
52554+ struct file *our_file = NULL, *file;
52555+ int i;
52556+
52557+ if (task->signal->tty == NULL)
52558+ return 1;
52559+
52560+ files = get_files_struct(task);
52561+ if (files != NULL) {
52562+ rcu_read_lock();
52563+ fdt = files_fdtable(files);
52564+ for (i=0; i < fdt->max_fds; i++) {
52565+ file = fcheck_files(files, i);
52566+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
52567+ get_file(file);
52568+ our_file = file;
52569+ }
52570+ }
52571+ rcu_read_unlock();
52572+ put_files_struct(files);
52573+ }
52574+
52575+ if (our_file == NULL)
52576+ return 1;
52577+
52578+ read_lock(&tasklist_lock);
52579+ do_each_thread(p2, p) {
52580+ files = get_files_struct(p);
52581+ if (files == NULL ||
52582+ (p->signal && p->signal->tty == task->signal->tty)) {
52583+ if (files != NULL)
52584+ put_files_struct(files);
52585+ continue;
52586+ }
52587+ rcu_read_lock();
52588+ fdt = files_fdtable(files);
52589+ for (i=0; i < fdt->max_fds; i++) {
52590+ file = fcheck_files(files, i);
52591+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
52592+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
52593+ p3 = task;
52594+ while (p3->pid > 0) {
52595+ if (p3 == p)
52596+ break;
52597+ p3 = p3->real_parent;
52598+ }
52599+ if (p3 == p)
52600+ break;
52601+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
52602+ gr_handle_alertkill(p);
52603+ rcu_read_unlock();
52604+ put_files_struct(files);
52605+ read_unlock(&tasklist_lock);
52606+ fput(our_file);
52607+ return 0;
52608+ }
52609+ }
52610+ rcu_read_unlock();
52611+ put_files_struct(files);
52612+ } while_each_thread(p2, p);
52613+ read_unlock(&tasklist_lock);
52614+
52615+ fput(our_file);
52616+ return 1;
52617+}
52618+
52619+ssize_t
52620+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
52621+{
52622+ struct gr_arg_wrapper uwrap;
52623+ unsigned char *sprole_salt = NULL;
52624+ unsigned char *sprole_sum = NULL;
52625+ int error = sizeof (struct gr_arg_wrapper);
52626+ int error2 = 0;
52627+
52628+ mutex_lock(&gr_dev_mutex);
52629+
52630+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
52631+ error = -EPERM;
52632+ goto out;
52633+ }
52634+
52635+ if (count != sizeof (struct gr_arg_wrapper)) {
52636+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
52637+ error = -EINVAL;
52638+ goto out;
52639+ }
52640+
52641+
52642+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
52643+ gr_auth_expires = 0;
52644+ gr_auth_attempts = 0;
52645+ }
52646+
52647+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
52648+ error = -EFAULT;
52649+ goto out;
52650+ }
52651+
52652+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
52653+ error = -EINVAL;
52654+ goto out;
52655+ }
52656+
52657+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
52658+ error = -EFAULT;
52659+ goto out;
52660+ }
52661+
52662+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52663+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52664+ time_after(gr_auth_expires, get_seconds())) {
52665+ error = -EBUSY;
52666+ goto out;
52667+ }
52668+
52669+ /* if non-root trying to do anything other than use a special role,
52670+ do not attempt authentication, do not count towards authentication
52671+ locking
52672+ */
52673+
52674+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
52675+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52676+ current_uid()) {
52677+ error = -EPERM;
52678+ goto out;
52679+ }
52680+
52681+ /* ensure pw and special role name are null terminated */
52682+
52683+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
52684+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
52685+
52686+ /* Okay.
52687+ * We have our enough of the argument structure..(we have yet
52688+ * to copy_from_user the tables themselves) . Copy the tables
52689+ * only if we need them, i.e. for loading operations. */
52690+
52691+ switch (gr_usermode->mode) {
52692+ case GR_STATUS:
52693+ if (gr_status & GR_READY) {
52694+ error = 1;
52695+ if (!gr_check_secure_terminal(current))
52696+ error = 3;
52697+ } else
52698+ error = 2;
52699+ goto out;
52700+ case GR_SHUTDOWN:
52701+ if ((gr_status & GR_READY)
52702+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52703+ pax_open_kernel();
52704+ gr_status &= ~GR_READY;
52705+ pax_close_kernel();
52706+
52707+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
52708+ free_variables();
52709+ memset(gr_usermode, 0, sizeof (struct gr_arg));
52710+ memset(gr_system_salt, 0, GR_SALT_LEN);
52711+ memset(gr_system_sum, 0, GR_SHA_LEN);
52712+ } else if (gr_status & GR_READY) {
52713+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
52714+ error = -EPERM;
52715+ } else {
52716+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
52717+ error = -EAGAIN;
52718+ }
52719+ break;
52720+ case GR_ENABLE:
52721+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
52722+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
52723+ else {
52724+ if (gr_status & GR_READY)
52725+ error = -EAGAIN;
52726+ else
52727+ error = error2;
52728+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
52729+ }
52730+ break;
52731+ case GR_RELOAD:
52732+ if (!(gr_status & GR_READY)) {
52733+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
52734+ error = -EAGAIN;
52735+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52736+ preempt_disable();
52737+
52738+ pax_open_kernel();
52739+ gr_status &= ~GR_READY;
52740+ pax_close_kernel();
52741+
52742+ free_variables();
52743+ if (!(error2 = gracl_init(gr_usermode))) {
52744+ preempt_enable();
52745+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
52746+ } else {
52747+ preempt_enable();
52748+ error = error2;
52749+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52750+ }
52751+ } else {
52752+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52753+ error = -EPERM;
52754+ }
52755+ break;
52756+ case GR_SEGVMOD:
52757+ if (unlikely(!(gr_status & GR_READY))) {
52758+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
52759+ error = -EAGAIN;
52760+ break;
52761+ }
52762+
52763+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52764+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
52765+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
52766+ struct acl_subject_label *segvacl;
52767+ segvacl =
52768+ lookup_acl_subj_label(gr_usermode->segv_inode,
52769+ gr_usermode->segv_device,
52770+ current->role);
52771+ if (segvacl) {
52772+ segvacl->crashes = 0;
52773+ segvacl->expires = 0;
52774+ }
52775+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
52776+ gr_remove_uid(gr_usermode->segv_uid);
52777+ }
52778+ } else {
52779+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
52780+ error = -EPERM;
52781+ }
52782+ break;
52783+ case GR_SPROLE:
52784+ case GR_SPROLEPAM:
52785+ if (unlikely(!(gr_status & GR_READY))) {
52786+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
52787+ error = -EAGAIN;
52788+ break;
52789+ }
52790+
52791+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
52792+ current->role->expires = 0;
52793+ current->role->auth_attempts = 0;
52794+ }
52795+
52796+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52797+ time_after(current->role->expires, get_seconds())) {
52798+ error = -EBUSY;
52799+ goto out;
52800+ }
52801+
52802+ if (lookup_special_role_auth
52803+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
52804+ && ((!sprole_salt && !sprole_sum)
52805+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
52806+ char *p = "";
52807+ assign_special_role(gr_usermode->sp_role);
52808+ read_lock(&tasklist_lock);
52809+ if (current->real_parent)
52810+ p = current->real_parent->role->rolename;
52811+ read_unlock(&tasklist_lock);
52812+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
52813+ p, acl_sp_role_value);
52814+ } else {
52815+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
52816+ error = -EPERM;
52817+ if(!(current->role->auth_attempts++))
52818+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52819+
52820+ goto out;
52821+ }
52822+ break;
52823+ case GR_UNSPROLE:
52824+ if (unlikely(!(gr_status & GR_READY))) {
52825+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
52826+ error = -EAGAIN;
52827+ break;
52828+ }
52829+
52830+ if (current->role->roletype & GR_ROLE_SPECIAL) {
52831+ char *p = "";
52832+ int i = 0;
52833+
52834+ read_lock(&tasklist_lock);
52835+ if (current->real_parent) {
52836+ p = current->real_parent->role->rolename;
52837+ i = current->real_parent->acl_role_id;
52838+ }
52839+ read_unlock(&tasklist_lock);
52840+
52841+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
52842+ gr_set_acls(1);
52843+ } else {
52844+ error = -EPERM;
52845+ goto out;
52846+ }
52847+ break;
52848+ default:
52849+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
52850+ error = -EINVAL;
52851+ break;
52852+ }
52853+
52854+ if (error != -EPERM)
52855+ goto out;
52856+
52857+ if(!(gr_auth_attempts++))
52858+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52859+
52860+ out:
52861+ mutex_unlock(&gr_dev_mutex);
52862+ return error;
52863+}
52864+
52865+/* must be called with
52866+ rcu_read_lock();
52867+ read_lock(&tasklist_lock);
52868+ read_lock(&grsec_exec_file_lock);
52869+*/
52870+int gr_apply_subject_to_task(struct task_struct *task)
52871+{
52872+ struct acl_object_label *obj;
52873+ char *tmpname;
52874+ struct acl_subject_label *tmpsubj;
52875+ struct file *filp;
52876+ struct name_entry *nmatch;
52877+
52878+ filp = task->exec_file;
52879+ if (filp == NULL)
52880+ return 0;
52881+
52882+ /* the following is to apply the correct subject
52883+ on binaries running when the RBAC system
52884+ is enabled, when the binaries have been
52885+ replaced or deleted since their execution
52886+ -----
52887+ when the RBAC system starts, the inode/dev
52888+ from exec_file will be one the RBAC system
52889+ is unaware of. It only knows the inode/dev
52890+ of the present file on disk, or the absence
52891+ of it.
52892+ */
52893+ preempt_disable();
52894+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
52895+
52896+ nmatch = lookup_name_entry(tmpname);
52897+ preempt_enable();
52898+ tmpsubj = NULL;
52899+ if (nmatch) {
52900+ if (nmatch->deleted)
52901+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
52902+ else
52903+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
52904+ if (tmpsubj != NULL)
52905+ task->acl = tmpsubj;
52906+ }
52907+ if (tmpsubj == NULL)
52908+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
52909+ task->role);
52910+ if (task->acl) {
52911+ task->is_writable = 0;
52912+ /* ignore additional mmap checks for processes that are writable
52913+ by the default ACL */
52914+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52915+ if (unlikely(obj->mode & GR_WRITE))
52916+ task->is_writable = 1;
52917+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52918+ if (unlikely(obj->mode & GR_WRITE))
52919+ task->is_writable = 1;
52920+
52921+ gr_set_proc_res(task);
52922+
52923+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52924+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52925+#endif
52926+ } else {
52927+ return 1;
52928+ }
52929+
52930+ return 0;
52931+}
52932+
52933+int
52934+gr_set_acls(const int type)
52935+{
52936+ struct task_struct *task, *task2;
52937+ struct acl_role_label *role = current->role;
52938+ __u16 acl_role_id = current->acl_role_id;
52939+ const struct cred *cred;
52940+ int ret;
52941+
52942+ rcu_read_lock();
52943+ read_lock(&tasklist_lock);
52944+ read_lock(&grsec_exec_file_lock);
52945+ do_each_thread(task2, task) {
52946+ /* check to see if we're called from the exit handler,
52947+ if so, only replace ACLs that have inherited the admin
52948+ ACL */
52949+
52950+ if (type && (task->role != role ||
52951+ task->acl_role_id != acl_role_id))
52952+ continue;
52953+
52954+ task->acl_role_id = 0;
52955+ task->acl_sp_role = 0;
52956+
52957+ if (task->exec_file) {
52958+ cred = __task_cred(task);
52959+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
52960+ ret = gr_apply_subject_to_task(task);
52961+ if (ret) {
52962+ read_unlock(&grsec_exec_file_lock);
52963+ read_unlock(&tasklist_lock);
52964+ rcu_read_unlock();
52965+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
52966+ return ret;
52967+ }
52968+ } else {
52969+ // it's a kernel process
52970+ task->role = kernel_role;
52971+ task->acl = kernel_role->root_label;
52972+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
52973+ task->acl->mode &= ~GR_PROCFIND;
52974+#endif
52975+ }
52976+ } while_each_thread(task2, task);
52977+ read_unlock(&grsec_exec_file_lock);
52978+ read_unlock(&tasklist_lock);
52979+ rcu_read_unlock();
52980+
52981+ return 0;
52982+}
52983+
52984+void
52985+gr_learn_resource(const struct task_struct *task,
52986+ const int res, const unsigned long wanted, const int gt)
52987+{
52988+ struct acl_subject_label *acl;
52989+ const struct cred *cred;
52990+
52991+ if (unlikely((gr_status & GR_READY) &&
52992+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
52993+ goto skip_reslog;
52994+
52995+#ifdef CONFIG_GRKERNSEC_RESLOG
52996+ gr_log_resource(task, res, wanted, gt);
52997+#endif
52998+ skip_reslog:
52999+
53000+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53001+ return;
53002+
53003+ acl = task->acl;
53004+
53005+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53006+ !(acl->resmask & (1 << (unsigned short) res))))
53007+ return;
53008+
53009+ if (wanted >= acl->res[res].rlim_cur) {
53010+ unsigned long res_add;
53011+
53012+ res_add = wanted;
53013+ switch (res) {
53014+ case RLIMIT_CPU:
53015+ res_add += GR_RLIM_CPU_BUMP;
53016+ break;
53017+ case RLIMIT_FSIZE:
53018+ res_add += GR_RLIM_FSIZE_BUMP;
53019+ break;
53020+ case RLIMIT_DATA:
53021+ res_add += GR_RLIM_DATA_BUMP;
53022+ break;
53023+ case RLIMIT_STACK:
53024+ res_add += GR_RLIM_STACK_BUMP;
53025+ break;
53026+ case RLIMIT_CORE:
53027+ res_add += GR_RLIM_CORE_BUMP;
53028+ break;
53029+ case RLIMIT_RSS:
53030+ res_add += GR_RLIM_RSS_BUMP;
53031+ break;
53032+ case RLIMIT_NPROC:
53033+ res_add += GR_RLIM_NPROC_BUMP;
53034+ break;
53035+ case RLIMIT_NOFILE:
53036+ res_add += GR_RLIM_NOFILE_BUMP;
53037+ break;
53038+ case RLIMIT_MEMLOCK:
53039+ res_add += GR_RLIM_MEMLOCK_BUMP;
53040+ break;
53041+ case RLIMIT_AS:
53042+ res_add += GR_RLIM_AS_BUMP;
53043+ break;
53044+ case RLIMIT_LOCKS:
53045+ res_add += GR_RLIM_LOCKS_BUMP;
53046+ break;
53047+ case RLIMIT_SIGPENDING:
53048+ res_add += GR_RLIM_SIGPENDING_BUMP;
53049+ break;
53050+ case RLIMIT_MSGQUEUE:
53051+ res_add += GR_RLIM_MSGQUEUE_BUMP;
53052+ break;
53053+ case RLIMIT_NICE:
53054+ res_add += GR_RLIM_NICE_BUMP;
53055+ break;
53056+ case RLIMIT_RTPRIO:
53057+ res_add += GR_RLIM_RTPRIO_BUMP;
53058+ break;
53059+ case RLIMIT_RTTIME:
53060+ res_add += GR_RLIM_RTTIME_BUMP;
53061+ break;
53062+ }
53063+
53064+ acl->res[res].rlim_cur = res_add;
53065+
53066+ if (wanted > acl->res[res].rlim_max)
53067+ acl->res[res].rlim_max = res_add;
53068+
53069+ /* only log the subject filename, since resource logging is supported for
53070+ single-subject learning only */
53071+ rcu_read_lock();
53072+ cred = __task_cred(task);
53073+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53074+ task->role->roletype, cred->uid, cred->gid, acl->filename,
53075+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53076+ "", (unsigned long) res, &task->signal->saved_ip);
53077+ rcu_read_unlock();
53078+ }
53079+
53080+ return;
53081+}
53082+
53083+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53084+void
53085+pax_set_initial_flags(struct linux_binprm *bprm)
53086+{
53087+ struct task_struct *task = current;
53088+ struct acl_subject_label *proc;
53089+ unsigned long flags;
53090+
53091+ if (unlikely(!(gr_status & GR_READY)))
53092+ return;
53093+
53094+ flags = pax_get_flags(task);
53095+
53096+ proc = task->acl;
53097+
53098+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53099+ flags &= ~MF_PAX_PAGEEXEC;
53100+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53101+ flags &= ~MF_PAX_SEGMEXEC;
53102+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53103+ flags &= ~MF_PAX_RANDMMAP;
53104+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53105+ flags &= ~MF_PAX_EMUTRAMP;
53106+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53107+ flags &= ~MF_PAX_MPROTECT;
53108+
53109+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53110+ flags |= MF_PAX_PAGEEXEC;
53111+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53112+ flags |= MF_PAX_SEGMEXEC;
53113+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53114+ flags |= MF_PAX_RANDMMAP;
53115+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53116+ flags |= MF_PAX_EMUTRAMP;
53117+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53118+ flags |= MF_PAX_MPROTECT;
53119+
53120+ pax_set_flags(task, flags);
53121+
53122+ return;
53123+}
53124+#endif
53125+
53126+#ifdef CONFIG_SYSCTL
53127+/* Eric Biederman likes breaking userland ABI and every inode-based security
53128+ system to save 35kb of memory */
53129+
53130+/* we modify the passed in filename, but adjust it back before returning */
53131+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
53132+{
53133+ struct name_entry *nmatch;
53134+ char *p, *lastp = NULL;
53135+ struct acl_object_label *obj = NULL, *tmp;
53136+ struct acl_subject_label *tmpsubj;
53137+ char c = '\0';
53138+
53139+ read_lock(&gr_inode_lock);
53140+
53141+ p = name + len - 1;
53142+ do {
53143+ nmatch = lookup_name_entry(name);
53144+ if (lastp != NULL)
53145+ *lastp = c;
53146+
53147+ if (nmatch == NULL)
53148+ goto next_component;
53149+ tmpsubj = current->acl;
53150+ do {
53151+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
53152+ if (obj != NULL) {
53153+ tmp = obj->globbed;
53154+ while (tmp) {
53155+ if (!glob_match(tmp->filename, name)) {
53156+ obj = tmp;
53157+ goto found_obj;
53158+ }
53159+ tmp = tmp->next;
53160+ }
53161+ goto found_obj;
53162+ }
53163+ } while ((tmpsubj = tmpsubj->parent_subject));
53164+next_component:
53165+ /* end case */
53166+ if (p == name)
53167+ break;
53168+
53169+ while (*p != '/')
53170+ p--;
53171+ if (p == name)
53172+ lastp = p + 1;
53173+ else {
53174+ lastp = p;
53175+ p--;
53176+ }
53177+ c = *lastp;
53178+ *lastp = '\0';
53179+ } while (1);
53180+found_obj:
53181+ read_unlock(&gr_inode_lock);
53182+ /* obj returned will always be non-null */
53183+ return obj;
53184+}
53185+
53186+/* returns 0 when allowing, non-zero on error
53187+ op of 0 is used for readdir, so we don't log the names of hidden files
53188+*/
53189+__u32
53190+gr_handle_sysctl(const struct ctl_table *table, const int op)
53191+{
53192+ struct ctl_table *tmp;
53193+ const char *proc_sys = "/proc/sys";
53194+ char *path;
53195+ struct acl_object_label *obj;
53196+ unsigned short len = 0, pos = 0, depth = 0, i;
53197+ __u32 err = 0;
53198+ __u32 mode = 0;
53199+
53200+ if (unlikely(!(gr_status & GR_READY)))
53201+ return 0;
53202+
53203+ /* for now, ignore operations on non-sysctl entries if it's not a
53204+ readdir*/
53205+ if (table->child != NULL && op != 0)
53206+ return 0;
53207+
53208+ mode |= GR_FIND;
53209+ /* it's only a read if it's an entry, read on dirs is for readdir */
53210+ if (op & MAY_READ)
53211+ mode |= GR_READ;
53212+ if (op & MAY_WRITE)
53213+ mode |= GR_WRITE;
53214+
53215+ preempt_disable();
53216+
53217+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53218+
53219+ /* it's only a read/write if it's an actual entry, not a dir
53220+ (which are opened for readdir)
53221+ */
53222+
53223+ /* convert the requested sysctl entry into a pathname */
53224+
53225+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53226+ len += strlen(tmp->procname);
53227+ len++;
53228+ depth++;
53229+ }
53230+
53231+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
53232+ /* deny */
53233+ goto out;
53234+ }
53235+
53236+ memset(path, 0, PAGE_SIZE);
53237+
53238+ memcpy(path, proc_sys, strlen(proc_sys));
53239+
53240+ pos += strlen(proc_sys);
53241+
53242+ for (; depth > 0; depth--) {
53243+ path[pos] = '/';
53244+ pos++;
53245+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53246+ if (depth == i) {
53247+ memcpy(path + pos, tmp->procname,
53248+ strlen(tmp->procname));
53249+ pos += strlen(tmp->procname);
53250+ }
53251+ i++;
53252+ }
53253+ }
53254+
53255+ obj = gr_lookup_by_name(path, pos);
53256+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
53257+
53258+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
53259+ ((err & mode) != mode))) {
53260+ __u32 new_mode = mode;
53261+
53262+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53263+
53264+ err = 0;
53265+ gr_log_learn_sysctl(path, new_mode);
53266+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
53267+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
53268+ err = -ENOENT;
53269+ } else if (!(err & GR_FIND)) {
53270+ err = -ENOENT;
53271+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
53272+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
53273+ path, (mode & GR_READ) ? " reading" : "",
53274+ (mode & GR_WRITE) ? " writing" : "");
53275+ err = -EACCES;
53276+ } else if ((err & mode) != mode) {
53277+ err = -EACCES;
53278+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
53279+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
53280+ path, (mode & GR_READ) ? " reading" : "",
53281+ (mode & GR_WRITE) ? " writing" : "");
53282+ err = 0;
53283+ } else
53284+ err = 0;
53285+
53286+ out:
53287+ preempt_enable();
53288+
53289+ return err;
53290+}
53291+#endif
53292+
53293+int
53294+gr_handle_proc_ptrace(struct task_struct *task)
53295+{
53296+ struct file *filp;
53297+ struct task_struct *tmp = task;
53298+ struct task_struct *curtemp = current;
53299+ __u32 retmode;
53300+
53301+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53302+ if (unlikely(!(gr_status & GR_READY)))
53303+ return 0;
53304+#endif
53305+
53306+ read_lock(&tasklist_lock);
53307+ read_lock(&grsec_exec_file_lock);
53308+ filp = task->exec_file;
53309+
53310+ while (tmp->pid > 0) {
53311+ if (tmp == curtemp)
53312+ break;
53313+ tmp = tmp->real_parent;
53314+ }
53315+
53316+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53317+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53318+ read_unlock(&grsec_exec_file_lock);
53319+ read_unlock(&tasklist_lock);
53320+ return 1;
53321+ }
53322+
53323+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53324+ if (!(gr_status & GR_READY)) {
53325+ read_unlock(&grsec_exec_file_lock);
53326+ read_unlock(&tasklist_lock);
53327+ return 0;
53328+ }
53329+#endif
53330+
53331+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53332+ read_unlock(&grsec_exec_file_lock);
53333+ read_unlock(&tasklist_lock);
53334+
53335+ if (retmode & GR_NOPTRACE)
53336+ return 1;
53337+
53338+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53339+ && (current->acl != task->acl || (current->acl != current->role->root_label
53340+ && current->pid != task->pid)))
53341+ return 1;
53342+
53343+ return 0;
53344+}
53345+
53346+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53347+{
53348+ if (unlikely(!(gr_status & GR_READY)))
53349+ return;
53350+
53351+ if (!(current->role->roletype & GR_ROLE_GOD))
53352+ return;
53353+
53354+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53355+ p->role->rolename, gr_task_roletype_to_char(p),
53356+ p->acl->filename);
53357+}
53358+
53359+int
53360+gr_handle_ptrace(struct task_struct *task, const long request)
53361+{
53362+ struct task_struct *tmp = task;
53363+ struct task_struct *curtemp = current;
53364+ __u32 retmode;
53365+
53366+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53367+ if (unlikely(!(gr_status & GR_READY)))
53368+ return 0;
53369+#endif
53370+
53371+ read_lock(&tasklist_lock);
53372+ while (tmp->pid > 0) {
53373+ if (tmp == curtemp)
53374+ break;
53375+ tmp = tmp->real_parent;
53376+ }
53377+
53378+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53379+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53380+ read_unlock(&tasklist_lock);
53381+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53382+ return 1;
53383+ }
53384+ read_unlock(&tasklist_lock);
53385+
53386+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53387+ if (!(gr_status & GR_READY))
53388+ return 0;
53389+#endif
53390+
53391+ read_lock(&grsec_exec_file_lock);
53392+ if (unlikely(!task->exec_file)) {
53393+ read_unlock(&grsec_exec_file_lock);
53394+ return 0;
53395+ }
53396+
53397+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53398+ read_unlock(&grsec_exec_file_lock);
53399+
53400+ if (retmode & GR_NOPTRACE) {
53401+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53402+ return 1;
53403+ }
53404+
53405+ if (retmode & GR_PTRACERD) {
53406+ switch (request) {
53407+ case PTRACE_SEIZE:
53408+ case PTRACE_POKETEXT:
53409+ case PTRACE_POKEDATA:
53410+ case PTRACE_POKEUSR:
53411+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53412+ case PTRACE_SETREGS:
53413+ case PTRACE_SETFPREGS:
53414+#endif
53415+#ifdef CONFIG_X86
53416+ case PTRACE_SETFPXREGS:
53417+#endif
53418+#ifdef CONFIG_ALTIVEC
53419+ case PTRACE_SETVRREGS:
53420+#endif
53421+ return 1;
53422+ default:
53423+ return 0;
53424+ }
53425+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
53426+ !(current->role->roletype & GR_ROLE_GOD) &&
53427+ (current->acl != task->acl)) {
53428+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53429+ return 1;
53430+ }
53431+
53432+ return 0;
53433+}
53434+
53435+static int is_writable_mmap(const struct file *filp)
53436+{
53437+ struct task_struct *task = current;
53438+ struct acl_object_label *obj, *obj2;
53439+
53440+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53441+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53442+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53443+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53444+ task->role->root_label);
53445+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53446+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53447+ return 1;
53448+ }
53449+ }
53450+ return 0;
53451+}
53452+
53453+int
53454+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53455+{
53456+ __u32 mode;
53457+
53458+ if (unlikely(!file || !(prot & PROT_EXEC)))
53459+ return 1;
53460+
53461+ if (is_writable_mmap(file))
53462+ return 0;
53463+
53464+ mode =
53465+ gr_search_file(file->f_path.dentry,
53466+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53467+ file->f_path.mnt);
53468+
53469+ if (!gr_tpe_allow(file))
53470+ return 0;
53471+
53472+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53473+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53474+ return 0;
53475+ } else if (unlikely(!(mode & GR_EXEC))) {
53476+ return 0;
53477+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53478+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53479+ return 1;
53480+ }
53481+
53482+ return 1;
53483+}
53484+
53485+int
53486+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53487+{
53488+ __u32 mode;
53489+
53490+ if (unlikely(!file || !(prot & PROT_EXEC)))
53491+ return 1;
53492+
53493+ if (is_writable_mmap(file))
53494+ return 0;
53495+
53496+ mode =
53497+ gr_search_file(file->f_path.dentry,
53498+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53499+ file->f_path.mnt);
53500+
53501+ if (!gr_tpe_allow(file))
53502+ return 0;
53503+
53504+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53505+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53506+ return 0;
53507+ } else if (unlikely(!(mode & GR_EXEC))) {
53508+ return 0;
53509+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53510+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53511+ return 1;
53512+ }
53513+
53514+ return 1;
53515+}
53516+
53517+void
53518+gr_acl_handle_psacct(struct task_struct *task, const long code)
53519+{
53520+ unsigned long runtime;
53521+ unsigned long cputime;
53522+ unsigned int wday, cday;
53523+ __u8 whr, chr;
53524+ __u8 wmin, cmin;
53525+ __u8 wsec, csec;
53526+ struct timespec timeval;
53527+
53528+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53529+ !(task->acl->mode & GR_PROCACCT)))
53530+ return;
53531+
53532+ do_posix_clock_monotonic_gettime(&timeval);
53533+ runtime = timeval.tv_sec - task->start_time.tv_sec;
53534+ wday = runtime / (3600 * 24);
53535+ runtime -= wday * (3600 * 24);
53536+ whr = runtime / 3600;
53537+ runtime -= whr * 3600;
53538+ wmin = runtime / 60;
53539+ runtime -= wmin * 60;
53540+ wsec = runtime;
53541+
53542+ cputime = (task->utime + task->stime) / HZ;
53543+ cday = cputime / (3600 * 24);
53544+ cputime -= cday * (3600 * 24);
53545+ chr = cputime / 3600;
53546+ cputime -= chr * 3600;
53547+ cmin = cputime / 60;
53548+ cputime -= cmin * 60;
53549+ csec = cputime;
53550+
53551+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53552+
53553+ return;
53554+}
53555+
53556+void gr_set_kernel_label(struct task_struct *task)
53557+{
53558+ if (gr_status & GR_READY) {
53559+ task->role = kernel_role;
53560+ task->acl = kernel_role->root_label;
53561+ }
53562+ return;
53563+}
53564+
53565+#ifdef CONFIG_TASKSTATS
53566+int gr_is_taskstats_denied(int pid)
53567+{
53568+ struct task_struct *task;
53569+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53570+ const struct cred *cred;
53571+#endif
53572+ int ret = 0;
53573+
53574+ /* restrict taskstats viewing to un-chrooted root users
53575+ who have the 'view' subject flag if the RBAC system is enabled
53576+ */
53577+
53578+ rcu_read_lock();
53579+ read_lock(&tasklist_lock);
53580+ task = find_task_by_vpid(pid);
53581+ if (task) {
53582+#ifdef CONFIG_GRKERNSEC_CHROOT
53583+ if (proc_is_chrooted(task))
53584+ ret = -EACCES;
53585+#endif
53586+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53587+ cred = __task_cred(task);
53588+#ifdef CONFIG_GRKERNSEC_PROC_USER
53589+ if (cred->uid != 0)
53590+ ret = -EACCES;
53591+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53592+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53593+ ret = -EACCES;
53594+#endif
53595+#endif
53596+ if (gr_status & GR_READY) {
53597+ if (!(task->acl->mode & GR_VIEW))
53598+ ret = -EACCES;
53599+ }
53600+ } else
53601+ ret = -ENOENT;
53602+
53603+ read_unlock(&tasklist_lock);
53604+ rcu_read_unlock();
53605+
53606+ return ret;
53607+}
53608+#endif
53609+
53610+/* AUXV entries are filled via a descendant of search_binary_handler
53611+ after we've already applied the subject for the target
53612+*/
53613+int gr_acl_enable_at_secure(void)
53614+{
53615+ if (unlikely(!(gr_status & GR_READY)))
53616+ return 0;
53617+
53618+ if (current->acl->mode & GR_ATSECURE)
53619+ return 1;
53620+
53621+ return 0;
53622+}
53623+
53624+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53625+{
53626+ struct task_struct *task = current;
53627+ struct dentry *dentry = file->f_path.dentry;
53628+ struct vfsmount *mnt = file->f_path.mnt;
53629+ struct acl_object_label *obj, *tmp;
53630+ struct acl_subject_label *subj;
53631+ unsigned int bufsize;
53632+ int is_not_root;
53633+ char *path;
53634+ dev_t dev = __get_dev(dentry);
53635+
53636+ if (unlikely(!(gr_status & GR_READY)))
53637+ return 1;
53638+
53639+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53640+ return 1;
53641+
53642+ /* ignore Eric Biederman */
53643+ if (IS_PRIVATE(dentry->d_inode))
53644+ return 1;
53645+
53646+ subj = task->acl;
53647+ do {
53648+ obj = lookup_acl_obj_label(ino, dev, subj);
53649+ if (obj != NULL)
53650+ return (obj->mode & GR_FIND) ? 1 : 0;
53651+ } while ((subj = subj->parent_subject));
53652+
53653+ /* this is purely an optimization since we're looking for an object
53654+ for the directory we're doing a readdir on
53655+ if it's possible for any globbed object to match the entry we're
53656+ filling into the directory, then the object we find here will be
53657+ an anchor point with attached globbed objects
53658+ */
53659+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
53660+ if (obj->globbed == NULL)
53661+ return (obj->mode & GR_FIND) ? 1 : 0;
53662+
53663+ is_not_root = ((obj->filename[0] == '/') &&
53664+ (obj->filename[1] == '\0')) ? 0 : 1;
53665+ bufsize = PAGE_SIZE - namelen - is_not_root;
53666+
53667+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
53668+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
53669+ return 1;
53670+
53671+ preempt_disable();
53672+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53673+ bufsize);
53674+
53675+ bufsize = strlen(path);
53676+
53677+ /* if base is "/", don't append an additional slash */
53678+ if (is_not_root)
53679+ *(path + bufsize) = '/';
53680+ memcpy(path + bufsize + is_not_root, name, namelen);
53681+ *(path + bufsize + namelen + is_not_root) = '\0';
53682+
53683+ tmp = obj->globbed;
53684+ while (tmp) {
53685+ if (!glob_match(tmp->filename, path)) {
53686+ preempt_enable();
53687+ return (tmp->mode & GR_FIND) ? 1 : 0;
53688+ }
53689+ tmp = tmp->next;
53690+ }
53691+ preempt_enable();
53692+ return (obj->mode & GR_FIND) ? 1 : 0;
53693+}
53694+
53695+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
53696+EXPORT_SYMBOL(gr_acl_is_enabled);
53697+#endif
53698+EXPORT_SYMBOL(gr_learn_resource);
53699+EXPORT_SYMBOL(gr_set_kernel_label);
53700+#ifdef CONFIG_SECURITY
53701+EXPORT_SYMBOL(gr_check_user_change);
53702+EXPORT_SYMBOL(gr_check_group_change);
53703+#endif
53704+
53705diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
53706new file mode 100644
53707index 0000000..34fefda
53708--- /dev/null
53709+++ b/grsecurity/gracl_alloc.c
53710@@ -0,0 +1,105 @@
53711+#include <linux/kernel.h>
53712+#include <linux/mm.h>
53713+#include <linux/slab.h>
53714+#include <linux/vmalloc.h>
53715+#include <linux/gracl.h>
53716+#include <linux/grsecurity.h>
53717+
53718+static unsigned long alloc_stack_next = 1;
53719+static unsigned long alloc_stack_size = 1;
53720+static void **alloc_stack;
53721+
53722+static __inline__ int
53723+alloc_pop(void)
53724+{
53725+ if (alloc_stack_next == 1)
53726+ return 0;
53727+
53728+ kfree(alloc_stack[alloc_stack_next - 2]);
53729+
53730+ alloc_stack_next--;
53731+
53732+ return 1;
53733+}
53734+
53735+static __inline__ int
53736+alloc_push(void *buf)
53737+{
53738+ if (alloc_stack_next >= alloc_stack_size)
53739+ return 1;
53740+
53741+ alloc_stack[alloc_stack_next - 1] = buf;
53742+
53743+ alloc_stack_next++;
53744+
53745+ return 0;
53746+}
53747+
53748+void *
53749+acl_alloc(unsigned long len)
53750+{
53751+ void *ret = NULL;
53752+
53753+ if (!len || len > PAGE_SIZE)
53754+ goto out;
53755+
53756+ ret = kmalloc(len, GFP_KERNEL);
53757+
53758+ if (ret) {
53759+ if (alloc_push(ret)) {
53760+ kfree(ret);
53761+ ret = NULL;
53762+ }
53763+ }
53764+
53765+out:
53766+ return ret;
53767+}
53768+
53769+void *
53770+acl_alloc_num(unsigned long num, unsigned long len)
53771+{
53772+ if (!len || (num > (PAGE_SIZE / len)))
53773+ return NULL;
53774+
53775+ return acl_alloc(num * len);
53776+}
53777+
53778+void
53779+acl_free_all(void)
53780+{
53781+ if (gr_acl_is_enabled() || !alloc_stack)
53782+ return;
53783+
53784+ while (alloc_pop()) ;
53785+
53786+ if (alloc_stack) {
53787+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
53788+ kfree(alloc_stack);
53789+ else
53790+ vfree(alloc_stack);
53791+ }
53792+
53793+ alloc_stack = NULL;
53794+ alloc_stack_size = 1;
53795+ alloc_stack_next = 1;
53796+
53797+ return;
53798+}
53799+
53800+int
53801+acl_alloc_stack_init(unsigned long size)
53802+{
53803+ if ((size * sizeof (void *)) <= PAGE_SIZE)
53804+ alloc_stack =
53805+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
53806+ else
53807+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
53808+
53809+ alloc_stack_size = size;
53810+
53811+ if (!alloc_stack)
53812+ return 0;
53813+ else
53814+ return 1;
53815+}
53816diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
53817new file mode 100644
53818index 0000000..6d21049
53819--- /dev/null
53820+++ b/grsecurity/gracl_cap.c
53821@@ -0,0 +1,110 @@
53822+#include <linux/kernel.h>
53823+#include <linux/module.h>
53824+#include <linux/sched.h>
53825+#include <linux/gracl.h>
53826+#include <linux/grsecurity.h>
53827+#include <linux/grinternal.h>
53828+
53829+extern const char *captab_log[];
53830+extern int captab_log_entries;
53831+
53832+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
53833+{
53834+ struct acl_subject_label *curracl;
53835+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53836+ kernel_cap_t cap_audit = __cap_empty_set;
53837+
53838+ if (!gr_acl_is_enabled())
53839+ return 1;
53840+
53841+ curracl = task->acl;
53842+
53843+ cap_drop = curracl->cap_lower;
53844+ cap_mask = curracl->cap_mask;
53845+ cap_audit = curracl->cap_invert_audit;
53846+
53847+ while ((curracl = curracl->parent_subject)) {
53848+ /* if the cap isn't specified in the current computed mask but is specified in the
53849+ current level subject, and is lowered in the current level subject, then add
53850+ it to the set of dropped capabilities
53851+ otherwise, add the current level subject's mask to the current computed mask
53852+ */
53853+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53854+ cap_raise(cap_mask, cap);
53855+ if (cap_raised(curracl->cap_lower, cap))
53856+ cap_raise(cap_drop, cap);
53857+ if (cap_raised(curracl->cap_invert_audit, cap))
53858+ cap_raise(cap_audit, cap);
53859+ }
53860+ }
53861+
53862+ if (!cap_raised(cap_drop, cap)) {
53863+ if (cap_raised(cap_audit, cap))
53864+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
53865+ return 1;
53866+ }
53867+
53868+ curracl = task->acl;
53869+
53870+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
53871+ && cap_raised(cred->cap_effective, cap)) {
53872+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53873+ task->role->roletype, cred->uid,
53874+ cred->gid, task->exec_file ?
53875+ gr_to_filename(task->exec_file->f_path.dentry,
53876+ task->exec_file->f_path.mnt) : curracl->filename,
53877+ curracl->filename, 0UL,
53878+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
53879+ return 1;
53880+ }
53881+
53882+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
53883+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
53884+
53885+ return 0;
53886+}
53887+
53888+int
53889+gr_acl_is_capable(const int cap)
53890+{
53891+ return gr_task_acl_is_capable(current, current_cred(), cap);
53892+}
53893+
53894+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
53895+{
53896+ struct acl_subject_label *curracl;
53897+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53898+
53899+ if (!gr_acl_is_enabled())
53900+ return 1;
53901+
53902+ curracl = task->acl;
53903+
53904+ cap_drop = curracl->cap_lower;
53905+ cap_mask = curracl->cap_mask;
53906+
53907+ while ((curracl = curracl->parent_subject)) {
53908+ /* if the cap isn't specified in the current computed mask but is specified in the
53909+ current level subject, and is lowered in the current level subject, then add
53910+ it to the set of dropped capabilities
53911+ otherwise, add the current level subject's mask to the current computed mask
53912+ */
53913+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53914+ cap_raise(cap_mask, cap);
53915+ if (cap_raised(curracl->cap_lower, cap))
53916+ cap_raise(cap_drop, cap);
53917+ }
53918+ }
53919+
53920+ if (!cap_raised(cap_drop, cap))
53921+ return 1;
53922+
53923+ return 0;
53924+}
53925+
53926+int
53927+gr_acl_is_capable_nolog(const int cap)
53928+{
53929+ return gr_task_acl_is_capable_nolog(current, cap);
53930+}
53931+
53932diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
53933new file mode 100644
53934index 0000000..88d0e87
53935--- /dev/null
53936+++ b/grsecurity/gracl_fs.c
53937@@ -0,0 +1,435 @@
53938+#include <linux/kernel.h>
53939+#include <linux/sched.h>
53940+#include <linux/types.h>
53941+#include <linux/fs.h>
53942+#include <linux/file.h>
53943+#include <linux/stat.h>
53944+#include <linux/grsecurity.h>
53945+#include <linux/grinternal.h>
53946+#include <linux/gracl.h>
53947+
53948+umode_t
53949+gr_acl_umask(void)
53950+{
53951+ if (unlikely(!gr_acl_is_enabled()))
53952+ return 0;
53953+
53954+ return current->role->umask;
53955+}
53956+
53957+__u32
53958+gr_acl_handle_hidden_file(const struct dentry * dentry,
53959+ const struct vfsmount * mnt)
53960+{
53961+ __u32 mode;
53962+
53963+ if (unlikely(!dentry->d_inode))
53964+ return GR_FIND;
53965+
53966+ mode =
53967+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
53968+
53969+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
53970+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
53971+ return mode;
53972+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
53973+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
53974+ return 0;
53975+ } else if (unlikely(!(mode & GR_FIND)))
53976+ return 0;
53977+
53978+ return GR_FIND;
53979+}
53980+
53981+__u32
53982+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53983+ int acc_mode)
53984+{
53985+ __u32 reqmode = GR_FIND;
53986+ __u32 mode;
53987+
53988+ if (unlikely(!dentry->d_inode))
53989+ return reqmode;
53990+
53991+ if (acc_mode & MAY_APPEND)
53992+ reqmode |= GR_APPEND;
53993+ else if (acc_mode & MAY_WRITE)
53994+ reqmode |= GR_WRITE;
53995+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
53996+ reqmode |= GR_READ;
53997+
53998+ mode =
53999+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54000+ mnt);
54001+
54002+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54003+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54004+ reqmode & GR_READ ? " reading" : "",
54005+ reqmode & GR_WRITE ? " writing" : reqmode &
54006+ GR_APPEND ? " appending" : "");
54007+ return reqmode;
54008+ } else
54009+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54010+ {
54011+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54012+ reqmode & GR_READ ? " reading" : "",
54013+ reqmode & GR_WRITE ? " writing" : reqmode &
54014+ GR_APPEND ? " appending" : "");
54015+ return 0;
54016+ } else if (unlikely((mode & reqmode) != reqmode))
54017+ return 0;
54018+
54019+ return reqmode;
54020+}
54021+
54022+__u32
54023+gr_acl_handle_creat(const struct dentry * dentry,
54024+ const struct dentry * p_dentry,
54025+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54026+ const int imode)
54027+{
54028+ __u32 reqmode = GR_WRITE | GR_CREATE;
54029+ __u32 mode;
54030+
54031+ if (acc_mode & MAY_APPEND)
54032+ reqmode |= GR_APPEND;
54033+ // if a directory was required or the directory already exists, then
54034+ // don't count this open as a read
54035+ if ((acc_mode & MAY_READ) &&
54036+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54037+ reqmode |= GR_READ;
54038+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54039+ reqmode |= GR_SETID;
54040+
54041+ mode =
54042+ gr_check_create(dentry, p_dentry, p_mnt,
54043+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54044+
54045+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54046+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54047+ reqmode & GR_READ ? " reading" : "",
54048+ reqmode & GR_WRITE ? " writing" : reqmode &
54049+ GR_APPEND ? " appending" : "");
54050+ return reqmode;
54051+ } else
54052+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54053+ {
54054+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54055+ reqmode & GR_READ ? " reading" : "",
54056+ reqmode & GR_WRITE ? " writing" : reqmode &
54057+ GR_APPEND ? " appending" : "");
54058+ return 0;
54059+ } else if (unlikely((mode & reqmode) != reqmode))
54060+ return 0;
54061+
54062+ return reqmode;
54063+}
54064+
54065+__u32
54066+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54067+ const int fmode)
54068+{
54069+ __u32 mode, reqmode = GR_FIND;
54070+
54071+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54072+ reqmode |= GR_EXEC;
54073+ if (fmode & S_IWOTH)
54074+ reqmode |= GR_WRITE;
54075+ if (fmode & S_IROTH)
54076+ reqmode |= GR_READ;
54077+
54078+ mode =
54079+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54080+ mnt);
54081+
54082+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54083+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54084+ reqmode & GR_READ ? " reading" : "",
54085+ reqmode & GR_WRITE ? " writing" : "",
54086+ reqmode & GR_EXEC ? " executing" : "");
54087+ return reqmode;
54088+ } else
54089+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54090+ {
54091+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54092+ reqmode & GR_READ ? " reading" : "",
54093+ reqmode & GR_WRITE ? " writing" : "",
54094+ reqmode & GR_EXEC ? " executing" : "");
54095+ return 0;
54096+ } else if (unlikely((mode & reqmode) != reqmode))
54097+ return 0;
54098+
54099+ return reqmode;
54100+}
54101+
54102+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54103+{
54104+ __u32 mode;
54105+
54106+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54107+
54108+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54109+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54110+ return mode;
54111+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54112+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54113+ return 0;
54114+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54115+ return 0;
54116+
54117+ return (reqmode);
54118+}
54119+
54120+__u32
54121+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54122+{
54123+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54124+}
54125+
54126+__u32
54127+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54128+{
54129+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54130+}
54131+
54132+__u32
54133+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54134+{
54135+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54136+}
54137+
54138+__u32
54139+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54140+{
54141+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54142+}
54143+
54144+__u32
54145+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54146+ umode_t *modeptr)
54147+{
54148+ umode_t mode;
54149+
54150+ *modeptr &= ~gr_acl_umask();
54151+ mode = *modeptr;
54152+
54153+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54154+ return 1;
54155+
54156+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
54157+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54158+ GR_CHMOD_ACL_MSG);
54159+ } else {
54160+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54161+ }
54162+}
54163+
54164+__u32
54165+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54166+{
54167+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54168+}
54169+
54170+__u32
54171+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54172+{
54173+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54174+}
54175+
54176+__u32
54177+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54178+{
54179+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54180+}
54181+
54182+__u32
54183+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54184+{
54185+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54186+ GR_UNIXCONNECT_ACL_MSG);
54187+}
54188+
54189+/* hardlinks require at minimum create and link permission,
54190+ any additional privilege required is based on the
54191+ privilege of the file being linked to
54192+*/
54193+__u32
54194+gr_acl_handle_link(const struct dentry * new_dentry,
54195+ const struct dentry * parent_dentry,
54196+ const struct vfsmount * parent_mnt,
54197+ const struct dentry * old_dentry,
54198+ const struct vfsmount * old_mnt, const char *to)
54199+{
54200+ __u32 mode;
54201+ __u32 needmode = GR_CREATE | GR_LINK;
54202+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54203+
54204+ mode =
54205+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54206+ old_mnt);
54207+
54208+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54209+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54210+ return mode;
54211+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54212+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54213+ return 0;
54214+ } else if (unlikely((mode & needmode) != needmode))
54215+ return 0;
54216+
54217+ return 1;
54218+}
54219+
54220+__u32
54221+gr_acl_handle_symlink(const struct dentry * new_dentry,
54222+ const struct dentry * parent_dentry,
54223+ const struct vfsmount * parent_mnt, const char *from)
54224+{
54225+ __u32 needmode = GR_WRITE | GR_CREATE;
54226+ __u32 mode;
54227+
54228+ mode =
54229+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
54230+ GR_CREATE | GR_AUDIT_CREATE |
54231+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54232+
54233+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54234+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54235+ return mode;
54236+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54237+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54238+ return 0;
54239+ } else if (unlikely((mode & needmode) != needmode))
54240+ return 0;
54241+
54242+ return (GR_WRITE | GR_CREATE);
54243+}
54244+
54245+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54246+{
54247+ __u32 mode;
54248+
54249+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54250+
54251+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54252+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54253+ return mode;
54254+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54255+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54256+ return 0;
54257+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54258+ return 0;
54259+
54260+ return (reqmode);
54261+}
54262+
54263+__u32
54264+gr_acl_handle_mknod(const struct dentry * new_dentry,
54265+ const struct dentry * parent_dentry,
54266+ const struct vfsmount * parent_mnt,
54267+ const int mode)
54268+{
54269+ __u32 reqmode = GR_WRITE | GR_CREATE;
54270+ if (unlikely(mode & (S_ISUID | S_ISGID)))
54271+ reqmode |= GR_SETID;
54272+
54273+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54274+ reqmode, GR_MKNOD_ACL_MSG);
54275+}
54276+
54277+__u32
54278+gr_acl_handle_mkdir(const struct dentry *new_dentry,
54279+ const struct dentry *parent_dentry,
54280+ const struct vfsmount *parent_mnt)
54281+{
54282+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54283+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54284+}
54285+
54286+#define RENAME_CHECK_SUCCESS(old, new) \
54287+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54288+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54289+
54290+int
54291+gr_acl_handle_rename(struct dentry *new_dentry,
54292+ struct dentry *parent_dentry,
54293+ const struct vfsmount *parent_mnt,
54294+ struct dentry *old_dentry,
54295+ struct inode *old_parent_inode,
54296+ struct vfsmount *old_mnt, const char *newname)
54297+{
54298+ __u32 comp1, comp2;
54299+ int error = 0;
54300+
54301+ if (unlikely(!gr_acl_is_enabled()))
54302+ return 0;
54303+
54304+ if (!new_dentry->d_inode) {
54305+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54306+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54307+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54308+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54309+ GR_DELETE | GR_AUDIT_DELETE |
54310+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54311+ GR_SUPPRESS, old_mnt);
54312+ } else {
54313+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54314+ GR_CREATE | GR_DELETE |
54315+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54316+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54317+ GR_SUPPRESS, parent_mnt);
54318+ comp2 =
54319+ gr_search_file(old_dentry,
54320+ GR_READ | GR_WRITE | GR_AUDIT_READ |
54321+ GR_DELETE | GR_AUDIT_DELETE |
54322+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54323+ }
54324+
54325+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54326+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54327+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54328+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54329+ && !(comp2 & GR_SUPPRESS)) {
54330+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54331+ error = -EACCES;
54332+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54333+ error = -EACCES;
54334+
54335+ return error;
54336+}
54337+
54338+void
54339+gr_acl_handle_exit(void)
54340+{
54341+ u16 id;
54342+ char *rolename;
54343+ struct file *exec_file;
54344+
54345+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54346+ !(current->role->roletype & GR_ROLE_PERSIST))) {
54347+ id = current->acl_role_id;
54348+ rolename = current->role->rolename;
54349+ gr_set_acls(1);
54350+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54351+ }
54352+
54353+ write_lock(&grsec_exec_file_lock);
54354+ exec_file = current->exec_file;
54355+ current->exec_file = NULL;
54356+ write_unlock(&grsec_exec_file_lock);
54357+
54358+ if (exec_file)
54359+ fput(exec_file);
54360+}
54361+
54362+int
54363+gr_acl_handle_procpidmem(const struct task_struct *task)
54364+{
54365+ if (unlikely(!gr_acl_is_enabled()))
54366+ return 0;
54367+
54368+ if (task != current && task->acl->mode & GR_PROTPROCFD)
54369+ return -EACCES;
54370+
54371+ return 0;
54372+}
54373diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54374new file mode 100644
54375index 0000000..17050ca
54376--- /dev/null
54377+++ b/grsecurity/gracl_ip.c
54378@@ -0,0 +1,381 @@
54379+#include <linux/kernel.h>
54380+#include <asm/uaccess.h>
54381+#include <asm/errno.h>
54382+#include <net/sock.h>
54383+#include <linux/file.h>
54384+#include <linux/fs.h>
54385+#include <linux/net.h>
54386+#include <linux/in.h>
54387+#include <linux/skbuff.h>
54388+#include <linux/ip.h>
54389+#include <linux/udp.h>
54390+#include <linux/types.h>
54391+#include <linux/sched.h>
54392+#include <linux/netdevice.h>
54393+#include <linux/inetdevice.h>
54394+#include <linux/gracl.h>
54395+#include <linux/grsecurity.h>
54396+#include <linux/grinternal.h>
54397+
54398+#define GR_BIND 0x01
54399+#define GR_CONNECT 0x02
54400+#define GR_INVERT 0x04
54401+#define GR_BINDOVERRIDE 0x08
54402+#define GR_CONNECTOVERRIDE 0x10
54403+#define GR_SOCK_FAMILY 0x20
54404+
54405+static const char * gr_protocols[IPPROTO_MAX] = {
54406+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54407+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54408+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54409+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54410+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54411+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54412+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54413+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54414+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54415+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54416+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54417+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54418+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54419+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54420+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54421+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54422+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54423+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54424+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54425+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54426+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54427+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54428+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54429+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54430+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54431+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54432+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54433+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54434+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54435+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54436+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54437+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54438+ };
54439+
54440+static const char * gr_socktypes[SOCK_MAX] = {
54441+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54442+ "unknown:7", "unknown:8", "unknown:9", "packet"
54443+ };
54444+
54445+static const char * gr_sockfamilies[AF_MAX+1] = {
54446+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54447+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54448+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54449+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54450+ };
54451+
54452+const char *
54453+gr_proto_to_name(unsigned char proto)
54454+{
54455+ return gr_protocols[proto];
54456+}
54457+
54458+const char *
54459+gr_socktype_to_name(unsigned char type)
54460+{
54461+ return gr_socktypes[type];
54462+}
54463+
54464+const char *
54465+gr_sockfamily_to_name(unsigned char family)
54466+{
54467+ return gr_sockfamilies[family];
54468+}
54469+
54470+int
54471+gr_search_socket(const int domain, const int type, const int protocol)
54472+{
54473+ struct acl_subject_label *curr;
54474+ const struct cred *cred = current_cred();
54475+
54476+ if (unlikely(!gr_acl_is_enabled()))
54477+ goto exit;
54478+
54479+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
54480+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54481+ goto exit; // let the kernel handle it
54482+
54483+ curr = current->acl;
54484+
54485+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54486+ /* the family is allowed, if this is PF_INET allow it only if
54487+ the extra sock type/protocol checks pass */
54488+ if (domain == PF_INET)
54489+ goto inet_check;
54490+ goto exit;
54491+ } else {
54492+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54493+ __u32 fakeip = 0;
54494+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54495+ current->role->roletype, cred->uid,
54496+ cred->gid, current->exec_file ?
54497+ gr_to_filename(current->exec_file->f_path.dentry,
54498+ current->exec_file->f_path.mnt) :
54499+ curr->filename, curr->filename,
54500+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54501+ &current->signal->saved_ip);
54502+ goto exit;
54503+ }
54504+ goto exit_fail;
54505+ }
54506+
54507+inet_check:
54508+ /* the rest of this checking is for IPv4 only */
54509+ if (!curr->ips)
54510+ goto exit;
54511+
54512+ if ((curr->ip_type & (1 << type)) &&
54513+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54514+ goto exit;
54515+
54516+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54517+ /* we don't place acls on raw sockets , and sometimes
54518+ dgram/ip sockets are opened for ioctl and not
54519+ bind/connect, so we'll fake a bind learn log */
54520+ if (type == SOCK_RAW || type == SOCK_PACKET) {
54521+ __u32 fakeip = 0;
54522+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54523+ current->role->roletype, cred->uid,
54524+ cred->gid, current->exec_file ?
54525+ gr_to_filename(current->exec_file->f_path.dentry,
54526+ current->exec_file->f_path.mnt) :
54527+ curr->filename, curr->filename,
54528+ &fakeip, 0, type,
54529+ protocol, GR_CONNECT, &current->signal->saved_ip);
54530+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54531+ __u32 fakeip = 0;
54532+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54533+ current->role->roletype, cred->uid,
54534+ cred->gid, current->exec_file ?
54535+ gr_to_filename(current->exec_file->f_path.dentry,
54536+ current->exec_file->f_path.mnt) :
54537+ curr->filename, curr->filename,
54538+ &fakeip, 0, type,
54539+ protocol, GR_BIND, &current->signal->saved_ip);
54540+ }
54541+ /* we'll log when they use connect or bind */
54542+ goto exit;
54543+ }
54544+
54545+exit_fail:
54546+ if (domain == PF_INET)
54547+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54548+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
54549+ else
54550+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54551+ gr_socktype_to_name(type), protocol);
54552+
54553+ return 0;
54554+exit:
54555+ return 1;
54556+}
54557+
54558+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54559+{
54560+ if ((ip->mode & mode) &&
54561+ (ip_port >= ip->low) &&
54562+ (ip_port <= ip->high) &&
54563+ ((ntohl(ip_addr) & our_netmask) ==
54564+ (ntohl(our_addr) & our_netmask))
54565+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54566+ && (ip->type & (1 << type))) {
54567+ if (ip->mode & GR_INVERT)
54568+ return 2; // specifically denied
54569+ else
54570+ return 1; // allowed
54571+ }
54572+
54573+ return 0; // not specifically allowed, may continue parsing
54574+}
54575+
54576+static int
54577+gr_search_connectbind(const int full_mode, struct sock *sk,
54578+ struct sockaddr_in *addr, const int type)
54579+{
54580+ char iface[IFNAMSIZ] = {0};
54581+ struct acl_subject_label *curr;
54582+ struct acl_ip_label *ip;
54583+ struct inet_sock *isk;
54584+ struct net_device *dev;
54585+ struct in_device *idev;
54586+ unsigned long i;
54587+ int ret;
54588+ int mode = full_mode & (GR_BIND | GR_CONNECT);
54589+ __u32 ip_addr = 0;
54590+ __u32 our_addr;
54591+ __u32 our_netmask;
54592+ char *p;
54593+ __u16 ip_port = 0;
54594+ const struct cred *cred = current_cred();
54595+
54596+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54597+ return 0;
54598+
54599+ curr = current->acl;
54600+ isk = inet_sk(sk);
54601+
54602+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54603+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54604+ addr->sin_addr.s_addr = curr->inaddr_any_override;
54605+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54606+ struct sockaddr_in saddr;
54607+ int err;
54608+
54609+ saddr.sin_family = AF_INET;
54610+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
54611+ saddr.sin_port = isk->inet_sport;
54612+
54613+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54614+ if (err)
54615+ return err;
54616+
54617+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54618+ if (err)
54619+ return err;
54620+ }
54621+
54622+ if (!curr->ips)
54623+ return 0;
54624+
54625+ ip_addr = addr->sin_addr.s_addr;
54626+ ip_port = ntohs(addr->sin_port);
54627+
54628+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54629+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54630+ current->role->roletype, cred->uid,
54631+ cred->gid, current->exec_file ?
54632+ gr_to_filename(current->exec_file->f_path.dentry,
54633+ current->exec_file->f_path.mnt) :
54634+ curr->filename, curr->filename,
54635+ &ip_addr, ip_port, type,
54636+ sk->sk_protocol, mode, &current->signal->saved_ip);
54637+ return 0;
54638+ }
54639+
54640+ for (i = 0; i < curr->ip_num; i++) {
54641+ ip = *(curr->ips + i);
54642+ if (ip->iface != NULL) {
54643+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
54644+ p = strchr(iface, ':');
54645+ if (p != NULL)
54646+ *p = '\0';
54647+ dev = dev_get_by_name(sock_net(sk), iface);
54648+ if (dev == NULL)
54649+ continue;
54650+ idev = in_dev_get(dev);
54651+ if (idev == NULL) {
54652+ dev_put(dev);
54653+ continue;
54654+ }
54655+ rcu_read_lock();
54656+ for_ifa(idev) {
54657+ if (!strcmp(ip->iface, ifa->ifa_label)) {
54658+ our_addr = ifa->ifa_address;
54659+ our_netmask = 0xffffffff;
54660+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54661+ if (ret == 1) {
54662+ rcu_read_unlock();
54663+ in_dev_put(idev);
54664+ dev_put(dev);
54665+ return 0;
54666+ } else if (ret == 2) {
54667+ rcu_read_unlock();
54668+ in_dev_put(idev);
54669+ dev_put(dev);
54670+ goto denied;
54671+ }
54672+ }
54673+ } endfor_ifa(idev);
54674+ rcu_read_unlock();
54675+ in_dev_put(idev);
54676+ dev_put(dev);
54677+ } else {
54678+ our_addr = ip->addr;
54679+ our_netmask = ip->netmask;
54680+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54681+ if (ret == 1)
54682+ return 0;
54683+ else if (ret == 2)
54684+ goto denied;
54685+ }
54686+ }
54687+
54688+denied:
54689+ if (mode == GR_BIND)
54690+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54691+ else if (mode == GR_CONNECT)
54692+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54693+
54694+ return -EACCES;
54695+}
54696+
54697+int
54698+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
54699+{
54700+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
54701+}
54702+
54703+int
54704+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
54705+{
54706+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
54707+}
54708+
54709+int gr_search_listen(struct socket *sock)
54710+{
54711+ struct sock *sk = sock->sk;
54712+ struct sockaddr_in addr;
54713+
54714+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54715+ addr.sin_port = inet_sk(sk)->inet_sport;
54716+
54717+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54718+}
54719+
54720+int gr_search_accept(struct socket *sock)
54721+{
54722+ struct sock *sk = sock->sk;
54723+ struct sockaddr_in addr;
54724+
54725+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54726+ addr.sin_port = inet_sk(sk)->inet_sport;
54727+
54728+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54729+}
54730+
54731+int
54732+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
54733+{
54734+ if (addr)
54735+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
54736+ else {
54737+ struct sockaddr_in sin;
54738+ const struct inet_sock *inet = inet_sk(sk);
54739+
54740+ sin.sin_addr.s_addr = inet->inet_daddr;
54741+ sin.sin_port = inet->inet_dport;
54742+
54743+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54744+ }
54745+}
54746+
54747+int
54748+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
54749+{
54750+ struct sockaddr_in sin;
54751+
54752+ if (unlikely(skb->len < sizeof (struct udphdr)))
54753+ return 0; // skip this packet
54754+
54755+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
54756+ sin.sin_port = udp_hdr(skb)->source;
54757+
54758+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54759+}
54760diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
54761new file mode 100644
54762index 0000000..25f54ef
54763--- /dev/null
54764+++ b/grsecurity/gracl_learn.c
54765@@ -0,0 +1,207 @@
54766+#include <linux/kernel.h>
54767+#include <linux/mm.h>
54768+#include <linux/sched.h>
54769+#include <linux/poll.h>
54770+#include <linux/string.h>
54771+#include <linux/file.h>
54772+#include <linux/types.h>
54773+#include <linux/vmalloc.h>
54774+#include <linux/grinternal.h>
54775+
54776+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
54777+ size_t count, loff_t *ppos);
54778+extern int gr_acl_is_enabled(void);
54779+
54780+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
54781+static int gr_learn_attached;
54782+
54783+/* use a 512k buffer */
54784+#define LEARN_BUFFER_SIZE (512 * 1024)
54785+
54786+static DEFINE_SPINLOCK(gr_learn_lock);
54787+static DEFINE_MUTEX(gr_learn_user_mutex);
54788+
54789+/* we need to maintain two buffers, so that the kernel context of grlearn
54790+ uses a semaphore around the userspace copying, and the other kernel contexts
54791+ use a spinlock when copying into the buffer, since they cannot sleep
54792+*/
54793+static char *learn_buffer;
54794+static char *learn_buffer_user;
54795+static int learn_buffer_len;
54796+static int learn_buffer_user_len;
54797+
54798+static ssize_t
54799+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
54800+{
54801+ DECLARE_WAITQUEUE(wait, current);
54802+ ssize_t retval = 0;
54803+
54804+ add_wait_queue(&learn_wait, &wait);
54805+ set_current_state(TASK_INTERRUPTIBLE);
54806+ do {
54807+ mutex_lock(&gr_learn_user_mutex);
54808+ spin_lock(&gr_learn_lock);
54809+ if (learn_buffer_len)
54810+ break;
54811+ spin_unlock(&gr_learn_lock);
54812+ mutex_unlock(&gr_learn_user_mutex);
54813+ if (file->f_flags & O_NONBLOCK) {
54814+ retval = -EAGAIN;
54815+ goto out;
54816+ }
54817+ if (signal_pending(current)) {
54818+ retval = -ERESTARTSYS;
54819+ goto out;
54820+ }
54821+
54822+ schedule();
54823+ } while (1);
54824+
54825+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
54826+ learn_buffer_user_len = learn_buffer_len;
54827+ retval = learn_buffer_len;
54828+ learn_buffer_len = 0;
54829+
54830+ spin_unlock(&gr_learn_lock);
54831+
54832+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
54833+ retval = -EFAULT;
54834+
54835+ mutex_unlock(&gr_learn_user_mutex);
54836+out:
54837+ set_current_state(TASK_RUNNING);
54838+ remove_wait_queue(&learn_wait, &wait);
54839+ return retval;
54840+}
54841+
54842+static unsigned int
54843+poll_learn(struct file * file, poll_table * wait)
54844+{
54845+ poll_wait(file, &learn_wait, wait);
54846+
54847+ if (learn_buffer_len)
54848+ return (POLLIN | POLLRDNORM);
54849+
54850+ return 0;
54851+}
54852+
54853+void
54854+gr_clear_learn_entries(void)
54855+{
54856+ char *tmp;
54857+
54858+ mutex_lock(&gr_learn_user_mutex);
54859+ spin_lock(&gr_learn_lock);
54860+ tmp = learn_buffer;
54861+ learn_buffer = NULL;
54862+ spin_unlock(&gr_learn_lock);
54863+ if (tmp)
54864+ vfree(tmp);
54865+ if (learn_buffer_user != NULL) {
54866+ vfree(learn_buffer_user);
54867+ learn_buffer_user = NULL;
54868+ }
54869+ learn_buffer_len = 0;
54870+ mutex_unlock(&gr_learn_user_mutex);
54871+
54872+ return;
54873+}
54874+
54875+void
54876+gr_add_learn_entry(const char *fmt, ...)
54877+{
54878+ va_list args;
54879+ unsigned int len;
54880+
54881+ if (!gr_learn_attached)
54882+ return;
54883+
54884+ spin_lock(&gr_learn_lock);
54885+
54886+ /* leave a gap at the end so we know when it's "full" but don't have to
54887+ compute the exact length of the string we're trying to append
54888+ */
54889+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
54890+ spin_unlock(&gr_learn_lock);
54891+ wake_up_interruptible(&learn_wait);
54892+ return;
54893+ }
54894+ if (learn_buffer == NULL) {
54895+ spin_unlock(&gr_learn_lock);
54896+ return;
54897+ }
54898+
54899+ va_start(args, fmt);
54900+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
54901+ va_end(args);
54902+
54903+ learn_buffer_len += len + 1;
54904+
54905+ spin_unlock(&gr_learn_lock);
54906+ wake_up_interruptible(&learn_wait);
54907+
54908+ return;
54909+}
54910+
54911+static int
54912+open_learn(struct inode *inode, struct file *file)
54913+{
54914+ if (file->f_mode & FMODE_READ && gr_learn_attached)
54915+ return -EBUSY;
54916+ if (file->f_mode & FMODE_READ) {
54917+ int retval = 0;
54918+ mutex_lock(&gr_learn_user_mutex);
54919+ if (learn_buffer == NULL)
54920+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
54921+ if (learn_buffer_user == NULL)
54922+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
54923+ if (learn_buffer == NULL) {
54924+ retval = -ENOMEM;
54925+ goto out_error;
54926+ }
54927+ if (learn_buffer_user == NULL) {
54928+ retval = -ENOMEM;
54929+ goto out_error;
54930+ }
54931+ learn_buffer_len = 0;
54932+ learn_buffer_user_len = 0;
54933+ gr_learn_attached = 1;
54934+out_error:
54935+ mutex_unlock(&gr_learn_user_mutex);
54936+ return retval;
54937+ }
54938+ return 0;
54939+}
54940+
54941+static int
54942+close_learn(struct inode *inode, struct file *file)
54943+{
54944+ if (file->f_mode & FMODE_READ) {
54945+ char *tmp = NULL;
54946+ mutex_lock(&gr_learn_user_mutex);
54947+ spin_lock(&gr_learn_lock);
54948+ tmp = learn_buffer;
54949+ learn_buffer = NULL;
54950+ spin_unlock(&gr_learn_lock);
54951+ if (tmp)
54952+ vfree(tmp);
54953+ if (learn_buffer_user != NULL) {
54954+ vfree(learn_buffer_user);
54955+ learn_buffer_user = NULL;
54956+ }
54957+ learn_buffer_len = 0;
54958+ learn_buffer_user_len = 0;
54959+ gr_learn_attached = 0;
54960+ mutex_unlock(&gr_learn_user_mutex);
54961+ }
54962+
54963+ return 0;
54964+}
54965+
54966+const struct file_operations grsec_fops = {
54967+ .read = read_learn,
54968+ .write = write_grsec_handler,
54969+ .open = open_learn,
54970+ .release = close_learn,
54971+ .poll = poll_learn,
54972+};
54973diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
54974new file mode 100644
54975index 0000000..39645c9
54976--- /dev/null
54977+++ b/grsecurity/gracl_res.c
54978@@ -0,0 +1,68 @@
54979+#include <linux/kernel.h>
54980+#include <linux/sched.h>
54981+#include <linux/gracl.h>
54982+#include <linux/grinternal.h>
54983+
54984+static const char *restab_log[] = {
54985+ [RLIMIT_CPU] = "RLIMIT_CPU",
54986+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
54987+ [RLIMIT_DATA] = "RLIMIT_DATA",
54988+ [RLIMIT_STACK] = "RLIMIT_STACK",
54989+ [RLIMIT_CORE] = "RLIMIT_CORE",
54990+ [RLIMIT_RSS] = "RLIMIT_RSS",
54991+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
54992+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
54993+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
54994+ [RLIMIT_AS] = "RLIMIT_AS",
54995+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
54996+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
54997+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
54998+ [RLIMIT_NICE] = "RLIMIT_NICE",
54999+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55000+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55001+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55002+};
55003+
55004+void
55005+gr_log_resource(const struct task_struct *task,
55006+ const int res, const unsigned long wanted, const int gt)
55007+{
55008+ const struct cred *cred;
55009+ unsigned long rlim;
55010+
55011+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55012+ return;
55013+
55014+ // not yet supported resource
55015+ if (unlikely(!restab_log[res]))
55016+ return;
55017+
55018+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55019+ rlim = task_rlimit_max(task, res);
55020+ else
55021+ rlim = task_rlimit(task, res);
55022+
55023+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55024+ return;
55025+
55026+ rcu_read_lock();
55027+ cred = __task_cred(task);
55028+
55029+ if (res == RLIMIT_NPROC &&
55030+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55031+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55032+ goto out_rcu_unlock;
55033+ else if (res == RLIMIT_MEMLOCK &&
55034+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55035+ goto out_rcu_unlock;
55036+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55037+ goto out_rcu_unlock;
55038+ rcu_read_unlock();
55039+
55040+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55041+
55042+ return;
55043+out_rcu_unlock:
55044+ rcu_read_unlock();
55045+ return;
55046+}
55047diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55048new file mode 100644
55049index 0000000..5556be3
55050--- /dev/null
55051+++ b/grsecurity/gracl_segv.c
55052@@ -0,0 +1,299 @@
55053+#include <linux/kernel.h>
55054+#include <linux/mm.h>
55055+#include <asm/uaccess.h>
55056+#include <asm/errno.h>
55057+#include <asm/mman.h>
55058+#include <net/sock.h>
55059+#include <linux/file.h>
55060+#include <linux/fs.h>
55061+#include <linux/net.h>
55062+#include <linux/in.h>
55063+#include <linux/slab.h>
55064+#include <linux/types.h>
55065+#include <linux/sched.h>
55066+#include <linux/timer.h>
55067+#include <linux/gracl.h>
55068+#include <linux/grsecurity.h>
55069+#include <linux/grinternal.h>
55070+
55071+static struct crash_uid *uid_set;
55072+static unsigned short uid_used;
55073+static DEFINE_SPINLOCK(gr_uid_lock);
55074+extern rwlock_t gr_inode_lock;
55075+extern struct acl_subject_label *
55076+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55077+ struct acl_role_label *role);
55078+
55079+#ifdef CONFIG_BTRFS_FS
55080+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55081+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55082+#endif
55083+
55084+static inline dev_t __get_dev(const struct dentry *dentry)
55085+{
55086+#ifdef CONFIG_BTRFS_FS
55087+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55088+ return get_btrfs_dev_from_inode(dentry->d_inode);
55089+ else
55090+#endif
55091+ return dentry->d_inode->i_sb->s_dev;
55092+}
55093+
55094+int
55095+gr_init_uidset(void)
55096+{
55097+ uid_set =
55098+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55099+ uid_used = 0;
55100+
55101+ return uid_set ? 1 : 0;
55102+}
55103+
55104+void
55105+gr_free_uidset(void)
55106+{
55107+ if (uid_set)
55108+ kfree(uid_set);
55109+
55110+ return;
55111+}
55112+
55113+int
55114+gr_find_uid(const uid_t uid)
55115+{
55116+ struct crash_uid *tmp = uid_set;
55117+ uid_t buid;
55118+ int low = 0, high = uid_used - 1, mid;
55119+
55120+ while (high >= low) {
55121+ mid = (low + high) >> 1;
55122+ buid = tmp[mid].uid;
55123+ if (buid == uid)
55124+ return mid;
55125+ if (buid > uid)
55126+ high = mid - 1;
55127+ if (buid < uid)
55128+ low = mid + 1;
55129+ }
55130+
55131+ return -1;
55132+}
55133+
55134+static __inline__ void
55135+gr_insertsort(void)
55136+{
55137+ unsigned short i, j;
55138+ struct crash_uid index;
55139+
55140+ for (i = 1; i < uid_used; i++) {
55141+ index = uid_set[i];
55142+ j = i;
55143+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55144+ uid_set[j] = uid_set[j - 1];
55145+ j--;
55146+ }
55147+ uid_set[j] = index;
55148+ }
55149+
55150+ return;
55151+}
55152+
55153+static __inline__ void
55154+gr_insert_uid(const uid_t uid, const unsigned long expires)
55155+{
55156+ int loc;
55157+
55158+ if (uid_used == GR_UIDTABLE_MAX)
55159+ return;
55160+
55161+ loc = gr_find_uid(uid);
55162+
55163+ if (loc >= 0) {
55164+ uid_set[loc].expires = expires;
55165+ return;
55166+ }
55167+
55168+ uid_set[uid_used].uid = uid;
55169+ uid_set[uid_used].expires = expires;
55170+ uid_used++;
55171+
55172+ gr_insertsort();
55173+
55174+ return;
55175+}
55176+
55177+void
55178+gr_remove_uid(const unsigned short loc)
55179+{
55180+ unsigned short i;
55181+
55182+ for (i = loc + 1; i < uid_used; i++)
55183+ uid_set[i - 1] = uid_set[i];
55184+
55185+ uid_used--;
55186+
55187+ return;
55188+}
55189+
55190+int
55191+gr_check_crash_uid(const uid_t uid)
55192+{
55193+ int loc;
55194+ int ret = 0;
55195+
55196+ if (unlikely(!gr_acl_is_enabled()))
55197+ return 0;
55198+
55199+ spin_lock(&gr_uid_lock);
55200+ loc = gr_find_uid(uid);
55201+
55202+ if (loc < 0)
55203+ goto out_unlock;
55204+
55205+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
55206+ gr_remove_uid(loc);
55207+ else
55208+ ret = 1;
55209+
55210+out_unlock:
55211+ spin_unlock(&gr_uid_lock);
55212+ return ret;
55213+}
55214+
55215+static __inline__ int
55216+proc_is_setxid(const struct cred *cred)
55217+{
55218+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
55219+ cred->uid != cred->fsuid)
55220+ return 1;
55221+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55222+ cred->gid != cred->fsgid)
55223+ return 1;
55224+
55225+ return 0;
55226+}
55227+
55228+extern int gr_fake_force_sig(int sig, struct task_struct *t);
55229+
55230+void
55231+gr_handle_crash(struct task_struct *task, const int sig)
55232+{
55233+ struct acl_subject_label *curr;
55234+ struct task_struct *tsk, *tsk2;
55235+ const struct cred *cred;
55236+ const struct cred *cred2;
55237+
55238+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55239+ return;
55240+
55241+ if (unlikely(!gr_acl_is_enabled()))
55242+ return;
55243+
55244+ curr = task->acl;
55245+
55246+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
55247+ return;
55248+
55249+ if (time_before_eq(curr->expires, get_seconds())) {
55250+ curr->expires = 0;
55251+ curr->crashes = 0;
55252+ }
55253+
55254+ curr->crashes++;
55255+
55256+ if (!curr->expires)
55257+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55258+
55259+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55260+ time_after(curr->expires, get_seconds())) {
55261+ rcu_read_lock();
55262+ cred = __task_cred(task);
55263+ if (cred->uid && proc_is_setxid(cred)) {
55264+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55265+ spin_lock(&gr_uid_lock);
55266+ gr_insert_uid(cred->uid, curr->expires);
55267+ spin_unlock(&gr_uid_lock);
55268+ curr->expires = 0;
55269+ curr->crashes = 0;
55270+ read_lock(&tasklist_lock);
55271+ do_each_thread(tsk2, tsk) {
55272+ cred2 = __task_cred(tsk);
55273+ if (tsk != task && cred2->uid == cred->uid)
55274+ gr_fake_force_sig(SIGKILL, tsk);
55275+ } while_each_thread(tsk2, tsk);
55276+ read_unlock(&tasklist_lock);
55277+ } else {
55278+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55279+ read_lock(&tasklist_lock);
55280+ read_lock(&grsec_exec_file_lock);
55281+ do_each_thread(tsk2, tsk) {
55282+ if (likely(tsk != task)) {
55283+ // if this thread has the same subject as the one that triggered
55284+ // RES_CRASH and it's the same binary, kill it
55285+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55286+ gr_fake_force_sig(SIGKILL, tsk);
55287+ }
55288+ } while_each_thread(tsk2, tsk);
55289+ read_unlock(&grsec_exec_file_lock);
55290+ read_unlock(&tasklist_lock);
55291+ }
55292+ rcu_read_unlock();
55293+ }
55294+
55295+ return;
55296+}
55297+
55298+int
55299+gr_check_crash_exec(const struct file *filp)
55300+{
55301+ struct acl_subject_label *curr;
55302+
55303+ if (unlikely(!gr_acl_is_enabled()))
55304+ return 0;
55305+
55306+ read_lock(&gr_inode_lock);
55307+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55308+ __get_dev(filp->f_path.dentry),
55309+ current->role);
55310+ read_unlock(&gr_inode_lock);
55311+
55312+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55313+ (!curr->crashes && !curr->expires))
55314+ return 0;
55315+
55316+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55317+ time_after(curr->expires, get_seconds()))
55318+ return 1;
55319+ else if (time_before_eq(curr->expires, get_seconds())) {
55320+ curr->crashes = 0;
55321+ curr->expires = 0;
55322+ }
55323+
55324+ return 0;
55325+}
55326+
55327+void
55328+gr_handle_alertkill(struct task_struct *task)
55329+{
55330+ struct acl_subject_label *curracl;
55331+ __u32 curr_ip;
55332+ struct task_struct *p, *p2;
55333+
55334+ if (unlikely(!gr_acl_is_enabled()))
55335+ return;
55336+
55337+ curracl = task->acl;
55338+ curr_ip = task->signal->curr_ip;
55339+
55340+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55341+ read_lock(&tasklist_lock);
55342+ do_each_thread(p2, p) {
55343+ if (p->signal->curr_ip == curr_ip)
55344+ gr_fake_force_sig(SIGKILL, p);
55345+ } while_each_thread(p2, p);
55346+ read_unlock(&tasklist_lock);
55347+ } else if (curracl->mode & GR_KILLPROC)
55348+ gr_fake_force_sig(SIGKILL, task);
55349+
55350+ return;
55351+}
55352diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55353new file mode 100644
55354index 0000000..9d83a69
55355--- /dev/null
55356+++ b/grsecurity/gracl_shm.c
55357@@ -0,0 +1,40 @@
55358+#include <linux/kernel.h>
55359+#include <linux/mm.h>
55360+#include <linux/sched.h>
55361+#include <linux/file.h>
55362+#include <linux/ipc.h>
55363+#include <linux/gracl.h>
55364+#include <linux/grsecurity.h>
55365+#include <linux/grinternal.h>
55366+
55367+int
55368+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55369+ const time_t shm_createtime, const uid_t cuid, const int shmid)
55370+{
55371+ struct task_struct *task;
55372+
55373+ if (!gr_acl_is_enabled())
55374+ return 1;
55375+
55376+ rcu_read_lock();
55377+ read_lock(&tasklist_lock);
55378+
55379+ task = find_task_by_vpid(shm_cprid);
55380+
55381+ if (unlikely(!task))
55382+ task = find_task_by_vpid(shm_lapid);
55383+
55384+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55385+ (task->pid == shm_lapid)) &&
55386+ (task->acl->mode & GR_PROTSHM) &&
55387+ (task->acl != current->acl))) {
55388+ read_unlock(&tasklist_lock);
55389+ rcu_read_unlock();
55390+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55391+ return 0;
55392+ }
55393+ read_unlock(&tasklist_lock);
55394+ rcu_read_unlock();
55395+
55396+ return 1;
55397+}
55398diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55399new file mode 100644
55400index 0000000..bc0be01
55401--- /dev/null
55402+++ b/grsecurity/grsec_chdir.c
55403@@ -0,0 +1,19 @@
55404+#include <linux/kernel.h>
55405+#include <linux/sched.h>
55406+#include <linux/fs.h>
55407+#include <linux/file.h>
55408+#include <linux/grsecurity.h>
55409+#include <linux/grinternal.h>
55410+
55411+void
55412+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55413+{
55414+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55415+ if ((grsec_enable_chdir && grsec_enable_group &&
55416+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55417+ !grsec_enable_group)) {
55418+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55419+ }
55420+#endif
55421+ return;
55422+}
55423diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55424new file mode 100644
55425index 0000000..9807ee2
55426--- /dev/null
55427+++ b/grsecurity/grsec_chroot.c
55428@@ -0,0 +1,368 @@
55429+#include <linux/kernel.h>
55430+#include <linux/module.h>
55431+#include <linux/sched.h>
55432+#include <linux/file.h>
55433+#include <linux/fs.h>
55434+#include <linux/mount.h>
55435+#include <linux/types.h>
55436+#include "../fs/mount.h"
55437+#include <linux/grsecurity.h>
55438+#include <linux/grinternal.h>
55439+
55440+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55441+{
55442+#ifdef CONFIG_GRKERNSEC
55443+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55444+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55445+ task->gr_is_chrooted = 1;
55446+ else
55447+ task->gr_is_chrooted = 0;
55448+
55449+ task->gr_chroot_dentry = path->dentry;
55450+#endif
55451+ return;
55452+}
55453+
55454+void gr_clear_chroot_entries(struct task_struct *task)
55455+{
55456+#ifdef CONFIG_GRKERNSEC
55457+ task->gr_is_chrooted = 0;
55458+ task->gr_chroot_dentry = NULL;
55459+#endif
55460+ return;
55461+}
55462+
55463+int
55464+gr_handle_chroot_unix(const pid_t pid)
55465+{
55466+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55467+ struct task_struct *p;
55468+
55469+ if (unlikely(!grsec_enable_chroot_unix))
55470+ return 1;
55471+
55472+ if (likely(!proc_is_chrooted(current)))
55473+ return 1;
55474+
55475+ rcu_read_lock();
55476+ read_lock(&tasklist_lock);
55477+ p = find_task_by_vpid_unrestricted(pid);
55478+ if (unlikely(p && !have_same_root(current, p))) {
55479+ read_unlock(&tasklist_lock);
55480+ rcu_read_unlock();
55481+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55482+ return 0;
55483+ }
55484+ read_unlock(&tasklist_lock);
55485+ rcu_read_unlock();
55486+#endif
55487+ return 1;
55488+}
55489+
55490+int
55491+gr_handle_chroot_nice(void)
55492+{
55493+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55494+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55495+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55496+ return -EPERM;
55497+ }
55498+#endif
55499+ return 0;
55500+}
55501+
55502+int
55503+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55504+{
55505+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55506+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55507+ && proc_is_chrooted(current)) {
55508+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55509+ return -EACCES;
55510+ }
55511+#endif
55512+ return 0;
55513+}
55514+
55515+int
55516+gr_handle_chroot_rawio(const struct inode *inode)
55517+{
55518+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55519+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55520+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55521+ return 1;
55522+#endif
55523+ return 0;
55524+}
55525+
55526+int
55527+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55528+{
55529+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55530+ struct task_struct *p;
55531+ int ret = 0;
55532+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55533+ return ret;
55534+
55535+ read_lock(&tasklist_lock);
55536+ do_each_pid_task(pid, type, p) {
55537+ if (!have_same_root(current, p)) {
55538+ ret = 1;
55539+ goto out;
55540+ }
55541+ } while_each_pid_task(pid, type, p);
55542+out:
55543+ read_unlock(&tasklist_lock);
55544+ return ret;
55545+#endif
55546+ return 0;
55547+}
55548+
55549+int
55550+gr_pid_is_chrooted(struct task_struct *p)
55551+{
55552+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55553+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55554+ return 0;
55555+
55556+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55557+ !have_same_root(current, p)) {
55558+ return 1;
55559+ }
55560+#endif
55561+ return 0;
55562+}
55563+
55564+EXPORT_SYMBOL(gr_pid_is_chrooted);
55565+
55566+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55567+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55568+{
55569+ struct path path, currentroot;
55570+ int ret = 0;
55571+
55572+ path.dentry = (struct dentry *)u_dentry;
55573+ path.mnt = (struct vfsmount *)u_mnt;
55574+ get_fs_root(current->fs, &currentroot);
55575+ if (path_is_under(&path, &currentroot))
55576+ ret = 1;
55577+ path_put(&currentroot);
55578+
55579+ return ret;
55580+}
55581+#endif
55582+
55583+int
55584+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55585+{
55586+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55587+ if (!grsec_enable_chroot_fchdir)
55588+ return 1;
55589+
55590+ if (!proc_is_chrooted(current))
55591+ return 1;
55592+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55593+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55594+ return 0;
55595+ }
55596+#endif
55597+ return 1;
55598+}
55599+
55600+int
55601+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55602+ const time_t shm_createtime)
55603+{
55604+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55605+ struct task_struct *p;
55606+ time_t starttime;
55607+
55608+ if (unlikely(!grsec_enable_chroot_shmat))
55609+ return 1;
55610+
55611+ if (likely(!proc_is_chrooted(current)))
55612+ return 1;
55613+
55614+ rcu_read_lock();
55615+ read_lock(&tasklist_lock);
55616+
55617+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55618+ starttime = p->start_time.tv_sec;
55619+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55620+ if (have_same_root(current, p)) {
55621+ goto allow;
55622+ } else {
55623+ read_unlock(&tasklist_lock);
55624+ rcu_read_unlock();
55625+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55626+ return 0;
55627+ }
55628+ }
55629+ /* creator exited, pid reuse, fall through to next check */
55630+ }
55631+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
55632+ if (unlikely(!have_same_root(current, p))) {
55633+ read_unlock(&tasklist_lock);
55634+ rcu_read_unlock();
55635+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55636+ return 0;
55637+ }
55638+ }
55639+
55640+allow:
55641+ read_unlock(&tasklist_lock);
55642+ rcu_read_unlock();
55643+#endif
55644+ return 1;
55645+}
55646+
55647+void
55648+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
55649+{
55650+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55651+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
55652+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
55653+#endif
55654+ return;
55655+}
55656+
55657+int
55658+gr_handle_chroot_mknod(const struct dentry *dentry,
55659+ const struct vfsmount *mnt, const int mode)
55660+{
55661+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55662+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
55663+ proc_is_chrooted(current)) {
55664+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
55665+ return -EPERM;
55666+ }
55667+#endif
55668+ return 0;
55669+}
55670+
55671+int
55672+gr_handle_chroot_mount(const struct dentry *dentry,
55673+ const struct vfsmount *mnt, const char *dev_name)
55674+{
55675+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55676+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
55677+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
55678+ return -EPERM;
55679+ }
55680+#endif
55681+ return 0;
55682+}
55683+
55684+int
55685+gr_handle_chroot_pivot(void)
55686+{
55687+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55688+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
55689+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
55690+ return -EPERM;
55691+ }
55692+#endif
55693+ return 0;
55694+}
55695+
55696+int
55697+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
55698+{
55699+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55700+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
55701+ !gr_is_outside_chroot(dentry, mnt)) {
55702+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
55703+ return -EPERM;
55704+ }
55705+#endif
55706+ return 0;
55707+}
55708+
55709+extern const char *captab_log[];
55710+extern int captab_log_entries;
55711+
55712+int
55713+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55714+{
55715+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55716+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55717+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55718+ if (cap_raised(chroot_caps, cap)) {
55719+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
55720+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
55721+ }
55722+ return 0;
55723+ }
55724+ }
55725+#endif
55726+ return 1;
55727+}
55728+
55729+int
55730+gr_chroot_is_capable(const int cap)
55731+{
55732+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55733+ return gr_task_chroot_is_capable(current, current_cred(), cap);
55734+#endif
55735+ return 1;
55736+}
55737+
55738+int
55739+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
55740+{
55741+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55742+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55743+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55744+ if (cap_raised(chroot_caps, cap)) {
55745+ return 0;
55746+ }
55747+ }
55748+#endif
55749+ return 1;
55750+}
55751+
55752+int
55753+gr_chroot_is_capable_nolog(const int cap)
55754+{
55755+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55756+ return gr_task_chroot_is_capable_nolog(current, cap);
55757+#endif
55758+ return 1;
55759+}
55760+
55761+int
55762+gr_handle_chroot_sysctl(const int op)
55763+{
55764+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55765+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
55766+ proc_is_chrooted(current))
55767+ return -EACCES;
55768+#endif
55769+ return 0;
55770+}
55771+
55772+void
55773+gr_handle_chroot_chdir(struct path *path)
55774+{
55775+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55776+ if (grsec_enable_chroot_chdir)
55777+ set_fs_pwd(current->fs, path);
55778+#endif
55779+ return;
55780+}
55781+
55782+int
55783+gr_handle_chroot_chmod(const struct dentry *dentry,
55784+ const struct vfsmount *mnt, const int mode)
55785+{
55786+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55787+ /* allow chmod +s on directories, but not files */
55788+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
55789+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
55790+ proc_is_chrooted(current)) {
55791+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
55792+ return -EPERM;
55793+ }
55794+#endif
55795+ return 0;
55796+}
55797diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
55798new file mode 100644
55799index 0000000..213ad8b
55800--- /dev/null
55801+++ b/grsecurity/grsec_disabled.c
55802@@ -0,0 +1,437 @@
55803+#include <linux/kernel.h>
55804+#include <linux/module.h>
55805+#include <linux/sched.h>
55806+#include <linux/file.h>
55807+#include <linux/fs.h>
55808+#include <linux/kdev_t.h>
55809+#include <linux/net.h>
55810+#include <linux/in.h>
55811+#include <linux/ip.h>
55812+#include <linux/skbuff.h>
55813+#include <linux/sysctl.h>
55814+
55815+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55816+void
55817+pax_set_initial_flags(struct linux_binprm *bprm)
55818+{
55819+ return;
55820+}
55821+#endif
55822+
55823+#ifdef CONFIG_SYSCTL
55824+__u32
55825+gr_handle_sysctl(const struct ctl_table * table, const int op)
55826+{
55827+ return 0;
55828+}
55829+#endif
55830+
55831+#ifdef CONFIG_TASKSTATS
55832+int gr_is_taskstats_denied(int pid)
55833+{
55834+ return 0;
55835+}
55836+#endif
55837+
55838+int
55839+gr_acl_is_enabled(void)
55840+{
55841+ return 0;
55842+}
55843+
55844+void
55845+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
55846+{
55847+ return;
55848+}
55849+
55850+int
55851+gr_handle_rawio(const struct inode *inode)
55852+{
55853+ return 0;
55854+}
55855+
55856+void
55857+gr_acl_handle_psacct(struct task_struct *task, const long code)
55858+{
55859+ return;
55860+}
55861+
55862+int
55863+gr_handle_ptrace(struct task_struct *task, const long request)
55864+{
55865+ return 0;
55866+}
55867+
55868+int
55869+gr_handle_proc_ptrace(struct task_struct *task)
55870+{
55871+ return 0;
55872+}
55873+
55874+void
55875+gr_learn_resource(const struct task_struct *task,
55876+ const int res, const unsigned long wanted, const int gt)
55877+{
55878+ return;
55879+}
55880+
55881+int
55882+gr_set_acls(const int type)
55883+{
55884+ return 0;
55885+}
55886+
55887+int
55888+gr_check_hidden_task(const struct task_struct *tsk)
55889+{
55890+ return 0;
55891+}
55892+
55893+int
55894+gr_check_protected_task(const struct task_struct *task)
55895+{
55896+ return 0;
55897+}
55898+
55899+int
55900+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55901+{
55902+ return 0;
55903+}
55904+
55905+void
55906+gr_copy_label(struct task_struct *tsk)
55907+{
55908+ return;
55909+}
55910+
55911+void
55912+gr_set_pax_flags(struct task_struct *task)
55913+{
55914+ return;
55915+}
55916+
55917+int
55918+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55919+ const int unsafe_share)
55920+{
55921+ return 0;
55922+}
55923+
55924+void
55925+gr_handle_delete(const ino_t ino, const dev_t dev)
55926+{
55927+ return;
55928+}
55929+
55930+void
55931+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
55932+{
55933+ return;
55934+}
55935+
55936+void
55937+gr_handle_crash(struct task_struct *task, const int sig)
55938+{
55939+ return;
55940+}
55941+
55942+int
55943+gr_check_crash_exec(const struct file *filp)
55944+{
55945+ return 0;
55946+}
55947+
55948+int
55949+gr_check_crash_uid(const uid_t uid)
55950+{
55951+ return 0;
55952+}
55953+
55954+void
55955+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55956+ struct dentry *old_dentry,
55957+ struct dentry *new_dentry,
55958+ struct vfsmount *mnt, const __u8 replace)
55959+{
55960+ return;
55961+}
55962+
55963+int
55964+gr_search_socket(const int family, const int type, const int protocol)
55965+{
55966+ return 1;
55967+}
55968+
55969+int
55970+gr_search_connectbind(const int mode, const struct socket *sock,
55971+ const struct sockaddr_in *addr)
55972+{
55973+ return 0;
55974+}
55975+
55976+void
55977+gr_handle_alertkill(struct task_struct *task)
55978+{
55979+ return;
55980+}
55981+
55982+__u32
55983+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
55984+{
55985+ return 1;
55986+}
55987+
55988+__u32
55989+gr_acl_handle_hidden_file(const struct dentry * dentry,
55990+ const struct vfsmount * mnt)
55991+{
55992+ return 1;
55993+}
55994+
55995+__u32
55996+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55997+ int acc_mode)
55998+{
55999+ return 1;
56000+}
56001+
56002+__u32
56003+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56004+{
56005+ return 1;
56006+}
56007+
56008+__u32
56009+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56010+{
56011+ return 1;
56012+}
56013+
56014+int
56015+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56016+ unsigned int *vm_flags)
56017+{
56018+ return 1;
56019+}
56020+
56021+__u32
56022+gr_acl_handle_truncate(const struct dentry * dentry,
56023+ const struct vfsmount * mnt)
56024+{
56025+ return 1;
56026+}
56027+
56028+__u32
56029+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56030+{
56031+ return 1;
56032+}
56033+
56034+__u32
56035+gr_acl_handle_access(const struct dentry * dentry,
56036+ const struct vfsmount * mnt, const int fmode)
56037+{
56038+ return 1;
56039+}
56040+
56041+__u32
56042+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56043+ umode_t *mode)
56044+{
56045+ return 1;
56046+}
56047+
56048+__u32
56049+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56050+{
56051+ return 1;
56052+}
56053+
56054+__u32
56055+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56056+{
56057+ return 1;
56058+}
56059+
56060+void
56061+grsecurity_init(void)
56062+{
56063+ return;
56064+}
56065+
56066+umode_t gr_acl_umask(void)
56067+{
56068+ return 0;
56069+}
56070+
56071+__u32
56072+gr_acl_handle_mknod(const struct dentry * new_dentry,
56073+ const struct dentry * parent_dentry,
56074+ const struct vfsmount * parent_mnt,
56075+ const int mode)
56076+{
56077+ return 1;
56078+}
56079+
56080+__u32
56081+gr_acl_handle_mkdir(const struct dentry * new_dentry,
56082+ const struct dentry * parent_dentry,
56083+ const struct vfsmount * parent_mnt)
56084+{
56085+ return 1;
56086+}
56087+
56088+__u32
56089+gr_acl_handle_symlink(const struct dentry * new_dentry,
56090+ const struct dentry * parent_dentry,
56091+ const struct vfsmount * parent_mnt, const char *from)
56092+{
56093+ return 1;
56094+}
56095+
56096+__u32
56097+gr_acl_handle_link(const struct dentry * new_dentry,
56098+ const struct dentry * parent_dentry,
56099+ const struct vfsmount * parent_mnt,
56100+ const struct dentry * old_dentry,
56101+ const struct vfsmount * old_mnt, const char *to)
56102+{
56103+ return 1;
56104+}
56105+
56106+int
56107+gr_acl_handle_rename(const struct dentry *new_dentry,
56108+ const struct dentry *parent_dentry,
56109+ const struct vfsmount *parent_mnt,
56110+ const struct dentry *old_dentry,
56111+ const struct inode *old_parent_inode,
56112+ const struct vfsmount *old_mnt, const char *newname)
56113+{
56114+ return 0;
56115+}
56116+
56117+int
56118+gr_acl_handle_filldir(const struct file *file, const char *name,
56119+ const int namelen, const ino_t ino)
56120+{
56121+ return 1;
56122+}
56123+
56124+int
56125+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56126+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56127+{
56128+ return 1;
56129+}
56130+
56131+int
56132+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56133+{
56134+ return 0;
56135+}
56136+
56137+int
56138+gr_search_accept(const struct socket *sock)
56139+{
56140+ return 0;
56141+}
56142+
56143+int
56144+gr_search_listen(const struct socket *sock)
56145+{
56146+ return 0;
56147+}
56148+
56149+int
56150+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56151+{
56152+ return 0;
56153+}
56154+
56155+__u32
56156+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56157+{
56158+ return 1;
56159+}
56160+
56161+__u32
56162+gr_acl_handle_creat(const struct dentry * dentry,
56163+ const struct dentry * p_dentry,
56164+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56165+ const int imode)
56166+{
56167+ return 1;
56168+}
56169+
56170+void
56171+gr_acl_handle_exit(void)
56172+{
56173+ return;
56174+}
56175+
56176+int
56177+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56178+{
56179+ return 1;
56180+}
56181+
56182+void
56183+gr_set_role_label(const uid_t uid, const gid_t gid)
56184+{
56185+ return;
56186+}
56187+
56188+int
56189+gr_acl_handle_procpidmem(const struct task_struct *task)
56190+{
56191+ return 0;
56192+}
56193+
56194+int
56195+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56196+{
56197+ return 0;
56198+}
56199+
56200+int
56201+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56202+{
56203+ return 0;
56204+}
56205+
56206+void
56207+gr_set_kernel_label(struct task_struct *task)
56208+{
56209+ return;
56210+}
56211+
56212+int
56213+gr_check_user_change(int real, int effective, int fs)
56214+{
56215+ return 0;
56216+}
56217+
56218+int
56219+gr_check_group_change(int real, int effective, int fs)
56220+{
56221+ return 0;
56222+}
56223+
56224+int gr_acl_enable_at_secure(void)
56225+{
56226+ return 0;
56227+}
56228+
56229+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56230+{
56231+ return dentry->d_inode->i_sb->s_dev;
56232+}
56233+
56234+EXPORT_SYMBOL(gr_learn_resource);
56235+EXPORT_SYMBOL(gr_set_kernel_label);
56236+#ifdef CONFIG_SECURITY
56237+EXPORT_SYMBOL(gr_check_user_change);
56238+EXPORT_SYMBOL(gr_check_group_change);
56239+#endif
56240diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56241new file mode 100644
56242index 0000000..abfa971
56243--- /dev/null
56244+++ b/grsecurity/grsec_exec.c
56245@@ -0,0 +1,174 @@
56246+#include <linux/kernel.h>
56247+#include <linux/sched.h>
56248+#include <linux/file.h>
56249+#include <linux/binfmts.h>
56250+#include <linux/fs.h>
56251+#include <linux/types.h>
56252+#include <linux/grdefs.h>
56253+#include <linux/grsecurity.h>
56254+#include <linux/grinternal.h>
56255+#include <linux/capability.h>
56256+#include <linux/module.h>
56257+
56258+#include <asm/uaccess.h>
56259+
56260+#ifdef CONFIG_GRKERNSEC_EXECLOG
56261+static char gr_exec_arg_buf[132];
56262+static DEFINE_MUTEX(gr_exec_arg_mutex);
56263+#endif
56264+
56265+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56266+
56267+void
56268+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56269+{
56270+#ifdef CONFIG_GRKERNSEC_EXECLOG
56271+ char *grarg = gr_exec_arg_buf;
56272+ unsigned int i, x, execlen = 0;
56273+ char c;
56274+
56275+ if (!((grsec_enable_execlog && grsec_enable_group &&
56276+ in_group_p(grsec_audit_gid))
56277+ || (grsec_enable_execlog && !grsec_enable_group)))
56278+ return;
56279+
56280+ mutex_lock(&gr_exec_arg_mutex);
56281+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
56282+
56283+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
56284+ const char __user *p;
56285+ unsigned int len;
56286+
56287+ p = get_user_arg_ptr(argv, i);
56288+ if (IS_ERR(p))
56289+ goto log;
56290+
56291+ len = strnlen_user(p, 128 - execlen);
56292+ if (len > 128 - execlen)
56293+ len = 128 - execlen;
56294+ else if (len > 0)
56295+ len--;
56296+ if (copy_from_user(grarg + execlen, p, len))
56297+ goto log;
56298+
56299+ /* rewrite unprintable characters */
56300+ for (x = 0; x < len; x++) {
56301+ c = *(grarg + execlen + x);
56302+ if (c < 32 || c > 126)
56303+ *(grarg + execlen + x) = ' ';
56304+ }
56305+
56306+ execlen += len;
56307+ *(grarg + execlen) = ' ';
56308+ *(grarg + execlen + 1) = '\0';
56309+ execlen++;
56310+ }
56311+
56312+ log:
56313+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56314+ bprm->file->f_path.mnt, grarg);
56315+ mutex_unlock(&gr_exec_arg_mutex);
56316+#endif
56317+ return;
56318+}
56319+
56320+#ifdef CONFIG_GRKERNSEC
56321+extern int gr_acl_is_capable(const int cap);
56322+extern int gr_acl_is_capable_nolog(const int cap);
56323+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56324+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56325+extern int gr_chroot_is_capable(const int cap);
56326+extern int gr_chroot_is_capable_nolog(const int cap);
56327+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56328+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56329+#endif
56330+
56331+const char *captab_log[] = {
56332+ "CAP_CHOWN",
56333+ "CAP_DAC_OVERRIDE",
56334+ "CAP_DAC_READ_SEARCH",
56335+ "CAP_FOWNER",
56336+ "CAP_FSETID",
56337+ "CAP_KILL",
56338+ "CAP_SETGID",
56339+ "CAP_SETUID",
56340+ "CAP_SETPCAP",
56341+ "CAP_LINUX_IMMUTABLE",
56342+ "CAP_NET_BIND_SERVICE",
56343+ "CAP_NET_BROADCAST",
56344+ "CAP_NET_ADMIN",
56345+ "CAP_NET_RAW",
56346+ "CAP_IPC_LOCK",
56347+ "CAP_IPC_OWNER",
56348+ "CAP_SYS_MODULE",
56349+ "CAP_SYS_RAWIO",
56350+ "CAP_SYS_CHROOT",
56351+ "CAP_SYS_PTRACE",
56352+ "CAP_SYS_PACCT",
56353+ "CAP_SYS_ADMIN",
56354+ "CAP_SYS_BOOT",
56355+ "CAP_SYS_NICE",
56356+ "CAP_SYS_RESOURCE",
56357+ "CAP_SYS_TIME",
56358+ "CAP_SYS_TTY_CONFIG",
56359+ "CAP_MKNOD",
56360+ "CAP_LEASE",
56361+ "CAP_AUDIT_WRITE",
56362+ "CAP_AUDIT_CONTROL",
56363+ "CAP_SETFCAP",
56364+ "CAP_MAC_OVERRIDE",
56365+ "CAP_MAC_ADMIN",
56366+ "CAP_SYSLOG",
56367+ "CAP_WAKE_ALARM"
56368+};
56369+
56370+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56371+
56372+int gr_is_capable(const int cap)
56373+{
56374+#ifdef CONFIG_GRKERNSEC
56375+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56376+ return 1;
56377+ return 0;
56378+#else
56379+ return 1;
56380+#endif
56381+}
56382+
56383+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56384+{
56385+#ifdef CONFIG_GRKERNSEC
56386+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56387+ return 1;
56388+ return 0;
56389+#else
56390+ return 1;
56391+#endif
56392+}
56393+
56394+int gr_is_capable_nolog(const int cap)
56395+{
56396+#ifdef CONFIG_GRKERNSEC
56397+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56398+ return 1;
56399+ return 0;
56400+#else
56401+ return 1;
56402+#endif
56403+}
56404+
56405+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56406+{
56407+#ifdef CONFIG_GRKERNSEC
56408+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56409+ return 1;
56410+ return 0;
56411+#else
56412+ return 1;
56413+#endif
56414+}
56415+
56416+EXPORT_SYMBOL(gr_is_capable);
56417+EXPORT_SYMBOL(gr_is_capable_nolog);
56418+EXPORT_SYMBOL(gr_task_is_capable);
56419+EXPORT_SYMBOL(gr_task_is_capable_nolog);
56420diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56421new file mode 100644
56422index 0000000..d3ee748
56423--- /dev/null
56424+++ b/grsecurity/grsec_fifo.c
56425@@ -0,0 +1,24 @@
56426+#include <linux/kernel.h>
56427+#include <linux/sched.h>
56428+#include <linux/fs.h>
56429+#include <linux/file.h>
56430+#include <linux/grinternal.h>
56431+
56432+int
56433+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56434+ const struct dentry *dir, const int flag, const int acc_mode)
56435+{
56436+#ifdef CONFIG_GRKERNSEC_FIFO
56437+ const struct cred *cred = current_cred();
56438+
56439+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56440+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56441+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56442+ (cred->fsuid != dentry->d_inode->i_uid)) {
56443+ if (!inode_permission(dentry->d_inode, acc_mode))
56444+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56445+ return -EACCES;
56446+ }
56447+#endif
56448+ return 0;
56449+}
56450diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56451new file mode 100644
56452index 0000000..8ca18bf
56453--- /dev/null
56454+++ b/grsecurity/grsec_fork.c
56455@@ -0,0 +1,23 @@
56456+#include <linux/kernel.h>
56457+#include <linux/sched.h>
56458+#include <linux/grsecurity.h>
56459+#include <linux/grinternal.h>
56460+#include <linux/errno.h>
56461+
56462+void
56463+gr_log_forkfail(const int retval)
56464+{
56465+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56466+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56467+ switch (retval) {
56468+ case -EAGAIN:
56469+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56470+ break;
56471+ case -ENOMEM:
56472+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56473+ break;
56474+ }
56475+ }
56476+#endif
56477+ return;
56478+}
56479diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56480new file mode 100644
56481index 0000000..01ddde4
56482--- /dev/null
56483+++ b/grsecurity/grsec_init.c
56484@@ -0,0 +1,277 @@
56485+#include <linux/kernel.h>
56486+#include <linux/sched.h>
56487+#include <linux/mm.h>
56488+#include <linux/gracl.h>
56489+#include <linux/slab.h>
56490+#include <linux/vmalloc.h>
56491+#include <linux/percpu.h>
56492+#include <linux/module.h>
56493+
56494+int grsec_enable_ptrace_readexec;
56495+int grsec_enable_setxid;
56496+int grsec_enable_brute;
56497+int grsec_enable_link;
56498+int grsec_enable_dmesg;
56499+int grsec_enable_harden_ptrace;
56500+int grsec_enable_fifo;
56501+int grsec_enable_execlog;
56502+int grsec_enable_signal;
56503+int grsec_enable_forkfail;
56504+int grsec_enable_audit_ptrace;
56505+int grsec_enable_time;
56506+int grsec_enable_audit_textrel;
56507+int grsec_enable_group;
56508+int grsec_audit_gid;
56509+int grsec_enable_chdir;
56510+int grsec_enable_mount;
56511+int grsec_enable_rofs;
56512+int grsec_enable_chroot_findtask;
56513+int grsec_enable_chroot_mount;
56514+int grsec_enable_chroot_shmat;
56515+int grsec_enable_chroot_fchdir;
56516+int grsec_enable_chroot_double;
56517+int grsec_enable_chroot_pivot;
56518+int grsec_enable_chroot_chdir;
56519+int grsec_enable_chroot_chmod;
56520+int grsec_enable_chroot_mknod;
56521+int grsec_enable_chroot_nice;
56522+int grsec_enable_chroot_execlog;
56523+int grsec_enable_chroot_caps;
56524+int grsec_enable_chroot_sysctl;
56525+int grsec_enable_chroot_unix;
56526+int grsec_enable_tpe;
56527+int grsec_tpe_gid;
56528+int grsec_enable_blackhole;
56529+#ifdef CONFIG_IPV6_MODULE
56530+EXPORT_SYMBOL(grsec_enable_blackhole);
56531+#endif
56532+int grsec_lastack_retries;
56533+int grsec_enable_tpe_all;
56534+int grsec_enable_tpe_invert;
56535+int grsec_enable_socket_all;
56536+int grsec_socket_all_gid;
56537+int grsec_enable_socket_client;
56538+int grsec_socket_client_gid;
56539+int grsec_enable_socket_server;
56540+int grsec_socket_server_gid;
56541+int grsec_resource_logging;
56542+int grsec_disable_privio;
56543+int grsec_enable_log_rwxmaps;
56544+int grsec_lock;
56545+
56546+DEFINE_SPINLOCK(grsec_alert_lock);
56547+unsigned long grsec_alert_wtime = 0;
56548+unsigned long grsec_alert_fyet = 0;
56549+
56550+DEFINE_SPINLOCK(grsec_audit_lock);
56551+
56552+DEFINE_RWLOCK(grsec_exec_file_lock);
56553+
56554+char *gr_shared_page[4];
56555+
56556+char *gr_alert_log_fmt;
56557+char *gr_audit_log_fmt;
56558+char *gr_alert_log_buf;
56559+char *gr_audit_log_buf;
56560+
56561+extern struct gr_arg *gr_usermode;
56562+extern unsigned char *gr_system_salt;
56563+extern unsigned char *gr_system_sum;
56564+
56565+void __init
56566+grsecurity_init(void)
56567+{
56568+ int j;
56569+ /* create the per-cpu shared pages */
56570+
56571+#ifdef CONFIG_X86
56572+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56573+#endif
56574+
56575+ for (j = 0; j < 4; j++) {
56576+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56577+ if (gr_shared_page[j] == NULL) {
56578+ panic("Unable to allocate grsecurity shared page");
56579+ return;
56580+ }
56581+ }
56582+
56583+ /* allocate log buffers */
56584+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56585+ if (!gr_alert_log_fmt) {
56586+ panic("Unable to allocate grsecurity alert log format buffer");
56587+ return;
56588+ }
56589+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56590+ if (!gr_audit_log_fmt) {
56591+ panic("Unable to allocate grsecurity audit log format buffer");
56592+ return;
56593+ }
56594+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56595+ if (!gr_alert_log_buf) {
56596+ panic("Unable to allocate grsecurity alert log buffer");
56597+ return;
56598+ }
56599+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56600+ if (!gr_audit_log_buf) {
56601+ panic("Unable to allocate grsecurity audit log buffer");
56602+ return;
56603+ }
56604+
56605+ /* allocate memory for authentication structure */
56606+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56607+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56608+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56609+
56610+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56611+ panic("Unable to allocate grsecurity authentication structure");
56612+ return;
56613+ }
56614+
56615+
56616+#ifdef CONFIG_GRKERNSEC_IO
56617+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56618+ grsec_disable_privio = 1;
56619+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56620+ grsec_disable_privio = 1;
56621+#else
56622+ grsec_disable_privio = 0;
56623+#endif
56624+#endif
56625+
56626+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56627+ /* for backward compatibility, tpe_invert always defaults to on if
56628+ enabled in the kernel
56629+ */
56630+ grsec_enable_tpe_invert = 1;
56631+#endif
56632+
56633+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56634+#ifndef CONFIG_GRKERNSEC_SYSCTL
56635+ grsec_lock = 1;
56636+#endif
56637+
56638+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56639+ grsec_enable_audit_textrel = 1;
56640+#endif
56641+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56642+ grsec_enable_log_rwxmaps = 1;
56643+#endif
56644+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56645+ grsec_enable_group = 1;
56646+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
56647+#endif
56648+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56649+ grsec_enable_ptrace_readexec = 1;
56650+#endif
56651+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56652+ grsec_enable_chdir = 1;
56653+#endif
56654+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56655+ grsec_enable_harden_ptrace = 1;
56656+#endif
56657+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56658+ grsec_enable_mount = 1;
56659+#endif
56660+#ifdef CONFIG_GRKERNSEC_LINK
56661+ grsec_enable_link = 1;
56662+#endif
56663+#ifdef CONFIG_GRKERNSEC_BRUTE
56664+ grsec_enable_brute = 1;
56665+#endif
56666+#ifdef CONFIG_GRKERNSEC_DMESG
56667+ grsec_enable_dmesg = 1;
56668+#endif
56669+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56670+ grsec_enable_blackhole = 1;
56671+ grsec_lastack_retries = 4;
56672+#endif
56673+#ifdef CONFIG_GRKERNSEC_FIFO
56674+ grsec_enable_fifo = 1;
56675+#endif
56676+#ifdef CONFIG_GRKERNSEC_EXECLOG
56677+ grsec_enable_execlog = 1;
56678+#endif
56679+#ifdef CONFIG_GRKERNSEC_SETXID
56680+ grsec_enable_setxid = 1;
56681+#endif
56682+#ifdef CONFIG_GRKERNSEC_SIGNAL
56683+ grsec_enable_signal = 1;
56684+#endif
56685+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56686+ grsec_enable_forkfail = 1;
56687+#endif
56688+#ifdef CONFIG_GRKERNSEC_TIME
56689+ grsec_enable_time = 1;
56690+#endif
56691+#ifdef CONFIG_GRKERNSEC_RESLOG
56692+ grsec_resource_logging = 1;
56693+#endif
56694+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56695+ grsec_enable_chroot_findtask = 1;
56696+#endif
56697+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56698+ grsec_enable_chroot_unix = 1;
56699+#endif
56700+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56701+ grsec_enable_chroot_mount = 1;
56702+#endif
56703+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56704+ grsec_enable_chroot_fchdir = 1;
56705+#endif
56706+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56707+ grsec_enable_chroot_shmat = 1;
56708+#endif
56709+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56710+ grsec_enable_audit_ptrace = 1;
56711+#endif
56712+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56713+ grsec_enable_chroot_double = 1;
56714+#endif
56715+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56716+ grsec_enable_chroot_pivot = 1;
56717+#endif
56718+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56719+ grsec_enable_chroot_chdir = 1;
56720+#endif
56721+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56722+ grsec_enable_chroot_chmod = 1;
56723+#endif
56724+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56725+ grsec_enable_chroot_mknod = 1;
56726+#endif
56727+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56728+ grsec_enable_chroot_nice = 1;
56729+#endif
56730+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56731+ grsec_enable_chroot_execlog = 1;
56732+#endif
56733+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56734+ grsec_enable_chroot_caps = 1;
56735+#endif
56736+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56737+ grsec_enable_chroot_sysctl = 1;
56738+#endif
56739+#ifdef CONFIG_GRKERNSEC_TPE
56740+ grsec_enable_tpe = 1;
56741+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
56742+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56743+ grsec_enable_tpe_all = 1;
56744+#endif
56745+#endif
56746+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56747+ grsec_enable_socket_all = 1;
56748+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
56749+#endif
56750+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56751+ grsec_enable_socket_client = 1;
56752+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
56753+#endif
56754+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56755+ grsec_enable_socket_server = 1;
56756+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
56757+#endif
56758+#endif
56759+
56760+ return;
56761+}
56762diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
56763new file mode 100644
56764index 0000000..3efe141
56765--- /dev/null
56766+++ b/grsecurity/grsec_link.c
56767@@ -0,0 +1,43 @@
56768+#include <linux/kernel.h>
56769+#include <linux/sched.h>
56770+#include <linux/fs.h>
56771+#include <linux/file.h>
56772+#include <linux/grinternal.h>
56773+
56774+int
56775+gr_handle_follow_link(const struct inode *parent,
56776+ const struct inode *inode,
56777+ const struct dentry *dentry, const struct vfsmount *mnt)
56778+{
56779+#ifdef CONFIG_GRKERNSEC_LINK
56780+ const struct cred *cred = current_cred();
56781+
56782+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
56783+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
56784+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
56785+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
56786+ return -EACCES;
56787+ }
56788+#endif
56789+ return 0;
56790+}
56791+
56792+int
56793+gr_handle_hardlink(const struct dentry *dentry,
56794+ const struct vfsmount *mnt,
56795+ struct inode *inode, const int mode, const char *to)
56796+{
56797+#ifdef CONFIG_GRKERNSEC_LINK
56798+ const struct cred *cred = current_cred();
56799+
56800+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
56801+ (!S_ISREG(mode) || (mode & S_ISUID) ||
56802+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
56803+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
56804+ !capable(CAP_FOWNER) && cred->uid) {
56805+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
56806+ return -EPERM;
56807+ }
56808+#endif
56809+ return 0;
56810+}
56811diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
56812new file mode 100644
56813index 0000000..a45d2e9
56814--- /dev/null
56815+++ b/grsecurity/grsec_log.c
56816@@ -0,0 +1,322 @@
56817+#include <linux/kernel.h>
56818+#include <linux/sched.h>
56819+#include <linux/file.h>
56820+#include <linux/tty.h>
56821+#include <linux/fs.h>
56822+#include <linux/grinternal.h>
56823+
56824+#ifdef CONFIG_TREE_PREEMPT_RCU
56825+#define DISABLE_PREEMPT() preempt_disable()
56826+#define ENABLE_PREEMPT() preempt_enable()
56827+#else
56828+#define DISABLE_PREEMPT()
56829+#define ENABLE_PREEMPT()
56830+#endif
56831+
56832+#define BEGIN_LOCKS(x) \
56833+ DISABLE_PREEMPT(); \
56834+ rcu_read_lock(); \
56835+ read_lock(&tasklist_lock); \
56836+ read_lock(&grsec_exec_file_lock); \
56837+ if (x != GR_DO_AUDIT) \
56838+ spin_lock(&grsec_alert_lock); \
56839+ else \
56840+ spin_lock(&grsec_audit_lock)
56841+
56842+#define END_LOCKS(x) \
56843+ if (x != GR_DO_AUDIT) \
56844+ spin_unlock(&grsec_alert_lock); \
56845+ else \
56846+ spin_unlock(&grsec_audit_lock); \
56847+ read_unlock(&grsec_exec_file_lock); \
56848+ read_unlock(&tasklist_lock); \
56849+ rcu_read_unlock(); \
56850+ ENABLE_PREEMPT(); \
56851+ if (x == GR_DONT_AUDIT) \
56852+ gr_handle_alertkill(current)
56853+
56854+enum {
56855+ FLOODING,
56856+ NO_FLOODING
56857+};
56858+
56859+extern char *gr_alert_log_fmt;
56860+extern char *gr_audit_log_fmt;
56861+extern char *gr_alert_log_buf;
56862+extern char *gr_audit_log_buf;
56863+
56864+static int gr_log_start(int audit)
56865+{
56866+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
56867+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
56868+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56869+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
56870+ unsigned long curr_secs = get_seconds();
56871+
56872+ if (audit == GR_DO_AUDIT)
56873+ goto set_fmt;
56874+
56875+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
56876+ grsec_alert_wtime = curr_secs;
56877+ grsec_alert_fyet = 0;
56878+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
56879+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
56880+ grsec_alert_fyet++;
56881+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
56882+ grsec_alert_wtime = curr_secs;
56883+ grsec_alert_fyet++;
56884+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
56885+ return FLOODING;
56886+ }
56887+ else return FLOODING;
56888+
56889+set_fmt:
56890+#endif
56891+ memset(buf, 0, PAGE_SIZE);
56892+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
56893+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
56894+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56895+ } else if (current->signal->curr_ip) {
56896+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
56897+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
56898+ } else if (gr_acl_is_enabled()) {
56899+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
56900+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56901+ } else {
56902+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
56903+ strcpy(buf, fmt);
56904+ }
56905+
56906+ return NO_FLOODING;
56907+}
56908+
56909+static void gr_log_middle(int audit, const char *msg, va_list ap)
56910+ __attribute__ ((format (printf, 2, 0)));
56911+
56912+static void gr_log_middle(int audit, const char *msg, va_list ap)
56913+{
56914+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56915+ unsigned int len = strlen(buf);
56916+
56917+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56918+
56919+ return;
56920+}
56921+
56922+static void gr_log_middle_varargs(int audit, const char *msg, ...)
56923+ __attribute__ ((format (printf, 2, 3)));
56924+
56925+static void gr_log_middle_varargs(int audit, const char *msg, ...)
56926+{
56927+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56928+ unsigned int len = strlen(buf);
56929+ va_list ap;
56930+
56931+ va_start(ap, msg);
56932+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56933+ va_end(ap);
56934+
56935+ return;
56936+}
56937+
56938+static void gr_log_end(int audit, int append_default)
56939+{
56940+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56941+
56942+ if (append_default) {
56943+ unsigned int len = strlen(buf);
56944+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
56945+ }
56946+
56947+ printk("%s\n", buf);
56948+
56949+ return;
56950+}
56951+
56952+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
56953+{
56954+ int logtype;
56955+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
56956+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
56957+ void *voidptr = NULL;
56958+ int num1 = 0, num2 = 0;
56959+ unsigned long ulong1 = 0, ulong2 = 0;
56960+ struct dentry *dentry = NULL;
56961+ struct vfsmount *mnt = NULL;
56962+ struct file *file = NULL;
56963+ struct task_struct *task = NULL;
56964+ const struct cred *cred, *pcred;
56965+ va_list ap;
56966+
56967+ BEGIN_LOCKS(audit);
56968+ logtype = gr_log_start(audit);
56969+ if (logtype == FLOODING) {
56970+ END_LOCKS(audit);
56971+ return;
56972+ }
56973+ va_start(ap, argtypes);
56974+ switch (argtypes) {
56975+ case GR_TTYSNIFF:
56976+ task = va_arg(ap, struct task_struct *);
56977+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
56978+ break;
56979+ case GR_SYSCTL_HIDDEN:
56980+ str1 = va_arg(ap, char *);
56981+ gr_log_middle_varargs(audit, msg, result, str1);
56982+ break;
56983+ case GR_RBAC:
56984+ dentry = va_arg(ap, struct dentry *);
56985+ mnt = va_arg(ap, struct vfsmount *);
56986+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
56987+ break;
56988+ case GR_RBAC_STR:
56989+ dentry = va_arg(ap, struct dentry *);
56990+ mnt = va_arg(ap, struct vfsmount *);
56991+ str1 = va_arg(ap, char *);
56992+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
56993+ break;
56994+ case GR_STR_RBAC:
56995+ str1 = va_arg(ap, char *);
56996+ dentry = va_arg(ap, struct dentry *);
56997+ mnt = va_arg(ap, struct vfsmount *);
56998+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
56999+ break;
57000+ case GR_RBAC_MODE2:
57001+ dentry = va_arg(ap, struct dentry *);
57002+ mnt = va_arg(ap, struct vfsmount *);
57003+ str1 = va_arg(ap, char *);
57004+ str2 = va_arg(ap, char *);
57005+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57006+ break;
57007+ case GR_RBAC_MODE3:
57008+ dentry = va_arg(ap, struct dentry *);
57009+ mnt = va_arg(ap, struct vfsmount *);
57010+ str1 = va_arg(ap, char *);
57011+ str2 = va_arg(ap, char *);
57012+ str3 = va_arg(ap, char *);
57013+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57014+ break;
57015+ case GR_FILENAME:
57016+ dentry = va_arg(ap, struct dentry *);
57017+ mnt = va_arg(ap, struct vfsmount *);
57018+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57019+ break;
57020+ case GR_STR_FILENAME:
57021+ str1 = va_arg(ap, char *);
57022+ dentry = va_arg(ap, struct dentry *);
57023+ mnt = va_arg(ap, struct vfsmount *);
57024+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57025+ break;
57026+ case GR_FILENAME_STR:
57027+ dentry = va_arg(ap, struct dentry *);
57028+ mnt = va_arg(ap, struct vfsmount *);
57029+ str1 = va_arg(ap, char *);
57030+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57031+ break;
57032+ case GR_FILENAME_TWO_INT:
57033+ dentry = va_arg(ap, struct dentry *);
57034+ mnt = va_arg(ap, struct vfsmount *);
57035+ num1 = va_arg(ap, int);
57036+ num2 = va_arg(ap, int);
57037+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57038+ break;
57039+ case GR_FILENAME_TWO_INT_STR:
57040+ dentry = va_arg(ap, struct dentry *);
57041+ mnt = va_arg(ap, struct vfsmount *);
57042+ num1 = va_arg(ap, int);
57043+ num2 = va_arg(ap, int);
57044+ str1 = va_arg(ap, char *);
57045+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57046+ break;
57047+ case GR_TEXTREL:
57048+ file = va_arg(ap, struct file *);
57049+ ulong1 = va_arg(ap, unsigned long);
57050+ ulong2 = va_arg(ap, unsigned long);
57051+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57052+ break;
57053+ case GR_PTRACE:
57054+ task = va_arg(ap, struct task_struct *);
57055+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57056+ break;
57057+ case GR_RESOURCE:
57058+ task = va_arg(ap, struct task_struct *);
57059+ cred = __task_cred(task);
57060+ pcred = __task_cred(task->real_parent);
57061+ ulong1 = va_arg(ap, unsigned long);
57062+ str1 = va_arg(ap, char *);
57063+ ulong2 = va_arg(ap, unsigned long);
57064+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57065+ break;
57066+ case GR_CAP:
57067+ task = va_arg(ap, struct task_struct *);
57068+ cred = __task_cred(task);
57069+ pcred = __task_cred(task->real_parent);
57070+ str1 = va_arg(ap, char *);
57071+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57072+ break;
57073+ case GR_SIG:
57074+ str1 = va_arg(ap, char *);
57075+ voidptr = va_arg(ap, void *);
57076+ gr_log_middle_varargs(audit, msg, str1, voidptr);
57077+ break;
57078+ case GR_SIG2:
57079+ task = va_arg(ap, struct task_struct *);
57080+ cred = __task_cred(task);
57081+ pcred = __task_cred(task->real_parent);
57082+ num1 = va_arg(ap, int);
57083+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57084+ break;
57085+ case GR_CRASH1:
57086+ task = va_arg(ap, struct task_struct *);
57087+ cred = __task_cred(task);
57088+ pcred = __task_cred(task->real_parent);
57089+ ulong1 = va_arg(ap, unsigned long);
57090+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57091+ break;
57092+ case GR_CRASH2:
57093+ task = va_arg(ap, struct task_struct *);
57094+ cred = __task_cred(task);
57095+ pcred = __task_cred(task->real_parent);
57096+ ulong1 = va_arg(ap, unsigned long);
57097+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57098+ break;
57099+ case GR_RWXMAP:
57100+ file = va_arg(ap, struct file *);
57101+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57102+ break;
57103+ case GR_PSACCT:
57104+ {
57105+ unsigned int wday, cday;
57106+ __u8 whr, chr;
57107+ __u8 wmin, cmin;
57108+ __u8 wsec, csec;
57109+ char cur_tty[64] = { 0 };
57110+ char parent_tty[64] = { 0 };
57111+
57112+ task = va_arg(ap, struct task_struct *);
57113+ wday = va_arg(ap, unsigned int);
57114+ cday = va_arg(ap, unsigned int);
57115+ whr = va_arg(ap, int);
57116+ chr = va_arg(ap, int);
57117+ wmin = va_arg(ap, int);
57118+ cmin = va_arg(ap, int);
57119+ wsec = va_arg(ap, int);
57120+ csec = va_arg(ap, int);
57121+ ulong1 = va_arg(ap, unsigned long);
57122+ cred = __task_cred(task);
57123+ pcred = __task_cred(task->real_parent);
57124+
57125+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57126+ }
57127+ break;
57128+ default:
57129+ gr_log_middle(audit, msg, ap);
57130+ }
57131+ va_end(ap);
57132+ // these don't need DEFAULTSECARGS printed on the end
57133+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57134+ gr_log_end(audit, 0);
57135+ else
57136+ gr_log_end(audit, 1);
57137+ END_LOCKS(audit);
57138+}
57139diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57140new file mode 100644
57141index 0000000..f536303
57142--- /dev/null
57143+++ b/grsecurity/grsec_mem.c
57144@@ -0,0 +1,40 @@
57145+#include <linux/kernel.h>
57146+#include <linux/sched.h>
57147+#include <linux/mm.h>
57148+#include <linux/mman.h>
57149+#include <linux/grinternal.h>
57150+
57151+void
57152+gr_handle_ioperm(void)
57153+{
57154+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57155+ return;
57156+}
57157+
57158+void
57159+gr_handle_iopl(void)
57160+{
57161+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57162+ return;
57163+}
57164+
57165+void
57166+gr_handle_mem_readwrite(u64 from, u64 to)
57167+{
57168+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57169+ return;
57170+}
57171+
57172+void
57173+gr_handle_vm86(void)
57174+{
57175+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57176+ return;
57177+}
57178+
57179+void
57180+gr_log_badprocpid(const char *entry)
57181+{
57182+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57183+ return;
57184+}
57185diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57186new file mode 100644
57187index 0000000..2131422
57188--- /dev/null
57189+++ b/grsecurity/grsec_mount.c
57190@@ -0,0 +1,62 @@
57191+#include <linux/kernel.h>
57192+#include <linux/sched.h>
57193+#include <linux/mount.h>
57194+#include <linux/grsecurity.h>
57195+#include <linux/grinternal.h>
57196+
57197+void
57198+gr_log_remount(const char *devname, const int retval)
57199+{
57200+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57201+ if (grsec_enable_mount && (retval >= 0))
57202+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57203+#endif
57204+ return;
57205+}
57206+
57207+void
57208+gr_log_unmount(const char *devname, const int retval)
57209+{
57210+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57211+ if (grsec_enable_mount && (retval >= 0))
57212+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57213+#endif
57214+ return;
57215+}
57216+
57217+void
57218+gr_log_mount(const char *from, const char *to, const int retval)
57219+{
57220+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57221+ if (grsec_enable_mount && (retval >= 0))
57222+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57223+#endif
57224+ return;
57225+}
57226+
57227+int
57228+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57229+{
57230+#ifdef CONFIG_GRKERNSEC_ROFS
57231+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57232+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57233+ return -EPERM;
57234+ } else
57235+ return 0;
57236+#endif
57237+ return 0;
57238+}
57239+
57240+int
57241+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57242+{
57243+#ifdef CONFIG_GRKERNSEC_ROFS
57244+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57245+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57246+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57247+ return -EPERM;
57248+ } else
57249+ return 0;
57250+#endif
57251+ return 0;
57252+}
57253diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57254new file mode 100644
57255index 0000000..a3b12a0
57256--- /dev/null
57257+++ b/grsecurity/grsec_pax.c
57258@@ -0,0 +1,36 @@
57259+#include <linux/kernel.h>
57260+#include <linux/sched.h>
57261+#include <linux/mm.h>
57262+#include <linux/file.h>
57263+#include <linux/grinternal.h>
57264+#include <linux/grsecurity.h>
57265+
57266+void
57267+gr_log_textrel(struct vm_area_struct * vma)
57268+{
57269+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57270+ if (grsec_enable_audit_textrel)
57271+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57272+#endif
57273+ return;
57274+}
57275+
57276+void
57277+gr_log_rwxmmap(struct file *file)
57278+{
57279+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57280+ if (grsec_enable_log_rwxmaps)
57281+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57282+#endif
57283+ return;
57284+}
57285+
57286+void
57287+gr_log_rwxmprotect(struct file *file)
57288+{
57289+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57290+ if (grsec_enable_log_rwxmaps)
57291+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57292+#endif
57293+ return;
57294+}
57295diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57296new file mode 100644
57297index 0000000..f7f29aa
57298--- /dev/null
57299+++ b/grsecurity/grsec_ptrace.c
57300@@ -0,0 +1,30 @@
57301+#include <linux/kernel.h>
57302+#include <linux/sched.h>
57303+#include <linux/grinternal.h>
57304+#include <linux/security.h>
57305+
57306+void
57307+gr_audit_ptrace(struct task_struct *task)
57308+{
57309+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57310+ if (grsec_enable_audit_ptrace)
57311+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57312+#endif
57313+ return;
57314+}
57315+
57316+int
57317+gr_ptrace_readexec(struct file *file, int unsafe_flags)
57318+{
57319+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57320+ const struct dentry *dentry = file->f_path.dentry;
57321+ const struct vfsmount *mnt = file->f_path.mnt;
57322+
57323+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57324+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57325+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57326+ return -EACCES;
57327+ }
57328+#endif
57329+ return 0;
57330+}
57331diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57332new file mode 100644
57333index 0000000..7a5b2de
57334--- /dev/null
57335+++ b/grsecurity/grsec_sig.c
57336@@ -0,0 +1,207 @@
57337+#include <linux/kernel.h>
57338+#include <linux/sched.h>
57339+#include <linux/delay.h>
57340+#include <linux/grsecurity.h>
57341+#include <linux/grinternal.h>
57342+#include <linux/hardirq.h>
57343+
57344+char *signames[] = {
57345+ [SIGSEGV] = "Segmentation fault",
57346+ [SIGILL] = "Illegal instruction",
57347+ [SIGABRT] = "Abort",
57348+ [SIGBUS] = "Invalid alignment/Bus error"
57349+};
57350+
57351+void
57352+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57353+{
57354+#ifdef CONFIG_GRKERNSEC_SIGNAL
57355+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57356+ (sig == SIGABRT) || (sig == SIGBUS))) {
57357+ if (t->pid == current->pid) {
57358+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57359+ } else {
57360+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57361+ }
57362+ }
57363+#endif
57364+ return;
57365+}
57366+
57367+int
57368+gr_handle_signal(const struct task_struct *p, const int sig)
57369+{
57370+#ifdef CONFIG_GRKERNSEC
57371+ /* ignore the 0 signal for protected task checks */
57372+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57373+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57374+ return -EPERM;
57375+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57376+ return -EPERM;
57377+ }
57378+#endif
57379+ return 0;
57380+}
57381+
57382+#ifdef CONFIG_GRKERNSEC
57383+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57384+
57385+int gr_fake_force_sig(int sig, struct task_struct *t)
57386+{
57387+ unsigned long int flags;
57388+ int ret, blocked, ignored;
57389+ struct k_sigaction *action;
57390+
57391+ spin_lock_irqsave(&t->sighand->siglock, flags);
57392+ action = &t->sighand->action[sig-1];
57393+ ignored = action->sa.sa_handler == SIG_IGN;
57394+ blocked = sigismember(&t->blocked, sig);
57395+ if (blocked || ignored) {
57396+ action->sa.sa_handler = SIG_DFL;
57397+ if (blocked) {
57398+ sigdelset(&t->blocked, sig);
57399+ recalc_sigpending_and_wake(t);
57400+ }
57401+ }
57402+ if (action->sa.sa_handler == SIG_DFL)
57403+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
57404+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57405+
57406+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
57407+
57408+ return ret;
57409+}
57410+#endif
57411+
57412+#ifdef CONFIG_GRKERNSEC_BRUTE
57413+#define GR_USER_BAN_TIME (15 * 60)
57414+
57415+static int __get_dumpable(unsigned long mm_flags)
57416+{
57417+ int ret;
57418+
57419+ ret = mm_flags & MMF_DUMPABLE_MASK;
57420+ return (ret >= 2) ? 2 : ret;
57421+}
57422+#endif
57423+
57424+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57425+{
57426+#ifdef CONFIG_GRKERNSEC_BRUTE
57427+ uid_t uid = 0;
57428+
57429+ if (!grsec_enable_brute)
57430+ return;
57431+
57432+ rcu_read_lock();
57433+ read_lock(&tasklist_lock);
57434+ read_lock(&grsec_exec_file_lock);
57435+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57436+ p->real_parent->brute = 1;
57437+ else {
57438+ const struct cred *cred = __task_cred(p), *cred2;
57439+ struct task_struct *tsk, *tsk2;
57440+
57441+ if (!__get_dumpable(mm_flags) && cred->uid) {
57442+ struct user_struct *user;
57443+
57444+ uid = cred->uid;
57445+
57446+ /* this is put upon execution past expiration */
57447+ user = find_user(uid);
57448+ if (user == NULL)
57449+ goto unlock;
57450+ user->banned = 1;
57451+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57452+ if (user->ban_expires == ~0UL)
57453+ user->ban_expires--;
57454+
57455+ do_each_thread(tsk2, tsk) {
57456+ cred2 = __task_cred(tsk);
57457+ if (tsk != p && cred2->uid == uid)
57458+ gr_fake_force_sig(SIGKILL, tsk);
57459+ } while_each_thread(tsk2, tsk);
57460+ }
57461+ }
57462+unlock:
57463+ read_unlock(&grsec_exec_file_lock);
57464+ read_unlock(&tasklist_lock);
57465+ rcu_read_unlock();
57466+
57467+ if (uid)
57468+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57469+
57470+#endif
57471+ return;
57472+}
57473+
57474+void gr_handle_brute_check(void)
57475+{
57476+#ifdef CONFIG_GRKERNSEC_BRUTE
57477+ if (current->brute)
57478+ msleep(30 * 1000);
57479+#endif
57480+ return;
57481+}
57482+
57483+void gr_handle_kernel_exploit(void)
57484+{
57485+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57486+ const struct cred *cred;
57487+ struct task_struct *tsk, *tsk2;
57488+ struct user_struct *user;
57489+ uid_t uid;
57490+
57491+ if (in_irq() || in_serving_softirq() || in_nmi())
57492+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57493+
57494+ uid = current_uid();
57495+
57496+ if (uid == 0)
57497+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
57498+ else {
57499+ /* kill all the processes of this user, hold a reference
57500+ to their creds struct, and prevent them from creating
57501+ another process until system reset
57502+ */
57503+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57504+ /* we intentionally leak this ref */
57505+ user = get_uid(current->cred->user);
57506+ if (user) {
57507+ user->banned = 1;
57508+ user->ban_expires = ~0UL;
57509+ }
57510+
57511+ read_lock(&tasklist_lock);
57512+ do_each_thread(tsk2, tsk) {
57513+ cred = __task_cred(tsk);
57514+ if (cred->uid == uid)
57515+ gr_fake_force_sig(SIGKILL, tsk);
57516+ } while_each_thread(tsk2, tsk);
57517+ read_unlock(&tasklist_lock);
57518+ }
57519+#endif
57520+}
57521+
57522+int __gr_process_user_ban(struct user_struct *user)
57523+{
57524+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57525+ if (unlikely(user->banned)) {
57526+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57527+ user->banned = 0;
57528+ user->ban_expires = 0;
57529+ free_uid(user);
57530+ } else
57531+ return -EPERM;
57532+ }
57533+#endif
57534+ return 0;
57535+}
57536+
57537+int gr_process_user_ban(void)
57538+{
57539+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57540+ return __gr_process_user_ban(current->cred->user);
57541+#endif
57542+ return 0;
57543+}
57544diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57545new file mode 100644
57546index 0000000..4030d57
57547--- /dev/null
57548+++ b/grsecurity/grsec_sock.c
57549@@ -0,0 +1,244 @@
57550+#include <linux/kernel.h>
57551+#include <linux/module.h>
57552+#include <linux/sched.h>
57553+#include <linux/file.h>
57554+#include <linux/net.h>
57555+#include <linux/in.h>
57556+#include <linux/ip.h>
57557+#include <net/sock.h>
57558+#include <net/inet_sock.h>
57559+#include <linux/grsecurity.h>
57560+#include <linux/grinternal.h>
57561+#include <linux/gracl.h>
57562+
57563+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57564+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57565+
57566+EXPORT_SYMBOL(gr_search_udp_recvmsg);
57567+EXPORT_SYMBOL(gr_search_udp_sendmsg);
57568+
57569+#ifdef CONFIG_UNIX_MODULE
57570+EXPORT_SYMBOL(gr_acl_handle_unix);
57571+EXPORT_SYMBOL(gr_acl_handle_mknod);
57572+EXPORT_SYMBOL(gr_handle_chroot_unix);
57573+EXPORT_SYMBOL(gr_handle_create);
57574+#endif
57575+
57576+#ifdef CONFIG_GRKERNSEC
57577+#define gr_conn_table_size 32749
57578+struct conn_table_entry {
57579+ struct conn_table_entry *next;
57580+ struct signal_struct *sig;
57581+};
57582+
57583+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57584+DEFINE_SPINLOCK(gr_conn_table_lock);
57585+
57586+extern const char * gr_socktype_to_name(unsigned char type);
57587+extern const char * gr_proto_to_name(unsigned char proto);
57588+extern const char * gr_sockfamily_to_name(unsigned char family);
57589+
57590+static __inline__ int
57591+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57592+{
57593+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57594+}
57595+
57596+static __inline__ int
57597+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57598+ __u16 sport, __u16 dport)
57599+{
57600+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57601+ sig->gr_sport == sport && sig->gr_dport == dport))
57602+ return 1;
57603+ else
57604+ return 0;
57605+}
57606+
57607+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57608+{
57609+ struct conn_table_entry **match;
57610+ unsigned int index;
57611+
57612+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57613+ sig->gr_sport, sig->gr_dport,
57614+ gr_conn_table_size);
57615+
57616+ newent->sig = sig;
57617+
57618+ match = &gr_conn_table[index];
57619+ newent->next = *match;
57620+ *match = newent;
57621+
57622+ return;
57623+}
57624+
57625+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57626+{
57627+ struct conn_table_entry *match, *last = NULL;
57628+ unsigned int index;
57629+
57630+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57631+ sig->gr_sport, sig->gr_dport,
57632+ gr_conn_table_size);
57633+
57634+ match = gr_conn_table[index];
57635+ while (match && !conn_match(match->sig,
57636+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
57637+ sig->gr_dport)) {
57638+ last = match;
57639+ match = match->next;
57640+ }
57641+
57642+ if (match) {
57643+ if (last)
57644+ last->next = match->next;
57645+ else
57646+ gr_conn_table[index] = NULL;
57647+ kfree(match);
57648+ }
57649+
57650+ return;
57651+}
57652+
57653+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
57654+ __u16 sport, __u16 dport)
57655+{
57656+ struct conn_table_entry *match;
57657+ unsigned int index;
57658+
57659+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
57660+
57661+ match = gr_conn_table[index];
57662+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
57663+ match = match->next;
57664+
57665+ if (match)
57666+ return match->sig;
57667+ else
57668+ return NULL;
57669+}
57670+
57671+#endif
57672+
57673+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
57674+{
57675+#ifdef CONFIG_GRKERNSEC
57676+ struct signal_struct *sig = task->signal;
57677+ struct conn_table_entry *newent;
57678+
57679+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
57680+ if (newent == NULL)
57681+ return;
57682+ /* no bh lock needed since we are called with bh disabled */
57683+ spin_lock(&gr_conn_table_lock);
57684+ gr_del_task_from_ip_table_nolock(sig);
57685+ sig->gr_saddr = inet->inet_rcv_saddr;
57686+ sig->gr_daddr = inet->inet_daddr;
57687+ sig->gr_sport = inet->inet_sport;
57688+ sig->gr_dport = inet->inet_dport;
57689+ gr_add_to_task_ip_table_nolock(sig, newent);
57690+ spin_unlock(&gr_conn_table_lock);
57691+#endif
57692+ return;
57693+}
57694+
57695+void gr_del_task_from_ip_table(struct task_struct *task)
57696+{
57697+#ifdef CONFIG_GRKERNSEC
57698+ spin_lock_bh(&gr_conn_table_lock);
57699+ gr_del_task_from_ip_table_nolock(task->signal);
57700+ spin_unlock_bh(&gr_conn_table_lock);
57701+#endif
57702+ return;
57703+}
57704+
57705+void
57706+gr_attach_curr_ip(const struct sock *sk)
57707+{
57708+#ifdef CONFIG_GRKERNSEC
57709+ struct signal_struct *p, *set;
57710+ const struct inet_sock *inet = inet_sk(sk);
57711+
57712+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
57713+ return;
57714+
57715+ set = current->signal;
57716+
57717+ spin_lock_bh(&gr_conn_table_lock);
57718+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
57719+ inet->inet_dport, inet->inet_sport);
57720+ if (unlikely(p != NULL)) {
57721+ set->curr_ip = p->curr_ip;
57722+ set->used_accept = 1;
57723+ gr_del_task_from_ip_table_nolock(p);
57724+ spin_unlock_bh(&gr_conn_table_lock);
57725+ return;
57726+ }
57727+ spin_unlock_bh(&gr_conn_table_lock);
57728+
57729+ set->curr_ip = inet->inet_daddr;
57730+ set->used_accept = 1;
57731+#endif
57732+ return;
57733+}
57734+
57735+int
57736+gr_handle_sock_all(const int family, const int type, const int protocol)
57737+{
57738+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57739+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
57740+ (family != AF_UNIX)) {
57741+ if (family == AF_INET)
57742+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
57743+ else
57744+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
57745+ return -EACCES;
57746+ }
57747+#endif
57748+ return 0;
57749+}
57750+
57751+int
57752+gr_handle_sock_server(const struct sockaddr *sck)
57753+{
57754+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57755+ if (grsec_enable_socket_server &&
57756+ in_group_p(grsec_socket_server_gid) &&
57757+ sck && (sck->sa_family != AF_UNIX) &&
57758+ (sck->sa_family != AF_LOCAL)) {
57759+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57760+ return -EACCES;
57761+ }
57762+#endif
57763+ return 0;
57764+}
57765+
57766+int
57767+gr_handle_sock_server_other(const struct sock *sck)
57768+{
57769+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57770+ if (grsec_enable_socket_server &&
57771+ in_group_p(grsec_socket_server_gid) &&
57772+ sck && (sck->sk_family != AF_UNIX) &&
57773+ (sck->sk_family != AF_LOCAL)) {
57774+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57775+ return -EACCES;
57776+ }
57777+#endif
57778+ return 0;
57779+}
57780+
57781+int
57782+gr_handle_sock_client(const struct sockaddr *sck)
57783+{
57784+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57785+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
57786+ sck && (sck->sa_family != AF_UNIX) &&
57787+ (sck->sa_family != AF_LOCAL)) {
57788+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
57789+ return -EACCES;
57790+ }
57791+#endif
57792+ return 0;
57793+}
57794diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
57795new file mode 100644
57796index 0000000..a1aedd7
57797--- /dev/null
57798+++ b/grsecurity/grsec_sysctl.c
57799@@ -0,0 +1,451 @@
57800+#include <linux/kernel.h>
57801+#include <linux/sched.h>
57802+#include <linux/sysctl.h>
57803+#include <linux/grsecurity.h>
57804+#include <linux/grinternal.h>
57805+
57806+int
57807+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
57808+{
57809+#ifdef CONFIG_GRKERNSEC_SYSCTL
57810+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
57811+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
57812+ return -EACCES;
57813+ }
57814+#endif
57815+ return 0;
57816+}
57817+
57818+#ifdef CONFIG_GRKERNSEC_ROFS
57819+static int __maybe_unused one = 1;
57820+#endif
57821+
57822+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57823+struct ctl_table grsecurity_table[] = {
57824+#ifdef CONFIG_GRKERNSEC_SYSCTL
57825+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
57826+#ifdef CONFIG_GRKERNSEC_IO
57827+ {
57828+ .procname = "disable_priv_io",
57829+ .data = &grsec_disable_privio,
57830+ .maxlen = sizeof(int),
57831+ .mode = 0600,
57832+ .proc_handler = &proc_dointvec,
57833+ },
57834+#endif
57835+#endif
57836+#ifdef CONFIG_GRKERNSEC_LINK
57837+ {
57838+ .procname = "linking_restrictions",
57839+ .data = &grsec_enable_link,
57840+ .maxlen = sizeof(int),
57841+ .mode = 0600,
57842+ .proc_handler = &proc_dointvec,
57843+ },
57844+#endif
57845+#ifdef CONFIG_GRKERNSEC_BRUTE
57846+ {
57847+ .procname = "deter_bruteforce",
57848+ .data = &grsec_enable_brute,
57849+ .maxlen = sizeof(int),
57850+ .mode = 0600,
57851+ .proc_handler = &proc_dointvec,
57852+ },
57853+#endif
57854+#ifdef CONFIG_GRKERNSEC_FIFO
57855+ {
57856+ .procname = "fifo_restrictions",
57857+ .data = &grsec_enable_fifo,
57858+ .maxlen = sizeof(int),
57859+ .mode = 0600,
57860+ .proc_handler = &proc_dointvec,
57861+ },
57862+#endif
57863+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57864+ {
57865+ .procname = "ptrace_readexec",
57866+ .data = &grsec_enable_ptrace_readexec,
57867+ .maxlen = sizeof(int),
57868+ .mode = 0600,
57869+ .proc_handler = &proc_dointvec,
57870+ },
57871+#endif
57872+#ifdef CONFIG_GRKERNSEC_SETXID
57873+ {
57874+ .procname = "consistent_setxid",
57875+ .data = &grsec_enable_setxid,
57876+ .maxlen = sizeof(int),
57877+ .mode = 0600,
57878+ .proc_handler = &proc_dointvec,
57879+ },
57880+#endif
57881+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57882+ {
57883+ .procname = "ip_blackhole",
57884+ .data = &grsec_enable_blackhole,
57885+ .maxlen = sizeof(int),
57886+ .mode = 0600,
57887+ .proc_handler = &proc_dointvec,
57888+ },
57889+ {
57890+ .procname = "lastack_retries",
57891+ .data = &grsec_lastack_retries,
57892+ .maxlen = sizeof(int),
57893+ .mode = 0600,
57894+ .proc_handler = &proc_dointvec,
57895+ },
57896+#endif
57897+#ifdef CONFIG_GRKERNSEC_EXECLOG
57898+ {
57899+ .procname = "exec_logging",
57900+ .data = &grsec_enable_execlog,
57901+ .maxlen = sizeof(int),
57902+ .mode = 0600,
57903+ .proc_handler = &proc_dointvec,
57904+ },
57905+#endif
57906+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57907+ {
57908+ .procname = "rwxmap_logging",
57909+ .data = &grsec_enable_log_rwxmaps,
57910+ .maxlen = sizeof(int),
57911+ .mode = 0600,
57912+ .proc_handler = &proc_dointvec,
57913+ },
57914+#endif
57915+#ifdef CONFIG_GRKERNSEC_SIGNAL
57916+ {
57917+ .procname = "signal_logging",
57918+ .data = &grsec_enable_signal,
57919+ .maxlen = sizeof(int),
57920+ .mode = 0600,
57921+ .proc_handler = &proc_dointvec,
57922+ },
57923+#endif
57924+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57925+ {
57926+ .procname = "forkfail_logging",
57927+ .data = &grsec_enable_forkfail,
57928+ .maxlen = sizeof(int),
57929+ .mode = 0600,
57930+ .proc_handler = &proc_dointvec,
57931+ },
57932+#endif
57933+#ifdef CONFIG_GRKERNSEC_TIME
57934+ {
57935+ .procname = "timechange_logging",
57936+ .data = &grsec_enable_time,
57937+ .maxlen = sizeof(int),
57938+ .mode = 0600,
57939+ .proc_handler = &proc_dointvec,
57940+ },
57941+#endif
57942+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57943+ {
57944+ .procname = "chroot_deny_shmat",
57945+ .data = &grsec_enable_chroot_shmat,
57946+ .maxlen = sizeof(int),
57947+ .mode = 0600,
57948+ .proc_handler = &proc_dointvec,
57949+ },
57950+#endif
57951+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57952+ {
57953+ .procname = "chroot_deny_unix",
57954+ .data = &grsec_enable_chroot_unix,
57955+ .maxlen = sizeof(int),
57956+ .mode = 0600,
57957+ .proc_handler = &proc_dointvec,
57958+ },
57959+#endif
57960+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57961+ {
57962+ .procname = "chroot_deny_mount",
57963+ .data = &grsec_enable_chroot_mount,
57964+ .maxlen = sizeof(int),
57965+ .mode = 0600,
57966+ .proc_handler = &proc_dointvec,
57967+ },
57968+#endif
57969+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57970+ {
57971+ .procname = "chroot_deny_fchdir",
57972+ .data = &grsec_enable_chroot_fchdir,
57973+ .maxlen = sizeof(int),
57974+ .mode = 0600,
57975+ .proc_handler = &proc_dointvec,
57976+ },
57977+#endif
57978+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57979+ {
57980+ .procname = "chroot_deny_chroot",
57981+ .data = &grsec_enable_chroot_double,
57982+ .maxlen = sizeof(int),
57983+ .mode = 0600,
57984+ .proc_handler = &proc_dointvec,
57985+ },
57986+#endif
57987+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57988+ {
57989+ .procname = "chroot_deny_pivot",
57990+ .data = &grsec_enable_chroot_pivot,
57991+ .maxlen = sizeof(int),
57992+ .mode = 0600,
57993+ .proc_handler = &proc_dointvec,
57994+ },
57995+#endif
57996+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57997+ {
57998+ .procname = "chroot_enforce_chdir",
57999+ .data = &grsec_enable_chroot_chdir,
58000+ .maxlen = sizeof(int),
58001+ .mode = 0600,
58002+ .proc_handler = &proc_dointvec,
58003+ },
58004+#endif
58005+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58006+ {
58007+ .procname = "chroot_deny_chmod",
58008+ .data = &grsec_enable_chroot_chmod,
58009+ .maxlen = sizeof(int),
58010+ .mode = 0600,
58011+ .proc_handler = &proc_dointvec,
58012+ },
58013+#endif
58014+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58015+ {
58016+ .procname = "chroot_deny_mknod",
58017+ .data = &grsec_enable_chroot_mknod,
58018+ .maxlen = sizeof(int),
58019+ .mode = 0600,
58020+ .proc_handler = &proc_dointvec,
58021+ },
58022+#endif
58023+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58024+ {
58025+ .procname = "chroot_restrict_nice",
58026+ .data = &grsec_enable_chroot_nice,
58027+ .maxlen = sizeof(int),
58028+ .mode = 0600,
58029+ .proc_handler = &proc_dointvec,
58030+ },
58031+#endif
58032+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58033+ {
58034+ .procname = "chroot_execlog",
58035+ .data = &grsec_enable_chroot_execlog,
58036+ .maxlen = sizeof(int),
58037+ .mode = 0600,
58038+ .proc_handler = &proc_dointvec,
58039+ },
58040+#endif
58041+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58042+ {
58043+ .procname = "chroot_caps",
58044+ .data = &grsec_enable_chroot_caps,
58045+ .maxlen = sizeof(int),
58046+ .mode = 0600,
58047+ .proc_handler = &proc_dointvec,
58048+ },
58049+#endif
58050+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58051+ {
58052+ .procname = "chroot_deny_sysctl",
58053+ .data = &grsec_enable_chroot_sysctl,
58054+ .maxlen = sizeof(int),
58055+ .mode = 0600,
58056+ .proc_handler = &proc_dointvec,
58057+ },
58058+#endif
58059+#ifdef CONFIG_GRKERNSEC_TPE
58060+ {
58061+ .procname = "tpe",
58062+ .data = &grsec_enable_tpe,
58063+ .maxlen = sizeof(int),
58064+ .mode = 0600,
58065+ .proc_handler = &proc_dointvec,
58066+ },
58067+ {
58068+ .procname = "tpe_gid",
58069+ .data = &grsec_tpe_gid,
58070+ .maxlen = sizeof(int),
58071+ .mode = 0600,
58072+ .proc_handler = &proc_dointvec,
58073+ },
58074+#endif
58075+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58076+ {
58077+ .procname = "tpe_invert",
58078+ .data = &grsec_enable_tpe_invert,
58079+ .maxlen = sizeof(int),
58080+ .mode = 0600,
58081+ .proc_handler = &proc_dointvec,
58082+ },
58083+#endif
58084+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58085+ {
58086+ .procname = "tpe_restrict_all",
58087+ .data = &grsec_enable_tpe_all,
58088+ .maxlen = sizeof(int),
58089+ .mode = 0600,
58090+ .proc_handler = &proc_dointvec,
58091+ },
58092+#endif
58093+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58094+ {
58095+ .procname = "socket_all",
58096+ .data = &grsec_enable_socket_all,
58097+ .maxlen = sizeof(int),
58098+ .mode = 0600,
58099+ .proc_handler = &proc_dointvec,
58100+ },
58101+ {
58102+ .procname = "socket_all_gid",
58103+ .data = &grsec_socket_all_gid,
58104+ .maxlen = sizeof(int),
58105+ .mode = 0600,
58106+ .proc_handler = &proc_dointvec,
58107+ },
58108+#endif
58109+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58110+ {
58111+ .procname = "socket_client",
58112+ .data = &grsec_enable_socket_client,
58113+ .maxlen = sizeof(int),
58114+ .mode = 0600,
58115+ .proc_handler = &proc_dointvec,
58116+ },
58117+ {
58118+ .procname = "socket_client_gid",
58119+ .data = &grsec_socket_client_gid,
58120+ .maxlen = sizeof(int),
58121+ .mode = 0600,
58122+ .proc_handler = &proc_dointvec,
58123+ },
58124+#endif
58125+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58126+ {
58127+ .procname = "socket_server",
58128+ .data = &grsec_enable_socket_server,
58129+ .maxlen = sizeof(int),
58130+ .mode = 0600,
58131+ .proc_handler = &proc_dointvec,
58132+ },
58133+ {
58134+ .procname = "socket_server_gid",
58135+ .data = &grsec_socket_server_gid,
58136+ .maxlen = sizeof(int),
58137+ .mode = 0600,
58138+ .proc_handler = &proc_dointvec,
58139+ },
58140+#endif
58141+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58142+ {
58143+ .procname = "audit_group",
58144+ .data = &grsec_enable_group,
58145+ .maxlen = sizeof(int),
58146+ .mode = 0600,
58147+ .proc_handler = &proc_dointvec,
58148+ },
58149+ {
58150+ .procname = "audit_gid",
58151+ .data = &grsec_audit_gid,
58152+ .maxlen = sizeof(int),
58153+ .mode = 0600,
58154+ .proc_handler = &proc_dointvec,
58155+ },
58156+#endif
58157+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58158+ {
58159+ .procname = "audit_chdir",
58160+ .data = &grsec_enable_chdir,
58161+ .maxlen = sizeof(int),
58162+ .mode = 0600,
58163+ .proc_handler = &proc_dointvec,
58164+ },
58165+#endif
58166+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58167+ {
58168+ .procname = "audit_mount",
58169+ .data = &grsec_enable_mount,
58170+ .maxlen = sizeof(int),
58171+ .mode = 0600,
58172+ .proc_handler = &proc_dointvec,
58173+ },
58174+#endif
58175+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58176+ {
58177+ .procname = "audit_textrel",
58178+ .data = &grsec_enable_audit_textrel,
58179+ .maxlen = sizeof(int),
58180+ .mode = 0600,
58181+ .proc_handler = &proc_dointvec,
58182+ },
58183+#endif
58184+#ifdef CONFIG_GRKERNSEC_DMESG
58185+ {
58186+ .procname = "dmesg",
58187+ .data = &grsec_enable_dmesg,
58188+ .maxlen = sizeof(int),
58189+ .mode = 0600,
58190+ .proc_handler = &proc_dointvec,
58191+ },
58192+#endif
58193+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58194+ {
58195+ .procname = "chroot_findtask",
58196+ .data = &grsec_enable_chroot_findtask,
58197+ .maxlen = sizeof(int),
58198+ .mode = 0600,
58199+ .proc_handler = &proc_dointvec,
58200+ },
58201+#endif
58202+#ifdef CONFIG_GRKERNSEC_RESLOG
58203+ {
58204+ .procname = "resource_logging",
58205+ .data = &grsec_resource_logging,
58206+ .maxlen = sizeof(int),
58207+ .mode = 0600,
58208+ .proc_handler = &proc_dointvec,
58209+ },
58210+#endif
58211+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58212+ {
58213+ .procname = "audit_ptrace",
58214+ .data = &grsec_enable_audit_ptrace,
58215+ .maxlen = sizeof(int),
58216+ .mode = 0600,
58217+ .proc_handler = &proc_dointvec,
58218+ },
58219+#endif
58220+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58221+ {
58222+ .procname = "harden_ptrace",
58223+ .data = &grsec_enable_harden_ptrace,
58224+ .maxlen = sizeof(int),
58225+ .mode = 0600,
58226+ .proc_handler = &proc_dointvec,
58227+ },
58228+#endif
58229+ {
58230+ .procname = "grsec_lock",
58231+ .data = &grsec_lock,
58232+ .maxlen = sizeof(int),
58233+ .mode = 0600,
58234+ .proc_handler = &proc_dointvec,
58235+ },
58236+#endif
58237+#ifdef CONFIG_GRKERNSEC_ROFS
58238+ {
58239+ .procname = "romount_protect",
58240+ .data = &grsec_enable_rofs,
58241+ .maxlen = sizeof(int),
58242+ .mode = 0600,
58243+ .proc_handler = &proc_dointvec_minmax,
58244+ .extra1 = &one,
58245+ .extra2 = &one,
58246+ },
58247+#endif
58248+ { }
58249+};
58250+#endif
58251diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58252new file mode 100644
58253index 0000000..0dc13c3
58254--- /dev/null
58255+++ b/grsecurity/grsec_time.c
58256@@ -0,0 +1,16 @@
58257+#include <linux/kernel.h>
58258+#include <linux/sched.h>
58259+#include <linux/grinternal.h>
58260+#include <linux/module.h>
58261+
58262+void
58263+gr_log_timechange(void)
58264+{
58265+#ifdef CONFIG_GRKERNSEC_TIME
58266+ if (grsec_enable_time)
58267+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58268+#endif
58269+ return;
58270+}
58271+
58272+EXPORT_SYMBOL(gr_log_timechange);
58273diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58274new file mode 100644
58275index 0000000..07e0dc0
58276--- /dev/null
58277+++ b/grsecurity/grsec_tpe.c
58278@@ -0,0 +1,73 @@
58279+#include <linux/kernel.h>
58280+#include <linux/sched.h>
58281+#include <linux/file.h>
58282+#include <linux/fs.h>
58283+#include <linux/grinternal.h>
58284+
58285+extern int gr_acl_tpe_check(void);
58286+
58287+int
58288+gr_tpe_allow(const struct file *file)
58289+{
58290+#ifdef CONFIG_GRKERNSEC
58291+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58292+ const struct cred *cred = current_cred();
58293+ char *msg = NULL;
58294+ char *msg2 = NULL;
58295+
58296+ // never restrict root
58297+ if (!cred->uid)
58298+ return 1;
58299+
58300+ if (grsec_enable_tpe) {
58301+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58302+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58303+ msg = "not being in trusted group";
58304+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58305+ msg = "being in untrusted group";
58306+#else
58307+ if (in_group_p(grsec_tpe_gid))
58308+ msg = "being in untrusted group";
58309+#endif
58310+ }
58311+ if (!msg && gr_acl_tpe_check())
58312+ msg = "being in untrusted role";
58313+
58314+ // not in any affected group/role
58315+ if (!msg)
58316+ goto next_check;
58317+
58318+ if (inode->i_uid)
58319+ msg2 = "file in non-root-owned directory";
58320+ else if (inode->i_mode & S_IWOTH)
58321+ msg2 = "file in world-writable directory";
58322+ else if (inode->i_mode & S_IWGRP)
58323+ msg2 = "file in group-writable directory";
58324+
58325+ if (msg && msg2) {
58326+ char fullmsg[70] = {0};
58327+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58328+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58329+ return 0;
58330+ }
58331+ msg = NULL;
58332+next_check:
58333+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58334+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58335+ return 1;
58336+
58337+ if (inode->i_uid && (inode->i_uid != cred->uid))
58338+ msg = "directory not owned by user";
58339+ else if (inode->i_mode & S_IWOTH)
58340+ msg = "file in world-writable directory";
58341+ else if (inode->i_mode & S_IWGRP)
58342+ msg = "file in group-writable directory";
58343+
58344+ if (msg) {
58345+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58346+ return 0;
58347+ }
58348+#endif
58349+#endif
58350+ return 1;
58351+}
58352diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58353new file mode 100644
58354index 0000000..9f7b1ac
58355--- /dev/null
58356+++ b/grsecurity/grsum.c
58357@@ -0,0 +1,61 @@
58358+#include <linux/err.h>
58359+#include <linux/kernel.h>
58360+#include <linux/sched.h>
58361+#include <linux/mm.h>
58362+#include <linux/scatterlist.h>
58363+#include <linux/crypto.h>
58364+#include <linux/gracl.h>
58365+
58366+
58367+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58368+#error "crypto and sha256 must be built into the kernel"
58369+#endif
58370+
58371+int
58372+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58373+{
58374+ char *p;
58375+ struct crypto_hash *tfm;
58376+ struct hash_desc desc;
58377+ struct scatterlist sg;
58378+ unsigned char temp_sum[GR_SHA_LEN];
58379+ volatile int retval = 0;
58380+ volatile int dummy = 0;
58381+ unsigned int i;
58382+
58383+ sg_init_table(&sg, 1);
58384+
58385+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58386+ if (IS_ERR(tfm)) {
58387+ /* should never happen, since sha256 should be built in */
58388+ return 1;
58389+ }
58390+
58391+ desc.tfm = tfm;
58392+ desc.flags = 0;
58393+
58394+ crypto_hash_init(&desc);
58395+
58396+ p = salt;
58397+ sg_set_buf(&sg, p, GR_SALT_LEN);
58398+ crypto_hash_update(&desc, &sg, sg.length);
58399+
58400+ p = entry->pw;
58401+ sg_set_buf(&sg, p, strlen(p));
58402+
58403+ crypto_hash_update(&desc, &sg, sg.length);
58404+
58405+ crypto_hash_final(&desc, temp_sum);
58406+
58407+ memset(entry->pw, 0, GR_PW_LEN);
58408+
58409+ for (i = 0; i < GR_SHA_LEN; i++)
58410+ if (sum[i] != temp_sum[i])
58411+ retval = 1;
58412+ else
58413+ dummy = 1; // waste a cycle
58414+
58415+ crypto_free_hash(tfm);
58416+
58417+ return retval;
58418+}
58419diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58420index 6cd5b64..f620d2d 100644
58421--- a/include/acpi/acpi_bus.h
58422+++ b/include/acpi/acpi_bus.h
58423@@ -107,7 +107,7 @@ struct acpi_device_ops {
58424 acpi_op_bind bind;
58425 acpi_op_unbind unbind;
58426 acpi_op_notify notify;
58427-};
58428+} __no_const;
58429
58430 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58431
58432diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58433index b7babf0..71e4e74 100644
58434--- a/include/asm-generic/atomic-long.h
58435+++ b/include/asm-generic/atomic-long.h
58436@@ -22,6 +22,12 @@
58437
58438 typedef atomic64_t atomic_long_t;
58439
58440+#ifdef CONFIG_PAX_REFCOUNT
58441+typedef atomic64_unchecked_t atomic_long_unchecked_t;
58442+#else
58443+typedef atomic64_t atomic_long_unchecked_t;
58444+#endif
58445+
58446 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58447
58448 static inline long atomic_long_read(atomic_long_t *l)
58449@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58450 return (long)atomic64_read(v);
58451 }
58452
58453+#ifdef CONFIG_PAX_REFCOUNT
58454+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58455+{
58456+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58457+
58458+ return (long)atomic64_read_unchecked(v);
58459+}
58460+#endif
58461+
58462 static inline void atomic_long_set(atomic_long_t *l, long i)
58463 {
58464 atomic64_t *v = (atomic64_t *)l;
58465@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58466 atomic64_set(v, i);
58467 }
58468
58469+#ifdef CONFIG_PAX_REFCOUNT
58470+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58471+{
58472+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58473+
58474+ atomic64_set_unchecked(v, i);
58475+}
58476+#endif
58477+
58478 static inline void atomic_long_inc(atomic_long_t *l)
58479 {
58480 atomic64_t *v = (atomic64_t *)l;
58481@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58482 atomic64_inc(v);
58483 }
58484
58485+#ifdef CONFIG_PAX_REFCOUNT
58486+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58487+{
58488+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58489+
58490+ atomic64_inc_unchecked(v);
58491+}
58492+#endif
58493+
58494 static inline void atomic_long_dec(atomic_long_t *l)
58495 {
58496 atomic64_t *v = (atomic64_t *)l;
58497@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58498 atomic64_dec(v);
58499 }
58500
58501+#ifdef CONFIG_PAX_REFCOUNT
58502+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58503+{
58504+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58505+
58506+ atomic64_dec_unchecked(v);
58507+}
58508+#endif
58509+
58510 static inline void atomic_long_add(long i, atomic_long_t *l)
58511 {
58512 atomic64_t *v = (atomic64_t *)l;
58513@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58514 atomic64_add(i, v);
58515 }
58516
58517+#ifdef CONFIG_PAX_REFCOUNT
58518+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58519+{
58520+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58521+
58522+ atomic64_add_unchecked(i, v);
58523+}
58524+#endif
58525+
58526 static inline void atomic_long_sub(long i, atomic_long_t *l)
58527 {
58528 atomic64_t *v = (atomic64_t *)l;
58529@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58530 atomic64_sub(i, v);
58531 }
58532
58533+#ifdef CONFIG_PAX_REFCOUNT
58534+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58535+{
58536+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58537+
58538+ atomic64_sub_unchecked(i, v);
58539+}
58540+#endif
58541+
58542 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58543 {
58544 atomic64_t *v = (atomic64_t *)l;
58545@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58546 return (long)atomic64_inc_return(v);
58547 }
58548
58549+#ifdef CONFIG_PAX_REFCOUNT
58550+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58551+{
58552+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58553+
58554+ return (long)atomic64_inc_return_unchecked(v);
58555+}
58556+#endif
58557+
58558 static inline long atomic_long_dec_return(atomic_long_t *l)
58559 {
58560 atomic64_t *v = (atomic64_t *)l;
58561@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58562
58563 typedef atomic_t atomic_long_t;
58564
58565+#ifdef CONFIG_PAX_REFCOUNT
58566+typedef atomic_unchecked_t atomic_long_unchecked_t;
58567+#else
58568+typedef atomic_t atomic_long_unchecked_t;
58569+#endif
58570+
58571 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58572 static inline long atomic_long_read(atomic_long_t *l)
58573 {
58574@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58575 return (long)atomic_read(v);
58576 }
58577
58578+#ifdef CONFIG_PAX_REFCOUNT
58579+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58580+{
58581+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58582+
58583+ return (long)atomic_read_unchecked(v);
58584+}
58585+#endif
58586+
58587 static inline void atomic_long_set(atomic_long_t *l, long i)
58588 {
58589 atomic_t *v = (atomic_t *)l;
58590@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58591 atomic_set(v, i);
58592 }
58593
58594+#ifdef CONFIG_PAX_REFCOUNT
58595+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58596+{
58597+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58598+
58599+ atomic_set_unchecked(v, i);
58600+}
58601+#endif
58602+
58603 static inline void atomic_long_inc(atomic_long_t *l)
58604 {
58605 atomic_t *v = (atomic_t *)l;
58606@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58607 atomic_inc(v);
58608 }
58609
58610+#ifdef CONFIG_PAX_REFCOUNT
58611+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58612+{
58613+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58614+
58615+ atomic_inc_unchecked(v);
58616+}
58617+#endif
58618+
58619 static inline void atomic_long_dec(atomic_long_t *l)
58620 {
58621 atomic_t *v = (atomic_t *)l;
58622@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58623 atomic_dec(v);
58624 }
58625
58626+#ifdef CONFIG_PAX_REFCOUNT
58627+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58628+{
58629+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58630+
58631+ atomic_dec_unchecked(v);
58632+}
58633+#endif
58634+
58635 static inline void atomic_long_add(long i, atomic_long_t *l)
58636 {
58637 atomic_t *v = (atomic_t *)l;
58638@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58639 atomic_add(i, v);
58640 }
58641
58642+#ifdef CONFIG_PAX_REFCOUNT
58643+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58644+{
58645+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58646+
58647+ atomic_add_unchecked(i, v);
58648+}
58649+#endif
58650+
58651 static inline void atomic_long_sub(long i, atomic_long_t *l)
58652 {
58653 atomic_t *v = (atomic_t *)l;
58654@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58655 atomic_sub(i, v);
58656 }
58657
58658+#ifdef CONFIG_PAX_REFCOUNT
58659+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58660+{
58661+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58662+
58663+ atomic_sub_unchecked(i, v);
58664+}
58665+#endif
58666+
58667 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58668 {
58669 atomic_t *v = (atomic_t *)l;
58670@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58671 return (long)atomic_inc_return(v);
58672 }
58673
58674+#ifdef CONFIG_PAX_REFCOUNT
58675+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58676+{
58677+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58678+
58679+ return (long)atomic_inc_return_unchecked(v);
58680+}
58681+#endif
58682+
58683 static inline long atomic_long_dec_return(atomic_long_t *l)
58684 {
58685 atomic_t *v = (atomic_t *)l;
58686@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58687
58688 #endif /* BITS_PER_LONG == 64 */
58689
58690+#ifdef CONFIG_PAX_REFCOUNT
58691+static inline void pax_refcount_needs_these_functions(void)
58692+{
58693+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
58694+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
58695+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
58696+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
58697+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
58698+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
58699+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
58700+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
58701+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
58702+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
58703+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
58704+
58705+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
58706+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
58707+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
58708+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
58709+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
58710+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
58711+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
58712+}
58713+#else
58714+#define atomic_read_unchecked(v) atomic_read(v)
58715+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
58716+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
58717+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
58718+#define atomic_inc_unchecked(v) atomic_inc(v)
58719+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
58720+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
58721+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
58722+#define atomic_dec_unchecked(v) atomic_dec(v)
58723+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
58724+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
58725+
58726+#define atomic_long_read_unchecked(v) atomic_long_read(v)
58727+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
58728+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
58729+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
58730+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
58731+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
58732+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
58733+#endif
58734+
58735 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
58736diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
58737index b18ce4f..2ee2843 100644
58738--- a/include/asm-generic/atomic64.h
58739+++ b/include/asm-generic/atomic64.h
58740@@ -16,6 +16,8 @@ typedef struct {
58741 long long counter;
58742 } atomic64_t;
58743
58744+typedef atomic64_t atomic64_unchecked_t;
58745+
58746 #define ATOMIC64_INIT(i) { (i) }
58747
58748 extern long long atomic64_read(const atomic64_t *v);
58749@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
58750 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
58751 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
58752
58753+#define atomic64_read_unchecked(v) atomic64_read(v)
58754+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
58755+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
58756+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
58757+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
58758+#define atomic64_inc_unchecked(v) atomic64_inc(v)
58759+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
58760+#define atomic64_dec_unchecked(v) atomic64_dec(v)
58761+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
58762+
58763 #endif /* _ASM_GENERIC_ATOMIC64_H */
58764diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
58765index 1bfcfe5..e04c5c9 100644
58766--- a/include/asm-generic/cache.h
58767+++ b/include/asm-generic/cache.h
58768@@ -6,7 +6,7 @@
58769 * cache lines need to provide their own cache.h.
58770 */
58771
58772-#define L1_CACHE_SHIFT 5
58773-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
58774+#define L1_CACHE_SHIFT 5UL
58775+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
58776
58777 #endif /* __ASM_GENERIC_CACHE_H */
58778diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
58779index 0d68a1e..b74a761 100644
58780--- a/include/asm-generic/emergency-restart.h
58781+++ b/include/asm-generic/emergency-restart.h
58782@@ -1,7 +1,7 @@
58783 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
58784 #define _ASM_GENERIC_EMERGENCY_RESTART_H
58785
58786-static inline void machine_emergency_restart(void)
58787+static inline __noreturn void machine_emergency_restart(void)
58788 {
58789 machine_restart(NULL);
58790 }
58791diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
58792index 1ca3efc..e3dc852 100644
58793--- a/include/asm-generic/int-l64.h
58794+++ b/include/asm-generic/int-l64.h
58795@@ -46,6 +46,8 @@ typedef unsigned int u32;
58796 typedef signed long s64;
58797 typedef unsigned long u64;
58798
58799+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
58800+
58801 #define S8_C(x) x
58802 #define U8_C(x) x ## U
58803 #define S16_C(x) x
58804diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
58805index f394147..b6152b9 100644
58806--- a/include/asm-generic/int-ll64.h
58807+++ b/include/asm-generic/int-ll64.h
58808@@ -51,6 +51,8 @@ typedef unsigned int u32;
58809 typedef signed long long s64;
58810 typedef unsigned long long u64;
58811
58812+typedef unsigned long long intoverflow_t;
58813+
58814 #define S8_C(x) x
58815 #define U8_C(x) x ## U
58816 #define S16_C(x) x
58817diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
58818index 0232ccb..13d9165 100644
58819--- a/include/asm-generic/kmap_types.h
58820+++ b/include/asm-generic/kmap_types.h
58821@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
58822 KMAP_D(17) KM_NMI,
58823 KMAP_D(18) KM_NMI_PTE,
58824 KMAP_D(19) KM_KDB,
58825+KMAP_D(20) KM_CLEARPAGE,
58826 /*
58827 * Remember to update debug_kmap_atomic() when adding new kmap types!
58828 */
58829-KMAP_D(20) KM_TYPE_NR
58830+KMAP_D(21) KM_TYPE_NR
58831 };
58832
58833 #undef KMAP_D
58834diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
58835index 9ceb03b..2efbcbd 100644
58836--- a/include/asm-generic/local.h
58837+++ b/include/asm-generic/local.h
58838@@ -39,6 +39,7 @@ typedef struct
58839 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
58840 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
58841 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
58842+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
58843
58844 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
58845 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
58846diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
58847index 725612b..9cc513a 100644
58848--- a/include/asm-generic/pgtable-nopmd.h
58849+++ b/include/asm-generic/pgtable-nopmd.h
58850@@ -1,14 +1,19 @@
58851 #ifndef _PGTABLE_NOPMD_H
58852 #define _PGTABLE_NOPMD_H
58853
58854-#ifndef __ASSEMBLY__
58855-
58856 #include <asm-generic/pgtable-nopud.h>
58857
58858-struct mm_struct;
58859-
58860 #define __PAGETABLE_PMD_FOLDED
58861
58862+#define PMD_SHIFT PUD_SHIFT
58863+#define PTRS_PER_PMD 1
58864+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
58865+#define PMD_MASK (~(PMD_SIZE-1))
58866+
58867+#ifndef __ASSEMBLY__
58868+
58869+struct mm_struct;
58870+
58871 /*
58872 * Having the pmd type consist of a pud gets the size right, and allows
58873 * us to conceptually access the pud entry that this pmd is folded into
58874@@ -16,11 +21,6 @@ struct mm_struct;
58875 */
58876 typedef struct { pud_t pud; } pmd_t;
58877
58878-#define PMD_SHIFT PUD_SHIFT
58879-#define PTRS_PER_PMD 1
58880-#define PMD_SIZE (1UL << PMD_SHIFT)
58881-#define PMD_MASK (~(PMD_SIZE-1))
58882-
58883 /*
58884 * The "pud_xxx()" functions here are trivial for a folded two-level
58885 * setup: the pmd is never bad, and a pmd always exists (as it's folded
58886diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
58887index 810431d..ccc3638 100644
58888--- a/include/asm-generic/pgtable-nopud.h
58889+++ b/include/asm-generic/pgtable-nopud.h
58890@@ -1,10 +1,15 @@
58891 #ifndef _PGTABLE_NOPUD_H
58892 #define _PGTABLE_NOPUD_H
58893
58894-#ifndef __ASSEMBLY__
58895-
58896 #define __PAGETABLE_PUD_FOLDED
58897
58898+#define PUD_SHIFT PGDIR_SHIFT
58899+#define PTRS_PER_PUD 1
58900+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
58901+#define PUD_MASK (~(PUD_SIZE-1))
58902+
58903+#ifndef __ASSEMBLY__
58904+
58905 /*
58906 * Having the pud type consist of a pgd gets the size right, and allows
58907 * us to conceptually access the pgd entry that this pud is folded into
58908@@ -12,11 +17,6 @@
58909 */
58910 typedef struct { pgd_t pgd; } pud_t;
58911
58912-#define PUD_SHIFT PGDIR_SHIFT
58913-#define PTRS_PER_PUD 1
58914-#define PUD_SIZE (1UL << PUD_SHIFT)
58915-#define PUD_MASK (~(PUD_SIZE-1))
58916-
58917 /*
58918 * The "pgd_xxx()" functions here are trivial for a folded two-level
58919 * setup: the pud is never bad, and a pud always exists (as it's folded
58920diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
58921index 76bff2b..c7a14e2 100644
58922--- a/include/asm-generic/pgtable.h
58923+++ b/include/asm-generic/pgtable.h
58924@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
58925 #endif /* __HAVE_ARCH_PMD_WRITE */
58926 #endif
58927
58928+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
58929+static inline unsigned long pax_open_kernel(void) { return 0; }
58930+#endif
58931+
58932+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
58933+static inline unsigned long pax_close_kernel(void) { return 0; }
58934+#endif
58935+
58936 #endif /* !__ASSEMBLY__ */
58937
58938 #endif /* _ASM_GENERIC_PGTABLE_H */
58939diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
58940index b5e2e4c..6a5373e 100644
58941--- a/include/asm-generic/vmlinux.lds.h
58942+++ b/include/asm-generic/vmlinux.lds.h
58943@@ -217,6 +217,7 @@
58944 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
58945 VMLINUX_SYMBOL(__start_rodata) = .; \
58946 *(.rodata) *(.rodata.*) \
58947+ *(.data..read_only) \
58948 *(__vermagic) /* Kernel version magic */ \
58949 . = ALIGN(8); \
58950 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
58951@@ -722,17 +723,18 @@
58952 * section in the linker script will go there too. @phdr should have
58953 * a leading colon.
58954 *
58955- * Note that this macros defines __per_cpu_load as an absolute symbol.
58956+ * Note that this macros defines per_cpu_load as an absolute symbol.
58957 * If there is no need to put the percpu section at a predetermined
58958 * address, use PERCPU_SECTION.
58959 */
58960 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
58961- VMLINUX_SYMBOL(__per_cpu_load) = .; \
58962- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
58963+ per_cpu_load = .; \
58964+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
58965 - LOAD_OFFSET) { \
58966+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
58967 PERCPU_INPUT(cacheline) \
58968 } phdr \
58969- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
58970+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
58971
58972 /**
58973 * PERCPU_SECTION - define output section for percpu area, simple version
58974diff --git a/include/drm/drmP.h b/include/drm/drmP.h
58975index 92f0981..d44a37c 100644
58976--- a/include/drm/drmP.h
58977+++ b/include/drm/drmP.h
58978@@ -72,6 +72,7 @@
58979 #include <linux/workqueue.h>
58980 #include <linux/poll.h>
58981 #include <asm/pgalloc.h>
58982+#include <asm/local.h>
58983 #include "drm.h"
58984
58985 #include <linux/idr.h>
58986@@ -1038,7 +1039,7 @@ struct drm_device {
58987
58988 /** \name Usage Counters */
58989 /*@{ */
58990- int open_count; /**< Outstanding files open */
58991+ local_t open_count; /**< Outstanding files open */
58992 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
58993 atomic_t vma_count; /**< Outstanding vma areas open */
58994 int buf_use; /**< Buffers in use -- cannot alloc */
58995@@ -1049,7 +1050,7 @@ struct drm_device {
58996 /*@{ */
58997 unsigned long counters;
58998 enum drm_stat_type types[15];
58999- atomic_t counts[15];
59000+ atomic_unchecked_t counts[15];
59001 /*@} */
59002
59003 struct list_head filelist;
59004diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59005index 37515d1..34fa8b0 100644
59006--- a/include/drm/drm_crtc_helper.h
59007+++ b/include/drm/drm_crtc_helper.h
59008@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59009
59010 /* disable crtc when not in use - more explicit than dpms off */
59011 void (*disable)(struct drm_crtc *crtc);
59012-};
59013+} __no_const;
59014
59015 struct drm_encoder_helper_funcs {
59016 void (*dpms)(struct drm_encoder *encoder, int mode);
59017@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59018 struct drm_connector *connector);
59019 /* disable encoder when not in use - more explicit than dpms off */
59020 void (*disable)(struct drm_encoder *encoder);
59021-};
59022+} __no_const;
59023
59024 struct drm_connector_helper_funcs {
59025 int (*get_modes)(struct drm_connector *connector);
59026diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59027index 26c1f78..6722682 100644
59028--- a/include/drm/ttm/ttm_memory.h
59029+++ b/include/drm/ttm/ttm_memory.h
59030@@ -47,7 +47,7 @@
59031
59032 struct ttm_mem_shrink {
59033 int (*do_shrink) (struct ttm_mem_shrink *);
59034-};
59035+} __no_const;
59036
59037 /**
59038 * struct ttm_mem_global - Global memory accounting structure.
59039diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59040index e86dfca..40cc55f 100644
59041--- a/include/linux/a.out.h
59042+++ b/include/linux/a.out.h
59043@@ -39,6 +39,14 @@ enum machine_type {
59044 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59045 };
59046
59047+/* Constants for the N_FLAGS field */
59048+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59049+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59050+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59051+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59052+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59053+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59054+
59055 #if !defined (N_MAGIC)
59056 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59057 #endif
59058diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59059index f4ff882..84b53a6 100644
59060--- a/include/linux/atmdev.h
59061+++ b/include/linux/atmdev.h
59062@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59063 #endif
59064
59065 struct k_atm_aal_stats {
59066-#define __HANDLE_ITEM(i) atomic_t i
59067+#define __HANDLE_ITEM(i) atomic_unchecked_t i
59068 __AAL_STAT_ITEMS
59069 #undef __HANDLE_ITEM
59070 };
59071diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59072index 0092102..8a801b4 100644
59073--- a/include/linux/binfmts.h
59074+++ b/include/linux/binfmts.h
59075@@ -89,6 +89,7 @@ struct linux_binfmt {
59076 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59077 int (*load_shlib)(struct file *);
59078 int (*core_dump)(struct coredump_params *cprm);
59079+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59080 unsigned long min_coredump; /* minimal dump size */
59081 };
59082
59083diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59084index 606cf33..b72c577 100644
59085--- a/include/linux/blkdev.h
59086+++ b/include/linux/blkdev.h
59087@@ -1379,7 +1379,7 @@ struct block_device_operations {
59088 /* this callback is with swap_lock and sometimes page table lock held */
59089 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59090 struct module *owner;
59091-};
59092+} __do_const;
59093
59094 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59095 unsigned long);
59096diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59097index 4d1a074..88f929a 100644
59098--- a/include/linux/blktrace_api.h
59099+++ b/include/linux/blktrace_api.h
59100@@ -162,7 +162,7 @@ struct blk_trace {
59101 struct dentry *dir;
59102 struct dentry *dropped_file;
59103 struct dentry *msg_file;
59104- atomic_t dropped;
59105+ atomic_unchecked_t dropped;
59106 };
59107
59108 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59109diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59110index 83195fb..0b0f77d 100644
59111--- a/include/linux/byteorder/little_endian.h
59112+++ b/include/linux/byteorder/little_endian.h
59113@@ -42,51 +42,51 @@
59114
59115 static inline __le64 __cpu_to_le64p(const __u64 *p)
59116 {
59117- return (__force __le64)*p;
59118+ return (__force const __le64)*p;
59119 }
59120 static inline __u64 __le64_to_cpup(const __le64 *p)
59121 {
59122- return (__force __u64)*p;
59123+ return (__force const __u64)*p;
59124 }
59125 static inline __le32 __cpu_to_le32p(const __u32 *p)
59126 {
59127- return (__force __le32)*p;
59128+ return (__force const __le32)*p;
59129 }
59130 static inline __u32 __le32_to_cpup(const __le32 *p)
59131 {
59132- return (__force __u32)*p;
59133+ return (__force const __u32)*p;
59134 }
59135 static inline __le16 __cpu_to_le16p(const __u16 *p)
59136 {
59137- return (__force __le16)*p;
59138+ return (__force const __le16)*p;
59139 }
59140 static inline __u16 __le16_to_cpup(const __le16 *p)
59141 {
59142- return (__force __u16)*p;
59143+ return (__force const __u16)*p;
59144 }
59145 static inline __be64 __cpu_to_be64p(const __u64 *p)
59146 {
59147- return (__force __be64)__swab64p(p);
59148+ return (__force const __be64)__swab64p(p);
59149 }
59150 static inline __u64 __be64_to_cpup(const __be64 *p)
59151 {
59152- return __swab64p((__u64 *)p);
59153+ return __swab64p((const __u64 *)p);
59154 }
59155 static inline __be32 __cpu_to_be32p(const __u32 *p)
59156 {
59157- return (__force __be32)__swab32p(p);
59158+ return (__force const __be32)__swab32p(p);
59159 }
59160 static inline __u32 __be32_to_cpup(const __be32 *p)
59161 {
59162- return __swab32p((__u32 *)p);
59163+ return __swab32p((const __u32 *)p);
59164 }
59165 static inline __be16 __cpu_to_be16p(const __u16 *p)
59166 {
59167- return (__force __be16)__swab16p(p);
59168+ return (__force const __be16)__swab16p(p);
59169 }
59170 static inline __u16 __be16_to_cpup(const __be16 *p)
59171 {
59172- return __swab16p((__u16 *)p);
59173+ return __swab16p((const __u16 *)p);
59174 }
59175 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59176 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59177diff --git a/include/linux/cache.h b/include/linux/cache.h
59178index 4c57065..4307975 100644
59179--- a/include/linux/cache.h
59180+++ b/include/linux/cache.h
59181@@ -16,6 +16,10 @@
59182 #define __read_mostly
59183 #endif
59184
59185+#ifndef __read_only
59186+#define __read_only __read_mostly
59187+#endif
59188+
59189 #ifndef ____cacheline_aligned
59190 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59191 #endif
59192diff --git a/include/linux/capability.h b/include/linux/capability.h
59193index 12d52de..b5f7fa7 100644
59194--- a/include/linux/capability.h
59195+++ b/include/linux/capability.h
59196@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59197 extern bool capable(int cap);
59198 extern bool ns_capable(struct user_namespace *ns, int cap);
59199 extern bool nsown_capable(int cap);
59200+extern bool capable_nolog(int cap);
59201+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59202
59203 /* audit system wants to get cap info from files as well */
59204 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59205diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59206index 04ffb2e..6799180 100644
59207--- a/include/linux/cleancache.h
59208+++ b/include/linux/cleancache.h
59209@@ -31,7 +31,7 @@ struct cleancache_ops {
59210 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
59211 void (*flush_inode)(int, struct cleancache_filekey);
59212 void (*flush_fs)(int);
59213-};
59214+} __no_const;
59215
59216 extern struct cleancache_ops
59217 cleancache_register_ops(struct cleancache_ops *ops);
59218diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59219index 2f40791..89a56fd 100644
59220--- a/include/linux/compiler-gcc4.h
59221+++ b/include/linux/compiler-gcc4.h
59222@@ -32,6 +32,12 @@
59223 #define __linktime_error(message) __attribute__((__error__(message)))
59224
59225 #if __GNUC_MINOR__ >= 5
59226+
59227+#ifdef CONSTIFY_PLUGIN
59228+#define __no_const __attribute__((no_const))
59229+#define __do_const __attribute__((do_const))
59230+#endif
59231+
59232 /*
59233 * Mark a position in code as unreachable. This can be used to
59234 * suppress control flow warnings after asm blocks that transfer
59235@@ -47,6 +53,11 @@
59236 #define __noclone __attribute__((__noclone__))
59237
59238 #endif
59239+
59240+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59241+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59242+#define __bos0(ptr) __bos((ptr), 0)
59243+#define __bos1(ptr) __bos((ptr), 1)
59244 #endif
59245
59246 #if __GNUC_MINOR__ > 0
59247diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59248index 4a24354..9570c1b 100644
59249--- a/include/linux/compiler.h
59250+++ b/include/linux/compiler.h
59251@@ -5,31 +5,62 @@
59252
59253 #ifdef __CHECKER__
59254 # define __user __attribute__((noderef, address_space(1)))
59255+# define __force_user __force __user
59256 # define __kernel __attribute__((address_space(0)))
59257+# define __force_kernel __force __kernel
59258 # define __safe __attribute__((safe))
59259 # define __force __attribute__((force))
59260 # define __nocast __attribute__((nocast))
59261 # define __iomem __attribute__((noderef, address_space(2)))
59262+# define __force_iomem __force __iomem
59263 # define __acquires(x) __attribute__((context(x,0,1)))
59264 # define __releases(x) __attribute__((context(x,1,0)))
59265 # define __acquire(x) __context__(x,1)
59266 # define __release(x) __context__(x,-1)
59267 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59268 # define __percpu __attribute__((noderef, address_space(3)))
59269+# define __force_percpu __force __percpu
59270 #ifdef CONFIG_SPARSE_RCU_POINTER
59271 # define __rcu __attribute__((noderef, address_space(4)))
59272+# define __force_rcu __force __rcu
59273 #else
59274 # define __rcu
59275+# define __force_rcu
59276 #endif
59277 extern void __chk_user_ptr(const volatile void __user *);
59278 extern void __chk_io_ptr(const volatile void __iomem *);
59279+#elif defined(CHECKER_PLUGIN)
59280+//# define __user
59281+//# define __force_user
59282+//# define __kernel
59283+//# define __force_kernel
59284+# define __safe
59285+# define __force
59286+# define __nocast
59287+# define __iomem
59288+# define __force_iomem
59289+# define __chk_user_ptr(x) (void)0
59290+# define __chk_io_ptr(x) (void)0
59291+# define __builtin_warning(x, y...) (1)
59292+# define __acquires(x)
59293+# define __releases(x)
59294+# define __acquire(x) (void)0
59295+# define __release(x) (void)0
59296+# define __cond_lock(x,c) (c)
59297+# define __percpu
59298+# define __force_percpu
59299+# define __rcu
59300+# define __force_rcu
59301 #else
59302 # define __user
59303+# define __force_user
59304 # define __kernel
59305+# define __force_kernel
59306 # define __safe
59307 # define __force
59308 # define __nocast
59309 # define __iomem
59310+# define __force_iomem
59311 # define __chk_user_ptr(x) (void)0
59312 # define __chk_io_ptr(x) (void)0
59313 # define __builtin_warning(x, y...) (1)
59314@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59315 # define __release(x) (void)0
59316 # define __cond_lock(x,c) (c)
59317 # define __percpu
59318+# define __force_percpu
59319 # define __rcu
59320+# define __force_rcu
59321 #endif
59322
59323 #ifdef __KERNEL__
59324@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59325 # define __attribute_const__ /* unimplemented */
59326 #endif
59327
59328+#ifndef __no_const
59329+# define __no_const
59330+#endif
59331+
59332+#ifndef __do_const
59333+# define __do_const
59334+#endif
59335+
59336 /*
59337 * Tell gcc if a function is cold. The compiler will assume any path
59338 * directly leading to the call is unlikely.
59339@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59340 #define __cold
59341 #endif
59342
59343+#ifndef __alloc_size
59344+#define __alloc_size(...)
59345+#endif
59346+
59347+#ifndef __bos
59348+#define __bos(ptr, arg)
59349+#endif
59350+
59351+#ifndef __bos0
59352+#define __bos0(ptr)
59353+#endif
59354+
59355+#ifndef __bos1
59356+#define __bos1(ptr)
59357+#endif
59358+
59359 /* Simple shorthand for a section definition */
59360 #ifndef __section
59361 # define __section(S) __attribute__ ((__section__(#S)))
59362@@ -308,6 +365,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59363 * use is to mediate communication between process-level code and irq/NMI
59364 * handlers, all running on the same CPU.
59365 */
59366-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59367+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59368+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59369
59370 #endif /* __LINUX_COMPILER_H */
59371diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
59372index e9eaec5..bfeb9bb 100644
59373--- a/include/linux/cpuset.h
59374+++ b/include/linux/cpuset.h
59375@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
59376 * nodemask.
59377 */
59378 smp_mb();
59379- --ACCESS_ONCE(current->mems_allowed_change_disable);
59380+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
59381 }
59382
59383 static inline void set_mems_allowed(nodemask_t nodemask)
59384diff --git a/include/linux/cred.h b/include/linux/cred.h
59385index adadf71..6af5560 100644
59386--- a/include/linux/cred.h
59387+++ b/include/linux/cred.h
59388@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59389 static inline void validate_process_creds(void)
59390 {
59391 }
59392+static inline void validate_task_creds(struct task_struct *task)
59393+{
59394+}
59395 #endif
59396
59397 /**
59398diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59399index 8a94217..15d49e3 100644
59400--- a/include/linux/crypto.h
59401+++ b/include/linux/crypto.h
59402@@ -365,7 +365,7 @@ struct cipher_tfm {
59403 const u8 *key, unsigned int keylen);
59404 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59405 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59406-};
59407+} __no_const;
59408
59409 struct hash_tfm {
59410 int (*init)(struct hash_desc *desc);
59411@@ -386,13 +386,13 @@ struct compress_tfm {
59412 int (*cot_decompress)(struct crypto_tfm *tfm,
59413 const u8 *src, unsigned int slen,
59414 u8 *dst, unsigned int *dlen);
59415-};
59416+} __no_const;
59417
59418 struct rng_tfm {
59419 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59420 unsigned int dlen);
59421 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59422-};
59423+} __no_const;
59424
59425 #define crt_ablkcipher crt_u.ablkcipher
59426 #define crt_aead crt_u.aead
59427diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59428index 7925bf0..d5143d2 100644
59429--- a/include/linux/decompress/mm.h
59430+++ b/include/linux/decompress/mm.h
59431@@ -77,7 +77,7 @@ static void free(void *where)
59432 * warnings when not needed (indeed large_malloc / large_free are not
59433 * needed by inflate */
59434
59435-#define malloc(a) kmalloc(a, GFP_KERNEL)
59436+#define malloc(a) kmalloc((a), GFP_KERNEL)
59437 #define free(a) kfree(a)
59438
59439 #define large_malloc(a) vmalloc(a)
59440diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59441index e13117c..e9fc938 100644
59442--- a/include/linux/dma-mapping.h
59443+++ b/include/linux/dma-mapping.h
59444@@ -46,7 +46,7 @@ struct dma_map_ops {
59445 u64 (*get_required_mask)(struct device *dev);
59446 #endif
59447 int is_phys;
59448-};
59449+} __do_const;
59450
59451 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59452
59453diff --git a/include/linux/efi.h b/include/linux/efi.h
59454index 37c3007..92ab679 100644
59455--- a/include/linux/efi.h
59456+++ b/include/linux/efi.h
59457@@ -580,7 +580,7 @@ struct efivar_operations {
59458 efi_get_variable_t *get_variable;
59459 efi_get_next_variable_t *get_next_variable;
59460 efi_set_variable_t *set_variable;
59461-};
59462+} __no_const;
59463
59464 struct efivars {
59465 /*
59466diff --git a/include/linux/elf.h b/include/linux/elf.h
59467index 999b4f5..57753b4 100644
59468--- a/include/linux/elf.h
59469+++ b/include/linux/elf.h
59470@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59471 #define PT_GNU_EH_FRAME 0x6474e550
59472
59473 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59474+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59475+
59476+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59477+
59478+/* Constants for the e_flags field */
59479+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59480+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59481+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59482+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59483+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59484+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59485
59486 /*
59487 * Extended Numbering
59488@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59489 #define DT_DEBUG 21
59490 #define DT_TEXTREL 22
59491 #define DT_JMPREL 23
59492+#define DT_FLAGS 30
59493+ #define DF_TEXTREL 0x00000004
59494 #define DT_ENCODING 32
59495 #define OLD_DT_LOOS 0x60000000
59496 #define DT_LOOS 0x6000000d
59497@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59498 #define PF_W 0x2
59499 #define PF_X 0x1
59500
59501+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59502+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59503+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59504+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59505+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59506+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59507+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59508+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59509+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59510+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59511+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59512+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59513+
59514 typedef struct elf32_phdr{
59515 Elf32_Word p_type;
59516 Elf32_Off p_offset;
59517@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59518 #define EI_OSABI 7
59519 #define EI_PAD 8
59520
59521+#define EI_PAX 14
59522+
59523 #define ELFMAG0 0x7f /* EI_MAG */
59524 #define ELFMAG1 'E'
59525 #define ELFMAG2 'L'
59526@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59527 #define elf_note elf32_note
59528 #define elf_addr_t Elf32_Off
59529 #define Elf_Half Elf32_Half
59530+#define elf_dyn Elf32_Dyn
59531
59532 #else
59533
59534@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59535 #define elf_note elf64_note
59536 #define elf_addr_t Elf64_Off
59537 #define Elf_Half Elf64_Half
59538+#define elf_dyn Elf64_Dyn
59539
59540 #endif
59541
59542diff --git a/include/linux/filter.h b/include/linux/filter.h
59543index 8eeb205..d59bfa2 100644
59544--- a/include/linux/filter.h
59545+++ b/include/linux/filter.h
59546@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59547
59548 struct sk_buff;
59549 struct sock;
59550+struct bpf_jit_work;
59551
59552 struct sk_filter
59553 {
59554@@ -141,6 +142,9 @@ struct sk_filter
59555 unsigned int len; /* Number of filter blocks */
59556 unsigned int (*bpf_func)(const struct sk_buff *skb,
59557 const struct sock_filter *filter);
59558+#ifdef CONFIG_BPF_JIT
59559+ struct bpf_jit_work *work;
59560+#endif
59561 struct rcu_head rcu;
59562 struct sock_filter insns[0];
59563 };
59564diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59565index 84ccf8e..2e9b14c 100644
59566--- a/include/linux/firewire.h
59567+++ b/include/linux/firewire.h
59568@@ -428,7 +428,7 @@ struct fw_iso_context {
59569 union {
59570 fw_iso_callback_t sc;
59571 fw_iso_mc_callback_t mc;
59572- } callback;
59573+ } __no_const callback;
59574 void *callback_data;
59575 };
59576
59577diff --git a/include/linux/fs.h b/include/linux/fs.h
59578index 69cd5bb..58425c2 100644
59579--- a/include/linux/fs.h
59580+++ b/include/linux/fs.h
59581@@ -1623,7 +1623,8 @@ struct file_operations {
59582 int (*setlease)(struct file *, long, struct file_lock **);
59583 long (*fallocate)(struct file *file, int mode, loff_t offset,
59584 loff_t len);
59585-};
59586+} __do_const;
59587+typedef struct file_operations __no_const file_operations_no_const;
59588
59589 struct inode_operations {
59590 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59591diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59592index 003dc0f..3c4ea97 100644
59593--- a/include/linux/fs_struct.h
59594+++ b/include/linux/fs_struct.h
59595@@ -6,7 +6,7 @@
59596 #include <linux/seqlock.h>
59597
59598 struct fs_struct {
59599- int users;
59600+ atomic_t users;
59601 spinlock_t lock;
59602 seqcount_t seq;
59603 int umask;
59604diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59605index ce31408..b1ad003 100644
59606--- a/include/linux/fscache-cache.h
59607+++ b/include/linux/fscache-cache.h
59608@@ -102,7 +102,7 @@ struct fscache_operation {
59609 fscache_operation_release_t release;
59610 };
59611
59612-extern atomic_t fscache_op_debug_id;
59613+extern atomic_unchecked_t fscache_op_debug_id;
59614 extern void fscache_op_work_func(struct work_struct *work);
59615
59616 extern void fscache_enqueue_operation(struct fscache_operation *);
59617@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
59618 {
59619 INIT_WORK(&op->work, fscache_op_work_func);
59620 atomic_set(&op->usage, 1);
59621- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59622+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59623 op->processor = processor;
59624 op->release = release;
59625 INIT_LIST_HEAD(&op->pend_link);
59626diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
59627index 2a53f10..0187fdf 100644
59628--- a/include/linux/fsnotify.h
59629+++ b/include/linux/fsnotify.h
59630@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
59631 */
59632 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
59633 {
59634- return kstrdup(name, GFP_KERNEL);
59635+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
59636 }
59637
59638 /*
59639diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
59640index 91d0e0a3..035666b 100644
59641--- a/include/linux/fsnotify_backend.h
59642+++ b/include/linux/fsnotify_backend.h
59643@@ -105,6 +105,7 @@ struct fsnotify_ops {
59644 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
59645 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
59646 };
59647+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
59648
59649 /*
59650 * A group is a "thing" that wants to receive notification about filesystem
59651diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
59652index c3da42d..c70e0df 100644
59653--- a/include/linux/ftrace_event.h
59654+++ b/include/linux/ftrace_event.h
59655@@ -97,7 +97,7 @@ struct trace_event_functions {
59656 trace_print_func raw;
59657 trace_print_func hex;
59658 trace_print_func binary;
59659-};
59660+} __no_const;
59661
59662 struct trace_event {
59663 struct hlist_node node;
59664@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
59665 extern int trace_add_event_call(struct ftrace_event_call *call);
59666 extern void trace_remove_event_call(struct ftrace_event_call *call);
59667
59668-#define is_signed_type(type) (((type)(-1)) < 0)
59669+#define is_signed_type(type) (((type)(-1)) < (type)1)
59670
59671 int trace_set_clr_event(const char *system, const char *event, int set);
59672
59673diff --git a/include/linux/genhd.h b/include/linux/genhd.h
59674index e61d319..0da8505 100644
59675--- a/include/linux/genhd.h
59676+++ b/include/linux/genhd.h
59677@@ -185,7 +185,7 @@ struct gendisk {
59678 struct kobject *slave_dir;
59679
59680 struct timer_rand_state *random;
59681- atomic_t sync_io; /* RAID */
59682+ atomic_unchecked_t sync_io; /* RAID */
59683 struct disk_events *ev;
59684 #ifdef CONFIG_BLK_DEV_INTEGRITY
59685 struct blk_integrity *integrity;
59686diff --git a/include/linux/gracl.h b/include/linux/gracl.h
59687new file mode 100644
59688index 0000000..8a130b6
59689--- /dev/null
59690+++ b/include/linux/gracl.h
59691@@ -0,0 +1,319 @@
59692+#ifndef GR_ACL_H
59693+#define GR_ACL_H
59694+
59695+#include <linux/grdefs.h>
59696+#include <linux/resource.h>
59697+#include <linux/capability.h>
59698+#include <linux/dcache.h>
59699+#include <asm/resource.h>
59700+
59701+/* Major status information */
59702+
59703+#define GR_VERSION "grsecurity 2.9"
59704+#define GRSECURITY_VERSION 0x2900
59705+
59706+enum {
59707+ GR_SHUTDOWN = 0,
59708+ GR_ENABLE = 1,
59709+ GR_SPROLE = 2,
59710+ GR_RELOAD = 3,
59711+ GR_SEGVMOD = 4,
59712+ GR_STATUS = 5,
59713+ GR_UNSPROLE = 6,
59714+ GR_PASSSET = 7,
59715+ GR_SPROLEPAM = 8,
59716+};
59717+
59718+/* Password setup definitions
59719+ * kernel/grhash.c */
59720+enum {
59721+ GR_PW_LEN = 128,
59722+ GR_SALT_LEN = 16,
59723+ GR_SHA_LEN = 32,
59724+};
59725+
59726+enum {
59727+ GR_SPROLE_LEN = 64,
59728+};
59729+
59730+enum {
59731+ GR_NO_GLOB = 0,
59732+ GR_REG_GLOB,
59733+ GR_CREATE_GLOB
59734+};
59735+
59736+#define GR_NLIMITS 32
59737+
59738+/* Begin Data Structures */
59739+
59740+struct sprole_pw {
59741+ unsigned char *rolename;
59742+ unsigned char salt[GR_SALT_LEN];
59743+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
59744+};
59745+
59746+struct name_entry {
59747+ __u32 key;
59748+ ino_t inode;
59749+ dev_t device;
59750+ char *name;
59751+ __u16 len;
59752+ __u8 deleted;
59753+ struct name_entry *prev;
59754+ struct name_entry *next;
59755+};
59756+
59757+struct inodev_entry {
59758+ struct name_entry *nentry;
59759+ struct inodev_entry *prev;
59760+ struct inodev_entry *next;
59761+};
59762+
59763+struct acl_role_db {
59764+ struct acl_role_label **r_hash;
59765+ __u32 r_size;
59766+};
59767+
59768+struct inodev_db {
59769+ struct inodev_entry **i_hash;
59770+ __u32 i_size;
59771+};
59772+
59773+struct name_db {
59774+ struct name_entry **n_hash;
59775+ __u32 n_size;
59776+};
59777+
59778+struct crash_uid {
59779+ uid_t uid;
59780+ unsigned long expires;
59781+};
59782+
59783+struct gr_hash_struct {
59784+ void **table;
59785+ void **nametable;
59786+ void *first;
59787+ __u32 table_size;
59788+ __u32 used_size;
59789+ int type;
59790+};
59791+
59792+/* Userspace Grsecurity ACL data structures */
59793+
59794+struct acl_subject_label {
59795+ char *filename;
59796+ ino_t inode;
59797+ dev_t device;
59798+ __u32 mode;
59799+ kernel_cap_t cap_mask;
59800+ kernel_cap_t cap_lower;
59801+ kernel_cap_t cap_invert_audit;
59802+
59803+ struct rlimit res[GR_NLIMITS];
59804+ __u32 resmask;
59805+
59806+ __u8 user_trans_type;
59807+ __u8 group_trans_type;
59808+ uid_t *user_transitions;
59809+ gid_t *group_transitions;
59810+ __u16 user_trans_num;
59811+ __u16 group_trans_num;
59812+
59813+ __u32 sock_families[2];
59814+ __u32 ip_proto[8];
59815+ __u32 ip_type;
59816+ struct acl_ip_label **ips;
59817+ __u32 ip_num;
59818+ __u32 inaddr_any_override;
59819+
59820+ __u32 crashes;
59821+ unsigned long expires;
59822+
59823+ struct acl_subject_label *parent_subject;
59824+ struct gr_hash_struct *hash;
59825+ struct acl_subject_label *prev;
59826+ struct acl_subject_label *next;
59827+
59828+ struct acl_object_label **obj_hash;
59829+ __u32 obj_hash_size;
59830+ __u16 pax_flags;
59831+};
59832+
59833+struct role_allowed_ip {
59834+ __u32 addr;
59835+ __u32 netmask;
59836+
59837+ struct role_allowed_ip *prev;
59838+ struct role_allowed_ip *next;
59839+};
59840+
59841+struct role_transition {
59842+ char *rolename;
59843+
59844+ struct role_transition *prev;
59845+ struct role_transition *next;
59846+};
59847+
59848+struct acl_role_label {
59849+ char *rolename;
59850+ uid_t uidgid;
59851+ __u16 roletype;
59852+
59853+ __u16 auth_attempts;
59854+ unsigned long expires;
59855+
59856+ struct acl_subject_label *root_label;
59857+ struct gr_hash_struct *hash;
59858+
59859+ struct acl_role_label *prev;
59860+ struct acl_role_label *next;
59861+
59862+ struct role_transition *transitions;
59863+ struct role_allowed_ip *allowed_ips;
59864+ uid_t *domain_children;
59865+ __u16 domain_child_num;
59866+
59867+ umode_t umask;
59868+
59869+ struct acl_subject_label **subj_hash;
59870+ __u32 subj_hash_size;
59871+};
59872+
59873+struct user_acl_role_db {
59874+ struct acl_role_label **r_table;
59875+ __u32 num_pointers; /* Number of allocations to track */
59876+ __u32 num_roles; /* Number of roles */
59877+ __u32 num_domain_children; /* Number of domain children */
59878+ __u32 num_subjects; /* Number of subjects */
59879+ __u32 num_objects; /* Number of objects */
59880+};
59881+
59882+struct acl_object_label {
59883+ char *filename;
59884+ ino_t inode;
59885+ dev_t device;
59886+ __u32 mode;
59887+
59888+ struct acl_subject_label *nested;
59889+ struct acl_object_label *globbed;
59890+
59891+ /* next two structures not used */
59892+
59893+ struct acl_object_label *prev;
59894+ struct acl_object_label *next;
59895+};
59896+
59897+struct acl_ip_label {
59898+ char *iface;
59899+ __u32 addr;
59900+ __u32 netmask;
59901+ __u16 low, high;
59902+ __u8 mode;
59903+ __u32 type;
59904+ __u32 proto[8];
59905+
59906+ /* next two structures not used */
59907+
59908+ struct acl_ip_label *prev;
59909+ struct acl_ip_label *next;
59910+};
59911+
59912+struct gr_arg {
59913+ struct user_acl_role_db role_db;
59914+ unsigned char pw[GR_PW_LEN];
59915+ unsigned char salt[GR_SALT_LEN];
59916+ unsigned char sum[GR_SHA_LEN];
59917+ unsigned char sp_role[GR_SPROLE_LEN];
59918+ struct sprole_pw *sprole_pws;
59919+ dev_t segv_device;
59920+ ino_t segv_inode;
59921+ uid_t segv_uid;
59922+ __u16 num_sprole_pws;
59923+ __u16 mode;
59924+};
59925+
59926+struct gr_arg_wrapper {
59927+ struct gr_arg *arg;
59928+ __u32 version;
59929+ __u32 size;
59930+};
59931+
59932+struct subject_map {
59933+ struct acl_subject_label *user;
59934+ struct acl_subject_label *kernel;
59935+ struct subject_map *prev;
59936+ struct subject_map *next;
59937+};
59938+
59939+struct acl_subj_map_db {
59940+ struct subject_map **s_hash;
59941+ __u32 s_size;
59942+};
59943+
59944+/* End Data Structures Section */
59945+
59946+/* Hash functions generated by empirical testing by Brad Spengler
59947+ Makes good use of the low bits of the inode. Generally 0-1 times
59948+ in loop for successful match. 0-3 for unsuccessful match.
59949+ Shift/add algorithm with modulus of table size and an XOR*/
59950+
59951+static __inline__ unsigned int
59952+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
59953+{
59954+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
59955+}
59956+
59957+ static __inline__ unsigned int
59958+shash(const struct acl_subject_label *userp, const unsigned int sz)
59959+{
59960+ return ((const unsigned long)userp % sz);
59961+}
59962+
59963+static __inline__ unsigned int
59964+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
59965+{
59966+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
59967+}
59968+
59969+static __inline__ unsigned int
59970+nhash(const char *name, const __u16 len, const unsigned int sz)
59971+{
59972+ return full_name_hash((const unsigned char *)name, len) % sz;
59973+}
59974+
59975+#define FOR_EACH_ROLE_START(role) \
59976+ role = role_list; \
59977+ while (role) {
59978+
59979+#define FOR_EACH_ROLE_END(role) \
59980+ role = role->prev; \
59981+ }
59982+
59983+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
59984+ subj = NULL; \
59985+ iter = 0; \
59986+ while (iter < role->subj_hash_size) { \
59987+ if (subj == NULL) \
59988+ subj = role->subj_hash[iter]; \
59989+ if (subj == NULL) { \
59990+ iter++; \
59991+ continue; \
59992+ }
59993+
59994+#define FOR_EACH_SUBJECT_END(subj,iter) \
59995+ subj = subj->next; \
59996+ if (subj == NULL) \
59997+ iter++; \
59998+ }
59999+
60000+
60001+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60002+ subj = role->hash->first; \
60003+ while (subj != NULL) {
60004+
60005+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60006+ subj = subj->next; \
60007+ }
60008+
60009+#endif
60010+
60011diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60012new file mode 100644
60013index 0000000..323ecf2
60014--- /dev/null
60015+++ b/include/linux/gralloc.h
60016@@ -0,0 +1,9 @@
60017+#ifndef __GRALLOC_H
60018+#define __GRALLOC_H
60019+
60020+void acl_free_all(void);
60021+int acl_alloc_stack_init(unsigned long size);
60022+void *acl_alloc(unsigned long len);
60023+void *acl_alloc_num(unsigned long num, unsigned long len);
60024+
60025+#endif
60026diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60027new file mode 100644
60028index 0000000..b30e9bc
60029--- /dev/null
60030+++ b/include/linux/grdefs.h
60031@@ -0,0 +1,140 @@
60032+#ifndef GRDEFS_H
60033+#define GRDEFS_H
60034+
60035+/* Begin grsecurity status declarations */
60036+
60037+enum {
60038+ GR_READY = 0x01,
60039+ GR_STATUS_INIT = 0x00 // disabled state
60040+};
60041+
60042+/* Begin ACL declarations */
60043+
60044+/* Role flags */
60045+
60046+enum {
60047+ GR_ROLE_USER = 0x0001,
60048+ GR_ROLE_GROUP = 0x0002,
60049+ GR_ROLE_DEFAULT = 0x0004,
60050+ GR_ROLE_SPECIAL = 0x0008,
60051+ GR_ROLE_AUTH = 0x0010,
60052+ GR_ROLE_NOPW = 0x0020,
60053+ GR_ROLE_GOD = 0x0040,
60054+ GR_ROLE_LEARN = 0x0080,
60055+ GR_ROLE_TPE = 0x0100,
60056+ GR_ROLE_DOMAIN = 0x0200,
60057+ GR_ROLE_PAM = 0x0400,
60058+ GR_ROLE_PERSIST = 0x0800
60059+};
60060+
60061+/* ACL Subject and Object mode flags */
60062+enum {
60063+ GR_DELETED = 0x80000000
60064+};
60065+
60066+/* ACL Object-only mode flags */
60067+enum {
60068+ GR_READ = 0x00000001,
60069+ GR_APPEND = 0x00000002,
60070+ GR_WRITE = 0x00000004,
60071+ GR_EXEC = 0x00000008,
60072+ GR_FIND = 0x00000010,
60073+ GR_INHERIT = 0x00000020,
60074+ GR_SETID = 0x00000040,
60075+ GR_CREATE = 0x00000080,
60076+ GR_DELETE = 0x00000100,
60077+ GR_LINK = 0x00000200,
60078+ GR_AUDIT_READ = 0x00000400,
60079+ GR_AUDIT_APPEND = 0x00000800,
60080+ GR_AUDIT_WRITE = 0x00001000,
60081+ GR_AUDIT_EXEC = 0x00002000,
60082+ GR_AUDIT_FIND = 0x00004000,
60083+ GR_AUDIT_INHERIT= 0x00008000,
60084+ GR_AUDIT_SETID = 0x00010000,
60085+ GR_AUDIT_CREATE = 0x00020000,
60086+ GR_AUDIT_DELETE = 0x00040000,
60087+ GR_AUDIT_LINK = 0x00080000,
60088+ GR_PTRACERD = 0x00100000,
60089+ GR_NOPTRACE = 0x00200000,
60090+ GR_SUPPRESS = 0x00400000,
60091+ GR_NOLEARN = 0x00800000,
60092+ GR_INIT_TRANSFER= 0x01000000
60093+};
60094+
60095+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60096+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60097+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60098+
60099+/* ACL subject-only mode flags */
60100+enum {
60101+ GR_KILL = 0x00000001,
60102+ GR_VIEW = 0x00000002,
60103+ GR_PROTECTED = 0x00000004,
60104+ GR_LEARN = 0x00000008,
60105+ GR_OVERRIDE = 0x00000010,
60106+ /* just a placeholder, this mode is only used in userspace */
60107+ GR_DUMMY = 0x00000020,
60108+ GR_PROTSHM = 0x00000040,
60109+ GR_KILLPROC = 0x00000080,
60110+ GR_KILLIPPROC = 0x00000100,
60111+ /* just a placeholder, this mode is only used in userspace */
60112+ GR_NOTROJAN = 0x00000200,
60113+ GR_PROTPROCFD = 0x00000400,
60114+ GR_PROCACCT = 0x00000800,
60115+ GR_RELAXPTRACE = 0x00001000,
60116+ GR_NESTED = 0x00002000,
60117+ GR_INHERITLEARN = 0x00004000,
60118+ GR_PROCFIND = 0x00008000,
60119+ GR_POVERRIDE = 0x00010000,
60120+ GR_KERNELAUTH = 0x00020000,
60121+ GR_ATSECURE = 0x00040000,
60122+ GR_SHMEXEC = 0x00080000
60123+};
60124+
60125+enum {
60126+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60127+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60128+ GR_PAX_ENABLE_MPROTECT = 0x0004,
60129+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
60130+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60131+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60132+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60133+ GR_PAX_DISABLE_MPROTECT = 0x0400,
60134+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
60135+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60136+};
60137+
60138+enum {
60139+ GR_ID_USER = 0x01,
60140+ GR_ID_GROUP = 0x02,
60141+};
60142+
60143+enum {
60144+ GR_ID_ALLOW = 0x01,
60145+ GR_ID_DENY = 0x02,
60146+};
60147+
60148+#define GR_CRASH_RES 31
60149+#define GR_UIDTABLE_MAX 500
60150+
60151+/* begin resource learning section */
60152+enum {
60153+ GR_RLIM_CPU_BUMP = 60,
60154+ GR_RLIM_FSIZE_BUMP = 50000,
60155+ GR_RLIM_DATA_BUMP = 10000,
60156+ GR_RLIM_STACK_BUMP = 1000,
60157+ GR_RLIM_CORE_BUMP = 10000,
60158+ GR_RLIM_RSS_BUMP = 500000,
60159+ GR_RLIM_NPROC_BUMP = 1,
60160+ GR_RLIM_NOFILE_BUMP = 5,
60161+ GR_RLIM_MEMLOCK_BUMP = 50000,
60162+ GR_RLIM_AS_BUMP = 500000,
60163+ GR_RLIM_LOCKS_BUMP = 2,
60164+ GR_RLIM_SIGPENDING_BUMP = 5,
60165+ GR_RLIM_MSGQUEUE_BUMP = 10000,
60166+ GR_RLIM_NICE_BUMP = 1,
60167+ GR_RLIM_RTPRIO_BUMP = 1,
60168+ GR_RLIM_RTTIME_BUMP = 1000000
60169+};
60170+
60171+#endif
60172diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60173new file mode 100644
60174index 0000000..da390f1
60175--- /dev/null
60176+++ b/include/linux/grinternal.h
60177@@ -0,0 +1,221 @@
60178+#ifndef __GRINTERNAL_H
60179+#define __GRINTERNAL_H
60180+
60181+#ifdef CONFIG_GRKERNSEC
60182+
60183+#include <linux/fs.h>
60184+#include <linux/mnt_namespace.h>
60185+#include <linux/nsproxy.h>
60186+#include <linux/gracl.h>
60187+#include <linux/grdefs.h>
60188+#include <linux/grmsg.h>
60189+
60190+void gr_add_learn_entry(const char *fmt, ...)
60191+ __attribute__ ((format (printf, 1, 2)));
60192+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60193+ const struct vfsmount *mnt);
60194+__u32 gr_check_create(const struct dentry *new_dentry,
60195+ const struct dentry *parent,
60196+ const struct vfsmount *mnt, const __u32 mode);
60197+int gr_check_protected_task(const struct task_struct *task);
60198+__u32 to_gr_audit(const __u32 reqmode);
60199+int gr_set_acls(const int type);
60200+int gr_apply_subject_to_task(struct task_struct *task);
60201+int gr_acl_is_enabled(void);
60202+char gr_roletype_to_char(void);
60203+
60204+void gr_handle_alertkill(struct task_struct *task);
60205+char *gr_to_filename(const struct dentry *dentry,
60206+ const struct vfsmount *mnt);
60207+char *gr_to_filename1(const struct dentry *dentry,
60208+ const struct vfsmount *mnt);
60209+char *gr_to_filename2(const struct dentry *dentry,
60210+ const struct vfsmount *mnt);
60211+char *gr_to_filename3(const struct dentry *dentry,
60212+ const struct vfsmount *mnt);
60213+
60214+extern int grsec_enable_ptrace_readexec;
60215+extern int grsec_enable_harden_ptrace;
60216+extern int grsec_enable_link;
60217+extern int grsec_enable_fifo;
60218+extern int grsec_enable_execve;
60219+extern int grsec_enable_shm;
60220+extern int grsec_enable_execlog;
60221+extern int grsec_enable_signal;
60222+extern int grsec_enable_audit_ptrace;
60223+extern int grsec_enable_forkfail;
60224+extern int grsec_enable_time;
60225+extern int grsec_enable_rofs;
60226+extern int grsec_enable_chroot_shmat;
60227+extern int grsec_enable_chroot_mount;
60228+extern int grsec_enable_chroot_double;
60229+extern int grsec_enable_chroot_pivot;
60230+extern int grsec_enable_chroot_chdir;
60231+extern int grsec_enable_chroot_chmod;
60232+extern int grsec_enable_chroot_mknod;
60233+extern int grsec_enable_chroot_fchdir;
60234+extern int grsec_enable_chroot_nice;
60235+extern int grsec_enable_chroot_execlog;
60236+extern int grsec_enable_chroot_caps;
60237+extern int grsec_enable_chroot_sysctl;
60238+extern int grsec_enable_chroot_unix;
60239+extern int grsec_enable_tpe;
60240+extern int grsec_tpe_gid;
60241+extern int grsec_enable_tpe_all;
60242+extern int grsec_enable_tpe_invert;
60243+extern int grsec_enable_socket_all;
60244+extern int grsec_socket_all_gid;
60245+extern int grsec_enable_socket_client;
60246+extern int grsec_socket_client_gid;
60247+extern int grsec_enable_socket_server;
60248+extern int grsec_socket_server_gid;
60249+extern int grsec_audit_gid;
60250+extern int grsec_enable_group;
60251+extern int grsec_enable_audit_textrel;
60252+extern int grsec_enable_log_rwxmaps;
60253+extern int grsec_enable_mount;
60254+extern int grsec_enable_chdir;
60255+extern int grsec_resource_logging;
60256+extern int grsec_enable_blackhole;
60257+extern int grsec_lastack_retries;
60258+extern int grsec_enable_brute;
60259+extern int grsec_lock;
60260+
60261+extern spinlock_t grsec_alert_lock;
60262+extern unsigned long grsec_alert_wtime;
60263+extern unsigned long grsec_alert_fyet;
60264+
60265+extern spinlock_t grsec_audit_lock;
60266+
60267+extern rwlock_t grsec_exec_file_lock;
60268+
60269+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60270+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60271+ (tsk)->exec_file->f_vfsmnt) : "/")
60272+
60273+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60274+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60275+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60276+
60277+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60278+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
60279+ (tsk)->exec_file->f_vfsmnt) : "/")
60280+
60281+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60282+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60283+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60284+
60285+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60286+
60287+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60288+
60289+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60290+ (task)->pid, (cred)->uid, \
60291+ (cred)->euid, (cred)->gid, (cred)->egid, \
60292+ gr_parent_task_fullpath(task), \
60293+ (task)->real_parent->comm, (task)->real_parent->pid, \
60294+ (pcred)->uid, (pcred)->euid, \
60295+ (pcred)->gid, (pcred)->egid
60296+
60297+#define GR_CHROOT_CAPS {{ \
60298+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60299+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60300+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60301+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60302+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60303+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60304+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60305+
60306+#define security_learn(normal_msg,args...) \
60307+({ \
60308+ read_lock(&grsec_exec_file_lock); \
60309+ gr_add_learn_entry(normal_msg "\n", ## args); \
60310+ read_unlock(&grsec_exec_file_lock); \
60311+})
60312+
60313+enum {
60314+ GR_DO_AUDIT,
60315+ GR_DONT_AUDIT,
60316+ /* used for non-audit messages that we shouldn't kill the task on */
60317+ GR_DONT_AUDIT_GOOD
60318+};
60319+
60320+enum {
60321+ GR_TTYSNIFF,
60322+ GR_RBAC,
60323+ GR_RBAC_STR,
60324+ GR_STR_RBAC,
60325+ GR_RBAC_MODE2,
60326+ GR_RBAC_MODE3,
60327+ GR_FILENAME,
60328+ GR_SYSCTL_HIDDEN,
60329+ GR_NOARGS,
60330+ GR_ONE_INT,
60331+ GR_ONE_INT_TWO_STR,
60332+ GR_ONE_STR,
60333+ GR_STR_INT,
60334+ GR_TWO_STR_INT,
60335+ GR_TWO_INT,
60336+ GR_TWO_U64,
60337+ GR_THREE_INT,
60338+ GR_FIVE_INT_TWO_STR,
60339+ GR_TWO_STR,
60340+ GR_THREE_STR,
60341+ GR_FOUR_STR,
60342+ GR_STR_FILENAME,
60343+ GR_FILENAME_STR,
60344+ GR_FILENAME_TWO_INT,
60345+ GR_FILENAME_TWO_INT_STR,
60346+ GR_TEXTREL,
60347+ GR_PTRACE,
60348+ GR_RESOURCE,
60349+ GR_CAP,
60350+ GR_SIG,
60351+ GR_SIG2,
60352+ GR_CRASH1,
60353+ GR_CRASH2,
60354+ GR_PSACCT,
60355+ GR_RWXMAP
60356+};
60357+
60358+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60359+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60360+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60361+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60362+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60363+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60364+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60365+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60366+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60367+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60368+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60369+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60370+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60371+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60372+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60373+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60374+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60375+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60376+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60377+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60378+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60379+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60380+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60381+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60382+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60383+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60384+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60385+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60386+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60387+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60388+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60389+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60390+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60391+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60392+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60393+
60394+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60395+
60396+#endif
60397+
60398+#endif
60399diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60400new file mode 100644
60401index 0000000..ae576a1
60402--- /dev/null
60403+++ b/include/linux/grmsg.h
60404@@ -0,0 +1,109 @@
60405+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60406+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60407+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60408+#define GR_STOPMOD_MSG "denied modification of module state by "
60409+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60410+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60411+#define GR_IOPERM_MSG "denied use of ioperm() by "
60412+#define GR_IOPL_MSG "denied use of iopl() by "
60413+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60414+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60415+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60416+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60417+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60418+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60419+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60420+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60421+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60422+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60423+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60424+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60425+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60426+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60427+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60428+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60429+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60430+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60431+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60432+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60433+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60434+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60435+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60436+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60437+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60438+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60439+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60440+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60441+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60442+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60443+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60444+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60445+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60446+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60447+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60448+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60449+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60450+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60451+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60452+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60453+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60454+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60455+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60456+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60457+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60458+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60459+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60460+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60461+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60462+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60463+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60464+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60465+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60466+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60467+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60468+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60469+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60470+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60471+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60472+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60473+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60474+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60475+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60476+#define GR_FAILFORK_MSG "failed fork with errno %s by "
60477+#define GR_NICE_CHROOT_MSG "denied priority change by "
60478+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60479+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60480+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60481+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60482+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60483+#define GR_TIME_MSG "time set by "
60484+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60485+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60486+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60487+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60488+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60489+#define GR_BIND_MSG "denied bind() by "
60490+#define GR_CONNECT_MSG "denied connect() by "
60491+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60492+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60493+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60494+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60495+#define GR_CAP_ACL_MSG "use of %s denied for "
60496+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60497+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60498+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60499+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60500+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60501+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60502+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60503+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60504+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60505+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60506+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60507+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60508+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60509+#define GR_VM86_MSG "denied use of vm86 by "
60510+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60511+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60512+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60513+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60514diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60515new file mode 100644
60516index 0000000..acd05db
60517--- /dev/null
60518+++ b/include/linux/grsecurity.h
60519@@ -0,0 +1,232 @@
60520+#ifndef GR_SECURITY_H
60521+#define GR_SECURITY_H
60522+#include <linux/fs.h>
60523+#include <linux/fs_struct.h>
60524+#include <linux/binfmts.h>
60525+#include <linux/gracl.h>
60526+
60527+/* notify of brain-dead configs */
60528+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60529+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60530+#endif
60531+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60532+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60533+#endif
60534+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60535+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60536+#endif
60537+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60538+#error "CONFIG_PAX enabled, but no PaX options are enabled."
60539+#endif
60540+
60541+#include <linux/compat.h>
60542+
60543+struct user_arg_ptr {
60544+#ifdef CONFIG_COMPAT
60545+ bool is_compat;
60546+#endif
60547+ union {
60548+ const char __user *const __user *native;
60549+#ifdef CONFIG_COMPAT
60550+ compat_uptr_t __user *compat;
60551+#endif
60552+ } ptr;
60553+};
60554+
60555+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60556+void gr_handle_brute_check(void);
60557+void gr_handle_kernel_exploit(void);
60558+int gr_process_user_ban(void);
60559+
60560+char gr_roletype_to_char(void);
60561+
60562+int gr_acl_enable_at_secure(void);
60563+
60564+int gr_check_user_change(int real, int effective, int fs);
60565+int gr_check_group_change(int real, int effective, int fs);
60566+
60567+void gr_del_task_from_ip_table(struct task_struct *p);
60568+
60569+int gr_pid_is_chrooted(struct task_struct *p);
60570+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60571+int gr_handle_chroot_nice(void);
60572+int gr_handle_chroot_sysctl(const int op);
60573+int gr_handle_chroot_setpriority(struct task_struct *p,
60574+ const int niceval);
60575+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60576+int gr_handle_chroot_chroot(const struct dentry *dentry,
60577+ const struct vfsmount *mnt);
60578+void gr_handle_chroot_chdir(struct path *path);
60579+int gr_handle_chroot_chmod(const struct dentry *dentry,
60580+ const struct vfsmount *mnt, const int mode);
60581+int gr_handle_chroot_mknod(const struct dentry *dentry,
60582+ const struct vfsmount *mnt, const int mode);
60583+int gr_handle_chroot_mount(const struct dentry *dentry,
60584+ const struct vfsmount *mnt,
60585+ const char *dev_name);
60586+int gr_handle_chroot_pivot(void);
60587+int gr_handle_chroot_unix(const pid_t pid);
60588+
60589+int gr_handle_rawio(const struct inode *inode);
60590+
60591+void gr_handle_ioperm(void);
60592+void gr_handle_iopl(void);
60593+
60594+umode_t gr_acl_umask(void);
60595+
60596+int gr_tpe_allow(const struct file *file);
60597+
60598+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60599+void gr_clear_chroot_entries(struct task_struct *task);
60600+
60601+void gr_log_forkfail(const int retval);
60602+void gr_log_timechange(void);
60603+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60604+void gr_log_chdir(const struct dentry *dentry,
60605+ const struct vfsmount *mnt);
60606+void gr_log_chroot_exec(const struct dentry *dentry,
60607+ const struct vfsmount *mnt);
60608+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60609+void gr_log_remount(const char *devname, const int retval);
60610+void gr_log_unmount(const char *devname, const int retval);
60611+void gr_log_mount(const char *from, const char *to, const int retval);
60612+void gr_log_textrel(struct vm_area_struct *vma);
60613+void gr_log_rwxmmap(struct file *file);
60614+void gr_log_rwxmprotect(struct file *file);
60615+
60616+int gr_handle_follow_link(const struct inode *parent,
60617+ const struct inode *inode,
60618+ const struct dentry *dentry,
60619+ const struct vfsmount *mnt);
60620+int gr_handle_fifo(const struct dentry *dentry,
60621+ const struct vfsmount *mnt,
60622+ const struct dentry *dir, const int flag,
60623+ const int acc_mode);
60624+int gr_handle_hardlink(const struct dentry *dentry,
60625+ const struct vfsmount *mnt,
60626+ struct inode *inode,
60627+ const int mode, const char *to);
60628+
60629+int gr_is_capable(const int cap);
60630+int gr_is_capable_nolog(const int cap);
60631+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
60632+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
60633+
60634+void gr_learn_resource(const struct task_struct *task, const int limit,
60635+ const unsigned long wanted, const int gt);
60636+void gr_copy_label(struct task_struct *tsk);
60637+void gr_handle_crash(struct task_struct *task, const int sig);
60638+int gr_handle_signal(const struct task_struct *p, const int sig);
60639+int gr_check_crash_uid(const uid_t uid);
60640+int gr_check_protected_task(const struct task_struct *task);
60641+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
60642+int gr_acl_handle_mmap(const struct file *file,
60643+ const unsigned long prot);
60644+int gr_acl_handle_mprotect(const struct file *file,
60645+ const unsigned long prot);
60646+int gr_check_hidden_task(const struct task_struct *tsk);
60647+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
60648+ const struct vfsmount *mnt);
60649+__u32 gr_acl_handle_utime(const struct dentry *dentry,
60650+ const struct vfsmount *mnt);
60651+__u32 gr_acl_handle_access(const struct dentry *dentry,
60652+ const struct vfsmount *mnt, const int fmode);
60653+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
60654+ const struct vfsmount *mnt, umode_t *mode);
60655+__u32 gr_acl_handle_chown(const struct dentry *dentry,
60656+ const struct vfsmount *mnt);
60657+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
60658+ const struct vfsmount *mnt);
60659+int gr_handle_ptrace(struct task_struct *task, const long request);
60660+int gr_handle_proc_ptrace(struct task_struct *task);
60661+__u32 gr_acl_handle_execve(const struct dentry *dentry,
60662+ const struct vfsmount *mnt);
60663+int gr_check_crash_exec(const struct file *filp);
60664+int gr_acl_is_enabled(void);
60665+void gr_set_kernel_label(struct task_struct *task);
60666+void gr_set_role_label(struct task_struct *task, const uid_t uid,
60667+ const gid_t gid);
60668+int gr_set_proc_label(const struct dentry *dentry,
60669+ const struct vfsmount *mnt,
60670+ const int unsafe_flags);
60671+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
60672+ const struct vfsmount *mnt);
60673+__u32 gr_acl_handle_open(const struct dentry *dentry,
60674+ const struct vfsmount *mnt, int acc_mode);
60675+__u32 gr_acl_handle_creat(const struct dentry *dentry,
60676+ const struct dentry *p_dentry,
60677+ const struct vfsmount *p_mnt,
60678+ int open_flags, int acc_mode, const int imode);
60679+void gr_handle_create(const struct dentry *dentry,
60680+ const struct vfsmount *mnt);
60681+void gr_handle_proc_create(const struct dentry *dentry,
60682+ const struct inode *inode);
60683+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
60684+ const struct dentry *parent_dentry,
60685+ const struct vfsmount *parent_mnt,
60686+ const int mode);
60687+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
60688+ const struct dentry *parent_dentry,
60689+ const struct vfsmount *parent_mnt);
60690+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
60691+ const struct vfsmount *mnt);
60692+void gr_handle_delete(const ino_t ino, const dev_t dev);
60693+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
60694+ const struct vfsmount *mnt);
60695+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
60696+ const struct dentry *parent_dentry,
60697+ const struct vfsmount *parent_mnt,
60698+ const char *from);
60699+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
60700+ const struct dentry *parent_dentry,
60701+ const struct vfsmount *parent_mnt,
60702+ const struct dentry *old_dentry,
60703+ const struct vfsmount *old_mnt, const char *to);
60704+int gr_acl_handle_rename(struct dentry *new_dentry,
60705+ struct dentry *parent_dentry,
60706+ const struct vfsmount *parent_mnt,
60707+ struct dentry *old_dentry,
60708+ struct inode *old_parent_inode,
60709+ struct vfsmount *old_mnt, const char *newname);
60710+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60711+ struct dentry *old_dentry,
60712+ struct dentry *new_dentry,
60713+ struct vfsmount *mnt, const __u8 replace);
60714+__u32 gr_check_link(const struct dentry *new_dentry,
60715+ const struct dentry *parent_dentry,
60716+ const struct vfsmount *parent_mnt,
60717+ const struct dentry *old_dentry,
60718+ const struct vfsmount *old_mnt);
60719+int gr_acl_handle_filldir(const struct file *file, const char *name,
60720+ const unsigned int namelen, const ino_t ino);
60721+
60722+__u32 gr_acl_handle_unix(const struct dentry *dentry,
60723+ const struct vfsmount *mnt);
60724+void gr_acl_handle_exit(void);
60725+void gr_acl_handle_psacct(struct task_struct *task, const long code);
60726+int gr_acl_handle_procpidmem(const struct task_struct *task);
60727+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
60728+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
60729+void gr_audit_ptrace(struct task_struct *task);
60730+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
60731+
60732+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
60733+
60734+#ifdef CONFIG_GRKERNSEC
60735+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
60736+void gr_handle_vm86(void);
60737+void gr_handle_mem_readwrite(u64 from, u64 to);
60738+
60739+void gr_log_badprocpid(const char *entry);
60740+
60741+extern int grsec_enable_dmesg;
60742+extern int grsec_disable_privio;
60743+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60744+extern int grsec_enable_chroot_findtask;
60745+#endif
60746+#ifdef CONFIG_GRKERNSEC_SETXID
60747+extern int grsec_enable_setxid;
60748+#endif
60749+#endif
60750+
60751+#endif
60752diff --git a/include/linux/grsock.h b/include/linux/grsock.h
60753new file mode 100644
60754index 0000000..e7ffaaf
60755--- /dev/null
60756+++ b/include/linux/grsock.h
60757@@ -0,0 +1,19 @@
60758+#ifndef __GRSOCK_H
60759+#define __GRSOCK_H
60760+
60761+extern void gr_attach_curr_ip(const struct sock *sk);
60762+extern int gr_handle_sock_all(const int family, const int type,
60763+ const int protocol);
60764+extern int gr_handle_sock_server(const struct sockaddr *sck);
60765+extern int gr_handle_sock_server_other(const struct sock *sck);
60766+extern int gr_handle_sock_client(const struct sockaddr *sck);
60767+extern int gr_search_connect(struct socket * sock,
60768+ struct sockaddr_in * addr);
60769+extern int gr_search_bind(struct socket * sock,
60770+ struct sockaddr_in * addr);
60771+extern int gr_search_listen(struct socket * sock);
60772+extern int gr_search_accept(struct socket * sock);
60773+extern int gr_search_socket(const int domain, const int type,
60774+ const int protocol);
60775+
60776+#endif
60777diff --git a/include/linux/hid.h b/include/linux/hid.h
60778index 3a95da6..51986f1 100644
60779--- a/include/linux/hid.h
60780+++ b/include/linux/hid.h
60781@@ -696,7 +696,7 @@ struct hid_ll_driver {
60782 unsigned int code, int value);
60783
60784 int (*parse)(struct hid_device *hdev);
60785-};
60786+} __no_const;
60787
60788 #define PM_HINT_FULLON 1<<5
60789 #define PM_HINT_NORMAL 1<<1
60790diff --git a/include/linux/highmem.h b/include/linux/highmem.h
60791index 3a93f73..b19d0b3 100644
60792--- a/include/linux/highmem.h
60793+++ b/include/linux/highmem.h
60794@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
60795 kunmap_atomic(kaddr, KM_USER0);
60796 }
60797
60798+static inline void sanitize_highpage(struct page *page)
60799+{
60800+ void *kaddr;
60801+ unsigned long flags;
60802+
60803+ local_irq_save(flags);
60804+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
60805+ clear_page(kaddr);
60806+ kunmap_atomic(kaddr, KM_CLEARPAGE);
60807+ local_irq_restore(flags);
60808+}
60809+
60810 static inline void zero_user_segments(struct page *page,
60811 unsigned start1, unsigned end1,
60812 unsigned start2, unsigned end2)
60813diff --git a/include/linux/i2c.h b/include/linux/i2c.h
60814index 8e25a91..551b161 100644
60815--- a/include/linux/i2c.h
60816+++ b/include/linux/i2c.h
60817@@ -364,6 +364,7 @@ struct i2c_algorithm {
60818 /* To determine what the adapter supports */
60819 u32 (*functionality) (struct i2c_adapter *);
60820 };
60821+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
60822
60823 /*
60824 * i2c_adapter is the structure used to identify a physical i2c bus along
60825diff --git a/include/linux/i2o.h b/include/linux/i2o.h
60826index a6deef4..c56a7f2 100644
60827--- a/include/linux/i2o.h
60828+++ b/include/linux/i2o.h
60829@@ -564,7 +564,7 @@ struct i2o_controller {
60830 struct i2o_device *exec; /* Executive */
60831 #if BITS_PER_LONG == 64
60832 spinlock_t context_list_lock; /* lock for context_list */
60833- atomic_t context_list_counter; /* needed for unique contexts */
60834+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
60835 struct list_head context_list; /* list of context id's
60836 and pointers */
60837 #endif
60838diff --git a/include/linux/if_team.h b/include/linux/if_team.h
60839index 58404b0..439ed95 100644
60840--- a/include/linux/if_team.h
60841+++ b/include/linux/if_team.h
60842@@ -64,6 +64,7 @@ struct team_mode_ops {
60843 void (*port_leave)(struct team *team, struct team_port *port);
60844 void (*port_change_mac)(struct team *team, struct team_port *port);
60845 };
60846+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
60847
60848 enum team_option_type {
60849 TEAM_OPTION_TYPE_U32,
60850@@ -112,7 +113,7 @@ struct team {
60851 struct list_head option_list;
60852
60853 const struct team_mode *mode;
60854- struct team_mode_ops ops;
60855+ team_mode_ops_no_const ops;
60856 long mode_priv[TEAM_MODE_PRIV_LONGS];
60857 };
60858
60859diff --git a/include/linux/init.h b/include/linux/init.h
60860index 6b95109..4aca62c 100644
60861--- a/include/linux/init.h
60862+++ b/include/linux/init.h
60863@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
60864
60865 /* Each module must use one module_init(). */
60866 #define module_init(initfn) \
60867- static inline initcall_t __inittest(void) \
60868+ static inline __used initcall_t __inittest(void) \
60869 { return initfn; } \
60870 int init_module(void) __attribute__((alias(#initfn)));
60871
60872 /* This is only required if you want to be unloadable. */
60873 #define module_exit(exitfn) \
60874- static inline exitcall_t __exittest(void) \
60875+ static inline __used exitcall_t __exittest(void) \
60876 { return exitfn; } \
60877 void cleanup_module(void) __attribute__((alias(#exitfn)));
60878
60879diff --git a/include/linux/init_task.h b/include/linux/init_task.h
60880index 9c66b1a..a3fdded 100644
60881--- a/include/linux/init_task.h
60882+++ b/include/linux/init_task.h
60883@@ -127,6 +127,12 @@ extern struct cred init_cred;
60884
60885 #define INIT_TASK_COMM "swapper"
60886
60887+#ifdef CONFIG_X86
60888+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
60889+#else
60890+#define INIT_TASK_THREAD_INFO
60891+#endif
60892+
60893 /*
60894 * INIT_TASK is used to set up the first task table, touch at
60895 * your own risk!. Base=0, limit=0x1fffff (=2MB)
60896@@ -165,6 +171,7 @@ extern struct cred init_cred;
60897 RCU_INIT_POINTER(.cred, &init_cred), \
60898 .comm = INIT_TASK_COMM, \
60899 .thread = INIT_THREAD, \
60900+ INIT_TASK_THREAD_INFO \
60901 .fs = &init_fs, \
60902 .files = &init_files, \
60903 .signal = &init_signals, \
60904diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
60905index e6ca56d..8583707 100644
60906--- a/include/linux/intel-iommu.h
60907+++ b/include/linux/intel-iommu.h
60908@@ -296,7 +296,7 @@ struct iommu_flush {
60909 u8 fm, u64 type);
60910 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
60911 unsigned int size_order, u64 type);
60912-};
60913+} __no_const;
60914
60915 enum {
60916 SR_DMAR_FECTL_REG,
60917diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
60918index a64b00e..464d8bc 100644
60919--- a/include/linux/interrupt.h
60920+++ b/include/linux/interrupt.h
60921@@ -441,7 +441,7 @@ enum
60922 /* map softirq index to softirq name. update 'softirq_to_name' in
60923 * kernel/softirq.c when adding a new softirq.
60924 */
60925-extern char *softirq_to_name[NR_SOFTIRQS];
60926+extern const char * const softirq_to_name[NR_SOFTIRQS];
60927
60928 /* softirq mask and active fields moved to irq_cpustat_t in
60929 * asm/hardirq.h to get better cache usage. KAO
60930@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
60931
60932 struct softirq_action
60933 {
60934- void (*action)(struct softirq_action *);
60935+ void (*action)(void);
60936 };
60937
60938 asmlinkage void do_softirq(void);
60939 asmlinkage void __do_softirq(void);
60940-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
60941+extern void open_softirq(int nr, void (*action)(void));
60942 extern void softirq_init(void);
60943 static inline void __raise_softirq_irqoff(unsigned int nr)
60944 {
60945diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
60946index 3875719..4cd454c 100644
60947--- a/include/linux/kallsyms.h
60948+++ b/include/linux/kallsyms.h
60949@@ -15,7 +15,8 @@
60950
60951 struct module;
60952
60953-#ifdef CONFIG_KALLSYMS
60954+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
60955+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60956 /* Lookup the address for a symbol. Returns 0 if not found. */
60957 unsigned long kallsyms_lookup_name(const char *name);
60958
60959@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
60960 /* Stupid that this does nothing, but I didn't create this mess. */
60961 #define __print_symbol(fmt, addr)
60962 #endif /*CONFIG_KALLSYMS*/
60963+#else /* when included by kallsyms.c, vsnprintf.c, or
60964+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
60965+extern void __print_symbol(const char *fmt, unsigned long address);
60966+extern int sprint_backtrace(char *buffer, unsigned long address);
60967+extern int sprint_symbol(char *buffer, unsigned long address);
60968+const char *kallsyms_lookup(unsigned long addr,
60969+ unsigned long *symbolsize,
60970+ unsigned long *offset,
60971+ char **modname, char *namebuf);
60972+#endif
60973
60974 /* This macro allows us to keep printk typechecking */
60975 static __printf(1, 2)
60976diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
60977index fa39183..40160be 100644
60978--- a/include/linux/kgdb.h
60979+++ b/include/linux/kgdb.h
60980@@ -53,7 +53,7 @@ extern int kgdb_connected;
60981 extern int kgdb_io_module_registered;
60982
60983 extern atomic_t kgdb_setting_breakpoint;
60984-extern atomic_t kgdb_cpu_doing_single_step;
60985+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
60986
60987 extern struct task_struct *kgdb_usethread;
60988 extern struct task_struct *kgdb_contthread;
60989@@ -251,7 +251,7 @@ struct kgdb_arch {
60990 void (*disable_hw_break)(struct pt_regs *regs);
60991 void (*remove_all_hw_break)(void);
60992 void (*correct_hw_break)(void);
60993-};
60994+} __do_const;
60995
60996 /**
60997 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
60998@@ -276,7 +276,7 @@ struct kgdb_io {
60999 void (*pre_exception) (void);
61000 void (*post_exception) (void);
61001 int is_console;
61002-};
61003+} __do_const;
61004
61005 extern struct kgdb_arch arch_kgdb_ops;
61006
61007diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61008index 722f477..eef2a27 100644
61009--- a/include/linux/kmod.h
61010+++ b/include/linux/kmod.h
61011@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61012 * usually useless though. */
61013 extern __printf(2, 3)
61014 int __request_module(bool wait, const char *name, ...);
61015+extern __printf(3, 4)
61016+int ___request_module(bool wait, char *param_name, const char *name, ...);
61017 #define request_module(mod...) __request_module(true, mod)
61018 #define request_module_nowait(mod...) __request_module(false, mod)
61019 #define try_then_request_module(x, mod...) \
61020diff --git a/include/linux/kref.h b/include/linux/kref.h
61021index 9c07dce..a92fa71 100644
61022--- a/include/linux/kref.h
61023+++ b/include/linux/kref.h
61024@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61025 static inline int kref_sub(struct kref *kref, unsigned int count,
61026 void (*release)(struct kref *kref))
61027 {
61028- WARN_ON(release == NULL);
61029+ BUG_ON(release == NULL);
61030
61031 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61032 release(kref);
61033diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61034index 900c763..43260cf 100644
61035--- a/include/linux/kvm_host.h
61036+++ b/include/linux/kvm_host.h
61037@@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61038 void vcpu_load(struct kvm_vcpu *vcpu);
61039 void vcpu_put(struct kvm_vcpu *vcpu);
61040
61041-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61042+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61043 struct module *module);
61044 void kvm_exit(void);
61045
61046@@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61047 struct kvm_guest_debug *dbg);
61048 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61049
61050-int kvm_arch_init(void *opaque);
61051+int kvm_arch_init(const void *opaque);
61052 void kvm_arch_exit(void);
61053
61054 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61055diff --git a/include/linux/libata.h b/include/linux/libata.h
61056index cafc09a..d7e7829 100644
61057--- a/include/linux/libata.h
61058+++ b/include/linux/libata.h
61059@@ -909,7 +909,7 @@ struct ata_port_operations {
61060 * fields must be pointers.
61061 */
61062 const struct ata_port_operations *inherits;
61063-};
61064+} __do_const;
61065
61066 struct ata_port_info {
61067 unsigned long flags;
61068diff --git a/include/linux/mca.h b/include/linux/mca.h
61069index 3797270..7765ede 100644
61070--- a/include/linux/mca.h
61071+++ b/include/linux/mca.h
61072@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61073 int region);
61074 void * (*mca_transform_memory)(struct mca_device *,
61075 void *memory);
61076-};
61077+} __no_const;
61078
61079 struct mca_bus {
61080 u64 default_dma_mask;
61081diff --git a/include/linux/memory.h b/include/linux/memory.h
61082index 1ac7f6e..a5794d0 100644
61083--- a/include/linux/memory.h
61084+++ b/include/linux/memory.h
61085@@ -143,7 +143,7 @@ struct memory_accessor {
61086 size_t count);
61087 ssize_t (*write)(struct memory_accessor *, const char *buf,
61088 off_t offset, size_t count);
61089-};
61090+} __no_const;
61091
61092 /*
61093 * Kernel text modification mutex, used for code patching. Users of this lock
61094diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61095index 9970337..9444122 100644
61096--- a/include/linux/mfd/abx500.h
61097+++ b/include/linux/mfd/abx500.h
61098@@ -188,6 +188,7 @@ struct abx500_ops {
61099 int (*event_registers_startup_state_get) (struct device *, u8 *);
61100 int (*startup_irq_enabled) (struct device *, unsigned int);
61101 };
61102+typedef struct abx500_ops __no_const abx500_ops_no_const;
61103
61104 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61105 void abx500_remove_ops(struct device *dev);
61106diff --git a/include/linux/mm.h b/include/linux/mm.h
61107index 17b27cd..467ba2f 100644
61108--- a/include/linux/mm.h
61109+++ b/include/linux/mm.h
61110@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
61111
61112 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61113 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61114+
61115+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61116+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61117+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61118+#else
61119 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61120+#endif
61121+
61122 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61123 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61124
61125@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
61126 int set_page_dirty_lock(struct page *page);
61127 int clear_page_dirty_for_io(struct page *page);
61128
61129-/* Is the vma a continuation of the stack vma above it? */
61130-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61131-{
61132- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61133-}
61134-
61135-static inline int stack_guard_page_start(struct vm_area_struct *vma,
61136- unsigned long addr)
61137-{
61138- return (vma->vm_flags & VM_GROWSDOWN) &&
61139- (vma->vm_start == addr) &&
61140- !vma_growsdown(vma->vm_prev, addr);
61141-}
61142-
61143-/* Is the vma a continuation of the stack vma below it? */
61144-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61145-{
61146- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61147-}
61148-
61149-static inline int stack_guard_page_end(struct vm_area_struct *vma,
61150- unsigned long addr)
61151-{
61152- return (vma->vm_flags & VM_GROWSUP) &&
61153- (vma->vm_end == addr) &&
61154- !vma_growsup(vma->vm_next, addr);
61155-}
61156-
61157 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61158 unsigned long old_addr, struct vm_area_struct *new_vma,
61159 unsigned long new_addr, unsigned long len);
61160@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61161 }
61162 #endif
61163
61164+#ifdef CONFIG_MMU
61165+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61166+#else
61167+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61168+{
61169+ return __pgprot(0);
61170+}
61171+#endif
61172+
61173 int vma_wants_writenotify(struct vm_area_struct *vma);
61174
61175 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61176@@ -1409,6 +1397,7 @@ out:
61177 }
61178
61179 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61180+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61181
61182 extern unsigned long do_brk(unsigned long, unsigned long);
61183
61184@@ -1466,6 +1455,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61185 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61186 struct vm_area_struct **pprev);
61187
61188+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61189+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61190+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61191+
61192 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61193 NULL if none. Assume start_addr < end_addr. */
61194 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61195@@ -1494,15 +1487,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61196 return vma;
61197 }
61198
61199-#ifdef CONFIG_MMU
61200-pgprot_t vm_get_page_prot(unsigned long vm_flags);
61201-#else
61202-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61203-{
61204- return __pgprot(0);
61205-}
61206-#endif
61207-
61208 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61209 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61210 unsigned long pfn, unsigned long size, pgprot_t);
61211@@ -1606,7 +1590,7 @@ extern int unpoison_memory(unsigned long pfn);
61212 extern int sysctl_memory_failure_early_kill;
61213 extern int sysctl_memory_failure_recovery;
61214 extern void shake_page(struct page *p, int access);
61215-extern atomic_long_t mce_bad_pages;
61216+extern atomic_long_unchecked_t mce_bad_pages;
61217 extern int soft_offline_page(struct page *page, int flags);
61218
61219 extern void dump_page(struct page *page);
61220@@ -1637,5 +1621,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61221 static inline bool page_is_guard(struct page *page) { return false; }
61222 #endif /* CONFIG_DEBUG_PAGEALLOC */
61223
61224+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61225+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61226+#else
61227+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61228+#endif
61229+
61230 #endif /* __KERNEL__ */
61231 #endif /* _LINUX_MM_H */
61232diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61233index 3cc3062..8947a82 100644
61234--- a/include/linux/mm_types.h
61235+++ b/include/linux/mm_types.h
61236@@ -252,6 +252,8 @@ struct vm_area_struct {
61237 #ifdef CONFIG_NUMA
61238 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61239 #endif
61240+
61241+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61242 };
61243
61244 struct core_thread {
61245@@ -388,6 +390,24 @@ struct mm_struct {
61246 #ifdef CONFIG_CPUMASK_OFFSTACK
61247 struct cpumask cpumask_allocation;
61248 #endif
61249+
61250+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61251+ unsigned long pax_flags;
61252+#endif
61253+
61254+#ifdef CONFIG_PAX_DLRESOLVE
61255+ unsigned long call_dl_resolve;
61256+#endif
61257+
61258+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61259+ unsigned long call_syscall;
61260+#endif
61261+
61262+#ifdef CONFIG_PAX_ASLR
61263+ unsigned long delta_mmap; /* randomized offset */
61264+ unsigned long delta_stack; /* randomized offset */
61265+#endif
61266+
61267 };
61268
61269 static inline void mm_init_cpumask(struct mm_struct *mm)
61270diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61271index 1d1b1e1..2a13c78 100644
61272--- a/include/linux/mmu_notifier.h
61273+++ b/include/linux/mmu_notifier.h
61274@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61275 */
61276 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61277 ({ \
61278- pte_t __pte; \
61279+ pte_t ___pte; \
61280 struct vm_area_struct *___vma = __vma; \
61281 unsigned long ___address = __address; \
61282- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61283+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61284 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61285- __pte; \
61286+ ___pte; \
61287 })
61288
61289 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61290diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61291index 650ba2f..af0a58c 100644
61292--- a/include/linux/mmzone.h
61293+++ b/include/linux/mmzone.h
61294@@ -379,7 +379,7 @@ struct zone {
61295 unsigned long flags; /* zone flags, see below */
61296
61297 /* Zone statistics */
61298- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61299+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61300
61301 /*
61302 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61303diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61304index 83ac071..2656e0e 100644
61305--- a/include/linux/mod_devicetable.h
61306+++ b/include/linux/mod_devicetable.h
61307@@ -12,7 +12,7 @@
61308 typedef unsigned long kernel_ulong_t;
61309 #endif
61310
61311-#define PCI_ANY_ID (~0)
61312+#define PCI_ANY_ID ((__u16)~0)
61313
61314 struct pci_device_id {
61315 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61316@@ -131,7 +131,7 @@ struct usb_device_id {
61317 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61318 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61319
61320-#define HID_ANY_ID (~0)
61321+#define HID_ANY_ID (~0U)
61322
61323 struct hid_device_id {
61324 __u16 bus;
61325diff --git a/include/linux/module.h b/include/linux/module.h
61326index 4598bf0..e069d7f 100644
61327--- a/include/linux/module.h
61328+++ b/include/linux/module.h
61329@@ -17,6 +17,7 @@
61330 #include <linux/moduleparam.h>
61331 #include <linux/tracepoint.h>
61332 #include <linux/export.h>
61333+#include <linux/fs.h>
61334
61335 #include <linux/percpu.h>
61336 #include <asm/module.h>
61337@@ -275,19 +276,16 @@ struct module
61338 int (*init)(void);
61339
61340 /* If this is non-NULL, vfree after init() returns */
61341- void *module_init;
61342+ void *module_init_rx, *module_init_rw;
61343
61344 /* Here is the actual code + data, vfree'd on unload. */
61345- void *module_core;
61346+ void *module_core_rx, *module_core_rw;
61347
61348 /* Here are the sizes of the init and core sections */
61349- unsigned int init_size, core_size;
61350+ unsigned int init_size_rw, core_size_rw;
61351
61352 /* The size of the executable code in each section. */
61353- unsigned int init_text_size, core_text_size;
61354-
61355- /* Size of RO sections of the module (text+rodata) */
61356- unsigned int init_ro_size, core_ro_size;
61357+ unsigned int init_size_rx, core_size_rx;
61358
61359 /* Arch-specific module values */
61360 struct mod_arch_specific arch;
61361@@ -343,6 +341,10 @@ struct module
61362 #ifdef CONFIG_EVENT_TRACING
61363 struct ftrace_event_call **trace_events;
61364 unsigned int num_trace_events;
61365+ struct file_operations trace_id;
61366+ struct file_operations trace_enable;
61367+ struct file_operations trace_format;
61368+ struct file_operations trace_filter;
61369 #endif
61370 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61371 unsigned int num_ftrace_callsites;
61372@@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
61373 bool is_module_percpu_address(unsigned long addr);
61374 bool is_module_text_address(unsigned long addr);
61375
61376+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61377+{
61378+
61379+#ifdef CONFIG_PAX_KERNEXEC
61380+ if (ktla_ktva(addr) >= (unsigned long)start &&
61381+ ktla_ktva(addr) < (unsigned long)start + size)
61382+ return 1;
61383+#endif
61384+
61385+ return ((void *)addr >= start && (void *)addr < start + size);
61386+}
61387+
61388+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61389+{
61390+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61391+}
61392+
61393+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61394+{
61395+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61396+}
61397+
61398+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61399+{
61400+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61401+}
61402+
61403+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61404+{
61405+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61406+}
61407+
61408 static inline int within_module_core(unsigned long addr, struct module *mod)
61409 {
61410- return (unsigned long)mod->module_core <= addr &&
61411- addr < (unsigned long)mod->module_core + mod->core_size;
61412+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61413 }
61414
61415 static inline int within_module_init(unsigned long addr, struct module *mod)
61416 {
61417- return (unsigned long)mod->module_init <= addr &&
61418- addr < (unsigned long)mod->module_init + mod->init_size;
61419+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61420 }
61421
61422 /* Search for module by name: must hold module_mutex. */
61423diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61424index b2be02e..6a9fdb1 100644
61425--- a/include/linux/moduleloader.h
61426+++ b/include/linux/moduleloader.h
61427@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61428 sections. Returns NULL on failure. */
61429 void *module_alloc(unsigned long size);
61430
61431+#ifdef CONFIG_PAX_KERNEXEC
61432+void *module_alloc_exec(unsigned long size);
61433+#else
61434+#define module_alloc_exec(x) module_alloc(x)
61435+#endif
61436+
61437 /* Free memory returned from module_alloc. */
61438 void module_free(struct module *mod, void *module_region);
61439
61440+#ifdef CONFIG_PAX_KERNEXEC
61441+void module_free_exec(struct module *mod, void *module_region);
61442+#else
61443+#define module_free_exec(x, y) module_free((x), (y))
61444+#endif
61445+
61446 /* Apply the given relocation to the (simplified) ELF. Return -error
61447 or 0. */
61448 int apply_relocate(Elf_Shdr *sechdrs,
61449diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61450index c47f4d6..23f9bdb 100644
61451--- a/include/linux/moduleparam.h
61452+++ b/include/linux/moduleparam.h
61453@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
61454 * @len is usually just sizeof(string).
61455 */
61456 #define module_param_string(name, string, len, perm) \
61457- static const struct kparam_string __param_string_##name \
61458+ static const struct kparam_string __param_string_##name __used \
61459 = { len, string }; \
61460 __module_param_call(MODULE_PARAM_PREFIX, name, \
61461 &param_ops_string, \
61462@@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61463 */
61464 #define module_param_array_named(name, array, type, nump, perm) \
61465 param_check_##type(name, &(array)[0]); \
61466- static const struct kparam_array __param_arr_##name \
61467+ static const struct kparam_array __param_arr_##name __used \
61468 = { .max = ARRAY_SIZE(array), .num = nump, \
61469 .ops = &param_ops_##type, \
61470 .elemsize = sizeof(array[0]), .elem = array }; \
61471diff --git a/include/linux/namei.h b/include/linux/namei.h
61472index ffc0213..2c1f2cb 100644
61473--- a/include/linux/namei.h
61474+++ b/include/linux/namei.h
61475@@ -24,7 +24,7 @@ struct nameidata {
61476 unsigned seq;
61477 int last_type;
61478 unsigned depth;
61479- char *saved_names[MAX_NESTED_LINKS + 1];
61480+ const char *saved_names[MAX_NESTED_LINKS + 1];
61481
61482 /* Intent data */
61483 union {
61484@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61485 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61486 extern void unlock_rename(struct dentry *, struct dentry *);
61487
61488-static inline void nd_set_link(struct nameidata *nd, char *path)
61489+static inline void nd_set_link(struct nameidata *nd, const char *path)
61490 {
61491 nd->saved_names[nd->depth] = path;
61492 }
61493
61494-static inline char *nd_get_link(struct nameidata *nd)
61495+static inline const char *nd_get_link(const struct nameidata *nd)
61496 {
61497 return nd->saved_names[nd->depth];
61498 }
61499diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61500index 0eac07c..a59f6a8 100644
61501--- a/include/linux/netdevice.h
61502+++ b/include/linux/netdevice.h
61503@@ -1002,6 +1002,7 @@ struct net_device_ops {
61504 int (*ndo_neigh_construct)(struct neighbour *n);
61505 void (*ndo_neigh_destroy)(struct neighbour *n);
61506 };
61507+typedef struct net_device_ops __no_const net_device_ops_no_const;
61508
61509 /*
61510 * The DEVICE structure.
61511@@ -1063,7 +1064,7 @@ struct net_device {
61512 int iflink;
61513
61514 struct net_device_stats stats;
61515- atomic_long_t rx_dropped; /* dropped packets by core network
61516+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61517 * Do not use this in drivers.
61518 */
61519
61520diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61521new file mode 100644
61522index 0000000..33f4af8
61523--- /dev/null
61524+++ b/include/linux/netfilter/xt_gradm.h
61525@@ -0,0 +1,9 @@
61526+#ifndef _LINUX_NETFILTER_XT_GRADM_H
61527+#define _LINUX_NETFILTER_XT_GRADM_H 1
61528+
61529+struct xt_gradm_mtinfo {
61530+ __u16 flags;
61531+ __u16 invflags;
61532+};
61533+
61534+#endif
61535diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
61536index c65a18a..0c05f3a 100644
61537--- a/include/linux/of_pdt.h
61538+++ b/include/linux/of_pdt.h
61539@@ -32,7 +32,7 @@ struct of_pdt_ops {
61540
61541 /* return 0 on success; fill in 'len' with number of bytes in path */
61542 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
61543-};
61544+} __no_const;
61545
61546 extern void *prom_early_alloc(unsigned long size);
61547
61548diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
61549index a4c5624..79d6d88 100644
61550--- a/include/linux/oprofile.h
61551+++ b/include/linux/oprofile.h
61552@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
61553 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
61554 char const * name, ulong * val);
61555
61556-/** Create a file for read-only access to an atomic_t. */
61557+/** Create a file for read-only access to an atomic_unchecked_t. */
61558 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
61559- char const * name, atomic_t * val);
61560+ char const * name, atomic_unchecked_t * val);
61561
61562 /** create a directory */
61563 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
61564diff --git a/include/linux/padata.h b/include/linux/padata.h
61565index 4633b2f..988bc08 100644
61566--- a/include/linux/padata.h
61567+++ b/include/linux/padata.h
61568@@ -129,7 +129,7 @@ struct parallel_data {
61569 struct padata_instance *pinst;
61570 struct padata_parallel_queue __percpu *pqueue;
61571 struct padata_serial_queue __percpu *squeue;
61572- atomic_t seq_nr;
61573+ atomic_unchecked_t seq_nr;
61574 atomic_t reorder_objects;
61575 atomic_t refcnt;
61576 unsigned int max_seq_nr;
61577diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
61578index abb2776..d8b8e15 100644
61579--- a/include/linux/perf_event.h
61580+++ b/include/linux/perf_event.h
61581@@ -750,8 +750,8 @@ struct perf_event {
61582
61583 enum perf_event_active_state state;
61584 unsigned int attach_state;
61585- local64_t count;
61586- atomic64_t child_count;
61587+ local64_t count; /* PaX: fix it one day */
61588+ atomic64_unchecked_t child_count;
61589
61590 /*
61591 * These are the total time in nanoseconds that the event
61592@@ -802,8 +802,8 @@ struct perf_event {
61593 * These accumulate total time (in nanoseconds) that children
61594 * events have been enabled and running, respectively.
61595 */
61596- atomic64_t child_total_time_enabled;
61597- atomic64_t child_total_time_running;
61598+ atomic64_unchecked_t child_total_time_enabled;
61599+ atomic64_unchecked_t child_total_time_running;
61600
61601 /*
61602 * Protect attach/detach and child_list:
61603diff --git a/include/linux/personality.h b/include/linux/personality.h
61604index 8fc7dd1a..c19d89e 100644
61605--- a/include/linux/personality.h
61606+++ b/include/linux/personality.h
61607@@ -44,6 +44,7 @@ enum {
61608 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
61609 ADDR_NO_RANDOMIZE | \
61610 ADDR_COMPAT_LAYOUT | \
61611+ ADDR_LIMIT_3GB | \
61612 MMAP_PAGE_ZERO)
61613
61614 /*
61615diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
61616index 77257c9..51d473a 100644
61617--- a/include/linux/pipe_fs_i.h
61618+++ b/include/linux/pipe_fs_i.h
61619@@ -46,9 +46,9 @@ struct pipe_buffer {
61620 struct pipe_inode_info {
61621 wait_queue_head_t wait;
61622 unsigned int nrbufs, curbuf, buffers;
61623- unsigned int readers;
61624- unsigned int writers;
61625- unsigned int waiting_writers;
61626+ atomic_t readers;
61627+ atomic_t writers;
61628+ atomic_t waiting_writers;
61629 unsigned int r_counter;
61630 unsigned int w_counter;
61631 struct page *tmp_page;
61632diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
61633index 609daae..5392427 100644
61634--- a/include/linux/pm_runtime.h
61635+++ b/include/linux/pm_runtime.h
61636@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
61637
61638 static inline void pm_runtime_mark_last_busy(struct device *dev)
61639 {
61640- ACCESS_ONCE(dev->power.last_busy) = jiffies;
61641+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
61642 }
61643
61644 #else /* !CONFIG_PM_RUNTIME */
61645diff --git a/include/linux/poison.h b/include/linux/poison.h
61646index 2110a81..13a11bb 100644
61647--- a/include/linux/poison.h
61648+++ b/include/linux/poison.h
61649@@ -19,8 +19,8 @@
61650 * under normal circumstances, used to verify that nobody uses
61651 * non-initialized list entries.
61652 */
61653-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
61654-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
61655+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
61656+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
61657
61658 /********** include/linux/timer.h **********/
61659 /*
61660diff --git a/include/linux/preempt.h b/include/linux/preempt.h
61661index 58969b2..ead129b 100644
61662--- a/include/linux/preempt.h
61663+++ b/include/linux/preempt.h
61664@@ -123,7 +123,7 @@ struct preempt_ops {
61665 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
61666 void (*sched_out)(struct preempt_notifier *notifier,
61667 struct task_struct *next);
61668-};
61669+} __no_const;
61670
61671 /**
61672 * preempt_notifier - key for installing preemption notifiers
61673diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
61674index 85c5073..51fac8b 100644
61675--- a/include/linux/proc_fs.h
61676+++ b/include/linux/proc_fs.h
61677@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
61678 return proc_create_data(name, mode, parent, proc_fops, NULL);
61679 }
61680
61681+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
61682+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
61683+{
61684+#ifdef CONFIG_GRKERNSEC_PROC_USER
61685+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
61686+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61687+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
61688+#else
61689+ return proc_create_data(name, mode, parent, proc_fops, NULL);
61690+#endif
61691+}
61692+
61693 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
61694 umode_t mode, struct proc_dir_entry *base,
61695 read_proc_t *read_proc, void * data)
61696@@ -258,7 +270,7 @@ union proc_op {
61697 int (*proc_show)(struct seq_file *m,
61698 struct pid_namespace *ns, struct pid *pid,
61699 struct task_struct *task);
61700-};
61701+} __no_const;
61702
61703 struct ctl_table_header;
61704 struct ctl_table;
61705diff --git a/include/linux/random.h b/include/linux/random.h
61706index 8f74538..02a1012 100644
61707--- a/include/linux/random.h
61708+++ b/include/linux/random.h
61709@@ -69,12 +69,17 @@ void srandom32(u32 seed);
61710
61711 u32 prandom32(struct rnd_state *);
61712
61713+static inline unsigned long pax_get_random_long(void)
61714+{
61715+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
61716+}
61717+
61718 /*
61719 * Handle minimum values for seeds
61720 */
61721 static inline u32 __seed(u32 x, u32 m)
61722 {
61723- return (x < m) ? x + m : x;
61724+ return (x <= m) ? x + m + 1 : x;
61725 }
61726
61727 /**
61728diff --git a/include/linux/reboot.h b/include/linux/reboot.h
61729index e0879a7..a12f962 100644
61730--- a/include/linux/reboot.h
61731+++ b/include/linux/reboot.h
61732@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
61733 * Architecture-specific implementations of sys_reboot commands.
61734 */
61735
61736-extern void machine_restart(char *cmd);
61737-extern void machine_halt(void);
61738-extern void machine_power_off(void);
61739+extern void machine_restart(char *cmd) __noreturn;
61740+extern void machine_halt(void) __noreturn;
61741+extern void machine_power_off(void) __noreturn;
61742
61743 extern void machine_shutdown(void);
61744 struct pt_regs;
61745@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
61746 */
61747
61748 extern void kernel_restart_prepare(char *cmd);
61749-extern void kernel_restart(char *cmd);
61750-extern void kernel_halt(void);
61751-extern void kernel_power_off(void);
61752+extern void kernel_restart(char *cmd) __noreturn;
61753+extern void kernel_halt(void) __noreturn;
61754+extern void kernel_power_off(void) __noreturn;
61755
61756 extern int C_A_D; /* for sysctl */
61757 void ctrl_alt_del(void);
61758@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
61759 * Emergency restart, callable from an interrupt handler.
61760 */
61761
61762-extern void emergency_restart(void);
61763+extern void emergency_restart(void) __noreturn;
61764 #include <asm/emergency-restart.h>
61765
61766 #endif
61767diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
61768index 2213ddc..650212a 100644
61769--- a/include/linux/reiserfs_fs.h
61770+++ b/include/linux/reiserfs_fs.h
61771@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61772 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61773
61774 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61775-#define get_generation(s) atomic_read (&fs_generation(s))
61776+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61777 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61778 #define __fs_changed(gen,s) (gen != get_generation (s))
61779 #define fs_changed(gen,s) \
61780diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
61781index 8c9e85c..1698e9a 100644
61782--- a/include/linux/reiserfs_fs_sb.h
61783+++ b/include/linux/reiserfs_fs_sb.h
61784@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
61785 /* Comment? -Hans */
61786 wait_queue_head_t s_wait;
61787 /* To be obsoleted soon by per buffer seals.. -Hans */
61788- atomic_t s_generation_counter; // increased by one every time the
61789+ atomic_unchecked_t s_generation_counter; // increased by one every time the
61790 // tree gets re-balanced
61791 unsigned long s_properties; /* File system properties. Currently holds
61792 on-disk FS format */
61793diff --git a/include/linux/relay.h b/include/linux/relay.h
61794index a822fd7..62b70f6 100644
61795--- a/include/linux/relay.h
61796+++ b/include/linux/relay.h
61797@@ -159,7 +159,7 @@ struct rchan_callbacks
61798 * The callback should return 0 if successful, negative if not.
61799 */
61800 int (*remove_buf_file)(struct dentry *dentry);
61801-};
61802+} __no_const;
61803
61804 /*
61805 * CONFIG_RELAY kernel API, kernel/relay.c
61806diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
61807index c6c6084..5bf1212 100644
61808--- a/include/linux/rfkill.h
61809+++ b/include/linux/rfkill.h
61810@@ -147,6 +147,7 @@ struct rfkill_ops {
61811 void (*query)(struct rfkill *rfkill, void *data);
61812 int (*set_block)(void *data, bool blocked);
61813 };
61814+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
61815
61816 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
61817 /**
61818diff --git a/include/linux/rio.h b/include/linux/rio.h
61819index 4d50611..c6858a2 100644
61820--- a/include/linux/rio.h
61821+++ b/include/linux/rio.h
61822@@ -315,7 +315,7 @@ struct rio_ops {
61823 int mbox, void *buffer, size_t len);
61824 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
61825 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
61826-};
61827+} __no_const;
61828
61829 #define RIO_RESOURCE_MEM 0x00000100
61830 #define RIO_RESOURCE_DOORBELL 0x00000200
61831diff --git a/include/linux/rmap.h b/include/linux/rmap.h
61832index 1cdd62a..e399f0d 100644
61833--- a/include/linux/rmap.h
61834+++ b/include/linux/rmap.h
61835@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
61836 void anon_vma_init(void); /* create anon_vma_cachep */
61837 int anon_vma_prepare(struct vm_area_struct *);
61838 void unlink_anon_vmas(struct vm_area_struct *);
61839-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
61840+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
61841 void anon_vma_moveto_tail(struct vm_area_struct *);
61842-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
61843+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
61844 void __anon_vma_link(struct vm_area_struct *);
61845
61846 static inline void anon_vma_merge(struct vm_area_struct *vma,
61847diff --git a/include/linux/sched.h b/include/linux/sched.h
61848index 0657368..765f70f 100644
61849--- a/include/linux/sched.h
61850+++ b/include/linux/sched.h
61851@@ -101,6 +101,7 @@ struct bio_list;
61852 struct fs_struct;
61853 struct perf_event_context;
61854 struct blk_plug;
61855+struct linux_binprm;
61856
61857 /*
61858 * List of flags we want to share for kernel threads,
61859@@ -382,10 +383,13 @@ struct user_namespace;
61860 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
61861
61862 extern int sysctl_max_map_count;
61863+extern unsigned long sysctl_heap_stack_gap;
61864
61865 #include <linux/aio.h>
61866
61867 #ifdef CONFIG_MMU
61868+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
61869+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
61870 extern void arch_pick_mmap_layout(struct mm_struct *mm);
61871 extern unsigned long
61872 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
61873@@ -631,6 +635,17 @@ struct signal_struct {
61874 #ifdef CONFIG_TASKSTATS
61875 struct taskstats *stats;
61876 #endif
61877+
61878+#ifdef CONFIG_GRKERNSEC
61879+ u32 curr_ip;
61880+ u32 saved_ip;
61881+ u32 gr_saddr;
61882+ u32 gr_daddr;
61883+ u16 gr_sport;
61884+ u16 gr_dport;
61885+ u8 used_accept:1;
61886+#endif
61887+
61888 #ifdef CONFIG_AUDIT
61889 unsigned audit_tty;
61890 struct tty_audit_buf *tty_audit_buf;
61891@@ -714,6 +729,11 @@ struct user_struct {
61892 struct key *session_keyring; /* UID's default session keyring */
61893 #endif
61894
61895+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61896+ unsigned int banned;
61897+ unsigned long ban_expires;
61898+#endif
61899+
61900 /* Hash table maintenance information */
61901 struct hlist_node uidhash_node;
61902 uid_t uid;
61903@@ -1354,8 +1374,8 @@ struct task_struct {
61904 struct list_head thread_group;
61905
61906 struct completion *vfork_done; /* for vfork() */
61907- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
61908- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61909+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
61910+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61911
61912 cputime_t utime, stime, utimescaled, stimescaled;
61913 cputime_t gtime;
61914@@ -1371,13 +1391,6 @@ struct task_struct {
61915 struct task_cputime cputime_expires;
61916 struct list_head cpu_timers[3];
61917
61918-/* process credentials */
61919- const struct cred __rcu *real_cred; /* objective and real subjective task
61920- * credentials (COW) */
61921- const struct cred __rcu *cred; /* effective (overridable) subjective task
61922- * credentials (COW) */
61923- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
61924-
61925 char comm[TASK_COMM_LEN]; /* executable name excluding path
61926 - access with [gs]et_task_comm (which lock
61927 it with task_lock())
61928@@ -1394,8 +1407,16 @@ struct task_struct {
61929 #endif
61930 /* CPU-specific state of this task */
61931 struct thread_struct thread;
61932+/* thread_info moved to task_struct */
61933+#ifdef CONFIG_X86
61934+ struct thread_info tinfo;
61935+#endif
61936 /* filesystem information */
61937 struct fs_struct *fs;
61938+
61939+ const struct cred __rcu *cred; /* effective (overridable) subjective task
61940+ * credentials (COW) */
61941+
61942 /* open file information */
61943 struct files_struct *files;
61944 /* namespaces */
61945@@ -1442,6 +1463,11 @@ struct task_struct {
61946 struct rt_mutex_waiter *pi_blocked_on;
61947 #endif
61948
61949+/* process credentials */
61950+ const struct cred __rcu *real_cred; /* objective and real subjective task
61951+ * credentials (COW) */
61952+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
61953+
61954 #ifdef CONFIG_DEBUG_MUTEXES
61955 /* mutex deadlock detection */
61956 struct mutex_waiter *blocked_on;
61957@@ -1558,6 +1584,27 @@ struct task_struct {
61958 unsigned long default_timer_slack_ns;
61959
61960 struct list_head *scm_work_list;
61961+
61962+#ifdef CONFIG_GRKERNSEC
61963+ /* grsecurity */
61964+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61965+ u64 exec_id;
61966+#endif
61967+#ifdef CONFIG_GRKERNSEC_SETXID
61968+ const struct cred *delayed_cred;
61969+#endif
61970+ struct dentry *gr_chroot_dentry;
61971+ struct acl_subject_label *acl;
61972+ struct acl_role_label *role;
61973+ struct file *exec_file;
61974+ u16 acl_role_id;
61975+ /* is this the task that authenticated to the special role */
61976+ u8 acl_sp_role;
61977+ u8 is_writable;
61978+ u8 brute;
61979+ u8 gr_is_chrooted;
61980+#endif
61981+
61982 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
61983 /* Index of current stored address in ret_stack */
61984 int curr_ret_stack;
61985@@ -1592,6 +1639,51 @@ struct task_struct {
61986 #endif
61987 };
61988
61989+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
61990+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
61991+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
61992+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
61993+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
61994+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
61995+
61996+#ifdef CONFIG_PAX_SOFTMODE
61997+extern int pax_softmode;
61998+#endif
61999+
62000+extern int pax_check_flags(unsigned long *);
62001+
62002+/* if tsk != current then task_lock must be held on it */
62003+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62004+static inline unsigned long pax_get_flags(struct task_struct *tsk)
62005+{
62006+ if (likely(tsk->mm))
62007+ return tsk->mm->pax_flags;
62008+ else
62009+ return 0UL;
62010+}
62011+
62012+/* if tsk != current then task_lock must be held on it */
62013+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62014+{
62015+ if (likely(tsk->mm)) {
62016+ tsk->mm->pax_flags = flags;
62017+ return 0;
62018+ }
62019+ return -EINVAL;
62020+}
62021+#endif
62022+
62023+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62024+extern void pax_set_initial_flags(struct linux_binprm *bprm);
62025+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62026+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62027+#endif
62028+
62029+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62030+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62031+extern void pax_report_refcount_overflow(struct pt_regs *regs);
62032+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62033+
62034 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62035 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62036
62037@@ -2104,7 +2196,9 @@ void yield(void);
62038 extern struct exec_domain default_exec_domain;
62039
62040 union thread_union {
62041+#ifndef CONFIG_X86
62042 struct thread_info thread_info;
62043+#endif
62044 unsigned long stack[THREAD_SIZE/sizeof(long)];
62045 };
62046
62047@@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
62048 */
62049
62050 extern struct task_struct *find_task_by_vpid(pid_t nr);
62051+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62052 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62053 struct pid_namespace *ns);
62054
62055@@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62056 extern void exit_itimers(struct signal_struct *);
62057 extern void flush_itimer_signals(void);
62058
62059-extern void do_group_exit(int);
62060+extern __noreturn void do_group_exit(int);
62061
62062 extern void daemonize(const char *, ...);
62063 extern int allow_signal(int);
62064@@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62065
62066 #endif
62067
62068-static inline int object_is_on_stack(void *obj)
62069+static inline int object_starts_on_stack(void *obj)
62070 {
62071- void *stack = task_stack_page(current);
62072+ const void *stack = task_stack_page(current);
62073
62074 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62075 }
62076
62077+#ifdef CONFIG_PAX_USERCOPY
62078+extern int object_is_on_stack(const void *obj, unsigned long len);
62079+#endif
62080+
62081 extern void thread_info_cache_init(void);
62082
62083 #ifdef CONFIG_DEBUG_STACK_USAGE
62084diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62085index 899fbb4..1cb4138 100644
62086--- a/include/linux/screen_info.h
62087+++ b/include/linux/screen_info.h
62088@@ -43,7 +43,8 @@ struct screen_info {
62089 __u16 pages; /* 0x32 */
62090 __u16 vesa_attributes; /* 0x34 */
62091 __u32 capabilities; /* 0x36 */
62092- __u8 _reserved[6]; /* 0x3a */
62093+ __u16 vesapm_size; /* 0x3a */
62094+ __u8 _reserved[4]; /* 0x3c */
62095 } __attribute__((packed));
62096
62097 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62098diff --git a/include/linux/security.h b/include/linux/security.h
62099index 83c18e8..2d98860 100644
62100--- a/include/linux/security.h
62101+++ b/include/linux/security.h
62102@@ -37,6 +37,7 @@
62103 #include <linux/xfrm.h>
62104 #include <linux/slab.h>
62105 #include <linux/xattr.h>
62106+#include <linux/grsecurity.h>
62107 #include <net/flow.h>
62108
62109 /* Maximum number of letters for an LSM name string */
62110diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62111index 44f1514..2bbf6c1 100644
62112--- a/include/linux/seq_file.h
62113+++ b/include/linux/seq_file.h
62114@@ -24,6 +24,9 @@ struct seq_file {
62115 struct mutex lock;
62116 const struct seq_operations *op;
62117 int poll_event;
62118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62119+ u64 exec_id;
62120+#endif
62121 void *private;
62122 };
62123
62124@@ -33,6 +36,7 @@ struct seq_operations {
62125 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62126 int (*show) (struct seq_file *m, void *v);
62127 };
62128+typedef struct seq_operations __no_const seq_operations_no_const;
62129
62130 #define SEQ_SKIP 1
62131
62132diff --git a/include/linux/shm.h b/include/linux/shm.h
62133index 92808b8..c28cac4 100644
62134--- a/include/linux/shm.h
62135+++ b/include/linux/shm.h
62136@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62137
62138 /* The task created the shm object. NULL if the task is dead. */
62139 struct task_struct *shm_creator;
62140+#ifdef CONFIG_GRKERNSEC
62141+ time_t shm_createtime;
62142+ pid_t shm_lapid;
62143+#endif
62144 };
62145
62146 /* shm_mode upper byte flags */
62147diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62148index ae86ade..2b51468 100644
62149--- a/include/linux/skbuff.h
62150+++ b/include/linux/skbuff.h
62151@@ -654,7 +654,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62152 */
62153 static inline int skb_queue_empty(const struct sk_buff_head *list)
62154 {
62155- return list->next == (struct sk_buff *)list;
62156+ return list->next == (const struct sk_buff *)list;
62157 }
62158
62159 /**
62160@@ -667,7 +667,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62161 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62162 const struct sk_buff *skb)
62163 {
62164- return skb->next == (struct sk_buff *)list;
62165+ return skb->next == (const struct sk_buff *)list;
62166 }
62167
62168 /**
62169@@ -680,7 +680,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62170 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62171 const struct sk_buff *skb)
62172 {
62173- return skb->prev == (struct sk_buff *)list;
62174+ return skb->prev == (const struct sk_buff *)list;
62175 }
62176
62177 /**
62178@@ -1545,7 +1545,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62179 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62180 */
62181 #ifndef NET_SKB_PAD
62182-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62183+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62184 #endif
62185
62186 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62187diff --git a/include/linux/slab.h b/include/linux/slab.h
62188index 573c809..e84c132 100644
62189--- a/include/linux/slab.h
62190+++ b/include/linux/slab.h
62191@@ -11,12 +11,20 @@
62192
62193 #include <linux/gfp.h>
62194 #include <linux/types.h>
62195+#include <linux/err.h>
62196
62197 /*
62198 * Flags to pass to kmem_cache_create().
62199 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62200 */
62201 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62202+
62203+#ifdef CONFIG_PAX_USERCOPY
62204+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62205+#else
62206+#define SLAB_USERCOPY 0x00000000UL
62207+#endif
62208+
62209 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62210 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62211 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62212@@ -87,10 +95,13 @@
62213 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62214 * Both make kfree a no-op.
62215 */
62216-#define ZERO_SIZE_PTR ((void *)16)
62217+#define ZERO_SIZE_PTR \
62218+({ \
62219+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62220+ (void *)(-MAX_ERRNO-1L); \
62221+})
62222
62223-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62224- (unsigned long)ZERO_SIZE_PTR)
62225+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62226
62227 /*
62228 * struct kmem_cache related prototypes
62229@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62230 void kfree(const void *);
62231 void kzfree(const void *);
62232 size_t ksize(const void *);
62233+void check_object_size(const void *ptr, unsigned long n, bool to);
62234
62235 /*
62236 * Allocator specific definitions. These are mainly used to establish optimized
62237@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62238
62239 void __init kmem_cache_init_late(void);
62240
62241+#define kmalloc(x, y) \
62242+({ \
62243+ void *___retval; \
62244+ intoverflow_t ___x = (intoverflow_t)x; \
62245+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62246+ ___retval = NULL; \
62247+ else \
62248+ ___retval = kmalloc((size_t)___x, (y)); \
62249+ ___retval; \
62250+})
62251+
62252+#define kmalloc_node(x, y, z) \
62253+({ \
62254+ void *___retval; \
62255+ intoverflow_t ___x = (intoverflow_t)x; \
62256+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62257+ ___retval = NULL; \
62258+ else \
62259+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
62260+ ___retval; \
62261+})
62262+
62263+#define kzalloc(x, y) \
62264+({ \
62265+ void *___retval; \
62266+ intoverflow_t ___x = (intoverflow_t)x; \
62267+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
62268+ ___retval = NULL; \
62269+ else \
62270+ ___retval = kzalloc((size_t)___x, (y)); \
62271+ ___retval; \
62272+})
62273+
62274+#define __krealloc(x, y, z) \
62275+({ \
62276+ void *___retval; \
62277+ intoverflow_t ___y = (intoverflow_t)y; \
62278+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
62279+ ___retval = NULL; \
62280+ else \
62281+ ___retval = __krealloc((x), (size_t)___y, (z)); \
62282+ ___retval; \
62283+})
62284+
62285+#define krealloc(x, y, z) \
62286+({ \
62287+ void *___retval; \
62288+ intoverflow_t ___y = (intoverflow_t)y; \
62289+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
62290+ ___retval = NULL; \
62291+ else \
62292+ ___retval = krealloc((x), (size_t)___y, (z)); \
62293+ ___retval; \
62294+})
62295+
62296 #endif /* _LINUX_SLAB_H */
62297diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62298index fbd1117..1e5e46c 100644
62299--- a/include/linux/slab_def.h
62300+++ b/include/linux/slab_def.h
62301@@ -66,10 +66,10 @@ struct kmem_cache {
62302 unsigned long node_allocs;
62303 unsigned long node_frees;
62304 unsigned long node_overflow;
62305- atomic_t allochit;
62306- atomic_t allocmiss;
62307- atomic_t freehit;
62308- atomic_t freemiss;
62309+ atomic_unchecked_t allochit;
62310+ atomic_unchecked_t allocmiss;
62311+ atomic_unchecked_t freehit;
62312+ atomic_unchecked_t freemiss;
62313
62314 /*
62315 * If debugging is enabled, then the allocator can add additional
62316diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62317index a32bcfd..53b71f4 100644
62318--- a/include/linux/slub_def.h
62319+++ b/include/linux/slub_def.h
62320@@ -89,7 +89,7 @@ struct kmem_cache {
62321 struct kmem_cache_order_objects max;
62322 struct kmem_cache_order_objects min;
62323 gfp_t allocflags; /* gfp flags to use on each alloc */
62324- int refcount; /* Refcount for slab cache destroy */
62325+ atomic_t refcount; /* Refcount for slab cache destroy */
62326 void (*ctor)(void *);
62327 int inuse; /* Offset to metadata */
62328 int align; /* Alignment */
62329@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62330 }
62331
62332 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62333-void *__kmalloc(size_t size, gfp_t flags);
62334+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
62335
62336 static __always_inline void *
62337 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62338diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62339index de8832d..0147b46 100644
62340--- a/include/linux/sonet.h
62341+++ b/include/linux/sonet.h
62342@@ -61,7 +61,7 @@ struct sonet_stats {
62343 #include <linux/atomic.h>
62344
62345 struct k_sonet_stats {
62346-#define __HANDLE_ITEM(i) atomic_t i
62347+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62348 __SONET_ITEMS
62349 #undef __HANDLE_ITEM
62350 };
62351diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62352index 2c5993a..b0e79f0 100644
62353--- a/include/linux/sunrpc/clnt.h
62354+++ b/include/linux/sunrpc/clnt.h
62355@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62356 {
62357 switch (sap->sa_family) {
62358 case AF_INET:
62359- return ntohs(((struct sockaddr_in *)sap)->sin_port);
62360+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62361 case AF_INET6:
62362- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62363+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62364 }
62365 return 0;
62366 }
62367@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62368 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62369 const struct sockaddr *src)
62370 {
62371- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62372+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62373 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62374
62375 dsin->sin_family = ssin->sin_family;
62376@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62377 if (sa->sa_family != AF_INET6)
62378 return 0;
62379
62380- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62381+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62382 }
62383
62384 #endif /* __KERNEL__ */
62385diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62386index e775689..9e206d9 100644
62387--- a/include/linux/sunrpc/sched.h
62388+++ b/include/linux/sunrpc/sched.h
62389@@ -105,6 +105,7 @@ struct rpc_call_ops {
62390 void (*rpc_call_done)(struct rpc_task *, void *);
62391 void (*rpc_release)(void *);
62392 };
62393+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62394
62395 struct rpc_task_setup {
62396 struct rpc_task *task;
62397diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62398index c14fe86..393245e 100644
62399--- a/include/linux/sunrpc/svc_rdma.h
62400+++ b/include/linux/sunrpc/svc_rdma.h
62401@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62402 extern unsigned int svcrdma_max_requests;
62403 extern unsigned int svcrdma_max_req_size;
62404
62405-extern atomic_t rdma_stat_recv;
62406-extern atomic_t rdma_stat_read;
62407-extern atomic_t rdma_stat_write;
62408-extern atomic_t rdma_stat_sq_starve;
62409-extern atomic_t rdma_stat_rq_starve;
62410-extern atomic_t rdma_stat_rq_poll;
62411-extern atomic_t rdma_stat_rq_prod;
62412-extern atomic_t rdma_stat_sq_poll;
62413-extern atomic_t rdma_stat_sq_prod;
62414+extern atomic_unchecked_t rdma_stat_recv;
62415+extern atomic_unchecked_t rdma_stat_read;
62416+extern atomic_unchecked_t rdma_stat_write;
62417+extern atomic_unchecked_t rdma_stat_sq_starve;
62418+extern atomic_unchecked_t rdma_stat_rq_starve;
62419+extern atomic_unchecked_t rdma_stat_rq_poll;
62420+extern atomic_unchecked_t rdma_stat_rq_prod;
62421+extern atomic_unchecked_t rdma_stat_sq_poll;
62422+extern atomic_unchecked_t rdma_stat_sq_prod;
62423
62424 #define RPCRDMA_VERSION 1
62425
62426diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62427index bb9127d..34ab358 100644
62428--- a/include/linux/sysctl.h
62429+++ b/include/linux/sysctl.h
62430@@ -155,7 +155,11 @@ enum
62431 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62432 };
62433
62434-
62435+#ifdef CONFIG_PAX_SOFTMODE
62436+enum {
62437+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62438+};
62439+#endif
62440
62441 /* CTL_VM names: */
62442 enum
62443@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62444
62445 extern int proc_dostring(struct ctl_table *, int,
62446 void __user *, size_t *, loff_t *);
62447+extern int proc_dostring_modpriv(struct ctl_table *, int,
62448+ void __user *, size_t *, loff_t *);
62449 extern int proc_dointvec(struct ctl_table *, int,
62450 void __user *, size_t *, loff_t *);
62451 extern int proc_dointvec_minmax(struct ctl_table *, int,
62452diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
62453index a71a292..51bd91d 100644
62454--- a/include/linux/tracehook.h
62455+++ b/include/linux/tracehook.h
62456@@ -54,12 +54,12 @@ struct linux_binprm;
62457 /*
62458 * ptrace report for syscall entry and exit looks identical.
62459 */
62460-static inline void ptrace_report_syscall(struct pt_regs *regs)
62461+static inline int ptrace_report_syscall(struct pt_regs *regs)
62462 {
62463 int ptrace = current->ptrace;
62464
62465 if (!(ptrace & PT_PTRACED))
62466- return;
62467+ return 0;
62468
62469 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
62470
62471@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62472 send_sig(current->exit_code, current, 1);
62473 current->exit_code = 0;
62474 }
62475+
62476+ return fatal_signal_pending(current);
62477 }
62478
62479 /**
62480@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62481 static inline __must_check int tracehook_report_syscall_entry(
62482 struct pt_regs *regs)
62483 {
62484- ptrace_report_syscall(regs);
62485- return 0;
62486+ return ptrace_report_syscall(regs);
62487 }
62488
62489 /**
62490diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62491index ff7dc08..893e1bd 100644
62492--- a/include/linux/tty_ldisc.h
62493+++ b/include/linux/tty_ldisc.h
62494@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62495
62496 struct module *owner;
62497
62498- int refcount;
62499+ atomic_t refcount;
62500 };
62501
62502 struct tty_ldisc {
62503diff --git a/include/linux/types.h b/include/linux/types.h
62504index e5fa503..df6e8a4 100644
62505--- a/include/linux/types.h
62506+++ b/include/linux/types.h
62507@@ -214,10 +214,26 @@ typedef struct {
62508 int counter;
62509 } atomic_t;
62510
62511+#ifdef CONFIG_PAX_REFCOUNT
62512+typedef struct {
62513+ int counter;
62514+} atomic_unchecked_t;
62515+#else
62516+typedef atomic_t atomic_unchecked_t;
62517+#endif
62518+
62519 #ifdef CONFIG_64BIT
62520 typedef struct {
62521 long counter;
62522 } atomic64_t;
62523+
62524+#ifdef CONFIG_PAX_REFCOUNT
62525+typedef struct {
62526+ long counter;
62527+} atomic64_unchecked_t;
62528+#else
62529+typedef atomic64_t atomic64_unchecked_t;
62530+#endif
62531 #endif
62532
62533 struct list_head {
62534diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62535index 5ca0951..ab496a5 100644
62536--- a/include/linux/uaccess.h
62537+++ b/include/linux/uaccess.h
62538@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62539 long ret; \
62540 mm_segment_t old_fs = get_fs(); \
62541 \
62542- set_fs(KERNEL_DS); \
62543 pagefault_disable(); \
62544- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62545- pagefault_enable(); \
62546+ set_fs(KERNEL_DS); \
62547+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62548 set_fs(old_fs); \
62549+ pagefault_enable(); \
62550 ret; \
62551 })
62552
62553diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62554index 99c1b4d..bb94261 100644
62555--- a/include/linux/unaligned/access_ok.h
62556+++ b/include/linux/unaligned/access_ok.h
62557@@ -6,32 +6,32 @@
62558
62559 static inline u16 get_unaligned_le16(const void *p)
62560 {
62561- return le16_to_cpup((__le16 *)p);
62562+ return le16_to_cpup((const __le16 *)p);
62563 }
62564
62565 static inline u32 get_unaligned_le32(const void *p)
62566 {
62567- return le32_to_cpup((__le32 *)p);
62568+ return le32_to_cpup((const __le32 *)p);
62569 }
62570
62571 static inline u64 get_unaligned_le64(const void *p)
62572 {
62573- return le64_to_cpup((__le64 *)p);
62574+ return le64_to_cpup((const __le64 *)p);
62575 }
62576
62577 static inline u16 get_unaligned_be16(const void *p)
62578 {
62579- return be16_to_cpup((__be16 *)p);
62580+ return be16_to_cpup((const __be16 *)p);
62581 }
62582
62583 static inline u32 get_unaligned_be32(const void *p)
62584 {
62585- return be32_to_cpup((__be32 *)p);
62586+ return be32_to_cpup((const __be32 *)p);
62587 }
62588
62589 static inline u64 get_unaligned_be64(const void *p)
62590 {
62591- return be64_to_cpup((__be64 *)p);
62592+ return be64_to_cpup((const __be64 *)p);
62593 }
62594
62595 static inline void put_unaligned_le16(u16 val, void *p)
62596diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
62597index 0d3f988..000f101 100644
62598--- a/include/linux/usb/renesas_usbhs.h
62599+++ b/include/linux/usb/renesas_usbhs.h
62600@@ -39,7 +39,7 @@ enum {
62601 */
62602 struct renesas_usbhs_driver_callback {
62603 int (*notify_hotplug)(struct platform_device *pdev);
62604-};
62605+} __no_const;
62606
62607 /*
62608 * callback functions for platform
62609@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
62610 * VBUS control is needed for Host
62611 */
62612 int (*set_vbus)(struct platform_device *pdev, int enable);
62613-};
62614+} __no_const;
62615
62616 /*
62617 * parameters for renesas usbhs
62618diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
62619index 6f8fbcf..8259001 100644
62620--- a/include/linux/vermagic.h
62621+++ b/include/linux/vermagic.h
62622@@ -25,9 +25,35 @@
62623 #define MODULE_ARCH_VERMAGIC ""
62624 #endif
62625
62626+#ifdef CONFIG_PAX_REFCOUNT
62627+#define MODULE_PAX_REFCOUNT "REFCOUNT "
62628+#else
62629+#define MODULE_PAX_REFCOUNT ""
62630+#endif
62631+
62632+#ifdef CONSTIFY_PLUGIN
62633+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
62634+#else
62635+#define MODULE_CONSTIFY_PLUGIN ""
62636+#endif
62637+
62638+#ifdef STACKLEAK_PLUGIN
62639+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
62640+#else
62641+#define MODULE_STACKLEAK_PLUGIN ""
62642+#endif
62643+
62644+#ifdef CONFIG_GRKERNSEC
62645+#define MODULE_GRSEC "GRSEC "
62646+#else
62647+#define MODULE_GRSEC ""
62648+#endif
62649+
62650 #define VERMAGIC_STRING \
62651 UTS_RELEASE " " \
62652 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
62653 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
62654- MODULE_ARCH_VERMAGIC
62655+ MODULE_ARCH_VERMAGIC \
62656+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
62657+ MODULE_GRSEC
62658
62659diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
62660index dcdfc2b..f937197 100644
62661--- a/include/linux/vmalloc.h
62662+++ b/include/linux/vmalloc.h
62663@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
62664 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
62665 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
62666 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
62667+
62668+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
62669+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
62670+#endif
62671+
62672 /* bits [20..32] reserved for arch specific ioremap internals */
62673
62674 /*
62675@@ -157,4 +162,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
62676 # endif
62677 #endif
62678
62679+#define vmalloc(x) \
62680+({ \
62681+ void *___retval; \
62682+ intoverflow_t ___x = (intoverflow_t)x; \
62683+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
62684+ ___retval = NULL; \
62685+ else \
62686+ ___retval = vmalloc((unsigned long)___x); \
62687+ ___retval; \
62688+})
62689+
62690+#define vzalloc(x) \
62691+({ \
62692+ void *___retval; \
62693+ intoverflow_t ___x = (intoverflow_t)x; \
62694+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
62695+ ___retval = NULL; \
62696+ else \
62697+ ___retval = vzalloc((unsigned long)___x); \
62698+ ___retval; \
62699+})
62700+
62701+#define __vmalloc(x, y, z) \
62702+({ \
62703+ void *___retval; \
62704+ intoverflow_t ___x = (intoverflow_t)x; \
62705+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
62706+ ___retval = NULL; \
62707+ else \
62708+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
62709+ ___retval; \
62710+})
62711+
62712+#define vmalloc_user(x) \
62713+({ \
62714+ void *___retval; \
62715+ intoverflow_t ___x = (intoverflow_t)x; \
62716+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
62717+ ___retval = NULL; \
62718+ else \
62719+ ___retval = vmalloc_user((unsigned long)___x); \
62720+ ___retval; \
62721+})
62722+
62723+#define vmalloc_exec(x) \
62724+({ \
62725+ void *___retval; \
62726+ intoverflow_t ___x = (intoverflow_t)x; \
62727+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
62728+ ___retval = NULL; \
62729+ else \
62730+ ___retval = vmalloc_exec((unsigned long)___x); \
62731+ ___retval; \
62732+})
62733+
62734+#define vmalloc_node(x, y) \
62735+({ \
62736+ void *___retval; \
62737+ intoverflow_t ___x = (intoverflow_t)x; \
62738+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
62739+ ___retval = NULL; \
62740+ else \
62741+ ___retval = vmalloc_node((unsigned long)___x, (y));\
62742+ ___retval; \
62743+})
62744+
62745+#define vzalloc_node(x, y) \
62746+({ \
62747+ void *___retval; \
62748+ intoverflow_t ___x = (intoverflow_t)x; \
62749+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
62750+ ___retval = NULL; \
62751+ else \
62752+ ___retval = vzalloc_node((unsigned long)___x, (y));\
62753+ ___retval; \
62754+})
62755+
62756+#define vmalloc_32(x) \
62757+({ \
62758+ void *___retval; \
62759+ intoverflow_t ___x = (intoverflow_t)x; \
62760+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
62761+ ___retval = NULL; \
62762+ else \
62763+ ___retval = vmalloc_32((unsigned long)___x); \
62764+ ___retval; \
62765+})
62766+
62767+#define vmalloc_32_user(x) \
62768+({ \
62769+void *___retval; \
62770+ intoverflow_t ___x = (intoverflow_t)x; \
62771+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
62772+ ___retval = NULL; \
62773+ else \
62774+ ___retval = vmalloc_32_user((unsigned long)___x);\
62775+ ___retval; \
62776+})
62777+
62778 #endif /* _LINUX_VMALLOC_H */
62779diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
62780index 65efb92..137adbb 100644
62781--- a/include/linux/vmstat.h
62782+++ b/include/linux/vmstat.h
62783@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
62784 /*
62785 * Zone based page accounting with per cpu differentials.
62786 */
62787-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62788+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62789
62790 static inline void zone_page_state_add(long x, struct zone *zone,
62791 enum zone_stat_item item)
62792 {
62793- atomic_long_add(x, &zone->vm_stat[item]);
62794- atomic_long_add(x, &vm_stat[item]);
62795+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
62796+ atomic_long_add_unchecked(x, &vm_stat[item]);
62797 }
62798
62799 static inline unsigned long global_page_state(enum zone_stat_item item)
62800 {
62801- long x = atomic_long_read(&vm_stat[item]);
62802+ long x = atomic_long_read_unchecked(&vm_stat[item]);
62803 #ifdef CONFIG_SMP
62804 if (x < 0)
62805 x = 0;
62806@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
62807 static inline unsigned long zone_page_state(struct zone *zone,
62808 enum zone_stat_item item)
62809 {
62810- long x = atomic_long_read(&zone->vm_stat[item]);
62811+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62812 #ifdef CONFIG_SMP
62813 if (x < 0)
62814 x = 0;
62815@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
62816 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
62817 enum zone_stat_item item)
62818 {
62819- long x = atomic_long_read(&zone->vm_stat[item]);
62820+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62821
62822 #ifdef CONFIG_SMP
62823 int cpu;
62824@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
62825
62826 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
62827 {
62828- atomic_long_inc(&zone->vm_stat[item]);
62829- atomic_long_inc(&vm_stat[item]);
62830+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
62831+ atomic_long_inc_unchecked(&vm_stat[item]);
62832 }
62833
62834 static inline void __inc_zone_page_state(struct page *page,
62835@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
62836
62837 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
62838 {
62839- atomic_long_dec(&zone->vm_stat[item]);
62840- atomic_long_dec(&vm_stat[item]);
62841+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
62842+ atomic_long_dec_unchecked(&vm_stat[item]);
62843 }
62844
62845 static inline void __dec_zone_page_state(struct page *page,
62846diff --git a/include/linux/xattr.h b/include/linux/xattr.h
62847index e5d1220..ef6e406 100644
62848--- a/include/linux/xattr.h
62849+++ b/include/linux/xattr.h
62850@@ -57,6 +57,11 @@
62851 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
62852 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
62853
62854+/* User namespace */
62855+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
62856+#define XATTR_PAX_FLAGS_SUFFIX "flags"
62857+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
62858+
62859 #ifdef __KERNEL__
62860
62861 #include <linux/types.h>
62862diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
62863index 4aeff96..b378cdc 100644
62864--- a/include/media/saa7146_vv.h
62865+++ b/include/media/saa7146_vv.h
62866@@ -163,7 +163,7 @@ struct saa7146_ext_vv
62867 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
62868
62869 /* the extension can override this */
62870- struct v4l2_ioctl_ops ops;
62871+ v4l2_ioctl_ops_no_const ops;
62872 /* pointer to the saa7146 core ops */
62873 const struct v4l2_ioctl_ops *core_ops;
62874
62875diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
62876index c7c40f1..4f01585 100644
62877--- a/include/media/v4l2-dev.h
62878+++ b/include/media/v4l2-dev.h
62879@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
62880
62881
62882 struct v4l2_file_operations {
62883- struct module *owner;
62884+ struct module * const owner;
62885 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
62886 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
62887 unsigned int (*poll) (struct file *, struct poll_table_struct *);
62888@@ -68,6 +68,7 @@ struct v4l2_file_operations {
62889 int (*open) (struct file *);
62890 int (*release) (struct file *);
62891 };
62892+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
62893
62894 /*
62895 * Newer version of video_device, handled by videodev2.c
62896diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
62897index 3f5d60f..44210ed 100644
62898--- a/include/media/v4l2-ioctl.h
62899+++ b/include/media/v4l2-ioctl.h
62900@@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
62901 long (*vidioc_default) (struct file *file, void *fh,
62902 bool valid_prio, int cmd, void *arg);
62903 };
62904-
62905+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
62906
62907 /* v4l debugging and diagnostics */
62908
62909diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
62910index 8d55251..dfe5b0a 100644
62911--- a/include/net/caif/caif_hsi.h
62912+++ b/include/net/caif/caif_hsi.h
62913@@ -98,7 +98,7 @@ struct cfhsi_drv {
62914 void (*rx_done_cb) (struct cfhsi_drv *drv);
62915 void (*wake_up_cb) (struct cfhsi_drv *drv);
62916 void (*wake_down_cb) (struct cfhsi_drv *drv);
62917-};
62918+} __no_const;
62919
62920 /* Structure implemented by HSI device. */
62921 struct cfhsi_dev {
62922diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
62923index 9e5425b..8136ffc 100644
62924--- a/include/net/caif/cfctrl.h
62925+++ b/include/net/caif/cfctrl.h
62926@@ -52,7 +52,7 @@ struct cfctrl_rsp {
62927 void (*radioset_rsp)(void);
62928 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
62929 struct cflayer *client_layer);
62930-};
62931+} __no_const;
62932
62933 /* Link Setup Parameters for CAIF-Links. */
62934 struct cfctrl_link_param {
62935@@ -101,8 +101,8 @@ struct cfctrl_request_info {
62936 struct cfctrl {
62937 struct cfsrvl serv;
62938 struct cfctrl_rsp res;
62939- atomic_t req_seq_no;
62940- atomic_t rsp_seq_no;
62941+ atomic_unchecked_t req_seq_no;
62942+ atomic_unchecked_t rsp_seq_no;
62943 struct list_head list;
62944 /* Protects from simultaneous access to first_req list */
62945 spinlock_t info_list_lock;
62946diff --git a/include/net/flow.h b/include/net/flow.h
62947index 6c469db..7743b8e 100644
62948--- a/include/net/flow.h
62949+++ b/include/net/flow.h
62950@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
62951
62952 extern void flow_cache_flush(void);
62953 extern void flow_cache_flush_deferred(void);
62954-extern atomic_t flow_cache_genid;
62955+extern atomic_unchecked_t flow_cache_genid;
62956
62957 #endif
62958diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
62959index b94765e..053f68b 100644
62960--- a/include/net/inetpeer.h
62961+++ b/include/net/inetpeer.h
62962@@ -48,8 +48,8 @@ struct inet_peer {
62963 */
62964 union {
62965 struct {
62966- atomic_t rid; /* Frag reception counter */
62967- atomic_t ip_id_count; /* IP ID for the next packet */
62968+ atomic_unchecked_t rid; /* Frag reception counter */
62969+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
62970 __u32 tcp_ts;
62971 __u32 tcp_ts_stamp;
62972 };
62973@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
62974 more++;
62975 inet_peer_refcheck(p);
62976 do {
62977- old = atomic_read(&p->ip_id_count);
62978+ old = atomic_read_unchecked(&p->ip_id_count);
62979 new = old + more;
62980 if (!new)
62981 new = 1;
62982- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
62983+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
62984 return new;
62985 }
62986
62987diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
62988index 10422ef..662570f 100644
62989--- a/include/net/ip_fib.h
62990+++ b/include/net/ip_fib.h
62991@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
62992
62993 #define FIB_RES_SADDR(net, res) \
62994 ((FIB_RES_NH(res).nh_saddr_genid == \
62995- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
62996+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
62997 FIB_RES_NH(res).nh_saddr : \
62998 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
62999 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63000diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63001index ebe517f..1bd286b 100644
63002--- a/include/net/ip_vs.h
63003+++ b/include/net/ip_vs.h
63004@@ -509,7 +509,7 @@ struct ip_vs_conn {
63005 struct ip_vs_conn *control; /* Master control connection */
63006 atomic_t n_control; /* Number of controlled ones */
63007 struct ip_vs_dest *dest; /* real server */
63008- atomic_t in_pkts; /* incoming packet counter */
63009+ atomic_unchecked_t in_pkts; /* incoming packet counter */
63010
63011 /* packet transmitter for different forwarding methods. If it
63012 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63013@@ -647,7 +647,7 @@ struct ip_vs_dest {
63014 __be16 port; /* port number of the server */
63015 union nf_inet_addr addr; /* IP address of the server */
63016 volatile unsigned flags; /* dest status flags */
63017- atomic_t conn_flags; /* flags to copy to conn */
63018+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
63019 atomic_t weight; /* server weight */
63020
63021 atomic_t refcnt; /* reference counter */
63022diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63023index 69b610a..fe3962c 100644
63024--- a/include/net/irda/ircomm_core.h
63025+++ b/include/net/irda/ircomm_core.h
63026@@ -51,7 +51,7 @@ typedef struct {
63027 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63028 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63029 struct ircomm_info *);
63030-} call_t;
63031+} __no_const call_t;
63032
63033 struct ircomm_cb {
63034 irda_queue_t queue;
63035diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63036index 59ba38bc..d515662 100644
63037--- a/include/net/irda/ircomm_tty.h
63038+++ b/include/net/irda/ircomm_tty.h
63039@@ -35,6 +35,7 @@
63040 #include <linux/termios.h>
63041 #include <linux/timer.h>
63042 #include <linux/tty.h> /* struct tty_struct */
63043+#include <asm/local.h>
63044
63045 #include <net/irda/irias_object.h>
63046 #include <net/irda/ircomm_core.h>
63047@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63048 unsigned short close_delay;
63049 unsigned short closing_wait; /* time to wait before closing */
63050
63051- int open_count;
63052- int blocked_open; /* # of blocked opens */
63053+ local_t open_count;
63054+ local_t blocked_open; /* # of blocked opens */
63055
63056 /* Protect concurent access to :
63057 * o self->open_count
63058diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63059index 0954ec9..7413562 100644
63060--- a/include/net/iucv/af_iucv.h
63061+++ b/include/net/iucv/af_iucv.h
63062@@ -138,7 +138,7 @@ struct iucv_sock {
63063 struct iucv_sock_list {
63064 struct hlist_head head;
63065 rwlock_t lock;
63066- atomic_t autobind_name;
63067+ atomic_unchecked_t autobind_name;
63068 };
63069
63070 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63071diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63072index 34c996f..bb3b4d4 100644
63073--- a/include/net/neighbour.h
63074+++ b/include/net/neighbour.h
63075@@ -123,7 +123,7 @@ struct neigh_ops {
63076 void (*error_report)(struct neighbour *, struct sk_buff *);
63077 int (*output)(struct neighbour *, struct sk_buff *);
63078 int (*connected_output)(struct neighbour *, struct sk_buff *);
63079-};
63080+} __do_const;
63081
63082 struct pneigh_entry {
63083 struct pneigh_entry *next;
63084diff --git a/include/net/netlink.h b/include/net/netlink.h
63085index cb1f350..3279d2c 100644
63086--- a/include/net/netlink.h
63087+++ b/include/net/netlink.h
63088@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63089 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63090 {
63091 if (mark)
63092- skb_trim(skb, (unsigned char *) mark - skb->data);
63093+ skb_trim(skb, (const unsigned char *) mark - skb->data);
63094 }
63095
63096 /**
63097diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63098index bbd023a..97c6d0d 100644
63099--- a/include/net/netns/ipv4.h
63100+++ b/include/net/netns/ipv4.h
63101@@ -57,8 +57,8 @@ struct netns_ipv4 {
63102 unsigned int sysctl_ping_group_range[2];
63103 long sysctl_tcp_mem[3];
63104
63105- atomic_t rt_genid;
63106- atomic_t dev_addr_genid;
63107+ atomic_unchecked_t rt_genid;
63108+ atomic_unchecked_t dev_addr_genid;
63109
63110 #ifdef CONFIG_IP_MROUTE
63111 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63112diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63113index d368561..96aaa17 100644
63114--- a/include/net/sctp/sctp.h
63115+++ b/include/net/sctp/sctp.h
63116@@ -318,9 +318,9 @@ do { \
63117
63118 #else /* SCTP_DEBUG */
63119
63120-#define SCTP_DEBUG_PRINTK(whatever...)
63121-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63122-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63123+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63124+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63125+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63126 #define SCTP_ENABLE_DEBUG
63127 #define SCTP_DISABLE_DEBUG
63128 #define SCTP_ASSERT(expr, str, func)
63129diff --git a/include/net/sock.h b/include/net/sock.h
63130index 91c1c8b..15ae923 100644
63131--- a/include/net/sock.h
63132+++ b/include/net/sock.h
63133@@ -299,7 +299,7 @@ struct sock {
63134 #ifdef CONFIG_RPS
63135 __u32 sk_rxhash;
63136 #endif
63137- atomic_t sk_drops;
63138+ atomic_unchecked_t sk_drops;
63139 int sk_rcvbuf;
63140
63141 struct sk_filter __rcu *sk_filter;
63142@@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63143 }
63144
63145 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63146- char __user *from, char *to,
63147+ char __user *from, unsigned char *to,
63148 int copy, int offset)
63149 {
63150 if (skb->ip_summed == CHECKSUM_NONE) {
63151diff --git a/include/net/tcp.h b/include/net/tcp.h
63152index 2d80c29..aa07caf 100644
63153--- a/include/net/tcp.h
63154+++ b/include/net/tcp.h
63155@@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
63156 char *name;
63157 sa_family_t family;
63158 const struct file_operations *seq_fops;
63159- struct seq_operations seq_ops;
63160+ seq_operations_no_const seq_ops;
63161 };
63162
63163 struct tcp_iter_state {
63164diff --git a/include/net/udp.h b/include/net/udp.h
63165index e39592f..fef9680 100644
63166--- a/include/net/udp.h
63167+++ b/include/net/udp.h
63168@@ -243,7 +243,7 @@ struct udp_seq_afinfo {
63169 sa_family_t family;
63170 struct udp_table *udp_table;
63171 const struct file_operations *seq_fops;
63172- struct seq_operations seq_ops;
63173+ seq_operations_no_const seq_ops;
63174 };
63175
63176 struct udp_iter_state {
63177diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63178index 89174e2..1f82598 100644
63179--- a/include/net/xfrm.h
63180+++ b/include/net/xfrm.h
63181@@ -505,7 +505,7 @@ struct xfrm_policy {
63182 struct timer_list timer;
63183
63184 struct flow_cache_object flo;
63185- atomic_t genid;
63186+ atomic_unchecked_t genid;
63187 u32 priority;
63188 u32 index;
63189 struct xfrm_mark mark;
63190diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63191index 1a046b1..ee0bef0 100644
63192--- a/include/rdma/iw_cm.h
63193+++ b/include/rdma/iw_cm.h
63194@@ -122,7 +122,7 @@ struct iw_cm_verbs {
63195 int backlog);
63196
63197 int (*destroy_listen)(struct iw_cm_id *cm_id);
63198-};
63199+} __no_const;
63200
63201 /**
63202 * iw_create_cm_id - Create an IW CM identifier.
63203diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63204index 6a3922f..0b73022 100644
63205--- a/include/scsi/libfc.h
63206+++ b/include/scsi/libfc.h
63207@@ -748,6 +748,7 @@ struct libfc_function_template {
63208 */
63209 void (*disc_stop_final) (struct fc_lport *);
63210 };
63211+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63212
63213 /**
63214 * struct fc_disc - Discovery context
63215@@ -851,7 +852,7 @@ struct fc_lport {
63216 struct fc_vport *vport;
63217
63218 /* Operational Information */
63219- struct libfc_function_template tt;
63220+ libfc_function_template_no_const tt;
63221 u8 link_up;
63222 u8 qfull;
63223 enum fc_lport_state state;
63224diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63225index 77273f2..dd4031f 100644
63226--- a/include/scsi/scsi_device.h
63227+++ b/include/scsi/scsi_device.h
63228@@ -161,9 +161,9 @@ struct scsi_device {
63229 unsigned int max_device_blocked; /* what device_blocked counts down from */
63230 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63231
63232- atomic_t iorequest_cnt;
63233- atomic_t iodone_cnt;
63234- atomic_t ioerr_cnt;
63235+ atomic_unchecked_t iorequest_cnt;
63236+ atomic_unchecked_t iodone_cnt;
63237+ atomic_unchecked_t ioerr_cnt;
63238
63239 struct device sdev_gendev,
63240 sdev_dev;
63241diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63242index 2a65167..91e01f8 100644
63243--- a/include/scsi/scsi_transport_fc.h
63244+++ b/include/scsi/scsi_transport_fc.h
63245@@ -711,7 +711,7 @@ struct fc_function_template {
63246 unsigned long show_host_system_hostname:1;
63247
63248 unsigned long disable_target_scan:1;
63249-};
63250+} __do_const;
63251
63252
63253 /**
63254diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63255index 030b87c..98a6954 100644
63256--- a/include/sound/ak4xxx-adda.h
63257+++ b/include/sound/ak4xxx-adda.h
63258@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63259 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63260 unsigned char val);
63261 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63262-};
63263+} __no_const;
63264
63265 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63266
63267diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63268index 8c05e47..2b5df97 100644
63269--- a/include/sound/hwdep.h
63270+++ b/include/sound/hwdep.h
63271@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63272 struct snd_hwdep_dsp_status *status);
63273 int (*dsp_load)(struct snd_hwdep *hw,
63274 struct snd_hwdep_dsp_image *image);
63275-};
63276+} __no_const;
63277
63278 struct snd_hwdep {
63279 struct snd_card *card;
63280diff --git a/include/sound/info.h b/include/sound/info.h
63281index 9ca1a49..aba1728 100644
63282--- a/include/sound/info.h
63283+++ b/include/sound/info.h
63284@@ -44,7 +44,7 @@ struct snd_info_entry_text {
63285 struct snd_info_buffer *buffer);
63286 void (*write)(struct snd_info_entry *entry,
63287 struct snd_info_buffer *buffer);
63288-};
63289+} __no_const;
63290
63291 struct snd_info_entry_ops {
63292 int (*open)(struct snd_info_entry *entry,
63293diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63294index 0cf91b2..b70cae4 100644
63295--- a/include/sound/pcm.h
63296+++ b/include/sound/pcm.h
63297@@ -81,6 +81,7 @@ struct snd_pcm_ops {
63298 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63299 int (*ack)(struct snd_pcm_substream *substream);
63300 };
63301+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63302
63303 /*
63304 *
63305diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63306index af1b49e..a5d55a5 100644
63307--- a/include/sound/sb16_csp.h
63308+++ b/include/sound/sb16_csp.h
63309@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63310 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63311 int (*csp_stop) (struct snd_sb_csp * p);
63312 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63313-};
63314+} __no_const;
63315
63316 /*
63317 * CSP private data
63318diff --git a/include/sound/soc.h b/include/sound/soc.h
63319index 0992dff..bb366fe 100644
63320--- a/include/sound/soc.h
63321+++ b/include/sound/soc.h
63322@@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
63323 /* platform IO - used for platform DAPM */
63324 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63325 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63326-};
63327+} __do_const;
63328
63329 struct snd_soc_platform {
63330 const char *name;
63331@@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
63332 struct snd_soc_dai_link *dai_link;
63333 struct mutex pcm_mutex;
63334 enum snd_soc_pcm_subclass pcm_subclass;
63335- struct snd_pcm_ops ops;
63336+ snd_pcm_ops_no_const ops;
63337
63338 unsigned int complete:1;
63339 unsigned int dev_registered:1;
63340diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63341index 444cd6b..3327cc5 100644
63342--- a/include/sound/ymfpci.h
63343+++ b/include/sound/ymfpci.h
63344@@ -358,7 +358,7 @@ struct snd_ymfpci {
63345 spinlock_t reg_lock;
63346 spinlock_t voice_lock;
63347 wait_queue_head_t interrupt_sleep;
63348- atomic_t interrupt_sleep_count;
63349+ atomic_unchecked_t interrupt_sleep_count;
63350 struct snd_info_entry *proc_entry;
63351 const struct firmware *dsp_microcode;
63352 const struct firmware *controller_microcode;
63353diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63354index dc4e345..6bf6080 100644
63355--- a/include/target/target_core_base.h
63356+++ b/include/target/target_core_base.h
63357@@ -443,7 +443,7 @@ struct t10_reservation_ops {
63358 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63359 int (*t10_pr_register)(struct se_cmd *);
63360 int (*t10_pr_clear)(struct se_cmd *);
63361-};
63362+} __no_const;
63363
63364 struct t10_reservation {
63365 /* Reservation effects all target ports */
63366@@ -561,8 +561,8 @@ struct se_cmd {
63367 atomic_t t_se_count;
63368 atomic_t t_task_cdbs_left;
63369 atomic_t t_task_cdbs_ex_left;
63370- atomic_t t_task_cdbs_sent;
63371- atomic_t t_transport_aborted;
63372+ atomic_unchecked_t t_task_cdbs_sent;
63373+ atomic_unchecked_t t_transport_aborted;
63374 atomic_t t_transport_active;
63375 atomic_t t_transport_complete;
63376 atomic_t t_transport_queue_active;
63377@@ -799,7 +799,7 @@ struct se_device {
63378 spinlock_t stats_lock;
63379 /* Active commands on this virtual SE device */
63380 atomic_t simple_cmds;
63381- atomic_t dev_ordered_id;
63382+ atomic_unchecked_t dev_ordered_id;
63383 atomic_t execute_tasks;
63384 atomic_t dev_ordered_sync;
63385 atomic_t dev_qf_count;
63386diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63387index 1c09820..7f5ec79 100644
63388--- a/include/trace/events/irq.h
63389+++ b/include/trace/events/irq.h
63390@@ -36,7 +36,7 @@ struct softirq_action;
63391 */
63392 TRACE_EVENT(irq_handler_entry,
63393
63394- TP_PROTO(int irq, struct irqaction *action),
63395+ TP_PROTO(int irq, const struct irqaction *action),
63396
63397 TP_ARGS(irq, action),
63398
63399@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63400 */
63401 TRACE_EVENT(irq_handler_exit,
63402
63403- TP_PROTO(int irq, struct irqaction *action, int ret),
63404+ TP_PROTO(int irq, const struct irqaction *action, int ret),
63405
63406 TP_ARGS(irq, action, ret),
63407
63408diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63409index c41f308..6918de3 100644
63410--- a/include/video/udlfb.h
63411+++ b/include/video/udlfb.h
63412@@ -52,10 +52,10 @@ struct dlfb_data {
63413 u32 pseudo_palette[256];
63414 int blank_mode; /*one of FB_BLANK_ */
63415 /* blit-only rendering path metrics, exposed through sysfs */
63416- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63417- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63418- atomic_t bytes_sent; /* to usb, after compression including overhead */
63419- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63420+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63421+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63422+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63423+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63424 };
63425
63426 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63427diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63428index 0993a22..32ba2fe 100644
63429--- a/include/video/uvesafb.h
63430+++ b/include/video/uvesafb.h
63431@@ -177,6 +177,7 @@ struct uvesafb_par {
63432 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63433 u8 pmi_setpal; /* PMI for palette changes */
63434 u16 *pmi_base; /* protected mode interface location */
63435+ u8 *pmi_code; /* protected mode code location */
63436 void *pmi_start;
63437 void *pmi_pal;
63438 u8 *vbe_state_orig; /*
63439diff --git a/init/Kconfig b/init/Kconfig
63440index 3f42cd6..613f41d 100644
63441--- a/init/Kconfig
63442+++ b/init/Kconfig
63443@@ -799,6 +799,7 @@ endif # CGROUPS
63444
63445 config CHECKPOINT_RESTORE
63446 bool "Checkpoint/restore support" if EXPERT
63447+ depends on !GRKERNSEC
63448 default n
63449 help
63450 Enables additional kernel features in a sake of checkpoint/restore.
63451@@ -1249,7 +1250,7 @@ config SLUB_DEBUG
63452
63453 config COMPAT_BRK
63454 bool "Disable heap randomization"
63455- default y
63456+ default n
63457 help
63458 Randomizing heap placement makes heap exploits harder, but it
63459 also breaks ancient binaries (including anything libc5 based).
63460diff --git a/init/do_mounts.c b/init/do_mounts.c
63461index 2974c8b..0b863ae 100644
63462--- a/init/do_mounts.c
63463+++ b/init/do_mounts.c
63464@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63465 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63466 {
63467 struct super_block *s;
63468- int err = sys_mount(name, "/root", fs, flags, data);
63469+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63470 if (err)
63471 return err;
63472
63473- sys_chdir((const char __user __force *)"/root");
63474+ sys_chdir((const char __force_user *)"/root");
63475 s = current->fs->pwd.dentry->d_sb;
63476 ROOT_DEV = s->s_dev;
63477 printk(KERN_INFO
63478@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63479 va_start(args, fmt);
63480 vsprintf(buf, fmt, args);
63481 va_end(args);
63482- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63483+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63484 if (fd >= 0) {
63485 sys_ioctl(fd, FDEJECT, 0);
63486 sys_close(fd);
63487 }
63488 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63489- fd = sys_open("/dev/console", O_RDWR, 0);
63490+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63491 if (fd >= 0) {
63492 sys_ioctl(fd, TCGETS, (long)&termios);
63493 termios.c_lflag &= ~ICANON;
63494 sys_ioctl(fd, TCSETSF, (long)&termios);
63495- sys_read(fd, &c, 1);
63496+ sys_read(fd, (char __user *)&c, 1);
63497 termios.c_lflag |= ICANON;
63498 sys_ioctl(fd, TCSETSF, (long)&termios);
63499 sys_close(fd);
63500@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63501 mount_root();
63502 out:
63503 devtmpfs_mount("dev");
63504- sys_mount(".", "/", NULL, MS_MOVE, NULL);
63505- sys_chroot((const char __user __force *)".");
63506+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63507+ sys_chroot((const char __force_user *)".");
63508 }
63509diff --git a/init/do_mounts.h b/init/do_mounts.h
63510index f5b978a..69dbfe8 100644
63511--- a/init/do_mounts.h
63512+++ b/init/do_mounts.h
63513@@ -15,15 +15,15 @@ extern int root_mountflags;
63514
63515 static inline int create_dev(char *name, dev_t dev)
63516 {
63517- sys_unlink(name);
63518- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63519+ sys_unlink((char __force_user *)name);
63520+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63521 }
63522
63523 #if BITS_PER_LONG == 32
63524 static inline u32 bstat(char *name)
63525 {
63526 struct stat64 stat;
63527- if (sys_stat64(name, &stat) != 0)
63528+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63529 return 0;
63530 if (!S_ISBLK(stat.st_mode))
63531 return 0;
63532@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63533 static inline u32 bstat(char *name)
63534 {
63535 struct stat stat;
63536- if (sys_newstat(name, &stat) != 0)
63537+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63538 return 0;
63539 if (!S_ISBLK(stat.st_mode))
63540 return 0;
63541diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63542index 3098a38..253064e 100644
63543--- a/init/do_mounts_initrd.c
63544+++ b/init/do_mounts_initrd.c
63545@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
63546 create_dev("/dev/root.old", Root_RAM0);
63547 /* mount initrd on rootfs' /root */
63548 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63549- sys_mkdir("/old", 0700);
63550- root_fd = sys_open("/", 0, 0);
63551- old_fd = sys_open("/old", 0, 0);
63552+ sys_mkdir((const char __force_user *)"/old", 0700);
63553+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
63554+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63555 /* move initrd over / and chdir/chroot in initrd root */
63556- sys_chdir("/root");
63557- sys_mount(".", "/", NULL, MS_MOVE, NULL);
63558- sys_chroot(".");
63559+ sys_chdir((const char __force_user *)"/root");
63560+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63561+ sys_chroot((const char __force_user *)".");
63562
63563 /*
63564 * In case that a resume from disk is carried out by linuxrc or one of
63565@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
63566
63567 /* move initrd to rootfs' /old */
63568 sys_fchdir(old_fd);
63569- sys_mount("/", ".", NULL, MS_MOVE, NULL);
63570+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63571 /* switch root and cwd back to / of rootfs */
63572 sys_fchdir(root_fd);
63573- sys_chroot(".");
63574+ sys_chroot((const char __force_user *)".");
63575 sys_close(old_fd);
63576 sys_close(root_fd);
63577
63578 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63579- sys_chdir("/old");
63580+ sys_chdir((const char __force_user *)"/old");
63581 return;
63582 }
63583
63584@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
63585 mount_root();
63586
63587 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63588- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63589+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63590 if (!error)
63591 printk("okay\n");
63592 else {
63593- int fd = sys_open("/dev/root.old", O_RDWR, 0);
63594+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63595 if (error == -ENOENT)
63596 printk("/initrd does not exist. Ignored.\n");
63597 else
63598 printk("failed\n");
63599 printk(KERN_NOTICE "Unmounting old root\n");
63600- sys_umount("/old", MNT_DETACH);
63601+ sys_umount((char __force_user *)"/old", MNT_DETACH);
63602 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63603 if (fd < 0) {
63604 error = fd;
63605@@ -116,11 +116,11 @@ int __init initrd_load(void)
63606 * mounted in the normal path.
63607 */
63608 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63609- sys_unlink("/initrd.image");
63610+ sys_unlink((const char __force_user *)"/initrd.image");
63611 handle_initrd();
63612 return 1;
63613 }
63614 }
63615- sys_unlink("/initrd.image");
63616+ sys_unlink((const char __force_user *)"/initrd.image");
63617 return 0;
63618 }
63619diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63620index 32c4799..c27ee74 100644
63621--- a/init/do_mounts_md.c
63622+++ b/init/do_mounts_md.c
63623@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63624 partitioned ? "_d" : "", minor,
63625 md_setup_args[ent].device_names);
63626
63627- fd = sys_open(name, 0, 0);
63628+ fd = sys_open((char __force_user *)name, 0, 0);
63629 if (fd < 0) {
63630 printk(KERN_ERR "md: open failed - cannot start "
63631 "array %s\n", name);
63632@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63633 * array without it
63634 */
63635 sys_close(fd);
63636- fd = sys_open(name, 0, 0);
63637+ fd = sys_open((char __force_user *)name, 0, 0);
63638 sys_ioctl(fd, BLKRRPART, 0);
63639 }
63640 sys_close(fd);
63641@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
63642
63643 wait_for_device_probe();
63644
63645- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
63646+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
63647 if (fd >= 0) {
63648 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
63649 sys_close(fd);
63650diff --git a/init/initramfs.c b/init/initramfs.c
63651index 8216c30..25e8e32 100644
63652--- a/init/initramfs.c
63653+++ b/init/initramfs.c
63654@@ -74,7 +74,7 @@ static void __init free_hash(void)
63655 }
63656 }
63657
63658-static long __init do_utime(char __user *filename, time_t mtime)
63659+static long __init do_utime(__force char __user *filename, time_t mtime)
63660 {
63661 struct timespec t[2];
63662
63663@@ -109,7 +109,7 @@ static void __init dir_utime(void)
63664 struct dir_entry *de, *tmp;
63665 list_for_each_entry_safe(de, tmp, &dir_list, list) {
63666 list_del(&de->list);
63667- do_utime(de->name, de->mtime);
63668+ do_utime((char __force_user *)de->name, de->mtime);
63669 kfree(de->name);
63670 kfree(de);
63671 }
63672@@ -271,7 +271,7 @@ static int __init maybe_link(void)
63673 if (nlink >= 2) {
63674 char *old = find_link(major, minor, ino, mode, collected);
63675 if (old)
63676- return (sys_link(old, collected) < 0) ? -1 : 1;
63677+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
63678 }
63679 return 0;
63680 }
63681@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
63682 {
63683 struct stat st;
63684
63685- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
63686+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
63687 if (S_ISDIR(st.st_mode))
63688- sys_rmdir(path);
63689+ sys_rmdir((char __force_user *)path);
63690 else
63691- sys_unlink(path);
63692+ sys_unlink((char __force_user *)path);
63693 }
63694 }
63695
63696@@ -305,7 +305,7 @@ static int __init do_name(void)
63697 int openflags = O_WRONLY|O_CREAT;
63698 if (ml != 1)
63699 openflags |= O_TRUNC;
63700- wfd = sys_open(collected, openflags, mode);
63701+ wfd = sys_open((char __force_user *)collected, openflags, mode);
63702
63703 if (wfd >= 0) {
63704 sys_fchown(wfd, uid, gid);
63705@@ -317,17 +317,17 @@ static int __init do_name(void)
63706 }
63707 }
63708 } else if (S_ISDIR(mode)) {
63709- sys_mkdir(collected, mode);
63710- sys_chown(collected, uid, gid);
63711- sys_chmod(collected, mode);
63712+ sys_mkdir((char __force_user *)collected, mode);
63713+ sys_chown((char __force_user *)collected, uid, gid);
63714+ sys_chmod((char __force_user *)collected, mode);
63715 dir_add(collected, mtime);
63716 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
63717 S_ISFIFO(mode) || S_ISSOCK(mode)) {
63718 if (maybe_link() == 0) {
63719- sys_mknod(collected, mode, rdev);
63720- sys_chown(collected, uid, gid);
63721- sys_chmod(collected, mode);
63722- do_utime(collected, mtime);
63723+ sys_mknod((char __force_user *)collected, mode, rdev);
63724+ sys_chown((char __force_user *)collected, uid, gid);
63725+ sys_chmod((char __force_user *)collected, mode);
63726+ do_utime((char __force_user *)collected, mtime);
63727 }
63728 }
63729 return 0;
63730@@ -336,15 +336,15 @@ static int __init do_name(void)
63731 static int __init do_copy(void)
63732 {
63733 if (count >= body_len) {
63734- sys_write(wfd, victim, body_len);
63735+ sys_write(wfd, (char __force_user *)victim, body_len);
63736 sys_close(wfd);
63737- do_utime(vcollected, mtime);
63738+ do_utime((char __force_user *)vcollected, mtime);
63739 kfree(vcollected);
63740 eat(body_len);
63741 state = SkipIt;
63742 return 0;
63743 } else {
63744- sys_write(wfd, victim, count);
63745+ sys_write(wfd, (char __force_user *)victim, count);
63746 body_len -= count;
63747 eat(count);
63748 return 1;
63749@@ -355,9 +355,9 @@ static int __init do_symlink(void)
63750 {
63751 collected[N_ALIGN(name_len) + body_len] = '\0';
63752 clean_path(collected, 0);
63753- sys_symlink(collected + N_ALIGN(name_len), collected);
63754- sys_lchown(collected, uid, gid);
63755- do_utime(collected, mtime);
63756+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
63757+ sys_lchown((char __force_user *)collected, uid, gid);
63758+ do_utime((char __force_user *)collected, mtime);
63759 state = SkipIt;
63760 next_state = Reset;
63761 return 0;
63762diff --git a/init/main.c b/init/main.c
63763index ff49a6d..5fa0429 100644
63764--- a/init/main.c
63765+++ b/init/main.c
63766@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
63767 extern void tc_init(void);
63768 #endif
63769
63770+extern void grsecurity_init(void);
63771+
63772 /*
63773 * Debug helper: via this flag we know that we are in 'early bootup code'
63774 * where only the boot processor is running with IRQ disabled. This means
63775@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
63776
63777 __setup("reset_devices", set_reset_devices);
63778
63779+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
63780+extern char pax_enter_kernel_user[];
63781+extern char pax_exit_kernel_user[];
63782+extern pgdval_t clone_pgd_mask;
63783+#endif
63784+
63785+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
63786+static int __init setup_pax_nouderef(char *str)
63787+{
63788+#ifdef CONFIG_X86_32
63789+ unsigned int cpu;
63790+ struct desc_struct *gdt;
63791+
63792+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
63793+ gdt = get_cpu_gdt_table(cpu);
63794+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
63795+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
63796+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
63797+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
63798+ }
63799+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
63800+#else
63801+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
63802+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
63803+ clone_pgd_mask = ~(pgdval_t)0UL;
63804+#endif
63805+
63806+ return 0;
63807+}
63808+early_param("pax_nouderef", setup_pax_nouderef);
63809+#endif
63810+
63811+#ifdef CONFIG_PAX_SOFTMODE
63812+int pax_softmode;
63813+
63814+static int __init setup_pax_softmode(char *str)
63815+{
63816+ get_option(&str, &pax_softmode);
63817+ return 1;
63818+}
63819+__setup("pax_softmode=", setup_pax_softmode);
63820+#endif
63821+
63822 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
63823 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
63824 static const char *panic_later, *panic_param;
63825@@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
63826 {
63827 int count = preempt_count();
63828 int ret;
63829+ const char *msg1 = "", *msg2 = "";
63830
63831 if (initcall_debug)
63832 ret = do_one_initcall_debug(fn);
63833@@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
63834 sprintf(msgbuf, "error code %d ", ret);
63835
63836 if (preempt_count() != count) {
63837- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
63838+ msg1 = " preemption imbalance";
63839 preempt_count() = count;
63840 }
63841 if (irqs_disabled()) {
63842- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
63843+ msg2 = " disabled interrupts";
63844 local_irq_enable();
63845 }
63846- if (msgbuf[0]) {
63847- printk("initcall %pF returned with %s\n", fn, msgbuf);
63848+ if (msgbuf[0] || *msg1 || *msg2) {
63849+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
63850 }
63851
63852 return ret;
63853@@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
63854 do_basic_setup();
63855
63856 /* Open the /dev/console on the rootfs, this should never fail */
63857- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
63858+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
63859 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
63860
63861 (void) sys_dup(0);
63862@@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
63863 if (!ramdisk_execute_command)
63864 ramdisk_execute_command = "/init";
63865
63866- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
63867+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
63868 ramdisk_execute_command = NULL;
63869 prepare_namespace();
63870 }
63871
63872+ grsecurity_init();
63873+
63874 /*
63875 * Ok, we have completed the initial bootup, and
63876 * we're essentially up and running. Get rid of the
63877diff --git a/ipc/mqueue.c b/ipc/mqueue.c
63878index 86ee272..773d937 100644
63879--- a/ipc/mqueue.c
63880+++ b/ipc/mqueue.c
63881@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
63882 mq_bytes = (mq_msg_tblsz +
63883 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
63884
63885+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
63886 spin_lock(&mq_lock);
63887 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
63888 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
63889diff --git a/ipc/msg.c b/ipc/msg.c
63890index 7385de2..a8180e08 100644
63891--- a/ipc/msg.c
63892+++ b/ipc/msg.c
63893@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
63894 return security_msg_queue_associate(msq, msgflg);
63895 }
63896
63897+static struct ipc_ops msg_ops = {
63898+ .getnew = newque,
63899+ .associate = msg_security,
63900+ .more_checks = NULL
63901+};
63902+
63903 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
63904 {
63905 struct ipc_namespace *ns;
63906- struct ipc_ops msg_ops;
63907 struct ipc_params msg_params;
63908
63909 ns = current->nsproxy->ipc_ns;
63910
63911- msg_ops.getnew = newque;
63912- msg_ops.associate = msg_security;
63913- msg_ops.more_checks = NULL;
63914-
63915 msg_params.key = key;
63916 msg_params.flg = msgflg;
63917
63918diff --git a/ipc/sem.c b/ipc/sem.c
63919index 5215a81..cfc0cac 100644
63920--- a/ipc/sem.c
63921+++ b/ipc/sem.c
63922@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
63923 return 0;
63924 }
63925
63926+static struct ipc_ops sem_ops = {
63927+ .getnew = newary,
63928+ .associate = sem_security,
63929+ .more_checks = sem_more_checks
63930+};
63931+
63932 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
63933 {
63934 struct ipc_namespace *ns;
63935- struct ipc_ops sem_ops;
63936 struct ipc_params sem_params;
63937
63938 ns = current->nsproxy->ipc_ns;
63939@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
63940 if (nsems < 0 || nsems > ns->sc_semmsl)
63941 return -EINVAL;
63942
63943- sem_ops.getnew = newary;
63944- sem_ops.associate = sem_security;
63945- sem_ops.more_checks = sem_more_checks;
63946-
63947 sem_params.key = key;
63948 sem_params.flg = semflg;
63949 sem_params.u.nsems = nsems;
63950diff --git a/ipc/shm.c b/ipc/shm.c
63951index b76be5b..859e750 100644
63952--- a/ipc/shm.c
63953+++ b/ipc/shm.c
63954@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
63955 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
63956 #endif
63957
63958+#ifdef CONFIG_GRKERNSEC
63959+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63960+ const time_t shm_createtime, const uid_t cuid,
63961+ const int shmid);
63962+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63963+ const time_t shm_createtime);
63964+#endif
63965+
63966 void shm_init_ns(struct ipc_namespace *ns)
63967 {
63968 ns->shm_ctlmax = SHMMAX;
63969@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
63970 shp->shm_lprid = 0;
63971 shp->shm_atim = shp->shm_dtim = 0;
63972 shp->shm_ctim = get_seconds();
63973+#ifdef CONFIG_GRKERNSEC
63974+ {
63975+ struct timespec timeval;
63976+ do_posix_clock_monotonic_gettime(&timeval);
63977+
63978+ shp->shm_createtime = timeval.tv_sec;
63979+ }
63980+#endif
63981 shp->shm_segsz = size;
63982 shp->shm_nattch = 0;
63983 shp->shm_file = file;
63984@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
63985 return 0;
63986 }
63987
63988+static struct ipc_ops shm_ops = {
63989+ .getnew = newseg,
63990+ .associate = shm_security,
63991+ .more_checks = shm_more_checks
63992+};
63993+
63994 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
63995 {
63996 struct ipc_namespace *ns;
63997- struct ipc_ops shm_ops;
63998 struct ipc_params shm_params;
63999
64000 ns = current->nsproxy->ipc_ns;
64001
64002- shm_ops.getnew = newseg;
64003- shm_ops.associate = shm_security;
64004- shm_ops.more_checks = shm_more_checks;
64005-
64006 shm_params.key = key;
64007 shm_params.flg = shmflg;
64008 shm_params.u.size = size;
64009@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64010 f_mode = FMODE_READ | FMODE_WRITE;
64011 }
64012 if (shmflg & SHM_EXEC) {
64013+
64014+#ifdef CONFIG_PAX_MPROTECT
64015+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
64016+ goto out;
64017+#endif
64018+
64019 prot |= PROT_EXEC;
64020 acc_mode |= S_IXUGO;
64021 }
64022@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64023 if (err)
64024 goto out_unlock;
64025
64026+#ifdef CONFIG_GRKERNSEC
64027+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64028+ shp->shm_perm.cuid, shmid) ||
64029+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64030+ err = -EACCES;
64031+ goto out_unlock;
64032+ }
64033+#endif
64034+
64035 path = shp->shm_file->f_path;
64036 path_get(&path);
64037 shp->shm_nattch++;
64038+#ifdef CONFIG_GRKERNSEC
64039+ shp->shm_lapid = current->pid;
64040+#endif
64041 size = i_size_read(path.dentry->d_inode);
64042 shm_unlock(shp);
64043
64044diff --git a/kernel/acct.c b/kernel/acct.c
64045index 02e6167..54824f7 100644
64046--- a/kernel/acct.c
64047+++ b/kernel/acct.c
64048@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64049 */
64050 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64051 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64052- file->f_op->write(file, (char *)&ac,
64053+ file->f_op->write(file, (char __force_user *)&ac,
64054 sizeof(acct_t), &file->f_pos);
64055 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64056 set_fs(fs);
64057diff --git a/kernel/audit.c b/kernel/audit.c
64058index bb0eb5b..cf2a03a 100644
64059--- a/kernel/audit.c
64060+++ b/kernel/audit.c
64061@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64062 3) suppressed due to audit_rate_limit
64063 4) suppressed due to audit_backlog_limit
64064 */
64065-static atomic_t audit_lost = ATOMIC_INIT(0);
64066+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64067
64068 /* The netlink socket. */
64069 static struct sock *audit_sock;
64070@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64071 unsigned long now;
64072 int print;
64073
64074- atomic_inc(&audit_lost);
64075+ atomic_inc_unchecked(&audit_lost);
64076
64077 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64078
64079@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64080 printk(KERN_WARNING
64081 "audit: audit_lost=%d audit_rate_limit=%d "
64082 "audit_backlog_limit=%d\n",
64083- atomic_read(&audit_lost),
64084+ atomic_read_unchecked(&audit_lost),
64085 audit_rate_limit,
64086 audit_backlog_limit);
64087 audit_panic(message);
64088@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64089 status_set.pid = audit_pid;
64090 status_set.rate_limit = audit_rate_limit;
64091 status_set.backlog_limit = audit_backlog_limit;
64092- status_set.lost = atomic_read(&audit_lost);
64093+ status_set.lost = atomic_read_unchecked(&audit_lost);
64094 status_set.backlog = skb_queue_len(&audit_skb_queue);
64095 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64096 &status_set, sizeof(status_set));
64097diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64098index af1de0f..06dfe57 100644
64099--- a/kernel/auditsc.c
64100+++ b/kernel/auditsc.c
64101@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64102 }
64103
64104 /* global counter which is incremented every time something logs in */
64105-static atomic_t session_id = ATOMIC_INIT(0);
64106+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64107
64108 /**
64109 * audit_set_loginuid - set current task's audit_context loginuid
64110@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64111 return -EPERM;
64112 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64113
64114- sessionid = atomic_inc_return(&session_id);
64115+ sessionid = atomic_inc_return_unchecked(&session_id);
64116 if (context && context->in_syscall) {
64117 struct audit_buffer *ab;
64118
64119diff --git a/kernel/capability.c b/kernel/capability.c
64120index 3f1adb6..c564db0 100644
64121--- a/kernel/capability.c
64122+++ b/kernel/capability.c
64123@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64124 * before modification is attempted and the application
64125 * fails.
64126 */
64127+ if (tocopy > ARRAY_SIZE(kdata))
64128+ return -EFAULT;
64129+
64130 if (copy_to_user(dataptr, kdata, tocopy
64131 * sizeof(struct __user_cap_data_struct))) {
64132 return -EFAULT;
64133@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64134 int ret;
64135
64136 rcu_read_lock();
64137- ret = security_capable(__task_cred(t), ns, cap);
64138+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64139+ gr_task_is_capable(t, __task_cred(t), cap);
64140 rcu_read_unlock();
64141
64142- return (ret == 0);
64143+ return ret;
64144 }
64145
64146 /**
64147@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64148 int ret;
64149
64150 rcu_read_lock();
64151- ret = security_capable_noaudit(__task_cred(t), ns, cap);
64152+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64153 rcu_read_unlock();
64154
64155- return (ret == 0);
64156+ return ret;
64157 }
64158
64159 /**
64160@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64161 BUG();
64162 }
64163
64164- if (security_capable(current_cred(), ns, cap) == 0) {
64165+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64166 current->flags |= PF_SUPERPRIV;
64167 return true;
64168 }
64169@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64170 }
64171 EXPORT_SYMBOL(ns_capable);
64172
64173+bool ns_capable_nolog(struct user_namespace *ns, int cap)
64174+{
64175+ if (unlikely(!cap_valid(cap))) {
64176+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64177+ BUG();
64178+ }
64179+
64180+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64181+ current->flags |= PF_SUPERPRIV;
64182+ return true;
64183+ }
64184+ return false;
64185+}
64186+EXPORT_SYMBOL(ns_capable_nolog);
64187+
64188 /**
64189 * capable - Determine if the current task has a superior capability in effect
64190 * @cap: The capability to be tested for
64191@@ -408,6 +427,12 @@ bool capable(int cap)
64192 }
64193 EXPORT_SYMBOL(capable);
64194
64195+bool capable_nolog(int cap)
64196+{
64197+ return ns_capable_nolog(&init_user_ns, cap);
64198+}
64199+EXPORT_SYMBOL(capable_nolog);
64200+
64201 /**
64202 * nsown_capable - Check superior capability to one's own user_ns
64203 * @cap: The capability in question
64204diff --git a/kernel/compat.c b/kernel/compat.c
64205index f346ced..aa2b1f4 100644
64206--- a/kernel/compat.c
64207+++ b/kernel/compat.c
64208@@ -13,6 +13,7 @@
64209
64210 #include <linux/linkage.h>
64211 #include <linux/compat.h>
64212+#include <linux/module.h>
64213 #include <linux/errno.h>
64214 #include <linux/time.h>
64215 #include <linux/signal.h>
64216@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64217 mm_segment_t oldfs;
64218 long ret;
64219
64220- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64221+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64222 oldfs = get_fs();
64223 set_fs(KERNEL_DS);
64224 ret = hrtimer_nanosleep_restart(restart);
64225@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64226 oldfs = get_fs();
64227 set_fs(KERNEL_DS);
64228 ret = hrtimer_nanosleep(&tu,
64229- rmtp ? (struct timespec __user *)&rmt : NULL,
64230+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
64231 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64232 set_fs(oldfs);
64233
64234@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64235 mm_segment_t old_fs = get_fs();
64236
64237 set_fs(KERNEL_DS);
64238- ret = sys_sigpending((old_sigset_t __user *) &s);
64239+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
64240 set_fs(old_fs);
64241 if (ret == 0)
64242 ret = put_user(s, set);
64243@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64244 old_fs = get_fs();
64245 set_fs(KERNEL_DS);
64246 ret = sys_sigprocmask(how,
64247- set ? (old_sigset_t __user *) &s : NULL,
64248- oset ? (old_sigset_t __user *) &s : NULL);
64249+ set ? (old_sigset_t __force_user *) &s : NULL,
64250+ oset ? (old_sigset_t __force_user *) &s : NULL);
64251 set_fs(old_fs);
64252 if (ret == 0)
64253 if (oset)
64254@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64255 mm_segment_t old_fs = get_fs();
64256
64257 set_fs(KERNEL_DS);
64258- ret = sys_old_getrlimit(resource, &r);
64259+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64260 set_fs(old_fs);
64261
64262 if (!ret) {
64263@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64264 mm_segment_t old_fs = get_fs();
64265
64266 set_fs(KERNEL_DS);
64267- ret = sys_getrusage(who, (struct rusage __user *) &r);
64268+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64269 set_fs(old_fs);
64270
64271 if (ret)
64272@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64273 set_fs (KERNEL_DS);
64274 ret = sys_wait4(pid,
64275 (stat_addr ?
64276- (unsigned int __user *) &status : NULL),
64277- options, (struct rusage __user *) &r);
64278+ (unsigned int __force_user *) &status : NULL),
64279+ options, (struct rusage __force_user *) &r);
64280 set_fs (old_fs);
64281
64282 if (ret > 0) {
64283@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64284 memset(&info, 0, sizeof(info));
64285
64286 set_fs(KERNEL_DS);
64287- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64288- uru ? (struct rusage __user *)&ru : NULL);
64289+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64290+ uru ? (struct rusage __force_user *)&ru : NULL);
64291 set_fs(old_fs);
64292
64293 if ((ret < 0) || (info.si_signo == 0))
64294@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64295 oldfs = get_fs();
64296 set_fs(KERNEL_DS);
64297 err = sys_timer_settime(timer_id, flags,
64298- (struct itimerspec __user *) &newts,
64299- (struct itimerspec __user *) &oldts);
64300+ (struct itimerspec __force_user *) &newts,
64301+ (struct itimerspec __force_user *) &oldts);
64302 set_fs(oldfs);
64303 if (!err && old && put_compat_itimerspec(old, &oldts))
64304 return -EFAULT;
64305@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64306 oldfs = get_fs();
64307 set_fs(KERNEL_DS);
64308 err = sys_timer_gettime(timer_id,
64309- (struct itimerspec __user *) &ts);
64310+ (struct itimerspec __force_user *) &ts);
64311 set_fs(oldfs);
64312 if (!err && put_compat_itimerspec(setting, &ts))
64313 return -EFAULT;
64314@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64315 oldfs = get_fs();
64316 set_fs(KERNEL_DS);
64317 err = sys_clock_settime(which_clock,
64318- (struct timespec __user *) &ts);
64319+ (struct timespec __force_user *) &ts);
64320 set_fs(oldfs);
64321 return err;
64322 }
64323@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64324 oldfs = get_fs();
64325 set_fs(KERNEL_DS);
64326 err = sys_clock_gettime(which_clock,
64327- (struct timespec __user *) &ts);
64328+ (struct timespec __force_user *) &ts);
64329 set_fs(oldfs);
64330 if (!err && put_compat_timespec(&ts, tp))
64331 return -EFAULT;
64332@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64333
64334 oldfs = get_fs();
64335 set_fs(KERNEL_DS);
64336- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64337+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64338 set_fs(oldfs);
64339
64340 err = compat_put_timex(utp, &txc);
64341@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64342 oldfs = get_fs();
64343 set_fs(KERNEL_DS);
64344 err = sys_clock_getres(which_clock,
64345- (struct timespec __user *) &ts);
64346+ (struct timespec __force_user *) &ts);
64347 set_fs(oldfs);
64348 if (!err && tp && put_compat_timespec(&ts, tp))
64349 return -EFAULT;
64350@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64351 long err;
64352 mm_segment_t oldfs;
64353 struct timespec tu;
64354- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64355+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64356
64357- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64358+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64359 oldfs = get_fs();
64360 set_fs(KERNEL_DS);
64361 err = clock_nanosleep_restart(restart);
64362@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64363 oldfs = get_fs();
64364 set_fs(KERNEL_DS);
64365 err = sys_clock_nanosleep(which_clock, flags,
64366- (struct timespec __user *) &in,
64367- (struct timespec __user *) &out);
64368+ (struct timespec __force_user *) &in,
64369+ (struct timespec __force_user *) &out);
64370 set_fs(oldfs);
64371
64372 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64373diff --git a/kernel/configs.c b/kernel/configs.c
64374index 42e8fa0..9e7406b 100644
64375--- a/kernel/configs.c
64376+++ b/kernel/configs.c
64377@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64378 struct proc_dir_entry *entry;
64379
64380 /* create the current config file */
64381+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64382+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64383+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64384+ &ikconfig_file_ops);
64385+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64386+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64387+ &ikconfig_file_ops);
64388+#endif
64389+#else
64390 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64391 &ikconfig_file_ops);
64392+#endif
64393+
64394 if (!entry)
64395 return -ENOMEM;
64396
64397diff --git a/kernel/cred.c b/kernel/cred.c
64398index 5791612..a3c04dc 100644
64399--- a/kernel/cred.c
64400+++ b/kernel/cred.c
64401@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
64402 validate_creds(cred);
64403 put_cred(cred);
64404 }
64405+
64406+#ifdef CONFIG_GRKERNSEC_SETXID
64407+ cred = (struct cred *) tsk->delayed_cred;
64408+ if (cred) {
64409+ tsk->delayed_cred = NULL;
64410+ validate_creds(cred);
64411+ put_cred(cred);
64412+ }
64413+#endif
64414 }
64415
64416 /**
64417@@ -470,7 +479,7 @@ error_put:
64418 * Always returns 0 thus allowing this function to be tail-called at the end
64419 * of, say, sys_setgid().
64420 */
64421-int commit_creds(struct cred *new)
64422+static int __commit_creds(struct cred *new)
64423 {
64424 struct task_struct *task = current;
64425 const struct cred *old = task->real_cred;
64426@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
64427
64428 get_cred(new); /* we will require a ref for the subj creds too */
64429
64430+ gr_set_role_label(task, new->uid, new->gid);
64431+
64432 /* dumpability changes */
64433 if (old->euid != new->euid ||
64434 old->egid != new->egid ||
64435@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
64436 put_cred(old);
64437 return 0;
64438 }
64439+#ifdef CONFIG_GRKERNSEC_SETXID
64440+extern int set_user(struct cred *new);
64441+
64442+void gr_delayed_cred_worker(void)
64443+{
64444+ const struct cred *new = current->delayed_cred;
64445+ struct cred *ncred;
64446+
64447+ current->delayed_cred = NULL;
64448+
64449+ if (current_uid() && new != NULL) {
64450+ // from doing get_cred on it when queueing this
64451+ put_cred(new);
64452+ return;
64453+ } else if (new == NULL)
64454+ return;
64455+
64456+ ncred = prepare_creds();
64457+ if (!ncred)
64458+ goto die;
64459+ // uids
64460+ ncred->uid = new->uid;
64461+ ncred->euid = new->euid;
64462+ ncred->suid = new->suid;
64463+ ncred->fsuid = new->fsuid;
64464+ // gids
64465+ ncred->gid = new->gid;
64466+ ncred->egid = new->egid;
64467+ ncred->sgid = new->sgid;
64468+ ncred->fsgid = new->fsgid;
64469+ // groups
64470+ if (set_groups(ncred, new->group_info) < 0) {
64471+ abort_creds(ncred);
64472+ goto die;
64473+ }
64474+ // caps
64475+ ncred->securebits = new->securebits;
64476+ ncred->cap_inheritable = new->cap_inheritable;
64477+ ncred->cap_permitted = new->cap_permitted;
64478+ ncred->cap_effective = new->cap_effective;
64479+ ncred->cap_bset = new->cap_bset;
64480+
64481+ if (set_user(ncred)) {
64482+ abort_creds(ncred);
64483+ goto die;
64484+ }
64485+
64486+ // from doing get_cred on it when queueing this
64487+ put_cred(new);
64488+
64489+ __commit_creds(ncred);
64490+ return;
64491+die:
64492+ // from doing get_cred on it when queueing this
64493+ put_cred(new);
64494+ do_group_exit(SIGKILL);
64495+}
64496+#endif
64497+
64498+int commit_creds(struct cred *new)
64499+{
64500+#ifdef CONFIG_GRKERNSEC_SETXID
64501+ struct task_struct *t;
64502+
64503+ /* we won't get called with tasklist_lock held for writing
64504+ and interrupts disabled as the cred struct in that case is
64505+ init_cred
64506+ */
64507+ if (grsec_enable_setxid && !current_is_single_threaded() &&
64508+ !current_uid() && new->uid) {
64509+ rcu_read_lock();
64510+ read_lock(&tasklist_lock);
64511+ for (t = next_thread(current); t != current;
64512+ t = next_thread(t)) {
64513+ if (t->delayed_cred == NULL) {
64514+ t->delayed_cred = get_cred(new);
64515+ set_tsk_need_resched(t);
64516+ }
64517+ }
64518+ read_unlock(&tasklist_lock);
64519+ rcu_read_unlock();
64520+ }
64521+#endif
64522+ return __commit_creds(new);
64523+}
64524+
64525 EXPORT_SYMBOL(commit_creds);
64526
64527 /**
64528diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64529index 0d7c087..01b8cef 100644
64530--- a/kernel/debug/debug_core.c
64531+++ b/kernel/debug/debug_core.c
64532@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64533 */
64534 static atomic_t masters_in_kgdb;
64535 static atomic_t slaves_in_kgdb;
64536-static atomic_t kgdb_break_tasklet_var;
64537+static atomic_unchecked_t kgdb_break_tasklet_var;
64538 atomic_t kgdb_setting_breakpoint;
64539
64540 struct task_struct *kgdb_usethread;
64541@@ -129,7 +129,7 @@ int kgdb_single_step;
64542 static pid_t kgdb_sstep_pid;
64543
64544 /* to keep track of the CPU which is doing the single stepping*/
64545-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64546+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64547
64548 /*
64549 * If you are debugging a problem where roundup (the collection of
64550@@ -542,7 +542,7 @@ return_normal:
64551 * kernel will only try for the value of sstep_tries before
64552 * giving up and continuing on.
64553 */
64554- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64555+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64556 (kgdb_info[cpu].task &&
64557 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64558 atomic_set(&kgdb_active, -1);
64559@@ -636,8 +636,8 @@ cpu_master_loop:
64560 }
64561
64562 kgdb_restore:
64563- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64564- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64565+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64566+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64567 if (kgdb_info[sstep_cpu].task)
64568 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64569 else
64570@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
64571 static void kgdb_tasklet_bpt(unsigned long ing)
64572 {
64573 kgdb_breakpoint();
64574- atomic_set(&kgdb_break_tasklet_var, 0);
64575+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64576 }
64577
64578 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64579
64580 void kgdb_schedule_breakpoint(void)
64581 {
64582- if (atomic_read(&kgdb_break_tasklet_var) ||
64583+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64584 atomic_read(&kgdb_active) != -1 ||
64585 atomic_read(&kgdb_setting_breakpoint))
64586 return;
64587- atomic_inc(&kgdb_break_tasklet_var);
64588+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
64589 tasklet_schedule(&kgdb_tasklet_breakpoint);
64590 }
64591 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64592diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64593index e2ae734..08a4c5c 100644
64594--- a/kernel/debug/kdb/kdb_main.c
64595+++ b/kernel/debug/kdb/kdb_main.c
64596@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
64597 list_for_each_entry(mod, kdb_modules, list) {
64598
64599 kdb_printf("%-20s%8u 0x%p ", mod->name,
64600- mod->core_size, (void *)mod);
64601+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
64602 #ifdef CONFIG_MODULE_UNLOAD
64603 kdb_printf("%4ld ", module_refcount(mod));
64604 #endif
64605@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
64606 kdb_printf(" (Loading)");
64607 else
64608 kdb_printf(" (Live)");
64609- kdb_printf(" 0x%p", mod->module_core);
64610+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64611
64612 #ifdef CONFIG_MODULE_UNLOAD
64613 {
64614diff --git a/kernel/events/core.c b/kernel/events/core.c
64615index 1b5c081..c375f83 100644
64616--- a/kernel/events/core.c
64617+++ b/kernel/events/core.c
64618@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64619 return 0;
64620 }
64621
64622-static atomic64_t perf_event_id;
64623+static atomic64_unchecked_t perf_event_id;
64624
64625 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64626 enum event_type_t event_type);
64627@@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
64628
64629 static inline u64 perf_event_count(struct perf_event *event)
64630 {
64631- return local64_read(&event->count) + atomic64_read(&event->child_count);
64632+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64633 }
64634
64635 static u64 perf_event_read(struct perf_event *event)
64636@@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64637 mutex_lock(&event->child_mutex);
64638 total += perf_event_read(event);
64639 *enabled += event->total_time_enabled +
64640- atomic64_read(&event->child_total_time_enabled);
64641+ atomic64_read_unchecked(&event->child_total_time_enabled);
64642 *running += event->total_time_running +
64643- atomic64_read(&event->child_total_time_running);
64644+ atomic64_read_unchecked(&event->child_total_time_running);
64645
64646 list_for_each_entry(child, &event->child_list, child_list) {
64647 total += perf_event_read(child);
64648@@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
64649 userpg->offset -= local64_read(&event->hw.prev_count);
64650
64651 userpg->time_enabled = enabled +
64652- atomic64_read(&event->child_total_time_enabled);
64653+ atomic64_read_unchecked(&event->child_total_time_enabled);
64654
64655 userpg->time_running = running +
64656- atomic64_read(&event->child_total_time_running);
64657+ atomic64_read_unchecked(&event->child_total_time_running);
64658
64659 barrier();
64660 ++userpg->lock;
64661@@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
64662 values[n++] = perf_event_count(event);
64663 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64664 values[n++] = enabled +
64665- atomic64_read(&event->child_total_time_enabled);
64666+ atomic64_read_unchecked(&event->child_total_time_enabled);
64667 }
64668 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64669 values[n++] = running +
64670- atomic64_read(&event->child_total_time_running);
64671+ atomic64_read_unchecked(&event->child_total_time_running);
64672 }
64673 if (read_format & PERF_FORMAT_ID)
64674 values[n++] = primary_event_id(event);
64675@@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
64676 * need to add enough zero bytes after the string to handle
64677 * the 64bit alignment we do later.
64678 */
64679- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
64680+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
64681 if (!buf) {
64682 name = strncpy(tmp, "//enomem", sizeof(tmp));
64683 goto got_name;
64684 }
64685- name = d_path(&file->f_path, buf, PATH_MAX);
64686+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
64687 if (IS_ERR(name)) {
64688 name = strncpy(tmp, "//toolong", sizeof(tmp));
64689 goto got_name;
64690@@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
64691 event->parent = parent_event;
64692
64693 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64694- event->id = atomic64_inc_return(&perf_event_id);
64695+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64696
64697 event->state = PERF_EVENT_STATE_INACTIVE;
64698
64699@@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
64700 /*
64701 * Add back the child's count to the parent's count:
64702 */
64703- atomic64_add(child_val, &parent_event->child_count);
64704- atomic64_add(child_event->total_time_enabled,
64705+ atomic64_add_unchecked(child_val, &parent_event->child_count);
64706+ atomic64_add_unchecked(child_event->total_time_enabled,
64707 &parent_event->child_total_time_enabled);
64708- atomic64_add(child_event->total_time_running,
64709+ atomic64_add_unchecked(child_event->total_time_running,
64710 &parent_event->child_total_time_running);
64711
64712 /*
64713diff --git a/kernel/exit.c b/kernel/exit.c
64714index 4b4042f..5bdd8d5 100644
64715--- a/kernel/exit.c
64716+++ b/kernel/exit.c
64717@@ -58,6 +58,10 @@
64718 #include <asm/pgtable.h>
64719 #include <asm/mmu_context.h>
64720
64721+#ifdef CONFIG_GRKERNSEC
64722+extern rwlock_t grsec_exec_file_lock;
64723+#endif
64724+
64725 static void exit_mm(struct task_struct * tsk);
64726
64727 static void __unhash_process(struct task_struct *p, bool group_dead)
64728@@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
64729 struct task_struct *leader;
64730 int zap_leader;
64731 repeat:
64732+#ifdef CONFIG_NET
64733+ gr_del_task_from_ip_table(p);
64734+#endif
64735+
64736 /* don't need to get the RCU readlock here - the process is dead and
64737 * can't be modifying its own credentials. But shut RCU-lockdep up */
64738 rcu_read_lock();
64739@@ -381,7 +389,7 @@ int allow_signal(int sig)
64740 * know it'll be handled, so that they don't get converted to
64741 * SIGKILL or just silently dropped.
64742 */
64743- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
64744+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
64745 recalc_sigpending();
64746 spin_unlock_irq(&current->sighand->siglock);
64747 return 0;
64748@@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
64749 vsnprintf(current->comm, sizeof(current->comm), name, args);
64750 va_end(args);
64751
64752+#ifdef CONFIG_GRKERNSEC
64753+ write_lock(&grsec_exec_file_lock);
64754+ if (current->exec_file) {
64755+ fput(current->exec_file);
64756+ current->exec_file = NULL;
64757+ }
64758+ write_unlock(&grsec_exec_file_lock);
64759+#endif
64760+
64761+ gr_set_kernel_label(current);
64762+
64763 /*
64764 * If we were started as result of loading a module, close all of the
64765 * user space pages. We don't need them, and if we didn't close them
64766@@ -892,6 +911,8 @@ void do_exit(long code)
64767 struct task_struct *tsk = current;
64768 int group_dead;
64769
64770+ set_fs(USER_DS);
64771+
64772 profile_task_exit(tsk);
64773
64774 WARN_ON(blk_needs_flush_plug(tsk));
64775@@ -908,7 +929,6 @@ void do_exit(long code)
64776 * mm_release()->clear_child_tid() from writing to a user-controlled
64777 * kernel address.
64778 */
64779- set_fs(USER_DS);
64780
64781 ptrace_event(PTRACE_EVENT_EXIT, code);
64782
64783@@ -969,6 +989,9 @@ void do_exit(long code)
64784 tsk->exit_code = code;
64785 taskstats_exit(tsk, group_dead);
64786
64787+ gr_acl_handle_psacct(tsk, code);
64788+ gr_acl_handle_exit();
64789+
64790 exit_mm(tsk);
64791
64792 if (group_dead)
64793@@ -1085,7 +1108,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
64794 * Take down every thread in the group. This is called by fatal signals
64795 * as well as by sys_exit_group (below).
64796 */
64797-void
64798+__noreturn void
64799 do_group_exit(int exit_code)
64800 {
64801 struct signal_struct *sig = current->signal;
64802diff --git a/kernel/fork.c b/kernel/fork.c
64803index 26a7a67..a1053f9 100644
64804--- a/kernel/fork.c
64805+++ b/kernel/fork.c
64806@@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
64807 *stackend = STACK_END_MAGIC; /* for overflow detection */
64808
64809 #ifdef CONFIG_CC_STACKPROTECTOR
64810- tsk->stack_canary = get_random_int();
64811+ tsk->stack_canary = pax_get_random_long();
64812 #endif
64813
64814 /*
64815@@ -308,13 +308,77 @@ out:
64816 }
64817
64818 #ifdef CONFIG_MMU
64819+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
64820+{
64821+ struct vm_area_struct *tmp;
64822+ unsigned long charge;
64823+ struct mempolicy *pol;
64824+ struct file *file;
64825+
64826+ charge = 0;
64827+ if (mpnt->vm_flags & VM_ACCOUNT) {
64828+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64829+ if (security_vm_enough_memory(len))
64830+ goto fail_nomem;
64831+ charge = len;
64832+ }
64833+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64834+ if (!tmp)
64835+ goto fail_nomem;
64836+ *tmp = *mpnt;
64837+ tmp->vm_mm = mm;
64838+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
64839+ pol = mpol_dup(vma_policy(mpnt));
64840+ if (IS_ERR(pol))
64841+ goto fail_nomem_policy;
64842+ vma_set_policy(tmp, pol);
64843+ if (anon_vma_fork(tmp, mpnt))
64844+ goto fail_nomem_anon_vma_fork;
64845+ tmp->vm_flags &= ~VM_LOCKED;
64846+ tmp->vm_next = tmp->vm_prev = NULL;
64847+ tmp->vm_mirror = NULL;
64848+ file = tmp->vm_file;
64849+ if (file) {
64850+ struct inode *inode = file->f_path.dentry->d_inode;
64851+ struct address_space *mapping = file->f_mapping;
64852+
64853+ get_file(file);
64854+ if (tmp->vm_flags & VM_DENYWRITE)
64855+ atomic_dec(&inode->i_writecount);
64856+ mutex_lock(&mapping->i_mmap_mutex);
64857+ if (tmp->vm_flags & VM_SHARED)
64858+ mapping->i_mmap_writable++;
64859+ flush_dcache_mmap_lock(mapping);
64860+ /* insert tmp into the share list, just after mpnt */
64861+ vma_prio_tree_add(tmp, mpnt);
64862+ flush_dcache_mmap_unlock(mapping);
64863+ mutex_unlock(&mapping->i_mmap_mutex);
64864+ }
64865+
64866+ /*
64867+ * Clear hugetlb-related page reserves for children. This only
64868+ * affects MAP_PRIVATE mappings. Faults generated by the child
64869+ * are not guaranteed to succeed, even if read-only
64870+ */
64871+ if (is_vm_hugetlb_page(tmp))
64872+ reset_vma_resv_huge_pages(tmp);
64873+
64874+ return tmp;
64875+
64876+fail_nomem_anon_vma_fork:
64877+ mpol_put(pol);
64878+fail_nomem_policy:
64879+ kmem_cache_free(vm_area_cachep, tmp);
64880+fail_nomem:
64881+ vm_unacct_memory(charge);
64882+ return NULL;
64883+}
64884+
64885 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64886 {
64887 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
64888 struct rb_node **rb_link, *rb_parent;
64889 int retval;
64890- unsigned long charge;
64891- struct mempolicy *pol;
64892
64893 down_write(&oldmm->mmap_sem);
64894 flush_cache_dup_mm(oldmm);
64895@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64896 mm->locked_vm = 0;
64897 mm->mmap = NULL;
64898 mm->mmap_cache = NULL;
64899- mm->free_area_cache = oldmm->mmap_base;
64900- mm->cached_hole_size = ~0UL;
64901+ mm->free_area_cache = oldmm->free_area_cache;
64902+ mm->cached_hole_size = oldmm->cached_hole_size;
64903 mm->map_count = 0;
64904 cpumask_clear(mm_cpumask(mm));
64905 mm->mm_rb = RB_ROOT;
64906@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64907
64908 prev = NULL;
64909 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
64910- struct file *file;
64911-
64912 if (mpnt->vm_flags & VM_DONTCOPY) {
64913 long pages = vma_pages(mpnt);
64914 mm->total_vm -= pages;
64915@@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64916 -pages);
64917 continue;
64918 }
64919- charge = 0;
64920- if (mpnt->vm_flags & VM_ACCOUNT) {
64921- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64922- if (security_vm_enough_memory(len))
64923- goto fail_nomem;
64924- charge = len;
64925+ tmp = dup_vma(mm, mpnt);
64926+ if (!tmp) {
64927+ retval = -ENOMEM;
64928+ goto out;
64929 }
64930- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64931- if (!tmp)
64932- goto fail_nomem;
64933- *tmp = *mpnt;
64934- INIT_LIST_HEAD(&tmp->anon_vma_chain);
64935- pol = mpol_dup(vma_policy(mpnt));
64936- retval = PTR_ERR(pol);
64937- if (IS_ERR(pol))
64938- goto fail_nomem_policy;
64939- vma_set_policy(tmp, pol);
64940- tmp->vm_mm = mm;
64941- if (anon_vma_fork(tmp, mpnt))
64942- goto fail_nomem_anon_vma_fork;
64943- tmp->vm_flags &= ~VM_LOCKED;
64944- tmp->vm_next = tmp->vm_prev = NULL;
64945- file = tmp->vm_file;
64946- if (file) {
64947- struct inode *inode = file->f_path.dentry->d_inode;
64948- struct address_space *mapping = file->f_mapping;
64949-
64950- get_file(file);
64951- if (tmp->vm_flags & VM_DENYWRITE)
64952- atomic_dec(&inode->i_writecount);
64953- mutex_lock(&mapping->i_mmap_mutex);
64954- if (tmp->vm_flags & VM_SHARED)
64955- mapping->i_mmap_writable++;
64956- flush_dcache_mmap_lock(mapping);
64957- /* insert tmp into the share list, just after mpnt */
64958- vma_prio_tree_add(tmp, mpnt);
64959- flush_dcache_mmap_unlock(mapping);
64960- mutex_unlock(&mapping->i_mmap_mutex);
64961- }
64962-
64963- /*
64964- * Clear hugetlb-related page reserves for children. This only
64965- * affects MAP_PRIVATE mappings. Faults generated by the child
64966- * are not guaranteed to succeed, even if read-only
64967- */
64968- if (is_vm_hugetlb_page(tmp))
64969- reset_vma_resv_huge_pages(tmp);
64970
64971 /*
64972 * Link in the new vma and copy the page table entries.
64973@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64974 if (retval)
64975 goto out;
64976 }
64977+
64978+#ifdef CONFIG_PAX_SEGMEXEC
64979+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
64980+ struct vm_area_struct *mpnt_m;
64981+
64982+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
64983+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
64984+
64985+ if (!mpnt->vm_mirror)
64986+ continue;
64987+
64988+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
64989+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
64990+ mpnt->vm_mirror = mpnt_m;
64991+ } else {
64992+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
64993+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
64994+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
64995+ mpnt->vm_mirror->vm_mirror = mpnt;
64996+ }
64997+ }
64998+ BUG_ON(mpnt_m);
64999+ }
65000+#endif
65001+
65002 /* a new mm has just been created */
65003 arch_dup_mmap(oldmm, mm);
65004 retval = 0;
65005@@ -429,14 +474,6 @@ out:
65006 flush_tlb_mm(oldmm);
65007 up_write(&oldmm->mmap_sem);
65008 return retval;
65009-fail_nomem_anon_vma_fork:
65010- mpol_put(pol);
65011-fail_nomem_policy:
65012- kmem_cache_free(vm_area_cachep, tmp);
65013-fail_nomem:
65014- retval = -ENOMEM;
65015- vm_unacct_memory(charge);
65016- goto out;
65017 }
65018
65019 static inline int mm_alloc_pgd(struct mm_struct *mm)
65020@@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65021 return ERR_PTR(err);
65022
65023 mm = get_task_mm(task);
65024- if (mm && mm != current->mm &&
65025- !ptrace_may_access(task, mode)) {
65026+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65027+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65028 mmput(mm);
65029 mm = ERR_PTR(-EACCES);
65030 }
65031@@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65032 spin_unlock(&fs->lock);
65033 return -EAGAIN;
65034 }
65035- fs->users++;
65036+ atomic_inc(&fs->users);
65037 spin_unlock(&fs->lock);
65038 return 0;
65039 }
65040 tsk->fs = copy_fs_struct(fs);
65041 if (!tsk->fs)
65042 return -ENOMEM;
65043+ gr_set_chroot_entries(tsk, &tsk->fs->root);
65044 return 0;
65045 }
65046
65047@@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65048 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65049 #endif
65050 retval = -EAGAIN;
65051+
65052+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65053+
65054 if (atomic_read(&p->real_cred->user->processes) >=
65055 task_rlimit(p, RLIMIT_NPROC)) {
65056 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65057@@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65058 if (clone_flags & CLONE_THREAD)
65059 p->tgid = current->tgid;
65060
65061+ gr_copy_label(p);
65062+
65063 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65064 /*
65065 * Clear TID on mm_release()?
65066@@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
65067 bad_fork_free:
65068 free_task(p);
65069 fork_out:
65070+ gr_log_forkfail(retval);
65071+
65072 return ERR_PTR(retval);
65073 }
65074
65075@@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
65076 if (clone_flags & CLONE_PARENT_SETTID)
65077 put_user(nr, parent_tidptr);
65078
65079+ gr_handle_brute_check();
65080+
65081 if (clone_flags & CLONE_VFORK) {
65082 p->vfork_done = &vfork;
65083 init_completion(&vfork);
65084@@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65085 return 0;
65086
65087 /* don't need lock here; in the worst case we'll do useless copy */
65088- if (fs->users == 1)
65089+ if (atomic_read(&fs->users) == 1)
65090 return 0;
65091
65092 *new_fsp = copy_fs_struct(fs);
65093@@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65094 fs = current->fs;
65095 spin_lock(&fs->lock);
65096 current->fs = new_fs;
65097- if (--fs->users)
65098+ gr_set_chroot_entries(current, &current->fs->root);
65099+ if (atomic_dec_return(&fs->users))
65100 new_fs = NULL;
65101 else
65102 new_fs = fs;
65103diff --git a/kernel/futex.c b/kernel/futex.c
65104index 1614be2..37abc7e 100644
65105--- a/kernel/futex.c
65106+++ b/kernel/futex.c
65107@@ -54,6 +54,7 @@
65108 #include <linux/mount.h>
65109 #include <linux/pagemap.h>
65110 #include <linux/syscalls.h>
65111+#include <linux/ptrace.h>
65112 #include <linux/signal.h>
65113 #include <linux/export.h>
65114 #include <linux/magic.h>
65115@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65116 struct page *page, *page_head;
65117 int err, ro = 0;
65118
65119+#ifdef CONFIG_PAX_SEGMEXEC
65120+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65121+ return -EFAULT;
65122+#endif
65123+
65124 /*
65125 * The futex address must be "naturally" aligned.
65126 */
65127@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65128 if (!p)
65129 goto err_unlock;
65130 ret = -EPERM;
65131+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65132+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
65133+ goto err_unlock;
65134+#endif
65135 pcred = __task_cred(p);
65136 /* If victim is in different user_ns, then uids are not
65137 comparable, so we must have CAP_SYS_PTRACE */
65138@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
65139 {
65140 u32 curval;
65141 int i;
65142+ mm_segment_t oldfs;
65143
65144 /*
65145 * This will fail and we want it. Some arch implementations do
65146@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
65147 * implementation, the non-functional ones will return
65148 * -ENOSYS.
65149 */
65150+ oldfs = get_fs();
65151+ set_fs(USER_DS);
65152 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65153 futex_cmpxchg_enabled = 1;
65154+ set_fs(oldfs);
65155
65156 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65157 plist_head_init(&futex_queues[i].chain);
65158diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65159index 5f9e689..582d46d 100644
65160--- a/kernel/futex_compat.c
65161+++ b/kernel/futex_compat.c
65162@@ -10,6 +10,7 @@
65163 #include <linux/compat.h>
65164 #include <linux/nsproxy.h>
65165 #include <linux/futex.h>
65166+#include <linux/ptrace.h>
65167
65168 #include <asm/uaccess.h>
65169
65170@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65171 {
65172 struct compat_robust_list_head __user *head;
65173 unsigned long ret;
65174- const struct cred *cred = current_cred(), *pcred;
65175+ const struct cred *cred = current_cred();
65176+ const struct cred *pcred;
65177
65178 if (!futex_cmpxchg_enabled)
65179 return -ENOSYS;
65180@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65181 if (!p)
65182 goto err_unlock;
65183 ret = -EPERM;
65184+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65185+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
65186+ goto err_unlock;
65187+#endif
65188 pcred = __task_cred(p);
65189 /* If victim is in different user_ns, then uids are not
65190 comparable, so we must have CAP_SYS_PTRACE */
65191diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65192index 9b22d03..6295b62 100644
65193--- a/kernel/gcov/base.c
65194+++ b/kernel/gcov/base.c
65195@@ -102,11 +102,6 @@ void gcov_enable_events(void)
65196 }
65197
65198 #ifdef CONFIG_MODULES
65199-static inline int within(void *addr, void *start, unsigned long size)
65200-{
65201- return ((addr >= start) && (addr < start + size));
65202-}
65203-
65204 /* Update list and generate events when modules are unloaded. */
65205 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65206 void *data)
65207@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65208 prev = NULL;
65209 /* Remove entries located in module from linked list. */
65210 for (info = gcov_info_head; info; info = info->next) {
65211- if (within(info, mod->module_core, mod->core_size)) {
65212+ if (within_module_core_rw((unsigned long)info, mod)) {
65213 if (prev)
65214 prev->next = info->next;
65215 else
65216diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65217index ae34bf5..4e2f3d0 100644
65218--- a/kernel/hrtimer.c
65219+++ b/kernel/hrtimer.c
65220@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65221 local_irq_restore(flags);
65222 }
65223
65224-static void run_hrtimer_softirq(struct softirq_action *h)
65225+static void run_hrtimer_softirq(void)
65226 {
65227 hrtimer_peek_ahead_timers();
65228 }
65229diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65230index 01d3b70..9e4d098 100644
65231--- a/kernel/jump_label.c
65232+++ b/kernel/jump_label.c
65233@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65234
65235 size = (((unsigned long)stop - (unsigned long)start)
65236 / sizeof(struct jump_entry));
65237+ pax_open_kernel();
65238 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65239+ pax_close_kernel();
65240 }
65241
65242 static void jump_label_update(struct jump_label_key *key, int enable);
65243@@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65244 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65245 struct jump_entry *iter;
65246
65247+ pax_open_kernel();
65248 for (iter = iter_start; iter < iter_stop; iter++) {
65249 if (within_module_init(iter->code, mod))
65250 iter->code = 0;
65251 }
65252+ pax_close_kernel();
65253 }
65254
65255 static int
65256diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65257index 079f1d3..a407562 100644
65258--- a/kernel/kallsyms.c
65259+++ b/kernel/kallsyms.c
65260@@ -11,6 +11,9 @@
65261 * Changed the compression method from stem compression to "table lookup"
65262 * compression (see scripts/kallsyms.c for a more complete description)
65263 */
65264+#ifdef CONFIG_GRKERNSEC_HIDESYM
65265+#define __INCLUDED_BY_HIDESYM 1
65266+#endif
65267 #include <linux/kallsyms.h>
65268 #include <linux/module.h>
65269 #include <linux/init.h>
65270@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65271
65272 static inline int is_kernel_inittext(unsigned long addr)
65273 {
65274+ if (system_state != SYSTEM_BOOTING)
65275+ return 0;
65276+
65277 if (addr >= (unsigned long)_sinittext
65278 && addr <= (unsigned long)_einittext)
65279 return 1;
65280 return 0;
65281 }
65282
65283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65284+#ifdef CONFIG_MODULES
65285+static inline int is_module_text(unsigned long addr)
65286+{
65287+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65288+ return 1;
65289+
65290+ addr = ktla_ktva(addr);
65291+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65292+}
65293+#else
65294+static inline int is_module_text(unsigned long addr)
65295+{
65296+ return 0;
65297+}
65298+#endif
65299+#endif
65300+
65301 static inline int is_kernel_text(unsigned long addr)
65302 {
65303 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65304@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65305
65306 static inline int is_kernel(unsigned long addr)
65307 {
65308+
65309+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65310+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
65311+ return 1;
65312+
65313+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65314+#else
65315 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65316+#endif
65317+
65318 return 1;
65319 return in_gate_area_no_mm(addr);
65320 }
65321
65322 static int is_ksym_addr(unsigned long addr)
65323 {
65324+
65325+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65326+ if (is_module_text(addr))
65327+ return 0;
65328+#endif
65329+
65330 if (all_var)
65331 return is_kernel(addr);
65332
65333@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65334
65335 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65336 {
65337- iter->name[0] = '\0';
65338 iter->nameoff = get_symbol_offset(new_pos);
65339 iter->pos = new_pos;
65340 }
65341@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65342 {
65343 struct kallsym_iter *iter = m->private;
65344
65345+#ifdef CONFIG_GRKERNSEC_HIDESYM
65346+ if (current_uid())
65347+ return 0;
65348+#endif
65349+
65350 /* Some debugging symbols have no name. Ignore them. */
65351 if (!iter->name[0])
65352 return 0;
65353@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65354 struct kallsym_iter *iter;
65355 int ret;
65356
65357- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65358+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65359 if (!iter)
65360 return -ENOMEM;
65361 reset_iter(iter, 0);
65362diff --git a/kernel/kexec.c b/kernel/kexec.c
65363index 7b08867..3bac516 100644
65364--- a/kernel/kexec.c
65365+++ b/kernel/kexec.c
65366@@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65367 unsigned long flags)
65368 {
65369 struct compat_kexec_segment in;
65370- struct kexec_segment out, __user *ksegments;
65371+ struct kexec_segment out;
65372+ struct kexec_segment __user *ksegments;
65373 unsigned long i, result;
65374
65375 /* Don't allow clients that don't understand the native
65376diff --git a/kernel/kmod.c b/kernel/kmod.c
65377index a0a8854..642b106 100644
65378--- a/kernel/kmod.c
65379+++ b/kernel/kmod.c
65380@@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
65381 * If module auto-loading support is disabled then this function
65382 * becomes a no-operation.
65383 */
65384-int __request_module(bool wait, const char *fmt, ...)
65385+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65386 {
65387- va_list args;
65388 char module_name[MODULE_NAME_LEN];
65389 unsigned int max_modprobes;
65390 int ret;
65391- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
65392+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
65393 static char *envp[] = { "HOME=/",
65394 "TERM=linux",
65395 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
65396@@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
65397 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65398 static int kmod_loop_msg;
65399
65400- va_start(args, fmt);
65401- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65402- va_end(args);
65403+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65404 if (ret >= MODULE_NAME_LEN)
65405 return -ENAMETOOLONG;
65406
65407@@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
65408 if (ret)
65409 return ret;
65410
65411+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65412+ if (!current_uid()) {
65413+ /* hack to workaround consolekit/udisks stupidity */
65414+ read_lock(&tasklist_lock);
65415+ if (!strcmp(current->comm, "mount") &&
65416+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65417+ read_unlock(&tasklist_lock);
65418+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65419+ return -EPERM;
65420+ }
65421+ read_unlock(&tasklist_lock);
65422+ }
65423+#endif
65424+
65425 /* If modprobe needs a service that is in a module, we get a recursive
65426 * loop. Limit the number of running kmod threads to max_threads/2 or
65427 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65428@@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
65429 atomic_dec(&kmod_concurrent);
65430 return ret;
65431 }
65432+
65433+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65434+{
65435+ va_list args;
65436+ int ret;
65437+
65438+ va_start(args, fmt);
65439+ ret = ____request_module(wait, module_param, fmt, args);
65440+ va_end(args);
65441+
65442+ return ret;
65443+}
65444+
65445+int __request_module(bool wait, const char *fmt, ...)
65446+{
65447+ va_list args;
65448+ int ret;
65449+
65450+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65451+ if (current_uid()) {
65452+ char module_param[MODULE_NAME_LEN];
65453+
65454+ memset(module_param, 0, sizeof(module_param));
65455+
65456+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65457+
65458+ va_start(args, fmt);
65459+ ret = ____request_module(wait, module_param, fmt, args);
65460+ va_end(args);
65461+
65462+ return ret;
65463+ }
65464+#endif
65465+
65466+ va_start(args, fmt);
65467+ ret = ____request_module(wait, NULL, fmt, args);
65468+ va_end(args);
65469+
65470+ return ret;
65471+}
65472+
65473 EXPORT_SYMBOL(__request_module);
65474 #endif /* CONFIG_MODULES */
65475
65476@@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
65477 *
65478 * Thus the __user pointer cast is valid here.
65479 */
65480- sys_wait4(pid, (int __user *)&ret, 0, NULL);
65481+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65482
65483 /*
65484 * If ret is 0, either ____call_usermodehelper failed and the
65485diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65486index c62b854..cb67968 100644
65487--- a/kernel/kprobes.c
65488+++ b/kernel/kprobes.c
65489@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65490 * kernel image and loaded module images reside. This is required
65491 * so x86_64 can correctly handle the %rip-relative fixups.
65492 */
65493- kip->insns = module_alloc(PAGE_SIZE);
65494+ kip->insns = module_alloc_exec(PAGE_SIZE);
65495 if (!kip->insns) {
65496 kfree(kip);
65497 return NULL;
65498@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65499 */
65500 if (!list_is_singular(&kip->list)) {
65501 list_del(&kip->list);
65502- module_free(NULL, kip->insns);
65503+ module_free_exec(NULL, kip->insns);
65504 kfree(kip);
65505 }
65506 return 1;
65507@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65508 {
65509 int i, err = 0;
65510 unsigned long offset = 0, size = 0;
65511- char *modname, namebuf[128];
65512+ char *modname, namebuf[KSYM_NAME_LEN];
65513 const char *symbol_name;
65514 void *addr;
65515 struct kprobe_blackpoint *kb;
65516@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65517 const char *sym = NULL;
65518 unsigned int i = *(loff_t *) v;
65519 unsigned long offset = 0;
65520- char *modname, namebuf[128];
65521+ char *modname, namebuf[KSYM_NAME_LEN];
65522
65523 head = &kprobe_table[i];
65524 preempt_disable();
65525diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65526index 8889f7d..95319b7 100644
65527--- a/kernel/lockdep.c
65528+++ b/kernel/lockdep.c
65529@@ -590,6 +590,10 @@ static int static_obj(void *obj)
65530 end = (unsigned long) &_end,
65531 addr = (unsigned long) obj;
65532
65533+#ifdef CONFIG_PAX_KERNEXEC
65534+ start = ktla_ktva(start);
65535+#endif
65536+
65537 /*
65538 * static variable?
65539 */
65540@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65541 if (!static_obj(lock->key)) {
65542 debug_locks_off();
65543 printk("INFO: trying to register non-static key.\n");
65544+ printk("lock:%pS key:%pS.\n", lock, lock->key);
65545 printk("the code is fine but needs lockdep annotation.\n");
65546 printk("turning off the locking correctness validator.\n");
65547 dump_stack();
65548@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65549 if (!class)
65550 return 0;
65551 }
65552- atomic_inc((atomic_t *)&class->ops);
65553+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65554 if (very_verbose(class)) {
65555 printk("\nacquire class [%p] %s", class->key, class->name);
65556 if (class->name_version > 1)
65557diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65558index 91c32a0..b2c71c5 100644
65559--- a/kernel/lockdep_proc.c
65560+++ b/kernel/lockdep_proc.c
65561@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65562
65563 static void print_name(struct seq_file *m, struct lock_class *class)
65564 {
65565- char str[128];
65566+ char str[KSYM_NAME_LEN];
65567 const char *name = class->name;
65568
65569 if (!name) {
65570diff --git a/kernel/module.c b/kernel/module.c
65571index 2c93276..476fe81 100644
65572--- a/kernel/module.c
65573+++ b/kernel/module.c
65574@@ -58,6 +58,7 @@
65575 #include <linux/jump_label.h>
65576 #include <linux/pfn.h>
65577 #include <linux/bsearch.h>
65578+#include <linux/grsecurity.h>
65579
65580 #define CREATE_TRACE_POINTS
65581 #include <trace/events/module.h>
65582@@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65583
65584 /* Bounds of module allocation, for speeding __module_address.
65585 * Protected by module_mutex. */
65586-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65587+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65588+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65589
65590 int register_module_notifier(struct notifier_block * nb)
65591 {
65592@@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65593 return true;
65594
65595 list_for_each_entry_rcu(mod, &modules, list) {
65596- struct symsearch arr[] = {
65597+ struct symsearch modarr[] = {
65598 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65599 NOT_GPL_ONLY, false },
65600 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65601@@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65602 #endif
65603 };
65604
65605- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65606+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65607 return true;
65608 }
65609 return false;
65610@@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65611 static int percpu_modalloc(struct module *mod,
65612 unsigned long size, unsigned long align)
65613 {
65614- if (align > PAGE_SIZE) {
65615+ if (align-1 >= PAGE_SIZE) {
65616 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65617 mod->name, align, PAGE_SIZE);
65618 align = PAGE_SIZE;
65619@@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
65620 static ssize_t show_coresize(struct module_attribute *mattr,
65621 struct module_kobject *mk, char *buffer)
65622 {
65623- return sprintf(buffer, "%u\n", mk->mod->core_size);
65624+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
65625 }
65626
65627 static struct module_attribute modinfo_coresize =
65628@@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
65629 static ssize_t show_initsize(struct module_attribute *mattr,
65630 struct module_kobject *mk, char *buffer)
65631 {
65632- return sprintf(buffer, "%u\n", mk->mod->init_size);
65633+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
65634 }
65635
65636 static struct module_attribute modinfo_initsize =
65637@@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
65638 */
65639 #ifdef CONFIG_SYSFS
65640
65641-#ifdef CONFIG_KALLSYMS
65642+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65643 static inline bool sect_empty(const Elf_Shdr *sect)
65644 {
65645 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
65646@@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
65647
65648 static void unset_module_core_ro_nx(struct module *mod)
65649 {
65650- set_page_attributes(mod->module_core + mod->core_text_size,
65651- mod->module_core + mod->core_size,
65652+ set_page_attributes(mod->module_core_rw,
65653+ mod->module_core_rw + mod->core_size_rw,
65654 set_memory_x);
65655- set_page_attributes(mod->module_core,
65656- mod->module_core + mod->core_ro_size,
65657+ set_page_attributes(mod->module_core_rx,
65658+ mod->module_core_rx + mod->core_size_rx,
65659 set_memory_rw);
65660 }
65661
65662 static void unset_module_init_ro_nx(struct module *mod)
65663 {
65664- set_page_attributes(mod->module_init + mod->init_text_size,
65665- mod->module_init + mod->init_size,
65666+ set_page_attributes(mod->module_init_rw,
65667+ mod->module_init_rw + mod->init_size_rw,
65668 set_memory_x);
65669- set_page_attributes(mod->module_init,
65670- mod->module_init + mod->init_ro_size,
65671+ set_page_attributes(mod->module_init_rx,
65672+ mod->module_init_rx + mod->init_size_rx,
65673 set_memory_rw);
65674 }
65675
65676@@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
65677
65678 mutex_lock(&module_mutex);
65679 list_for_each_entry_rcu(mod, &modules, list) {
65680- if ((mod->module_core) && (mod->core_text_size)) {
65681- set_page_attributes(mod->module_core,
65682- mod->module_core + mod->core_text_size,
65683+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
65684+ set_page_attributes(mod->module_core_rx,
65685+ mod->module_core_rx + mod->core_size_rx,
65686 set_memory_rw);
65687 }
65688- if ((mod->module_init) && (mod->init_text_size)) {
65689- set_page_attributes(mod->module_init,
65690- mod->module_init + mod->init_text_size,
65691+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
65692+ set_page_attributes(mod->module_init_rx,
65693+ mod->module_init_rx + mod->init_size_rx,
65694 set_memory_rw);
65695 }
65696 }
65697@@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
65698
65699 mutex_lock(&module_mutex);
65700 list_for_each_entry_rcu(mod, &modules, list) {
65701- if ((mod->module_core) && (mod->core_text_size)) {
65702- set_page_attributes(mod->module_core,
65703- mod->module_core + mod->core_text_size,
65704+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
65705+ set_page_attributes(mod->module_core_rx,
65706+ mod->module_core_rx + mod->core_size_rx,
65707 set_memory_ro);
65708 }
65709- if ((mod->module_init) && (mod->init_text_size)) {
65710- set_page_attributes(mod->module_init,
65711- mod->module_init + mod->init_text_size,
65712+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
65713+ set_page_attributes(mod->module_init_rx,
65714+ mod->module_init_rx + mod->init_size_rx,
65715 set_memory_ro);
65716 }
65717 }
65718@@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
65719
65720 /* This may be NULL, but that's OK */
65721 unset_module_init_ro_nx(mod);
65722- module_free(mod, mod->module_init);
65723+ module_free(mod, mod->module_init_rw);
65724+ module_free_exec(mod, mod->module_init_rx);
65725 kfree(mod->args);
65726 percpu_modfree(mod);
65727
65728 /* Free lock-classes: */
65729- lockdep_free_key_range(mod->module_core, mod->core_size);
65730+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
65731+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
65732
65733 /* Finally, free the core (containing the module structure) */
65734 unset_module_core_ro_nx(mod);
65735- module_free(mod, mod->module_core);
65736+ module_free_exec(mod, mod->module_core_rx);
65737+ module_free(mod, mod->module_core_rw);
65738
65739 #ifdef CONFIG_MPU
65740 update_protections(current->mm);
65741@@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65742 unsigned int i;
65743 int ret = 0;
65744 const struct kernel_symbol *ksym;
65745+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65746+ int is_fs_load = 0;
65747+ int register_filesystem_found = 0;
65748+ char *p;
65749+
65750+ p = strstr(mod->args, "grsec_modharden_fs");
65751+ if (p) {
65752+ char *endptr = p + strlen("grsec_modharden_fs");
65753+ /* copy \0 as well */
65754+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
65755+ is_fs_load = 1;
65756+ }
65757+#endif
65758
65759 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
65760 const char *name = info->strtab + sym[i].st_name;
65761
65762+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65763+ /* it's a real shame this will never get ripped and copied
65764+ upstream! ;(
65765+ */
65766+ if (is_fs_load && !strcmp(name, "register_filesystem"))
65767+ register_filesystem_found = 1;
65768+#endif
65769+
65770 switch (sym[i].st_shndx) {
65771 case SHN_COMMON:
65772 /* We compiled with -fno-common. These are not
65773@@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65774 ksym = resolve_symbol_wait(mod, info, name);
65775 /* Ok if resolved. */
65776 if (ksym && !IS_ERR(ksym)) {
65777+ pax_open_kernel();
65778 sym[i].st_value = ksym->value;
65779+ pax_close_kernel();
65780 break;
65781 }
65782
65783@@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65784 secbase = (unsigned long)mod_percpu(mod);
65785 else
65786 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
65787+ pax_open_kernel();
65788 sym[i].st_value += secbase;
65789+ pax_close_kernel();
65790 break;
65791 }
65792 }
65793
65794+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65795+ if (is_fs_load && !register_filesystem_found) {
65796+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
65797+ ret = -EPERM;
65798+ }
65799+#endif
65800+
65801 return ret;
65802 }
65803
65804@@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
65805 || s->sh_entsize != ~0UL
65806 || strstarts(sname, ".init"))
65807 continue;
65808- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
65809+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65810+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
65811+ else
65812+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
65813 pr_debug("\t%s\n", sname);
65814 }
65815- switch (m) {
65816- case 0: /* executable */
65817- mod->core_size = debug_align(mod->core_size);
65818- mod->core_text_size = mod->core_size;
65819- break;
65820- case 1: /* RO: text and ro-data */
65821- mod->core_size = debug_align(mod->core_size);
65822- mod->core_ro_size = mod->core_size;
65823- break;
65824- case 3: /* whole core */
65825- mod->core_size = debug_align(mod->core_size);
65826- break;
65827- }
65828 }
65829
65830 pr_debug("Init section allocation order:\n");
65831@@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
65832 || s->sh_entsize != ~0UL
65833 || !strstarts(sname, ".init"))
65834 continue;
65835- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
65836- | INIT_OFFSET_MASK);
65837+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65838+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
65839+ else
65840+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
65841+ s->sh_entsize |= INIT_OFFSET_MASK;
65842 pr_debug("\t%s\n", sname);
65843 }
65844- switch (m) {
65845- case 0: /* executable */
65846- mod->init_size = debug_align(mod->init_size);
65847- mod->init_text_size = mod->init_size;
65848- break;
65849- case 1: /* RO: text and ro-data */
65850- mod->init_size = debug_align(mod->init_size);
65851- mod->init_ro_size = mod->init_size;
65852- break;
65853- case 3: /* whole init */
65854- mod->init_size = debug_align(mod->init_size);
65855- break;
65856- }
65857 }
65858 }
65859
65860@@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65861
65862 /* Put symbol section at end of init part of module. */
65863 symsect->sh_flags |= SHF_ALLOC;
65864- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
65865+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
65866 info->index.sym) | INIT_OFFSET_MASK;
65867 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
65868
65869@@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65870 }
65871
65872 /* Append room for core symbols at end of core part. */
65873- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
65874- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
65875- mod->core_size += strtab_size;
65876+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
65877+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
65878+ mod->core_size_rx += strtab_size;
65879
65880 /* Put string table section at end of init part of module. */
65881 strsect->sh_flags |= SHF_ALLOC;
65882- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
65883+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
65884 info->index.str) | INIT_OFFSET_MASK;
65885 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
65886 }
65887@@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65888 /* Make sure we get permanent strtab: don't use info->strtab. */
65889 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
65890
65891+ pax_open_kernel();
65892+
65893 /* Set types up while we still have access to sections. */
65894 for (i = 0; i < mod->num_symtab; i++)
65895 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
65896
65897- mod->core_symtab = dst = mod->module_core + info->symoffs;
65898- mod->core_strtab = s = mod->module_core + info->stroffs;
65899+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
65900+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
65901 src = mod->symtab;
65902 *dst = *src;
65903 *s++ = 0;
65904@@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65905 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
65906 }
65907 mod->core_num_syms = ndst;
65908+
65909+ pax_close_kernel();
65910 }
65911 #else
65912 static inline void layout_symtab(struct module *mod, struct load_info *info)
65913@@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
65914 return size == 0 ? NULL : vmalloc_exec(size);
65915 }
65916
65917-static void *module_alloc_update_bounds(unsigned long size)
65918+static void *module_alloc_update_bounds_rw(unsigned long size)
65919 {
65920 void *ret = module_alloc(size);
65921
65922 if (ret) {
65923 mutex_lock(&module_mutex);
65924 /* Update module bounds. */
65925- if ((unsigned long)ret < module_addr_min)
65926- module_addr_min = (unsigned long)ret;
65927- if ((unsigned long)ret + size > module_addr_max)
65928- module_addr_max = (unsigned long)ret + size;
65929+ if ((unsigned long)ret < module_addr_min_rw)
65930+ module_addr_min_rw = (unsigned long)ret;
65931+ if ((unsigned long)ret + size > module_addr_max_rw)
65932+ module_addr_max_rw = (unsigned long)ret + size;
65933+ mutex_unlock(&module_mutex);
65934+ }
65935+ return ret;
65936+}
65937+
65938+static void *module_alloc_update_bounds_rx(unsigned long size)
65939+{
65940+ void *ret = module_alloc_exec(size);
65941+
65942+ if (ret) {
65943+ mutex_lock(&module_mutex);
65944+ /* Update module bounds. */
65945+ if ((unsigned long)ret < module_addr_min_rx)
65946+ module_addr_min_rx = (unsigned long)ret;
65947+ if ((unsigned long)ret + size > module_addr_max_rx)
65948+ module_addr_max_rx = (unsigned long)ret + size;
65949 mutex_unlock(&module_mutex);
65950 }
65951 return ret;
65952@@ -2513,8 +2550,14 @@ static struct module *setup_load_info(struct load_info *info)
65953 static int check_modinfo(struct module *mod, struct load_info *info)
65954 {
65955 const char *modmagic = get_modinfo(info, "vermagic");
65956+ const char *license = get_modinfo(info, "license");
65957 int err;
65958
65959+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
65960+ if (!license || !license_is_gpl_compatible(license))
65961+ return -ENOEXEC;
65962+#endif
65963+
65964 /* This is allowed: modprobe --force will invalidate it. */
65965 if (!modmagic) {
65966 err = try_to_force_load(mod, "bad vermagic");
65967@@ -2537,7 +2580,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
65968 }
65969
65970 /* Set up license info based on the info section */
65971- set_license(mod, get_modinfo(info, "license"));
65972+ set_license(mod, license);
65973
65974 return 0;
65975 }
65976@@ -2631,7 +2674,7 @@ static int move_module(struct module *mod, struct load_info *info)
65977 void *ptr;
65978
65979 /* Do the allocs. */
65980- ptr = module_alloc_update_bounds(mod->core_size);
65981+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
65982 /*
65983 * The pointer to this block is stored in the module structure
65984 * which is inside the block. Just mark it as not being a
65985@@ -2641,23 +2684,50 @@ static int move_module(struct module *mod, struct load_info *info)
65986 if (!ptr)
65987 return -ENOMEM;
65988
65989- memset(ptr, 0, mod->core_size);
65990- mod->module_core = ptr;
65991+ memset(ptr, 0, mod->core_size_rw);
65992+ mod->module_core_rw = ptr;
65993
65994- ptr = module_alloc_update_bounds(mod->init_size);
65995+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
65996 /*
65997 * The pointer to this block is stored in the module structure
65998 * which is inside the block. This block doesn't need to be
65999 * scanned as it contains data and code that will be freed
66000 * after the module is initialized.
66001 */
66002- kmemleak_ignore(ptr);
66003- if (!ptr && mod->init_size) {
66004- module_free(mod, mod->module_core);
66005+ kmemleak_not_leak(ptr);
66006+ if (!ptr && mod->init_size_rw) {
66007+ module_free(mod, mod->module_core_rw);
66008 return -ENOMEM;
66009 }
66010- memset(ptr, 0, mod->init_size);
66011- mod->module_init = ptr;
66012+ memset(ptr, 0, mod->init_size_rw);
66013+ mod->module_init_rw = ptr;
66014+
66015+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66016+ kmemleak_not_leak(ptr);
66017+ if (!ptr) {
66018+ module_free(mod, mod->module_init_rw);
66019+ module_free(mod, mod->module_core_rw);
66020+ return -ENOMEM;
66021+ }
66022+
66023+ pax_open_kernel();
66024+ memset(ptr, 0, mod->core_size_rx);
66025+ pax_close_kernel();
66026+ mod->module_core_rx = ptr;
66027+
66028+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66029+ kmemleak_not_leak(ptr);
66030+ if (!ptr && mod->init_size_rx) {
66031+ module_free_exec(mod, mod->module_core_rx);
66032+ module_free(mod, mod->module_init_rw);
66033+ module_free(mod, mod->module_core_rw);
66034+ return -ENOMEM;
66035+ }
66036+
66037+ pax_open_kernel();
66038+ memset(ptr, 0, mod->init_size_rx);
66039+ pax_close_kernel();
66040+ mod->module_init_rx = ptr;
66041
66042 /* Transfer each section which specifies SHF_ALLOC */
66043 pr_debug("final section addresses:\n");
66044@@ -2668,16 +2738,45 @@ static int move_module(struct module *mod, struct load_info *info)
66045 if (!(shdr->sh_flags & SHF_ALLOC))
66046 continue;
66047
66048- if (shdr->sh_entsize & INIT_OFFSET_MASK)
66049- dest = mod->module_init
66050- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66051- else
66052- dest = mod->module_core + shdr->sh_entsize;
66053+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66054+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66055+ dest = mod->module_init_rw
66056+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66057+ else
66058+ dest = mod->module_init_rx
66059+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66060+ } else {
66061+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66062+ dest = mod->module_core_rw + shdr->sh_entsize;
66063+ else
66064+ dest = mod->module_core_rx + shdr->sh_entsize;
66065+ }
66066+
66067+ if (shdr->sh_type != SHT_NOBITS) {
66068+
66069+#ifdef CONFIG_PAX_KERNEXEC
66070+#ifdef CONFIG_X86_64
66071+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66072+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66073+#endif
66074+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66075+ pax_open_kernel();
66076+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66077+ pax_close_kernel();
66078+ } else
66079+#endif
66080
66081- if (shdr->sh_type != SHT_NOBITS)
66082 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66083+ }
66084 /* Update sh_addr to point to copy in image. */
66085- shdr->sh_addr = (unsigned long)dest;
66086+
66087+#ifdef CONFIG_PAX_KERNEXEC
66088+ if (shdr->sh_flags & SHF_EXECINSTR)
66089+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
66090+ else
66091+#endif
66092+
66093+ shdr->sh_addr = (unsigned long)dest;
66094 pr_debug("\t0x%lx %s\n",
66095 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66096 }
66097@@ -2728,12 +2827,12 @@ static void flush_module_icache(const struct module *mod)
66098 * Do it before processing of module parameters, so the module
66099 * can provide parameter accessor functions of its own.
66100 */
66101- if (mod->module_init)
66102- flush_icache_range((unsigned long)mod->module_init,
66103- (unsigned long)mod->module_init
66104- + mod->init_size);
66105- flush_icache_range((unsigned long)mod->module_core,
66106- (unsigned long)mod->module_core + mod->core_size);
66107+ if (mod->module_init_rx)
66108+ flush_icache_range((unsigned long)mod->module_init_rx,
66109+ (unsigned long)mod->module_init_rx
66110+ + mod->init_size_rx);
66111+ flush_icache_range((unsigned long)mod->module_core_rx,
66112+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
66113
66114 set_fs(old_fs);
66115 }
66116@@ -2803,8 +2902,10 @@ out:
66117 static void module_deallocate(struct module *mod, struct load_info *info)
66118 {
66119 percpu_modfree(mod);
66120- module_free(mod, mod->module_init);
66121- module_free(mod, mod->module_core);
66122+ module_free_exec(mod, mod->module_init_rx);
66123+ module_free_exec(mod, mod->module_core_rx);
66124+ module_free(mod, mod->module_init_rw);
66125+ module_free(mod, mod->module_core_rw);
66126 }
66127
66128 int __weak module_finalize(const Elf_Ehdr *hdr,
66129@@ -2868,9 +2969,38 @@ static struct module *load_module(void __user *umod,
66130 if (err)
66131 goto free_unload;
66132
66133+ /* Now copy in args */
66134+ mod->args = strndup_user(uargs, ~0UL >> 1);
66135+ if (IS_ERR(mod->args)) {
66136+ err = PTR_ERR(mod->args);
66137+ goto free_unload;
66138+ }
66139+
66140 /* Set up MODINFO_ATTR fields */
66141 setup_modinfo(mod, &info);
66142
66143+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66144+ {
66145+ char *p, *p2;
66146+
66147+ if (strstr(mod->args, "grsec_modharden_netdev")) {
66148+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66149+ err = -EPERM;
66150+ goto free_modinfo;
66151+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66152+ p += strlen("grsec_modharden_normal");
66153+ p2 = strstr(p, "_");
66154+ if (p2) {
66155+ *p2 = '\0';
66156+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66157+ *p2 = '_';
66158+ }
66159+ err = -EPERM;
66160+ goto free_modinfo;
66161+ }
66162+ }
66163+#endif
66164+
66165 /* Fix up syms, so that st_value is a pointer to location. */
66166 err = simplify_symbols(mod, &info);
66167 if (err < 0)
66168@@ -2886,13 +3016,6 @@ static struct module *load_module(void __user *umod,
66169
66170 flush_module_icache(mod);
66171
66172- /* Now copy in args */
66173- mod->args = strndup_user(uargs, ~0UL >> 1);
66174- if (IS_ERR(mod->args)) {
66175- err = PTR_ERR(mod->args);
66176- goto free_arch_cleanup;
66177- }
66178-
66179 /* Mark state as coming so strong_try_module_get() ignores us. */
66180 mod->state = MODULE_STATE_COMING;
66181
66182@@ -2949,11 +3072,10 @@ static struct module *load_module(void __user *umod,
66183 unlock:
66184 mutex_unlock(&module_mutex);
66185 synchronize_sched();
66186- kfree(mod->args);
66187- free_arch_cleanup:
66188 module_arch_cleanup(mod);
66189 free_modinfo:
66190 free_modinfo(mod);
66191+ kfree(mod->args);
66192 free_unload:
66193 module_unload_free(mod);
66194 free_module:
66195@@ -2994,16 +3116,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66196 MODULE_STATE_COMING, mod);
66197
66198 /* Set RO and NX regions for core */
66199- set_section_ro_nx(mod->module_core,
66200- mod->core_text_size,
66201- mod->core_ro_size,
66202- mod->core_size);
66203+ set_section_ro_nx(mod->module_core_rx,
66204+ mod->core_size_rx,
66205+ mod->core_size_rx,
66206+ mod->core_size_rx);
66207
66208 /* Set RO and NX regions for init */
66209- set_section_ro_nx(mod->module_init,
66210- mod->init_text_size,
66211- mod->init_ro_size,
66212- mod->init_size);
66213+ set_section_ro_nx(mod->module_init_rx,
66214+ mod->init_size_rx,
66215+ mod->init_size_rx,
66216+ mod->init_size_rx);
66217
66218 do_mod_ctors(mod);
66219 /* Start the module */
66220@@ -3049,11 +3171,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66221 mod->strtab = mod->core_strtab;
66222 #endif
66223 unset_module_init_ro_nx(mod);
66224- module_free(mod, mod->module_init);
66225- mod->module_init = NULL;
66226- mod->init_size = 0;
66227- mod->init_ro_size = 0;
66228- mod->init_text_size = 0;
66229+ module_free(mod, mod->module_init_rw);
66230+ module_free_exec(mod, mod->module_init_rx);
66231+ mod->module_init_rw = NULL;
66232+ mod->module_init_rx = NULL;
66233+ mod->init_size_rw = 0;
66234+ mod->init_size_rx = 0;
66235 mutex_unlock(&module_mutex);
66236
66237 return 0;
66238@@ -3084,10 +3207,16 @@ static const char *get_ksymbol(struct module *mod,
66239 unsigned long nextval;
66240
66241 /* At worse, next value is at end of module */
66242- if (within_module_init(addr, mod))
66243- nextval = (unsigned long)mod->module_init+mod->init_text_size;
66244+ if (within_module_init_rx(addr, mod))
66245+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66246+ else if (within_module_init_rw(addr, mod))
66247+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66248+ else if (within_module_core_rx(addr, mod))
66249+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66250+ else if (within_module_core_rw(addr, mod))
66251+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66252 else
66253- nextval = (unsigned long)mod->module_core+mod->core_text_size;
66254+ return NULL;
66255
66256 /* Scan for closest preceding symbol, and next symbol. (ELF
66257 starts real symbols at 1). */
66258@@ -3322,7 +3451,7 @@ static int m_show(struct seq_file *m, void *p)
66259 char buf[8];
66260
66261 seq_printf(m, "%s %u",
66262- mod->name, mod->init_size + mod->core_size);
66263+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66264 print_unload_info(m, mod);
66265
66266 /* Informative for users. */
66267@@ -3331,7 +3460,7 @@ static int m_show(struct seq_file *m, void *p)
66268 mod->state == MODULE_STATE_COMING ? "Loading":
66269 "Live");
66270 /* Used by oprofile and other similar tools. */
66271- seq_printf(m, " 0x%pK", mod->module_core);
66272+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66273
66274 /* Taints info */
66275 if (mod->taints)
66276@@ -3367,7 +3496,17 @@ static const struct file_operations proc_modules_operations = {
66277
66278 static int __init proc_modules_init(void)
66279 {
66280+#ifndef CONFIG_GRKERNSEC_HIDESYM
66281+#ifdef CONFIG_GRKERNSEC_PROC_USER
66282+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66283+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66284+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66285+#else
66286 proc_create("modules", 0, NULL, &proc_modules_operations);
66287+#endif
66288+#else
66289+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66290+#endif
66291 return 0;
66292 }
66293 module_init(proc_modules_init);
66294@@ -3426,12 +3565,12 @@ struct module *__module_address(unsigned long addr)
66295 {
66296 struct module *mod;
66297
66298- if (addr < module_addr_min || addr > module_addr_max)
66299+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66300+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
66301 return NULL;
66302
66303 list_for_each_entry_rcu(mod, &modules, list)
66304- if (within_module_core(addr, mod)
66305- || within_module_init(addr, mod))
66306+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
66307 return mod;
66308 return NULL;
66309 }
66310@@ -3465,11 +3604,20 @@ bool is_module_text_address(unsigned long addr)
66311 */
66312 struct module *__module_text_address(unsigned long addr)
66313 {
66314- struct module *mod = __module_address(addr);
66315+ struct module *mod;
66316+
66317+#ifdef CONFIG_X86_32
66318+ addr = ktla_ktva(addr);
66319+#endif
66320+
66321+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66322+ return NULL;
66323+
66324+ mod = __module_address(addr);
66325+
66326 if (mod) {
66327 /* Make sure it's within the text section. */
66328- if (!within(addr, mod->module_init, mod->init_text_size)
66329- && !within(addr, mod->module_core, mod->core_text_size))
66330+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66331 mod = NULL;
66332 }
66333 return mod;
66334diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66335index 7e3443f..b2a1e6b 100644
66336--- a/kernel/mutex-debug.c
66337+++ b/kernel/mutex-debug.c
66338@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66339 }
66340
66341 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66342- struct thread_info *ti)
66343+ struct task_struct *task)
66344 {
66345 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66346
66347 /* Mark the current thread as blocked on the lock: */
66348- ti->task->blocked_on = waiter;
66349+ task->blocked_on = waiter;
66350 }
66351
66352 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66353- struct thread_info *ti)
66354+ struct task_struct *task)
66355 {
66356 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66357- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66358- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66359- ti->task->blocked_on = NULL;
66360+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66361+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66362+ task->blocked_on = NULL;
66363
66364 list_del_init(&waiter->list);
66365 waiter->task = NULL;
66366diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66367index 0799fd3..d06ae3b 100644
66368--- a/kernel/mutex-debug.h
66369+++ b/kernel/mutex-debug.h
66370@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66371 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66372 extern void debug_mutex_add_waiter(struct mutex *lock,
66373 struct mutex_waiter *waiter,
66374- struct thread_info *ti);
66375+ struct task_struct *task);
66376 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66377- struct thread_info *ti);
66378+ struct task_struct *task);
66379 extern void debug_mutex_unlock(struct mutex *lock);
66380 extern void debug_mutex_init(struct mutex *lock, const char *name,
66381 struct lock_class_key *key);
66382diff --git a/kernel/mutex.c b/kernel/mutex.c
66383index 89096dd..f91ebc5 100644
66384--- a/kernel/mutex.c
66385+++ b/kernel/mutex.c
66386@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66387 spin_lock_mutex(&lock->wait_lock, flags);
66388
66389 debug_mutex_lock_common(lock, &waiter);
66390- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66391+ debug_mutex_add_waiter(lock, &waiter, task);
66392
66393 /* add waiting tasks to the end of the waitqueue (FIFO): */
66394 list_add_tail(&waiter.list, &lock->wait_list);
66395@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66396 * TASK_UNINTERRUPTIBLE case.)
66397 */
66398 if (unlikely(signal_pending_state(state, task))) {
66399- mutex_remove_waiter(lock, &waiter,
66400- task_thread_info(task));
66401+ mutex_remove_waiter(lock, &waiter, task);
66402 mutex_release(&lock->dep_map, 1, ip);
66403 spin_unlock_mutex(&lock->wait_lock, flags);
66404
66405@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66406 done:
66407 lock_acquired(&lock->dep_map, ip);
66408 /* got the lock - rejoice! */
66409- mutex_remove_waiter(lock, &waiter, current_thread_info());
66410+ mutex_remove_waiter(lock, &waiter, task);
66411 mutex_set_owner(lock);
66412
66413 /* set it to 0 if there are no waiters left: */
66414diff --git a/kernel/padata.c b/kernel/padata.c
66415index b452599..5d68f4e 100644
66416--- a/kernel/padata.c
66417+++ b/kernel/padata.c
66418@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
66419 padata->pd = pd;
66420 padata->cb_cpu = cb_cpu;
66421
66422- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
66423- atomic_set(&pd->seq_nr, -1);
66424+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
66425+ atomic_set_unchecked(&pd->seq_nr, -1);
66426
66427- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
66428+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
66429
66430 target_cpu = padata_cpu_hash(padata);
66431 queue = per_cpu_ptr(pd->pqueue, target_cpu);
66432@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
66433 padata_init_pqueues(pd);
66434 padata_init_squeues(pd);
66435 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
66436- atomic_set(&pd->seq_nr, -1);
66437+ atomic_set_unchecked(&pd->seq_nr, -1);
66438 atomic_set(&pd->reorder_objects, 0);
66439 atomic_set(&pd->refcnt, 0);
66440 pd->pinst = pinst;
66441diff --git a/kernel/panic.c b/kernel/panic.c
66442index 80aed44..f291d37 100644
66443--- a/kernel/panic.c
66444+++ b/kernel/panic.c
66445@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66446 const char *board;
66447
66448 printk(KERN_WARNING "------------[ cut here ]------------\n");
66449- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66450+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66451 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66452 if (board)
66453 printk(KERN_WARNING "Hardware name: %s\n", board);
66454@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66455 */
66456 void __stack_chk_fail(void)
66457 {
66458- panic("stack-protector: Kernel stack is corrupted in: %p\n",
66459+ dump_stack();
66460+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66461 __builtin_return_address(0));
66462 }
66463 EXPORT_SYMBOL(__stack_chk_fail);
66464diff --git a/kernel/pid.c b/kernel/pid.c
66465index 9f08dfa..6765c40 100644
66466--- a/kernel/pid.c
66467+++ b/kernel/pid.c
66468@@ -33,6 +33,7 @@
66469 #include <linux/rculist.h>
66470 #include <linux/bootmem.h>
66471 #include <linux/hash.h>
66472+#include <linux/security.h>
66473 #include <linux/pid_namespace.h>
66474 #include <linux/init_task.h>
66475 #include <linux/syscalls.h>
66476@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66477
66478 int pid_max = PID_MAX_DEFAULT;
66479
66480-#define RESERVED_PIDS 300
66481+#define RESERVED_PIDS 500
66482
66483 int pid_max_min = RESERVED_PIDS + 1;
66484 int pid_max_max = PID_MAX_LIMIT;
66485@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66486 */
66487 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66488 {
66489+ struct task_struct *task;
66490+
66491 rcu_lockdep_assert(rcu_read_lock_held(),
66492 "find_task_by_pid_ns() needs rcu_read_lock()"
66493 " protection");
66494- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66495+
66496+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66497+
66498+ if (gr_pid_is_chrooted(task))
66499+ return NULL;
66500+
66501+ return task;
66502 }
66503
66504 struct task_struct *find_task_by_vpid(pid_t vnr)
66505@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66506 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66507 }
66508
66509+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66510+{
66511+ rcu_lockdep_assert(rcu_read_lock_held(),
66512+ "find_task_by_pid_ns() needs rcu_read_lock()"
66513+ " protection");
66514+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66515+}
66516+
66517 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66518 {
66519 struct pid *pid;
66520diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66521index 125cb67..a4d1c30 100644
66522--- a/kernel/posix-cpu-timers.c
66523+++ b/kernel/posix-cpu-timers.c
66524@@ -6,6 +6,7 @@
66525 #include <linux/posix-timers.h>
66526 #include <linux/errno.h>
66527 #include <linux/math64.h>
66528+#include <linux/security.h>
66529 #include <asm/uaccess.h>
66530 #include <linux/kernel_stat.h>
66531 #include <trace/events/timer.h>
66532@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66533
66534 static __init int init_posix_cpu_timers(void)
66535 {
66536- struct k_clock process = {
66537+ static struct k_clock process = {
66538 .clock_getres = process_cpu_clock_getres,
66539 .clock_get = process_cpu_clock_get,
66540 .timer_create = process_cpu_timer_create,
66541 .nsleep = process_cpu_nsleep,
66542 .nsleep_restart = process_cpu_nsleep_restart,
66543 };
66544- struct k_clock thread = {
66545+ static struct k_clock thread = {
66546 .clock_getres = thread_cpu_clock_getres,
66547 .clock_get = thread_cpu_clock_get,
66548 .timer_create = thread_cpu_timer_create,
66549diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66550index 69185ae..cc2847a 100644
66551--- a/kernel/posix-timers.c
66552+++ b/kernel/posix-timers.c
66553@@ -43,6 +43,7 @@
66554 #include <linux/idr.h>
66555 #include <linux/posix-clock.h>
66556 #include <linux/posix-timers.h>
66557+#include <linux/grsecurity.h>
66558 #include <linux/syscalls.h>
66559 #include <linux/wait.h>
66560 #include <linux/workqueue.h>
66561@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66562 * which we beg off on and pass to do_sys_settimeofday().
66563 */
66564
66565-static struct k_clock posix_clocks[MAX_CLOCKS];
66566+static struct k_clock *posix_clocks[MAX_CLOCKS];
66567
66568 /*
66569 * These ones are defined below.
66570@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66571 */
66572 static __init int init_posix_timers(void)
66573 {
66574- struct k_clock clock_realtime = {
66575+ static struct k_clock clock_realtime = {
66576 .clock_getres = hrtimer_get_res,
66577 .clock_get = posix_clock_realtime_get,
66578 .clock_set = posix_clock_realtime_set,
66579@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66580 .timer_get = common_timer_get,
66581 .timer_del = common_timer_del,
66582 };
66583- struct k_clock clock_monotonic = {
66584+ static struct k_clock clock_monotonic = {
66585 .clock_getres = hrtimer_get_res,
66586 .clock_get = posix_ktime_get_ts,
66587 .nsleep = common_nsleep,
66588@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66589 .timer_get = common_timer_get,
66590 .timer_del = common_timer_del,
66591 };
66592- struct k_clock clock_monotonic_raw = {
66593+ static struct k_clock clock_monotonic_raw = {
66594 .clock_getres = hrtimer_get_res,
66595 .clock_get = posix_get_monotonic_raw,
66596 };
66597- struct k_clock clock_realtime_coarse = {
66598+ static struct k_clock clock_realtime_coarse = {
66599 .clock_getres = posix_get_coarse_res,
66600 .clock_get = posix_get_realtime_coarse,
66601 };
66602- struct k_clock clock_monotonic_coarse = {
66603+ static struct k_clock clock_monotonic_coarse = {
66604 .clock_getres = posix_get_coarse_res,
66605 .clock_get = posix_get_monotonic_coarse,
66606 };
66607- struct k_clock clock_boottime = {
66608+ static struct k_clock clock_boottime = {
66609 .clock_getres = hrtimer_get_res,
66610 .clock_get = posix_get_boottime,
66611 .nsleep = common_nsleep,
66612@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66613 return;
66614 }
66615
66616- posix_clocks[clock_id] = *new_clock;
66617+ posix_clocks[clock_id] = new_clock;
66618 }
66619 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66620
66621@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66622 return (id & CLOCKFD_MASK) == CLOCKFD ?
66623 &clock_posix_dynamic : &clock_posix_cpu;
66624
66625- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66626+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66627 return NULL;
66628- return &posix_clocks[id];
66629+ return posix_clocks[id];
66630 }
66631
66632 static int common_timer_create(struct k_itimer *new_timer)
66633@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66634 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66635 return -EFAULT;
66636
66637+ /* only the CLOCK_REALTIME clock can be set, all other clocks
66638+ have their clock_set fptr set to a nosettime dummy function
66639+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
66640+ call common_clock_set, which calls do_sys_settimeofday, which
66641+ we hook
66642+ */
66643+
66644 return kc->clock_set(which_clock, &new_tp);
66645 }
66646
66647diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
66648index d523593..68197a4 100644
66649--- a/kernel/power/poweroff.c
66650+++ b/kernel/power/poweroff.c
66651@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
66652 .enable_mask = SYSRQ_ENABLE_BOOT,
66653 };
66654
66655-static int pm_sysrq_init(void)
66656+static int __init pm_sysrq_init(void)
66657 {
66658 register_sysrq_key('o', &sysrq_poweroff_op);
66659 return 0;
66660diff --git a/kernel/power/process.c b/kernel/power/process.c
66661index 7e42645..3d43df1 100644
66662--- a/kernel/power/process.c
66663+++ b/kernel/power/process.c
66664@@ -32,6 +32,7 @@ static int try_to_freeze_tasks(bool user_only)
66665 u64 elapsed_csecs64;
66666 unsigned int elapsed_csecs;
66667 bool wakeup = false;
66668+ bool timedout = false;
66669
66670 do_gettimeofday(&start);
66671
66672@@ -42,6 +43,8 @@ static int try_to_freeze_tasks(bool user_only)
66673
66674 while (true) {
66675 todo = 0;
66676+ if (time_after(jiffies, end_time))
66677+ timedout = true;
66678 read_lock(&tasklist_lock);
66679 do_each_thread(g, p) {
66680 if (p == current || !freeze_task(p))
66681@@ -59,9 +62,13 @@ static int try_to_freeze_tasks(bool user_only)
66682 * try_to_stop() after schedule() in ptrace/signal
66683 * stop sees TIF_FREEZE.
66684 */
66685- if (!task_is_stopped_or_traced(p) &&
66686- !freezer_should_skip(p))
66687+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
66688 todo++;
66689+ if (timedout) {
66690+ printk(KERN_ERR "Task refusing to freeze:\n");
66691+ sched_show_task(p);
66692+ }
66693+ }
66694 } while_each_thread(g, p);
66695 read_unlock(&tasklist_lock);
66696
66697@@ -70,7 +77,7 @@ static int try_to_freeze_tasks(bool user_only)
66698 todo += wq_busy;
66699 }
66700
66701- if (!todo || time_after(jiffies, end_time))
66702+ if (!todo || timedout)
66703 break;
66704
66705 if (pm_wakeup_pending()) {
66706diff --git a/kernel/printk.c b/kernel/printk.c
66707index 32690a0..cd7c798 100644
66708--- a/kernel/printk.c
66709+++ b/kernel/printk.c
66710@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
66711 if (from_file && type != SYSLOG_ACTION_OPEN)
66712 return 0;
66713
66714+#ifdef CONFIG_GRKERNSEC_DMESG
66715+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
66716+ return -EPERM;
66717+#endif
66718+
66719 if (syslog_action_restricted(type)) {
66720 if (capable(CAP_SYSLOG))
66721 return 0;
66722diff --git a/kernel/profile.c b/kernel/profile.c
66723index 76b8e77..a2930e8 100644
66724--- a/kernel/profile.c
66725+++ b/kernel/profile.c
66726@@ -39,7 +39,7 @@ struct profile_hit {
66727 /* Oprofile timer tick hook */
66728 static int (*timer_hook)(struct pt_regs *) __read_mostly;
66729
66730-static atomic_t *prof_buffer;
66731+static atomic_unchecked_t *prof_buffer;
66732 static unsigned long prof_len, prof_shift;
66733
66734 int prof_on __read_mostly;
66735@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
66736 hits[i].pc = 0;
66737 continue;
66738 }
66739- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66740+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66741 hits[i].hits = hits[i].pc = 0;
66742 }
66743 }
66744@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66745 * Add the current hit(s) and flush the write-queue out
66746 * to the global buffer:
66747 */
66748- atomic_add(nr_hits, &prof_buffer[pc]);
66749+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
66750 for (i = 0; i < NR_PROFILE_HIT; ++i) {
66751- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66752+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66753 hits[i].pc = hits[i].hits = 0;
66754 }
66755 out:
66756@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66757 {
66758 unsigned long pc;
66759 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
66760- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66761+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66762 }
66763 #endif /* !CONFIG_SMP */
66764
66765@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
66766 return -EFAULT;
66767 buf++; p++; count--; read++;
66768 }
66769- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
66770+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
66771 if (copy_to_user(buf, (void *)pnt, count))
66772 return -EFAULT;
66773 read += count;
66774@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
66775 }
66776 #endif
66777 profile_discard_flip_buffers();
66778- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
66779+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
66780 return count;
66781 }
66782
66783diff --git a/kernel/ptrace.c b/kernel/ptrace.c
66784index 00ab2ca..d237f61 100644
66785--- a/kernel/ptrace.c
66786+++ b/kernel/ptrace.c
66787@@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
66788 task->ptrace = PT_PTRACED;
66789 if (seize)
66790 task->ptrace |= PT_SEIZED;
66791- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
66792+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
66793 task->ptrace |= PT_PTRACE_CAP;
66794
66795 __ptrace_link(task, current);
66796@@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
66797 break;
66798 return -EIO;
66799 }
66800- if (copy_to_user(dst, buf, retval))
66801+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
66802 return -EFAULT;
66803 copied += retval;
66804 src += retval;
66805@@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
66806 bool seized = child->ptrace & PT_SEIZED;
66807 int ret = -EIO;
66808 siginfo_t siginfo, *si;
66809- void __user *datavp = (void __user *) data;
66810+ void __user *datavp = (__force void __user *) data;
66811 unsigned long __user *datalp = datavp;
66812 unsigned long flags;
66813
66814@@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
66815 goto out;
66816 }
66817
66818+ if (gr_handle_ptrace(child, request)) {
66819+ ret = -EPERM;
66820+ goto out_put_task_struct;
66821+ }
66822+
66823 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66824 ret = ptrace_attach(child, request, data);
66825 /*
66826 * Some architectures need to do book-keeping after
66827 * a ptrace attach.
66828 */
66829- if (!ret)
66830+ if (!ret) {
66831 arch_ptrace_attach(child);
66832+ gr_audit_ptrace(child);
66833+ }
66834 goto out_put_task_struct;
66835 }
66836
66837@@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
66838 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
66839 if (copied != sizeof(tmp))
66840 return -EIO;
66841- return put_user(tmp, (unsigned long __user *)data);
66842+ return put_user(tmp, (__force unsigned long __user *)data);
66843 }
66844
66845 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
66846@@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
66847 goto out;
66848 }
66849
66850+ if (gr_handle_ptrace(child, request)) {
66851+ ret = -EPERM;
66852+ goto out_put_task_struct;
66853+ }
66854+
66855 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66856 ret = ptrace_attach(child, request, data);
66857 /*
66858 * Some architectures need to do book-keeping after
66859 * a ptrace attach.
66860 */
66861- if (!ret)
66862+ if (!ret) {
66863 arch_ptrace_attach(child);
66864+ gr_audit_ptrace(child);
66865+ }
66866 goto out_put_task_struct;
66867 }
66868
66869diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
66870index 977296d..c4744dc 100644
66871--- a/kernel/rcutiny.c
66872+++ b/kernel/rcutiny.c
66873@@ -46,7 +46,7 @@
66874 struct rcu_ctrlblk;
66875 static void invoke_rcu_callbacks(void);
66876 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
66877-static void rcu_process_callbacks(struct softirq_action *unused);
66878+static void rcu_process_callbacks(void);
66879 static void __call_rcu(struct rcu_head *head,
66880 void (*func)(struct rcu_head *rcu),
66881 struct rcu_ctrlblk *rcp);
66882@@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
66883 rcu_is_callbacks_kthread()));
66884 }
66885
66886-static void rcu_process_callbacks(struct softirq_action *unused)
66887+static void rcu_process_callbacks(void)
66888 {
66889 __rcu_process_callbacks(&rcu_sched_ctrlblk);
66890 __rcu_process_callbacks(&rcu_bh_ctrlblk);
66891diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
66892index a58ac28..196a3d8 100644
66893--- a/kernel/rcutorture.c
66894+++ b/kernel/rcutorture.c
66895@@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
66896 { 0 };
66897 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
66898 { 0 };
66899-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66900-static atomic_t n_rcu_torture_alloc;
66901-static atomic_t n_rcu_torture_alloc_fail;
66902-static atomic_t n_rcu_torture_free;
66903-static atomic_t n_rcu_torture_mberror;
66904-static atomic_t n_rcu_torture_error;
66905+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66906+static atomic_unchecked_t n_rcu_torture_alloc;
66907+static atomic_unchecked_t n_rcu_torture_alloc_fail;
66908+static atomic_unchecked_t n_rcu_torture_free;
66909+static atomic_unchecked_t n_rcu_torture_mberror;
66910+static atomic_unchecked_t n_rcu_torture_error;
66911 static long n_rcu_torture_boost_ktrerror;
66912 static long n_rcu_torture_boost_rterror;
66913 static long n_rcu_torture_boost_failure;
66914@@ -243,11 +243,11 @@ rcu_torture_alloc(void)
66915
66916 spin_lock_bh(&rcu_torture_lock);
66917 if (list_empty(&rcu_torture_freelist)) {
66918- atomic_inc(&n_rcu_torture_alloc_fail);
66919+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
66920 spin_unlock_bh(&rcu_torture_lock);
66921 return NULL;
66922 }
66923- atomic_inc(&n_rcu_torture_alloc);
66924+ atomic_inc_unchecked(&n_rcu_torture_alloc);
66925 p = rcu_torture_freelist.next;
66926 list_del_init(p);
66927 spin_unlock_bh(&rcu_torture_lock);
66928@@ -260,7 +260,7 @@ rcu_torture_alloc(void)
66929 static void
66930 rcu_torture_free(struct rcu_torture *p)
66931 {
66932- atomic_inc(&n_rcu_torture_free);
66933+ atomic_inc_unchecked(&n_rcu_torture_free);
66934 spin_lock_bh(&rcu_torture_lock);
66935 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
66936 spin_unlock_bh(&rcu_torture_lock);
66937@@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
66938 i = rp->rtort_pipe_count;
66939 if (i > RCU_TORTURE_PIPE_LEN)
66940 i = RCU_TORTURE_PIPE_LEN;
66941- atomic_inc(&rcu_torture_wcount[i]);
66942+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66943 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
66944 rp->rtort_mbtest = 0;
66945 rcu_torture_free(rp);
66946@@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
66947 i = rp->rtort_pipe_count;
66948 if (i > RCU_TORTURE_PIPE_LEN)
66949 i = RCU_TORTURE_PIPE_LEN;
66950- atomic_inc(&rcu_torture_wcount[i]);
66951+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66952 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
66953 rp->rtort_mbtest = 0;
66954 list_del(&rp->rtort_free);
66955@@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
66956 i = old_rp->rtort_pipe_count;
66957 if (i > RCU_TORTURE_PIPE_LEN)
66958 i = RCU_TORTURE_PIPE_LEN;
66959- atomic_inc(&rcu_torture_wcount[i]);
66960+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66961 old_rp->rtort_pipe_count++;
66962 cur_ops->deferred_free(old_rp);
66963 }
66964@@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
66965 return;
66966 }
66967 if (p->rtort_mbtest == 0)
66968- atomic_inc(&n_rcu_torture_mberror);
66969+ atomic_inc_unchecked(&n_rcu_torture_mberror);
66970 spin_lock(&rand_lock);
66971 cur_ops->read_delay(&rand);
66972 n_rcu_torture_timers++;
66973@@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
66974 continue;
66975 }
66976 if (p->rtort_mbtest == 0)
66977- atomic_inc(&n_rcu_torture_mberror);
66978+ atomic_inc_unchecked(&n_rcu_torture_mberror);
66979 cur_ops->read_delay(&rand);
66980 preempt_disable();
66981 pipe_count = p->rtort_pipe_count;
66982@@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
66983 rcu_torture_current,
66984 rcu_torture_current_version,
66985 list_empty(&rcu_torture_freelist),
66986- atomic_read(&n_rcu_torture_alloc),
66987- atomic_read(&n_rcu_torture_alloc_fail),
66988- atomic_read(&n_rcu_torture_free),
66989- atomic_read(&n_rcu_torture_mberror),
66990+ atomic_read_unchecked(&n_rcu_torture_alloc),
66991+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
66992+ atomic_read_unchecked(&n_rcu_torture_free),
66993+ atomic_read_unchecked(&n_rcu_torture_mberror),
66994 n_rcu_torture_boost_ktrerror,
66995 n_rcu_torture_boost_rterror,
66996 n_rcu_torture_boost_failure,
66997@@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
66998 n_online_attempts,
66999 n_offline_successes,
67000 n_offline_attempts);
67001- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67002+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67003 n_rcu_torture_boost_ktrerror != 0 ||
67004 n_rcu_torture_boost_rterror != 0 ||
67005 n_rcu_torture_boost_failure != 0)
67006@@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
67007 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67008 if (i > 1) {
67009 cnt += sprintf(&page[cnt], "!!! ");
67010- atomic_inc(&n_rcu_torture_error);
67011+ atomic_inc_unchecked(&n_rcu_torture_error);
67012 WARN_ON_ONCE(1);
67013 }
67014 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67015@@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
67016 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67017 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67018 cnt += sprintf(&page[cnt], " %d",
67019- atomic_read(&rcu_torture_wcount[i]));
67020+ atomic_read_unchecked(&rcu_torture_wcount[i]));
67021 }
67022 cnt += sprintf(&page[cnt], "\n");
67023 if (cur_ops->stats)
67024@@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
67025
67026 if (cur_ops->cleanup)
67027 cur_ops->cleanup();
67028- if (atomic_read(&n_rcu_torture_error))
67029+ if (atomic_read_unchecked(&n_rcu_torture_error))
67030 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67031 else
67032 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67033@@ -1664,17 +1664,17 @@ rcu_torture_init(void)
67034
67035 rcu_torture_current = NULL;
67036 rcu_torture_current_version = 0;
67037- atomic_set(&n_rcu_torture_alloc, 0);
67038- atomic_set(&n_rcu_torture_alloc_fail, 0);
67039- atomic_set(&n_rcu_torture_free, 0);
67040- atomic_set(&n_rcu_torture_mberror, 0);
67041- atomic_set(&n_rcu_torture_error, 0);
67042+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67043+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67044+ atomic_set_unchecked(&n_rcu_torture_free, 0);
67045+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67046+ atomic_set_unchecked(&n_rcu_torture_error, 0);
67047 n_rcu_torture_boost_ktrerror = 0;
67048 n_rcu_torture_boost_rterror = 0;
67049 n_rcu_torture_boost_failure = 0;
67050 n_rcu_torture_boosts = 0;
67051 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67052- atomic_set(&rcu_torture_wcount[i], 0);
67053+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67054 for_each_possible_cpu(cpu) {
67055 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67056 per_cpu(rcu_torture_count, cpu)[i] = 0;
67057diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67058index 6c4a672..70f3202 100644
67059--- a/kernel/rcutree.c
67060+++ b/kernel/rcutree.c
67061@@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67062 rcu_prepare_for_idle(smp_processor_id());
67063 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67064 smp_mb__before_atomic_inc(); /* See above. */
67065- atomic_inc(&rdtp->dynticks);
67066+ atomic_inc_unchecked(&rdtp->dynticks);
67067 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67068- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67069+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67070 }
67071
67072 /**
67073@@ -438,10 +438,10 @@ void rcu_irq_exit(void)
67074 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67075 {
67076 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67077- atomic_inc(&rdtp->dynticks);
67078+ atomic_inc_unchecked(&rdtp->dynticks);
67079 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67080 smp_mb__after_atomic_inc(); /* See above. */
67081- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67082+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67083 rcu_cleanup_after_idle(smp_processor_id());
67084 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67085 if (!is_idle_task(current)) {
67086@@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
67087 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67088
67089 if (rdtp->dynticks_nmi_nesting == 0 &&
67090- (atomic_read(&rdtp->dynticks) & 0x1))
67091+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67092 return;
67093 rdtp->dynticks_nmi_nesting++;
67094 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67095- atomic_inc(&rdtp->dynticks);
67096+ atomic_inc_unchecked(&rdtp->dynticks);
67097 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67098 smp_mb__after_atomic_inc(); /* See above. */
67099- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67100+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67101 }
67102
67103 /**
67104@@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
67105 return;
67106 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67107 smp_mb__before_atomic_inc(); /* See above. */
67108- atomic_inc(&rdtp->dynticks);
67109+ atomic_inc_unchecked(&rdtp->dynticks);
67110 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67111- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67112+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67113 }
67114
67115 #ifdef CONFIG_PROVE_RCU
67116@@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
67117 int ret;
67118
67119 preempt_disable();
67120- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67121+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67122 preempt_enable();
67123 return ret;
67124 }
67125@@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67126 */
67127 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67128 {
67129- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67130+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67131 return (rdp->dynticks_snap & 0x1) == 0;
67132 }
67133
67134@@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67135 unsigned int curr;
67136 unsigned int snap;
67137
67138- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67139+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67140 snap = (unsigned int)rdp->dynticks_snap;
67141
67142 /*
67143@@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67144 /*
67145 * Do RCU core processing for the current CPU.
67146 */
67147-static void rcu_process_callbacks(struct softirq_action *unused)
67148+static void rcu_process_callbacks(void)
67149 {
67150 trace_rcu_utilization("Start RCU core");
67151 __rcu_process_callbacks(&rcu_sched_state,
67152@@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67153 rdp->qlen = 0;
67154 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67155 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
67156- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67157+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67158 rdp->cpu = cpu;
67159 rdp->rsp = rsp;
67160 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67161@@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67162 rdp->n_force_qs_snap = rsp->n_force_qs;
67163 rdp->blimit = blimit;
67164 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
67165- atomic_set(&rdp->dynticks->dynticks,
67166- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67167+ atomic_set_unchecked(&rdp->dynticks->dynticks,
67168+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67169 rcu_prepare_for_idle_init(cpu);
67170 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67171
67172diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67173index fddff92..2c08359 100644
67174--- a/kernel/rcutree.h
67175+++ b/kernel/rcutree.h
67176@@ -87,7 +87,7 @@ struct rcu_dynticks {
67177 long long dynticks_nesting; /* Track irq/process nesting level. */
67178 /* Process level is worth LLONG_MAX/2. */
67179 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67180- atomic_t dynticks; /* Even value for idle, else odd. */
67181+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67182 };
67183
67184 /* RCU's kthread states for tracing. */
67185diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67186index 8bb35d7..6ea0a463 100644
67187--- a/kernel/rcutree_plugin.h
67188+++ b/kernel/rcutree_plugin.h
67189@@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
67190
67191 /* Clean up and exit. */
67192 smp_mb(); /* ensure expedited GP seen before counter increment. */
67193- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67194+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67195 unlock_mb_ret:
67196 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67197 mb_ret:
67198@@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67199
67200 #else /* #ifndef CONFIG_SMP */
67201
67202-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67203-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67204+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67205+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67206
67207 static int synchronize_sched_expedited_cpu_stop(void *data)
67208 {
67209@@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
67210 int firstsnap, s, snap, trycount = 0;
67211
67212 /* Note that atomic_inc_return() implies full memory barrier. */
67213- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67214+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67215 get_online_cpus();
67216
67217 /*
67218@@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
67219 }
67220
67221 /* Check to see if someone else did our work for us. */
67222- s = atomic_read(&sync_sched_expedited_done);
67223+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67224 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67225 smp_mb(); /* ensure test happens before caller kfree */
67226 return;
67227@@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
67228 * grace period works for us.
67229 */
67230 get_online_cpus();
67231- snap = atomic_read(&sync_sched_expedited_started);
67232+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
67233 smp_mb(); /* ensure read is before try_stop_cpus(). */
67234 }
67235
67236@@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
67237 * than we did beat us to the punch.
67238 */
67239 do {
67240- s = atomic_read(&sync_sched_expedited_done);
67241+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67242 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67243 smp_mb(); /* ensure test happens before caller kfree */
67244 break;
67245 }
67246- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67247+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67248
67249 put_online_cpus();
67250 }
67251diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67252index 654cfe6..c0b28e2 100644
67253--- a/kernel/rcutree_trace.c
67254+++ b/kernel/rcutree_trace.c
67255@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67256 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67257 rdp->qs_pending);
67258 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67259- atomic_read(&rdp->dynticks->dynticks),
67260+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67261 rdp->dynticks->dynticks_nesting,
67262 rdp->dynticks->dynticks_nmi_nesting,
67263 rdp->dynticks_fqs);
67264@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67265 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67266 rdp->qs_pending);
67267 seq_printf(m, ",%d,%llx,%d,%lu",
67268- atomic_read(&rdp->dynticks->dynticks),
67269+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67270 rdp->dynticks->dynticks_nesting,
67271 rdp->dynticks->dynticks_nmi_nesting,
67272 rdp->dynticks_fqs);
67273diff --git a/kernel/resource.c b/kernel/resource.c
67274index 7640b3a..5879283 100644
67275--- a/kernel/resource.c
67276+++ b/kernel/resource.c
67277@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67278
67279 static int __init ioresources_init(void)
67280 {
67281+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67282+#ifdef CONFIG_GRKERNSEC_PROC_USER
67283+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67284+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67285+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67286+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67287+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67288+#endif
67289+#else
67290 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67291 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67292+#endif
67293 return 0;
67294 }
67295 __initcall(ioresources_init);
67296diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67297index 98ec494..4241d6d 100644
67298--- a/kernel/rtmutex-tester.c
67299+++ b/kernel/rtmutex-tester.c
67300@@ -20,7 +20,7 @@
67301 #define MAX_RT_TEST_MUTEXES 8
67302
67303 static spinlock_t rttest_lock;
67304-static atomic_t rttest_event;
67305+static atomic_unchecked_t rttest_event;
67306
67307 struct test_thread_data {
67308 int opcode;
67309@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67310
67311 case RTTEST_LOCKCONT:
67312 td->mutexes[td->opdata] = 1;
67313- td->event = atomic_add_return(1, &rttest_event);
67314+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67315 return 0;
67316
67317 case RTTEST_RESET:
67318@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67319 return 0;
67320
67321 case RTTEST_RESETEVENT:
67322- atomic_set(&rttest_event, 0);
67323+ atomic_set_unchecked(&rttest_event, 0);
67324 return 0;
67325
67326 default:
67327@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67328 return ret;
67329
67330 td->mutexes[id] = 1;
67331- td->event = atomic_add_return(1, &rttest_event);
67332+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67333 rt_mutex_lock(&mutexes[id]);
67334- td->event = atomic_add_return(1, &rttest_event);
67335+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67336 td->mutexes[id] = 4;
67337 return 0;
67338
67339@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67340 return ret;
67341
67342 td->mutexes[id] = 1;
67343- td->event = atomic_add_return(1, &rttest_event);
67344+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67345 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67346- td->event = atomic_add_return(1, &rttest_event);
67347+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67348 td->mutexes[id] = ret ? 0 : 4;
67349 return ret ? -EINTR : 0;
67350
67351@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67352 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67353 return ret;
67354
67355- td->event = atomic_add_return(1, &rttest_event);
67356+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67357 rt_mutex_unlock(&mutexes[id]);
67358- td->event = atomic_add_return(1, &rttest_event);
67359+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67360 td->mutexes[id] = 0;
67361 return 0;
67362
67363@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67364 break;
67365
67366 td->mutexes[dat] = 2;
67367- td->event = atomic_add_return(1, &rttest_event);
67368+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67369 break;
67370
67371 default:
67372@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67373 return;
67374
67375 td->mutexes[dat] = 3;
67376- td->event = atomic_add_return(1, &rttest_event);
67377+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67378 break;
67379
67380 case RTTEST_LOCKNOWAIT:
67381@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67382 return;
67383
67384 td->mutexes[dat] = 1;
67385- td->event = atomic_add_return(1, &rttest_event);
67386+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67387 return;
67388
67389 default:
67390diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67391index e8a1f83..363d17d 100644
67392--- a/kernel/sched/auto_group.c
67393+++ b/kernel/sched/auto_group.c
67394@@ -11,7 +11,7 @@
67395
67396 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67397 static struct autogroup autogroup_default;
67398-static atomic_t autogroup_seq_nr;
67399+static atomic_unchecked_t autogroup_seq_nr;
67400
67401 void __init autogroup_init(struct task_struct *init_task)
67402 {
67403@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67404
67405 kref_init(&ag->kref);
67406 init_rwsem(&ag->lock);
67407- ag->id = atomic_inc_return(&autogroup_seq_nr);
67408+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67409 ag->tg = tg;
67410 #ifdef CONFIG_RT_GROUP_SCHED
67411 /*
67412diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67413index b342f57..00324a0 100644
67414--- a/kernel/sched/core.c
67415+++ b/kernel/sched/core.c
67416@@ -3143,6 +3143,19 @@ pick_next_task(struct rq *rq)
67417 BUG(); /* the idle class will always have a runnable task */
67418 }
67419
67420+#ifdef CONFIG_GRKERNSEC_SETXID
67421+extern void gr_delayed_cred_worker(void);
67422+static inline void gr_cred_schedule(void)
67423+{
67424+ if (unlikely(current->delayed_cred))
67425+ gr_delayed_cred_worker();
67426+}
67427+#else
67428+static inline void gr_cred_schedule(void)
67429+{
67430+}
67431+#endif
67432+
67433 /*
67434 * __schedule() is the main scheduler function.
67435 */
67436@@ -3162,6 +3175,8 @@ need_resched:
67437
67438 schedule_debug(prev);
67439
67440+ gr_cred_schedule();
67441+
67442 if (sched_feat(HRTICK))
67443 hrtick_clear(rq);
67444
67445@@ -3852,6 +3867,8 @@ int can_nice(const struct task_struct *p, const int nice)
67446 /* convert nice value [19,-20] to rlimit style value [1,40] */
67447 int nice_rlim = 20 - nice;
67448
67449+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67450+
67451 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67452 capable(CAP_SYS_NICE));
67453 }
67454@@ -3885,7 +3902,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67455 if (nice > 19)
67456 nice = 19;
67457
67458- if (increment < 0 && !can_nice(current, nice))
67459+ if (increment < 0 && (!can_nice(current, nice) ||
67460+ gr_handle_chroot_nice()))
67461 return -EPERM;
67462
67463 retval = security_task_setnice(current, nice);
67464@@ -4042,6 +4060,7 @@ recheck:
67465 unsigned long rlim_rtprio =
67466 task_rlimit(p, RLIMIT_RTPRIO);
67467
67468+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67469 /* can't set/change the rt policy */
67470 if (policy != p->policy && !rlim_rtprio)
67471 return -EPERM;
67472diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67473index aca16b8..8e3acc4 100644
67474--- a/kernel/sched/fair.c
67475+++ b/kernel/sched/fair.c
67476@@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67477 * run_rebalance_domains is triggered when needed from the scheduler tick.
67478 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67479 */
67480-static void run_rebalance_domains(struct softirq_action *h)
67481+static void run_rebalance_domains(void)
67482 {
67483 int this_cpu = smp_processor_id();
67484 struct rq *this_rq = cpu_rq(this_cpu);
67485diff --git a/kernel/signal.c b/kernel/signal.c
67486index c73c428..7040057 100644
67487--- a/kernel/signal.c
67488+++ b/kernel/signal.c
67489@@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
67490
67491 int print_fatal_signals __read_mostly;
67492
67493-static void __user *sig_handler(struct task_struct *t, int sig)
67494+static __sighandler_t sig_handler(struct task_struct *t, int sig)
67495 {
67496 return t->sighand->action[sig - 1].sa.sa_handler;
67497 }
67498
67499-static int sig_handler_ignored(void __user *handler, int sig)
67500+static int sig_handler_ignored(__sighandler_t handler, int sig)
67501 {
67502 /* Is it explicitly or implicitly ignored? */
67503 return handler == SIG_IGN ||
67504@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67505 static int sig_task_ignored(struct task_struct *t, int sig,
67506 int from_ancestor_ns)
67507 {
67508- void __user *handler;
67509+ __sighandler_t handler;
67510
67511 handler = sig_handler(t, sig);
67512
67513@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67514 atomic_inc(&user->sigpending);
67515 rcu_read_unlock();
67516
67517+ if (!override_rlimit)
67518+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67519+
67520 if (override_rlimit ||
67521 atomic_read(&user->sigpending) <=
67522 task_rlimit(t, RLIMIT_SIGPENDING)) {
67523@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67524
67525 int unhandled_signal(struct task_struct *tsk, int sig)
67526 {
67527- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67528+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67529 if (is_global_init(tsk))
67530 return 1;
67531 if (handler != SIG_IGN && handler != SIG_DFL)
67532@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67533 }
67534 }
67535
67536+ /* allow glibc communication via tgkill to other threads in our
67537+ thread group */
67538+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67539+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67540+ && gr_handle_signal(t, sig))
67541+ return -EPERM;
67542+
67543 return security_task_kill(t, info, sig, 0);
67544 }
67545
67546@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67547 return send_signal(sig, info, p, 1);
67548 }
67549
67550-static int
67551+int
67552 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67553 {
67554 return send_signal(sig, info, t, 0);
67555@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67556 unsigned long int flags;
67557 int ret, blocked, ignored;
67558 struct k_sigaction *action;
67559+ int is_unhandled = 0;
67560
67561 spin_lock_irqsave(&t->sighand->siglock, flags);
67562 action = &t->sighand->action[sig-1];
67563@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67564 }
67565 if (action->sa.sa_handler == SIG_DFL)
67566 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67567+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67568+ is_unhandled = 1;
67569 ret = specific_send_sig_info(sig, info, t);
67570 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67571
67572+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
67573+ normal operation */
67574+ if (is_unhandled) {
67575+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67576+ gr_handle_crash(t, sig);
67577+ }
67578+
67579 return ret;
67580 }
67581
67582@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67583 ret = check_kill_permission(sig, info, p);
67584 rcu_read_unlock();
67585
67586- if (!ret && sig)
67587+ if (!ret && sig) {
67588 ret = do_send_sig_info(sig, info, p, true);
67589+ if (!ret)
67590+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67591+ }
67592
67593 return ret;
67594 }
67595@@ -2820,7 +2843,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67596 int error = -ESRCH;
67597
67598 rcu_read_lock();
67599- p = find_task_by_vpid(pid);
67600+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67601+ /* allow glibc communication via tgkill to other threads in our
67602+ thread group */
67603+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67604+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
67605+ p = find_task_by_vpid_unrestricted(pid);
67606+ else
67607+#endif
67608+ p = find_task_by_vpid(pid);
67609 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67610 error = check_kill_permission(sig, info, p);
67611 /*
67612diff --git a/kernel/smp.c b/kernel/smp.c
67613index db197d6..17aef0b 100644
67614--- a/kernel/smp.c
67615+++ b/kernel/smp.c
67616@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
67617 }
67618 EXPORT_SYMBOL(smp_call_function);
67619
67620-void ipi_call_lock(void)
67621+void ipi_call_lock(void) __acquires(call_function.lock)
67622 {
67623 raw_spin_lock(&call_function.lock);
67624 }
67625
67626-void ipi_call_unlock(void)
67627+void ipi_call_unlock(void) __releases(call_function.lock)
67628 {
67629 raw_spin_unlock(&call_function.lock);
67630 }
67631
67632-void ipi_call_lock_irq(void)
67633+void ipi_call_lock_irq(void) __acquires(call_function.lock)
67634 {
67635 raw_spin_lock_irq(&call_function.lock);
67636 }
67637
67638-void ipi_call_unlock_irq(void)
67639+void ipi_call_unlock_irq(void) __releases(call_function.lock)
67640 {
67641 raw_spin_unlock_irq(&call_function.lock);
67642 }
67643diff --git a/kernel/softirq.c b/kernel/softirq.c
67644index 4eb3a0f..6f1fa81 100644
67645--- a/kernel/softirq.c
67646+++ b/kernel/softirq.c
67647@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
67648
67649 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
67650
67651-char *softirq_to_name[NR_SOFTIRQS] = {
67652+const char * const softirq_to_name[NR_SOFTIRQS] = {
67653 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
67654 "TASKLET", "SCHED", "HRTIMER", "RCU"
67655 };
67656@@ -235,7 +235,7 @@ restart:
67657 kstat_incr_softirqs_this_cpu(vec_nr);
67658
67659 trace_softirq_entry(vec_nr);
67660- h->action(h);
67661+ h->action();
67662 trace_softirq_exit(vec_nr);
67663 if (unlikely(prev_count != preempt_count())) {
67664 printk(KERN_ERR "huh, entered softirq %u %s %p"
67665@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
67666 local_irq_restore(flags);
67667 }
67668
67669-void open_softirq(int nr, void (*action)(struct softirq_action *))
67670+void open_softirq(int nr, void (*action)(void))
67671 {
67672- softirq_vec[nr].action = action;
67673+ pax_open_kernel();
67674+ *(void **)&softirq_vec[nr].action = action;
67675+ pax_close_kernel();
67676 }
67677
67678 /*
67679@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
67680
67681 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
67682
67683-static void tasklet_action(struct softirq_action *a)
67684+static void tasklet_action(void)
67685 {
67686 struct tasklet_struct *list;
67687
67688@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
67689 }
67690 }
67691
67692-static void tasklet_hi_action(struct softirq_action *a)
67693+static void tasklet_hi_action(void)
67694 {
67695 struct tasklet_struct *list;
67696
67697diff --git a/kernel/sys.c b/kernel/sys.c
67698index 888d227..f04b318 100644
67699--- a/kernel/sys.c
67700+++ b/kernel/sys.c
67701@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
67702 error = -EACCES;
67703 goto out;
67704 }
67705+
67706+ if (gr_handle_chroot_setpriority(p, niceval)) {
67707+ error = -EACCES;
67708+ goto out;
67709+ }
67710+
67711 no_nice = security_task_setnice(p, niceval);
67712 if (no_nice) {
67713 error = no_nice;
67714@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
67715 goto error;
67716 }
67717
67718+ if (gr_check_group_change(new->gid, new->egid, -1))
67719+ goto error;
67720+
67721 if (rgid != (gid_t) -1 ||
67722 (egid != (gid_t) -1 && egid != old->gid))
67723 new->sgid = new->egid;
67724@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
67725 old = current_cred();
67726
67727 retval = -EPERM;
67728+
67729+ if (gr_check_group_change(gid, gid, gid))
67730+ goto error;
67731+
67732 if (nsown_capable(CAP_SETGID))
67733 new->gid = new->egid = new->sgid = new->fsgid = gid;
67734 else if (gid == old->gid || gid == old->sgid)
67735@@ -618,7 +631,7 @@ error:
67736 /*
67737 * change the user struct in a credentials set to match the new UID
67738 */
67739-static int set_user(struct cred *new)
67740+int set_user(struct cred *new)
67741 {
67742 struct user_struct *new_user;
67743
67744@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
67745 goto error;
67746 }
67747
67748+ if (gr_check_user_change(new->uid, new->euid, -1))
67749+ goto error;
67750+
67751 if (new->uid != old->uid) {
67752 retval = set_user(new);
67753 if (retval < 0)
67754@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
67755 old = current_cred();
67756
67757 retval = -EPERM;
67758+
67759+ if (gr_check_crash_uid(uid))
67760+ goto error;
67761+ if (gr_check_user_change(uid, uid, uid))
67762+ goto error;
67763+
67764 if (nsown_capable(CAP_SETUID)) {
67765 new->suid = new->uid = uid;
67766 if (uid != old->uid) {
67767@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
67768 goto error;
67769 }
67770
67771+ if (gr_check_user_change(ruid, euid, -1))
67772+ goto error;
67773+
67774 if (ruid != (uid_t) -1) {
67775 new->uid = ruid;
67776 if (ruid != old->uid) {
67777@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
67778 goto error;
67779 }
67780
67781+ if (gr_check_group_change(rgid, egid, -1))
67782+ goto error;
67783+
67784 if (rgid != (gid_t) -1)
67785 new->gid = rgid;
67786 if (egid != (gid_t) -1)
67787@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67788 old = current_cred();
67789 old_fsuid = old->fsuid;
67790
67791+ if (gr_check_user_change(-1, -1, uid))
67792+ goto error;
67793+
67794 if (uid == old->uid || uid == old->euid ||
67795 uid == old->suid || uid == old->fsuid ||
67796 nsown_capable(CAP_SETUID)) {
67797@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67798 }
67799 }
67800
67801+error:
67802 abort_creds(new);
67803 return old_fsuid;
67804
67805@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
67806 if (gid == old->gid || gid == old->egid ||
67807 gid == old->sgid || gid == old->fsgid ||
67808 nsown_capable(CAP_SETGID)) {
67809+ if (gr_check_group_change(-1, -1, gid))
67810+ goto error;
67811+
67812 if (gid != old_fsgid) {
67813 new->fsgid = gid;
67814 goto change_okay;
67815 }
67816 }
67817
67818+error:
67819 abort_creds(new);
67820 return old_fsgid;
67821
67822@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
67823 }
67824 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
67825 snprintf(buf, len, "2.6.%u%s", v, rest);
67826- ret = copy_to_user(release, buf, len);
67827+ if (len > sizeof(buf))
67828+ ret = -EFAULT;
67829+ else
67830+ ret = copy_to_user(release, buf, len);
67831 }
67832 return ret;
67833 }
67834@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
67835 return -EFAULT;
67836
67837 down_read(&uts_sem);
67838- error = __copy_to_user(&name->sysname, &utsname()->sysname,
67839+ error = __copy_to_user(name->sysname, &utsname()->sysname,
67840 __OLD_UTS_LEN);
67841 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
67842- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
67843+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
67844 __OLD_UTS_LEN);
67845 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
67846- error |= __copy_to_user(&name->release, &utsname()->release,
67847+ error |= __copy_to_user(name->release, &utsname()->release,
67848 __OLD_UTS_LEN);
67849 error |= __put_user(0, name->release + __OLD_UTS_LEN);
67850- error |= __copy_to_user(&name->version, &utsname()->version,
67851+ error |= __copy_to_user(name->version, &utsname()->version,
67852 __OLD_UTS_LEN);
67853 error |= __put_user(0, name->version + __OLD_UTS_LEN);
67854- error |= __copy_to_user(&name->machine, &utsname()->machine,
67855+ error |= __copy_to_user(name->machine, &utsname()->machine,
67856 __OLD_UTS_LEN);
67857 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
67858 up_read(&uts_sem);
67859@@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
67860 error = get_dumpable(me->mm);
67861 break;
67862 case PR_SET_DUMPABLE:
67863- if (arg2 < 0 || arg2 > 1) {
67864+ if (arg2 > 1) {
67865 error = -EINVAL;
67866 break;
67867 }
67868diff --git a/kernel/sysctl.c b/kernel/sysctl.c
67869index f487f25..9056a9e 100644
67870--- a/kernel/sysctl.c
67871+++ b/kernel/sysctl.c
67872@@ -86,6 +86,13 @@
67873
67874
67875 #if defined(CONFIG_SYSCTL)
67876+#include <linux/grsecurity.h>
67877+#include <linux/grinternal.h>
67878+
67879+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
67880+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67881+ const int op);
67882+extern int gr_handle_chroot_sysctl(const int op);
67883
67884 /* External variables not in a header file. */
67885 extern int sysctl_overcommit_memory;
67886@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
67887 }
67888
67889 #endif
67890+extern struct ctl_table grsecurity_table[];
67891
67892 static struct ctl_table root_table[];
67893 static struct ctl_table_root sysctl_table_root;
67894@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
67895 int sysctl_legacy_va_layout;
67896 #endif
67897
67898+#ifdef CONFIG_PAX_SOFTMODE
67899+static ctl_table pax_table[] = {
67900+ {
67901+ .procname = "softmode",
67902+ .data = &pax_softmode,
67903+ .maxlen = sizeof(unsigned int),
67904+ .mode = 0600,
67905+ .proc_handler = &proc_dointvec,
67906+ },
67907+
67908+ { }
67909+};
67910+#endif
67911+
67912 /* The default sysctl tables: */
67913
67914 static struct ctl_table root_table[] = {
67915@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
67916 #endif
67917
67918 static struct ctl_table kern_table[] = {
67919+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
67920+ {
67921+ .procname = "grsecurity",
67922+ .mode = 0500,
67923+ .child = grsecurity_table,
67924+ },
67925+#endif
67926+
67927+#ifdef CONFIG_PAX_SOFTMODE
67928+ {
67929+ .procname = "pax",
67930+ .mode = 0500,
67931+ .child = pax_table,
67932+ },
67933+#endif
67934+
67935 {
67936 .procname = "sched_child_runs_first",
67937 .data = &sysctl_sched_child_runs_first,
67938@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
67939 .data = &modprobe_path,
67940 .maxlen = KMOD_PATH_LEN,
67941 .mode = 0644,
67942- .proc_handler = proc_dostring,
67943+ .proc_handler = proc_dostring_modpriv,
67944 },
67945 {
67946 .procname = "modules_disabled",
67947@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
67948 .extra1 = &zero,
67949 .extra2 = &one,
67950 },
67951+#endif
67952 {
67953 .procname = "kptr_restrict",
67954 .data = &kptr_restrict,
67955 .maxlen = sizeof(int),
67956 .mode = 0644,
67957 .proc_handler = proc_dmesg_restrict,
67958+#ifdef CONFIG_GRKERNSEC_HIDESYM
67959+ .extra1 = &two,
67960+#else
67961 .extra1 = &zero,
67962+#endif
67963 .extra2 = &two,
67964 },
67965-#endif
67966 {
67967 .procname = "ngroups_max",
67968 .data = &ngroups_max,
67969@@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
67970 .proc_handler = proc_dointvec_minmax,
67971 .extra1 = &zero,
67972 },
67973+ {
67974+ .procname = "heap_stack_gap",
67975+ .data = &sysctl_heap_stack_gap,
67976+ .maxlen = sizeof(sysctl_heap_stack_gap),
67977+ .mode = 0644,
67978+ .proc_handler = proc_doulongvec_minmax,
67979+ },
67980 #else
67981 {
67982 .procname = "nr_trim_pages",
67983@@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
67984 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
67985 {
67986 int mode;
67987+ int error;
67988+
67989+ if (table->parent != NULL && table->parent->procname != NULL &&
67990+ table->procname != NULL &&
67991+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
67992+ return -EACCES;
67993+ if (gr_handle_chroot_sysctl(op))
67994+ return -EACCES;
67995+ error = gr_handle_sysctl(table, op);
67996+ if (error)
67997+ return error;
67998
67999 if (root->permissions)
68000 mode = root->permissions(root, current->nsproxy, table);
68001@@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
68002 buffer, lenp, ppos);
68003 }
68004
68005+int proc_dostring_modpriv(struct ctl_table *table, int write,
68006+ void __user *buffer, size_t *lenp, loff_t *ppos)
68007+{
68008+ if (write && !capable(CAP_SYS_MODULE))
68009+ return -EPERM;
68010+
68011+ return _proc_do_string(table->data, table->maxlen, write,
68012+ buffer, lenp, ppos);
68013+}
68014+
68015 static size_t proc_skip_spaces(char **buf)
68016 {
68017 size_t ret;
68018@@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68019 len = strlen(tmp);
68020 if (len > *size)
68021 len = *size;
68022+ if (len > sizeof(tmp))
68023+ len = sizeof(tmp);
68024 if (copy_to_user(*buf, tmp, len))
68025 return -EFAULT;
68026 *size -= len;
68027@@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68028 *i = val;
68029 } else {
68030 val = convdiv * (*i) / convmul;
68031- if (!first)
68032+ if (!first) {
68033 err = proc_put_char(&buffer, &left, '\t');
68034+ if (err)
68035+ break;
68036+ }
68037 err = proc_put_long(&buffer, &left, val, false);
68038 if (err)
68039 break;
68040@@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
68041 return -ENOSYS;
68042 }
68043
68044+int proc_dostring_modpriv(struct ctl_table *table, int write,
68045+ void __user *buffer, size_t *lenp, loff_t *ppos)
68046+{
68047+ return -ENOSYS;
68048+}
68049+
68050 int proc_dointvec(struct ctl_table *table, int write,
68051 void __user *buffer, size_t *lenp, loff_t *ppos)
68052 {
68053@@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68054 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68055 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68056 EXPORT_SYMBOL(proc_dostring);
68057+EXPORT_SYMBOL(proc_dostring_modpriv);
68058 EXPORT_SYMBOL(proc_doulongvec_minmax);
68059 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68060 EXPORT_SYMBOL(register_sysctl_table);
68061diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68062index a650694..aaeeb20 100644
68063--- a/kernel/sysctl_binary.c
68064+++ b/kernel/sysctl_binary.c
68065@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68066 int i;
68067
68068 set_fs(KERNEL_DS);
68069- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68070+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68071 set_fs(old_fs);
68072 if (result < 0)
68073 goto out_kfree;
68074@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68075 }
68076
68077 set_fs(KERNEL_DS);
68078- result = vfs_write(file, buffer, str - buffer, &pos);
68079+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68080 set_fs(old_fs);
68081 if (result < 0)
68082 goto out_kfree;
68083@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68084 int i;
68085
68086 set_fs(KERNEL_DS);
68087- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68088+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68089 set_fs(old_fs);
68090 if (result < 0)
68091 goto out_kfree;
68092@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68093 }
68094
68095 set_fs(KERNEL_DS);
68096- result = vfs_write(file, buffer, str - buffer, &pos);
68097+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68098 set_fs(old_fs);
68099 if (result < 0)
68100 goto out_kfree;
68101@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68102 int i;
68103
68104 set_fs(KERNEL_DS);
68105- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68106+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68107 set_fs(old_fs);
68108 if (result < 0)
68109 goto out;
68110@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68111 __le16 dnaddr;
68112
68113 set_fs(KERNEL_DS);
68114- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68115+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68116 set_fs(old_fs);
68117 if (result < 0)
68118 goto out;
68119@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68120 le16_to_cpu(dnaddr) & 0x3ff);
68121
68122 set_fs(KERNEL_DS);
68123- result = vfs_write(file, buf, len, &pos);
68124+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68125 set_fs(old_fs);
68126 if (result < 0)
68127 goto out;
68128diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68129index 362da65..ab8ef8c 100644
68130--- a/kernel/sysctl_check.c
68131+++ b/kernel/sysctl_check.c
68132@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68133 set_fail(&fail, table, "Directory with extra2");
68134 } else {
68135 if ((table->proc_handler == proc_dostring) ||
68136+ (table->proc_handler == proc_dostring_modpriv) ||
68137 (table->proc_handler == proc_dointvec) ||
68138 (table->proc_handler == proc_dointvec_minmax) ||
68139 (table->proc_handler == proc_dointvec_jiffies) ||
68140diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68141index e660464..c8b9e67 100644
68142--- a/kernel/taskstats.c
68143+++ b/kernel/taskstats.c
68144@@ -27,9 +27,12 @@
68145 #include <linux/cgroup.h>
68146 #include <linux/fs.h>
68147 #include <linux/file.h>
68148+#include <linux/grsecurity.h>
68149 #include <net/genetlink.h>
68150 #include <linux/atomic.h>
68151
68152+extern int gr_is_taskstats_denied(int pid);
68153+
68154 /*
68155 * Maximum length of a cpumask that can be specified in
68156 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68157@@ -556,6 +559,9 @@ err:
68158
68159 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68160 {
68161+ if (gr_is_taskstats_denied(current->pid))
68162+ return -EACCES;
68163+
68164 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68165 return cmd_attr_register_cpumask(info);
68166 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68167diff --git a/kernel/time.c b/kernel/time.c
68168index 73e416d..cfc6f69 100644
68169--- a/kernel/time.c
68170+++ b/kernel/time.c
68171@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68172 return error;
68173
68174 if (tz) {
68175+ /* we log in do_settimeofday called below, so don't log twice
68176+ */
68177+ if (!tv)
68178+ gr_log_timechange();
68179+
68180 /* SMP safe, global irq locking makes it work. */
68181 sys_tz = *tz;
68182 update_vsyscall_tz();
68183diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68184index 8a46f5d..bbe6f9c 100644
68185--- a/kernel/time/alarmtimer.c
68186+++ b/kernel/time/alarmtimer.c
68187@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
68188 struct platform_device *pdev;
68189 int error = 0;
68190 int i;
68191- struct k_clock alarm_clock = {
68192+ static struct k_clock alarm_clock = {
68193 .clock_getres = alarm_clock_getres,
68194 .clock_get = alarm_clock_get,
68195 .timer_create = alarm_timer_create,
68196diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68197index fd4a7b1..fae5c2a 100644
68198--- a/kernel/time/tick-broadcast.c
68199+++ b/kernel/time/tick-broadcast.c
68200@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68201 * then clear the broadcast bit.
68202 */
68203 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68204- int cpu = smp_processor_id();
68205+ cpu = smp_processor_id();
68206
68207 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68208 tick_broadcast_clear_oneshot(cpu);
68209diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68210index 0c63581..e25dcb6 100644
68211--- a/kernel/time/timekeeping.c
68212+++ b/kernel/time/timekeeping.c
68213@@ -14,6 +14,7 @@
68214 #include <linux/init.h>
68215 #include <linux/mm.h>
68216 #include <linux/sched.h>
68217+#include <linux/grsecurity.h>
68218 #include <linux/syscore_ops.h>
68219 #include <linux/clocksource.h>
68220 #include <linux/jiffies.h>
68221@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
68222 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68223 return -EINVAL;
68224
68225+ gr_log_timechange();
68226+
68227 write_seqlock_irqsave(&xtime_lock, flags);
68228
68229 timekeeping_forward_now();
68230diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68231index 3258455..f35227d 100644
68232--- a/kernel/time/timer_list.c
68233+++ b/kernel/time/timer_list.c
68234@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68235
68236 static void print_name_offset(struct seq_file *m, void *sym)
68237 {
68238+#ifdef CONFIG_GRKERNSEC_HIDESYM
68239+ SEQ_printf(m, "<%p>", NULL);
68240+#else
68241 char symname[KSYM_NAME_LEN];
68242
68243 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68244 SEQ_printf(m, "<%pK>", sym);
68245 else
68246 SEQ_printf(m, "%s", symname);
68247+#endif
68248 }
68249
68250 static void
68251@@ -112,7 +116,11 @@ next_one:
68252 static void
68253 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68254 {
68255+#ifdef CONFIG_GRKERNSEC_HIDESYM
68256+ SEQ_printf(m, " .base: %p\n", NULL);
68257+#else
68258 SEQ_printf(m, " .base: %pK\n", base);
68259+#endif
68260 SEQ_printf(m, " .index: %d\n",
68261 base->index);
68262 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68263@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68264 {
68265 struct proc_dir_entry *pe;
68266
68267+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68268+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68269+#else
68270 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68271+#endif
68272 if (!pe)
68273 return -ENOMEM;
68274 return 0;
68275diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68276index 0b537f2..9e71eca 100644
68277--- a/kernel/time/timer_stats.c
68278+++ b/kernel/time/timer_stats.c
68279@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68280 static unsigned long nr_entries;
68281 static struct entry entries[MAX_ENTRIES];
68282
68283-static atomic_t overflow_count;
68284+static atomic_unchecked_t overflow_count;
68285
68286 /*
68287 * The entries are in a hash-table, for fast lookup:
68288@@ -140,7 +140,7 @@ static void reset_entries(void)
68289 nr_entries = 0;
68290 memset(entries, 0, sizeof(entries));
68291 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68292- atomic_set(&overflow_count, 0);
68293+ atomic_set_unchecked(&overflow_count, 0);
68294 }
68295
68296 static struct entry *alloc_entry(void)
68297@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68298 if (likely(entry))
68299 entry->count++;
68300 else
68301- atomic_inc(&overflow_count);
68302+ atomic_inc_unchecked(&overflow_count);
68303
68304 out_unlock:
68305 raw_spin_unlock_irqrestore(lock, flags);
68306@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68307
68308 static void print_name_offset(struct seq_file *m, unsigned long addr)
68309 {
68310+#ifdef CONFIG_GRKERNSEC_HIDESYM
68311+ seq_printf(m, "<%p>", NULL);
68312+#else
68313 char symname[KSYM_NAME_LEN];
68314
68315 if (lookup_symbol_name(addr, symname) < 0)
68316 seq_printf(m, "<%p>", (void *)addr);
68317 else
68318 seq_printf(m, "%s", symname);
68319+#endif
68320 }
68321
68322 static int tstats_show(struct seq_file *m, void *v)
68323@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68324
68325 seq_puts(m, "Timer Stats Version: v0.2\n");
68326 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68327- if (atomic_read(&overflow_count))
68328+ if (atomic_read_unchecked(&overflow_count))
68329 seq_printf(m, "Overflow: %d entries\n",
68330- atomic_read(&overflow_count));
68331+ atomic_read_unchecked(&overflow_count));
68332
68333 for (i = 0; i < nr_entries; i++) {
68334 entry = entries + i;
68335@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68336 {
68337 struct proc_dir_entry *pe;
68338
68339+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68340+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68341+#else
68342 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68343+#endif
68344 if (!pe)
68345 return -ENOMEM;
68346 return 0;
68347diff --git a/kernel/timer.c b/kernel/timer.c
68348index a297ffc..5e16b0b 100644
68349--- a/kernel/timer.c
68350+++ b/kernel/timer.c
68351@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68352 /*
68353 * This function runs timers and the timer-tq in bottom half context.
68354 */
68355-static void run_timer_softirq(struct softirq_action *h)
68356+static void run_timer_softirq(void)
68357 {
68358 struct tvec_base *base = __this_cpu_read(tvec_bases);
68359
68360diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68361index cdea7b5..9b820d4 100644
68362--- a/kernel/trace/blktrace.c
68363+++ b/kernel/trace/blktrace.c
68364@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68365 struct blk_trace *bt = filp->private_data;
68366 char buf[16];
68367
68368- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68369+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68370
68371 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68372 }
68373@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68374 return 1;
68375
68376 bt = buf->chan->private_data;
68377- atomic_inc(&bt->dropped);
68378+ atomic_inc_unchecked(&bt->dropped);
68379 return 0;
68380 }
68381
68382@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68383
68384 bt->dir = dir;
68385 bt->dev = dev;
68386- atomic_set(&bt->dropped, 0);
68387+ atomic_set_unchecked(&bt->dropped, 0);
68388
68389 ret = -EIO;
68390 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68391diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68392index 683d559..d70d914 100644
68393--- a/kernel/trace/ftrace.c
68394+++ b/kernel/trace/ftrace.c
68395@@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68396 if (unlikely(ftrace_disabled))
68397 return 0;
68398
68399+ ret = ftrace_arch_code_modify_prepare();
68400+ FTRACE_WARN_ON(ret);
68401+ if (ret)
68402+ return 0;
68403+
68404 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68405+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68406 if (ret) {
68407 ftrace_bug(ret, ip);
68408- return 0;
68409 }
68410- return 1;
68411+ return ret ? 0 : 1;
68412 }
68413
68414 /*
68415@@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68416
68417 int
68418 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68419- void *data)
68420+ void *data)
68421 {
68422 struct ftrace_func_probe *entry;
68423 struct ftrace_page *pg;
68424diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68425index a3f1bc5..5e651718 100644
68426--- a/kernel/trace/trace.c
68427+++ b/kernel/trace/trace.c
68428@@ -4254,10 +4254,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68429 };
68430 #endif
68431
68432-static struct dentry *d_tracer;
68433-
68434 struct dentry *tracing_init_dentry(void)
68435 {
68436+ static struct dentry *d_tracer;
68437 static int once;
68438
68439 if (d_tracer)
68440@@ -4277,10 +4276,9 @@ struct dentry *tracing_init_dentry(void)
68441 return d_tracer;
68442 }
68443
68444-static struct dentry *d_percpu;
68445-
68446 struct dentry *tracing_dentry_percpu(void)
68447 {
68448+ static struct dentry *d_percpu;
68449 static int once;
68450 struct dentry *d_tracer;
68451
68452diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68453index c212a7f..7b02394 100644
68454--- a/kernel/trace/trace_events.c
68455+++ b/kernel/trace/trace_events.c
68456@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
68457 struct ftrace_module_file_ops {
68458 struct list_head list;
68459 struct module *mod;
68460- struct file_operations id;
68461- struct file_operations enable;
68462- struct file_operations format;
68463- struct file_operations filter;
68464 };
68465
68466 static struct ftrace_module_file_ops *
68467@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
68468
68469 file_ops->mod = mod;
68470
68471- file_ops->id = ftrace_event_id_fops;
68472- file_ops->id.owner = mod;
68473-
68474- file_ops->enable = ftrace_enable_fops;
68475- file_ops->enable.owner = mod;
68476-
68477- file_ops->filter = ftrace_event_filter_fops;
68478- file_ops->filter.owner = mod;
68479-
68480- file_ops->format = ftrace_event_format_fops;
68481- file_ops->format.owner = mod;
68482+ pax_open_kernel();
68483+ *(void **)&mod->trace_id.owner = mod;
68484+ *(void **)&mod->trace_enable.owner = mod;
68485+ *(void **)&mod->trace_filter.owner = mod;
68486+ *(void **)&mod->trace_format.owner = mod;
68487+ pax_close_kernel();
68488
68489 list_add(&file_ops->list, &ftrace_module_file_list);
68490
68491@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
68492
68493 for_each_event(call, start, end) {
68494 __trace_add_event_call(*call, mod,
68495- &file_ops->id, &file_ops->enable,
68496- &file_ops->filter, &file_ops->format);
68497+ &mod->trace_id, &mod->trace_enable,
68498+ &mod->trace_filter, &mod->trace_format);
68499 }
68500 }
68501
68502diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68503index 00d527c..7c5b1a3 100644
68504--- a/kernel/trace/trace_kprobe.c
68505+++ b/kernel/trace/trace_kprobe.c
68506@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68507 long ret;
68508 int maxlen = get_rloc_len(*(u32 *)dest);
68509 u8 *dst = get_rloc_data(dest);
68510- u8 *src = addr;
68511+ const u8 __user *src = (const u8 __force_user *)addr;
68512 mm_segment_t old_fs = get_fs();
68513 if (!maxlen)
68514 return;
68515@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68516 pagefault_disable();
68517 do
68518 ret = __copy_from_user_inatomic(dst++, src++, 1);
68519- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68520+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68521 dst[-1] = '\0';
68522 pagefault_enable();
68523 set_fs(old_fs);
68524@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68525 ((u8 *)get_rloc_data(dest))[0] = '\0';
68526 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68527 } else
68528- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68529+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68530 get_rloc_offs(*(u32 *)dest));
68531 }
68532 /* Return the length of string -- including null terminal byte */
68533@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68534 set_fs(KERNEL_DS);
68535 pagefault_disable();
68536 do {
68537- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68538+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68539 len++;
68540 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68541 pagefault_enable();
68542diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68543index fd3c8aa..5f324a6 100644
68544--- a/kernel/trace/trace_mmiotrace.c
68545+++ b/kernel/trace/trace_mmiotrace.c
68546@@ -24,7 +24,7 @@ struct header_iter {
68547 static struct trace_array *mmio_trace_array;
68548 static bool overrun_detected;
68549 static unsigned long prev_overruns;
68550-static atomic_t dropped_count;
68551+static atomic_unchecked_t dropped_count;
68552
68553 static void mmio_reset_data(struct trace_array *tr)
68554 {
68555@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68556
68557 static unsigned long count_overruns(struct trace_iterator *iter)
68558 {
68559- unsigned long cnt = atomic_xchg(&dropped_count, 0);
68560+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68561 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68562
68563 if (over > prev_overruns)
68564@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68565 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68566 sizeof(*entry), 0, pc);
68567 if (!event) {
68568- atomic_inc(&dropped_count);
68569+ atomic_inc_unchecked(&dropped_count);
68570 return;
68571 }
68572 entry = ring_buffer_event_data(event);
68573@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68574 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68575 sizeof(*entry), 0, pc);
68576 if (!event) {
68577- atomic_inc(&dropped_count);
68578+ atomic_inc_unchecked(&dropped_count);
68579 return;
68580 }
68581 entry = ring_buffer_event_data(event);
68582diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68583index 0d6ff35..67e0ed7 100644
68584--- a/kernel/trace/trace_output.c
68585+++ b/kernel/trace/trace_output.c
68586@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
68587
68588 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68589 if (!IS_ERR(p)) {
68590- p = mangle_path(s->buffer + s->len, p, "\n");
68591+ p = mangle_path(s->buffer + s->len, p, "\n\\");
68592 if (p) {
68593 s->len = p - s->buffer;
68594 return 1;
68595diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68596index d4545f4..a9010a1 100644
68597--- a/kernel/trace/trace_stack.c
68598+++ b/kernel/trace/trace_stack.c
68599@@ -53,7 +53,7 @@ static inline void check_stack(void)
68600 return;
68601
68602 /* we do not handle interrupt stacks yet */
68603- if (!object_is_on_stack(&this_size))
68604+ if (!object_starts_on_stack(&this_size))
68605 return;
68606
68607 local_irq_save(flags);
68608diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68609index 209b379..7f76423 100644
68610--- a/kernel/trace/trace_workqueue.c
68611+++ b/kernel/trace/trace_workqueue.c
68612@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68613 int cpu;
68614 pid_t pid;
68615 /* Can be inserted from interrupt or user context, need to be atomic */
68616- atomic_t inserted;
68617+ atomic_unchecked_t inserted;
68618 /*
68619 * Don't need to be atomic, works are serialized in a single workqueue thread
68620 * on a single CPU.
68621@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68622 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
68623 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
68624 if (node->pid == wq_thread->pid) {
68625- atomic_inc(&node->inserted);
68626+ atomic_inc_unchecked(&node->inserted);
68627 goto found;
68628 }
68629 }
68630@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
68631 tsk = get_pid_task(pid, PIDTYPE_PID);
68632 if (tsk) {
68633 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
68634- atomic_read(&cws->inserted), cws->executed,
68635+ atomic_read_unchecked(&cws->inserted), cws->executed,
68636 tsk->comm);
68637 put_task_struct(tsk);
68638 }
68639diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
68640index 8745ac7..d144e37 100644
68641--- a/lib/Kconfig.debug
68642+++ b/lib/Kconfig.debug
68643@@ -1103,6 +1103,7 @@ config LATENCYTOP
68644 depends on DEBUG_KERNEL
68645 depends on STACKTRACE_SUPPORT
68646 depends on PROC_FS
68647+ depends on !GRKERNSEC_HIDESYM
68648 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
68649 select KALLSYMS
68650 select KALLSYMS_ALL
68651diff --git a/lib/bitmap.c b/lib/bitmap.c
68652index 0d4a127..33a06c7 100644
68653--- a/lib/bitmap.c
68654+++ b/lib/bitmap.c
68655@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
68656 {
68657 int c, old_c, totaldigits, ndigits, nchunks, nbits;
68658 u32 chunk;
68659- const char __user __force *ubuf = (const char __user __force *)buf;
68660+ const char __user *ubuf = (const char __force_user *)buf;
68661
68662 bitmap_zero(maskp, nmaskbits);
68663
68664@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
68665 {
68666 if (!access_ok(VERIFY_READ, ubuf, ulen))
68667 return -EFAULT;
68668- return __bitmap_parse((const char __force *)ubuf,
68669+ return __bitmap_parse((const char __force_kernel *)ubuf,
68670 ulen, 1, maskp, nmaskbits);
68671
68672 }
68673@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
68674 {
68675 unsigned a, b;
68676 int c, old_c, totaldigits;
68677- const char __user __force *ubuf = (const char __user __force *)buf;
68678+ const char __user *ubuf = (const char __force_user *)buf;
68679 int exp_digit, in_range;
68680
68681 totaldigits = c = 0;
68682@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
68683 {
68684 if (!access_ok(VERIFY_READ, ubuf, ulen))
68685 return -EFAULT;
68686- return __bitmap_parselist((const char __force *)ubuf,
68687+ return __bitmap_parselist((const char __force_kernel *)ubuf,
68688 ulen, 1, maskp, nmaskbits);
68689 }
68690 EXPORT_SYMBOL(bitmap_parselist_user);
68691diff --git a/lib/bug.c b/lib/bug.c
68692index a28c141..2bd3d95 100644
68693--- a/lib/bug.c
68694+++ b/lib/bug.c
68695@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
68696 return BUG_TRAP_TYPE_NONE;
68697
68698 bug = find_bug(bugaddr);
68699+ if (!bug)
68700+ return BUG_TRAP_TYPE_NONE;
68701
68702 file = NULL;
68703 line = 0;
68704diff --git a/lib/debugobjects.c b/lib/debugobjects.c
68705index 0ab9ae8..f01ceca 100644
68706--- a/lib/debugobjects.c
68707+++ b/lib/debugobjects.c
68708@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
68709 if (limit > 4)
68710 return;
68711
68712- is_on_stack = object_is_on_stack(addr);
68713+ is_on_stack = object_starts_on_stack(addr);
68714 if (is_on_stack == onstack)
68715 return;
68716
68717diff --git a/lib/devres.c b/lib/devres.c
68718index 9676617..5149e15 100644
68719--- a/lib/devres.c
68720+++ b/lib/devres.c
68721@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
68722 void devm_iounmap(struct device *dev, void __iomem *addr)
68723 {
68724 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
68725- (void *)addr));
68726+ (void __force *)addr));
68727 iounmap(addr);
68728 }
68729 EXPORT_SYMBOL(devm_iounmap);
68730@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
68731 {
68732 ioport_unmap(addr);
68733 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
68734- devm_ioport_map_match, (void *)addr));
68735+ devm_ioport_map_match, (void __force *)addr));
68736 }
68737 EXPORT_SYMBOL(devm_ioport_unmap);
68738
68739diff --git a/lib/dma-debug.c b/lib/dma-debug.c
68740index fea790a..ebb0e82 100644
68741--- a/lib/dma-debug.c
68742+++ b/lib/dma-debug.c
68743@@ -925,7 +925,7 @@ out:
68744
68745 static void check_for_stack(struct device *dev, void *addr)
68746 {
68747- if (object_is_on_stack(addr))
68748+ if (object_starts_on_stack(addr))
68749 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
68750 "stack [addr=%p]\n", addr);
68751 }
68752diff --git a/lib/extable.c b/lib/extable.c
68753index 4cac81e..63e9b8f 100644
68754--- a/lib/extable.c
68755+++ b/lib/extable.c
68756@@ -13,6 +13,7 @@
68757 #include <linux/init.h>
68758 #include <linux/sort.h>
68759 #include <asm/uaccess.h>
68760+#include <asm/pgtable.h>
68761
68762 #ifndef ARCH_HAS_SORT_EXTABLE
68763 /*
68764@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
68765 void sort_extable(struct exception_table_entry *start,
68766 struct exception_table_entry *finish)
68767 {
68768+ pax_open_kernel();
68769 sort(start, finish - start, sizeof(struct exception_table_entry),
68770 cmp_ex, NULL);
68771+ pax_close_kernel();
68772 }
68773
68774 #ifdef CONFIG_MODULES
68775diff --git a/lib/inflate.c b/lib/inflate.c
68776index 013a761..c28f3fc 100644
68777--- a/lib/inflate.c
68778+++ b/lib/inflate.c
68779@@ -269,7 +269,7 @@ static void free(void *where)
68780 malloc_ptr = free_mem_ptr;
68781 }
68782 #else
68783-#define malloc(a) kmalloc(a, GFP_KERNEL)
68784+#define malloc(a) kmalloc((a), GFP_KERNEL)
68785 #define free(a) kfree(a)
68786 #endif
68787
68788diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
68789index bd2bea9..6b3c95e 100644
68790--- a/lib/is_single_threaded.c
68791+++ b/lib/is_single_threaded.c
68792@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
68793 struct task_struct *p, *t;
68794 bool ret;
68795
68796+ if (!mm)
68797+ return true;
68798+
68799 if (atomic_read(&task->signal->live) != 1)
68800 return false;
68801
68802diff --git a/lib/radix-tree.c b/lib/radix-tree.c
68803index dc63d08..95ae14a 100644
68804--- a/lib/radix-tree.c
68805+++ b/lib/radix-tree.c
68806@@ -78,7 +78,7 @@ struct radix_tree_preload {
68807 int nr;
68808 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
68809 };
68810-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
68811+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
68812
68813 static inline void *ptr_to_indirect(void *ptr)
68814 {
68815diff --git a/lib/vsprintf.c b/lib/vsprintf.c
68816index 38e612e..4fb99a8 100644
68817--- a/lib/vsprintf.c
68818+++ b/lib/vsprintf.c
68819@@ -16,6 +16,9 @@
68820 * - scnprintf and vscnprintf
68821 */
68822
68823+#ifdef CONFIG_GRKERNSEC_HIDESYM
68824+#define __INCLUDED_BY_HIDESYM 1
68825+#endif
68826 #include <stdarg.h>
68827 #include <linux/module.h>
68828 #include <linux/types.h>
68829@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
68830 char sym[KSYM_SYMBOL_LEN];
68831 if (ext == 'B')
68832 sprint_backtrace(sym, value);
68833- else if (ext != 'f' && ext != 's')
68834+ else if (ext != 'f' && ext != 's' && ext != 'a')
68835 sprint_symbol(sym, value);
68836 else
68837 kallsyms_lookup(value, NULL, NULL, NULL, sym);
68838@@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
68839 return number(buf, end, *(const netdev_features_t *)addr, spec);
68840 }
68841
68842+#ifdef CONFIG_GRKERNSEC_HIDESYM
68843+int kptr_restrict __read_mostly = 2;
68844+#else
68845 int kptr_restrict __read_mostly;
68846+#endif
68847
68848 /*
68849 * Show a '%p' thing. A kernel extension is that the '%p' is followed
68850@@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
68851 * - 'S' For symbolic direct pointers with offset
68852 * - 's' For symbolic direct pointers without offset
68853 * - 'B' For backtraced symbolic direct pointers with offset
68854+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
68855+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
68856 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
68857 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
68858 * - 'M' For a 6-byte MAC address, it prints the address in the
68859@@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68860 {
68861 if (!ptr && *fmt != 'K') {
68862 /*
68863- * Print (null) with the same width as a pointer so it makes
68864+ * Print (nil) with the same width as a pointer so it makes
68865 * tabular output look nice.
68866 */
68867 if (spec.field_width == -1)
68868 spec.field_width = 2 * sizeof(void *);
68869- return string(buf, end, "(null)", spec);
68870+ return string(buf, end, "(nil)", spec);
68871 }
68872
68873 switch (*fmt) {
68874@@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68875 /* Fallthrough */
68876 case 'S':
68877 case 's':
68878+#ifdef CONFIG_GRKERNSEC_HIDESYM
68879+ break;
68880+#else
68881+ return symbol_string(buf, end, ptr, spec, *fmt);
68882+#endif
68883+ case 'A':
68884+ case 'a':
68885 case 'B':
68886 return symbol_string(buf, end, ptr, spec, *fmt);
68887 case 'R':
68888@@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68889 typeof(type) value; \
68890 if (sizeof(type) == 8) { \
68891 args = PTR_ALIGN(args, sizeof(u32)); \
68892- *(u32 *)&value = *(u32 *)args; \
68893- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
68894+ *(u32 *)&value = *(const u32 *)args; \
68895+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
68896 } else { \
68897 args = PTR_ALIGN(args, sizeof(type)); \
68898- value = *(typeof(type) *)args; \
68899+ value = *(const typeof(type) *)args; \
68900 } \
68901 args += sizeof(type); \
68902 value; \
68903@@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68904 case FORMAT_TYPE_STR: {
68905 const char *str_arg = args;
68906 args += strlen(str_arg) + 1;
68907- str = string(str, end, (char *)str_arg, spec);
68908+ str = string(str, end, str_arg, spec);
68909 break;
68910 }
68911
68912diff --git a/localversion-grsec b/localversion-grsec
68913new file mode 100644
68914index 0000000..7cd6065
68915--- /dev/null
68916+++ b/localversion-grsec
68917@@ -0,0 +1 @@
68918+-grsec
68919diff --git a/mm/Kconfig b/mm/Kconfig
68920index e338407..49b5b7a 100644
68921--- a/mm/Kconfig
68922+++ b/mm/Kconfig
68923@@ -247,10 +247,10 @@ config KSM
68924 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
68925
68926 config DEFAULT_MMAP_MIN_ADDR
68927- int "Low address space to protect from user allocation"
68928+ int "Low address space to protect from user allocation"
68929 depends on MMU
68930- default 4096
68931- help
68932+ default 65536
68933+ help
68934 This is the portion of low virtual memory which should be protected
68935 from userspace allocation. Keeping a user from writing to low pages
68936 can help reduce the impact of kernel NULL pointer bugs.
68937diff --git a/mm/filemap.c b/mm/filemap.c
68938index b662757..3081ddd 100644
68939--- a/mm/filemap.c
68940+++ b/mm/filemap.c
68941@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
68942 struct address_space *mapping = file->f_mapping;
68943
68944 if (!mapping->a_ops->readpage)
68945- return -ENOEXEC;
68946+ return -ENODEV;
68947 file_accessed(file);
68948 vma->vm_ops = &generic_file_vm_ops;
68949 vma->vm_flags |= VM_CAN_NONLINEAR;
68950@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
68951 *pos = i_size_read(inode);
68952
68953 if (limit != RLIM_INFINITY) {
68954+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
68955 if (*pos >= limit) {
68956 send_sig(SIGXFSZ, current, 0);
68957 return -EFBIG;
68958diff --git a/mm/fremap.c b/mm/fremap.c
68959index 9ed4fd4..c42648d 100644
68960--- a/mm/fremap.c
68961+++ b/mm/fremap.c
68962@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
68963 retry:
68964 vma = find_vma(mm, start);
68965
68966+#ifdef CONFIG_PAX_SEGMEXEC
68967+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
68968+ goto out;
68969+#endif
68970+
68971 /*
68972 * Make sure the vma is shared, that it supports prefaulting,
68973 * and that the remapped range is valid and fully within
68974diff --git a/mm/highmem.c b/mm/highmem.c
68975index 57d82c6..e9e0552 100644
68976--- a/mm/highmem.c
68977+++ b/mm/highmem.c
68978@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
68979 * So no dangers, even with speculative execution.
68980 */
68981 page = pte_page(pkmap_page_table[i]);
68982+ pax_open_kernel();
68983 pte_clear(&init_mm, (unsigned long)page_address(page),
68984 &pkmap_page_table[i]);
68985-
68986+ pax_close_kernel();
68987 set_page_address(page, NULL);
68988 need_flush = 1;
68989 }
68990@@ -186,9 +187,11 @@ start:
68991 }
68992 }
68993 vaddr = PKMAP_ADDR(last_pkmap_nr);
68994+
68995+ pax_open_kernel();
68996 set_pte_at(&init_mm, vaddr,
68997 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
68998-
68999+ pax_close_kernel();
69000 pkmap_count[last_pkmap_nr] = 1;
69001 set_page_address(page, (void *)vaddr);
69002
69003diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69004index 8f7fc39..69bf1e9 100644
69005--- a/mm/huge_memory.c
69006+++ b/mm/huge_memory.c
69007@@ -733,7 +733,7 @@ out:
69008 * run pte_offset_map on the pmd, if an huge pmd could
69009 * materialize from under us from a different thread.
69010 */
69011- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69012+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69013 return VM_FAULT_OOM;
69014 /* if an huge pmd materialized from under us just retry later */
69015 if (unlikely(pmd_trans_huge(*pmd)))
69016diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69017index a876871..132cde0 100644
69018--- a/mm/hugetlb.c
69019+++ b/mm/hugetlb.c
69020@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69021 return 1;
69022 }
69023
69024+#ifdef CONFIG_PAX_SEGMEXEC
69025+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69026+{
69027+ struct mm_struct *mm = vma->vm_mm;
69028+ struct vm_area_struct *vma_m;
69029+ unsigned long address_m;
69030+ pte_t *ptep_m;
69031+
69032+ vma_m = pax_find_mirror_vma(vma);
69033+ if (!vma_m)
69034+ return;
69035+
69036+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69037+ address_m = address + SEGMEXEC_TASK_SIZE;
69038+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69039+ get_page(page_m);
69040+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
69041+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69042+}
69043+#endif
69044+
69045 /*
69046 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69047 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69048@@ -2459,6 +2480,11 @@ retry_avoidcopy:
69049 make_huge_pte(vma, new_page, 1));
69050 page_remove_rmap(old_page);
69051 hugepage_add_new_anon_rmap(new_page, vma, address);
69052+
69053+#ifdef CONFIG_PAX_SEGMEXEC
69054+ pax_mirror_huge_pte(vma, address, new_page);
69055+#endif
69056+
69057 /* Make the old page be freed below */
69058 new_page = old_page;
69059 mmu_notifier_invalidate_range_end(mm,
69060@@ -2613,6 +2639,10 @@ retry:
69061 && (vma->vm_flags & VM_SHARED)));
69062 set_huge_pte_at(mm, address, ptep, new_pte);
69063
69064+#ifdef CONFIG_PAX_SEGMEXEC
69065+ pax_mirror_huge_pte(vma, address, page);
69066+#endif
69067+
69068 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69069 /* Optimization, do the COW without a second fault */
69070 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69071@@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69072 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69073 struct hstate *h = hstate_vma(vma);
69074
69075+#ifdef CONFIG_PAX_SEGMEXEC
69076+ struct vm_area_struct *vma_m;
69077+#endif
69078+
69079 address &= huge_page_mask(h);
69080
69081 ptep = huge_pte_offset(mm, address);
69082@@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69083 VM_FAULT_SET_HINDEX(h - hstates);
69084 }
69085
69086+#ifdef CONFIG_PAX_SEGMEXEC
69087+ vma_m = pax_find_mirror_vma(vma);
69088+ if (vma_m) {
69089+ unsigned long address_m;
69090+
69091+ if (vma->vm_start > vma_m->vm_start) {
69092+ address_m = address;
69093+ address -= SEGMEXEC_TASK_SIZE;
69094+ vma = vma_m;
69095+ h = hstate_vma(vma);
69096+ } else
69097+ address_m = address + SEGMEXEC_TASK_SIZE;
69098+
69099+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69100+ return VM_FAULT_OOM;
69101+ address_m &= HPAGE_MASK;
69102+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69103+ }
69104+#endif
69105+
69106 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69107 if (!ptep)
69108 return VM_FAULT_OOM;
69109diff --git a/mm/internal.h b/mm/internal.h
69110index 2189af4..f2ca332 100644
69111--- a/mm/internal.h
69112+++ b/mm/internal.h
69113@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69114 * in mm/page_alloc.c
69115 */
69116 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69117+extern void free_compound_page(struct page *page);
69118 extern void prep_compound_page(struct page *page, unsigned long order);
69119 #ifdef CONFIG_MEMORY_FAILURE
69120 extern bool is_free_buddy_page(struct page *page);
69121diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69122index 45eb621..6ccd8ea 100644
69123--- a/mm/kmemleak.c
69124+++ b/mm/kmemleak.c
69125@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69126
69127 for (i = 0; i < object->trace_len; i++) {
69128 void *ptr = (void *)object->trace[i];
69129- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69130+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69131 }
69132 }
69133
69134diff --git a/mm/maccess.c b/mm/maccess.c
69135index d53adf9..03a24bf 100644
69136--- a/mm/maccess.c
69137+++ b/mm/maccess.c
69138@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69139 set_fs(KERNEL_DS);
69140 pagefault_disable();
69141 ret = __copy_from_user_inatomic(dst,
69142- (__force const void __user *)src, size);
69143+ (const void __force_user *)src, size);
69144 pagefault_enable();
69145 set_fs(old_fs);
69146
69147@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69148
69149 set_fs(KERNEL_DS);
69150 pagefault_disable();
69151- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69152+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69153 pagefault_enable();
69154 set_fs(old_fs);
69155
69156diff --git a/mm/madvise.c b/mm/madvise.c
69157index 74bf193..feb6fd3 100644
69158--- a/mm/madvise.c
69159+++ b/mm/madvise.c
69160@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69161 pgoff_t pgoff;
69162 unsigned long new_flags = vma->vm_flags;
69163
69164+#ifdef CONFIG_PAX_SEGMEXEC
69165+ struct vm_area_struct *vma_m;
69166+#endif
69167+
69168 switch (behavior) {
69169 case MADV_NORMAL:
69170 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69171@@ -110,6 +114,13 @@ success:
69172 /*
69173 * vm_flags is protected by the mmap_sem held in write mode.
69174 */
69175+
69176+#ifdef CONFIG_PAX_SEGMEXEC
69177+ vma_m = pax_find_mirror_vma(vma);
69178+ if (vma_m)
69179+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69180+#endif
69181+
69182 vma->vm_flags = new_flags;
69183
69184 out:
69185@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69186 struct vm_area_struct ** prev,
69187 unsigned long start, unsigned long end)
69188 {
69189+
69190+#ifdef CONFIG_PAX_SEGMEXEC
69191+ struct vm_area_struct *vma_m;
69192+#endif
69193+
69194 *prev = vma;
69195 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69196 return -EINVAL;
69197@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69198 zap_page_range(vma, start, end - start, &details);
69199 } else
69200 zap_page_range(vma, start, end - start, NULL);
69201+
69202+#ifdef CONFIG_PAX_SEGMEXEC
69203+ vma_m = pax_find_mirror_vma(vma);
69204+ if (vma_m) {
69205+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69206+ struct zap_details details = {
69207+ .nonlinear_vma = vma_m,
69208+ .last_index = ULONG_MAX,
69209+ };
69210+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69211+ } else
69212+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69213+ }
69214+#endif
69215+
69216 return 0;
69217 }
69218
69219@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69220 if (end < start)
69221 goto out;
69222
69223+#ifdef CONFIG_PAX_SEGMEXEC
69224+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69225+ if (end > SEGMEXEC_TASK_SIZE)
69226+ goto out;
69227+ } else
69228+#endif
69229+
69230+ if (end > TASK_SIZE)
69231+ goto out;
69232+
69233 error = 0;
69234 if (end == start)
69235 goto out;
69236diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69237index 56080ea..115071e 100644
69238--- a/mm/memory-failure.c
69239+++ b/mm/memory-failure.c
69240@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69241
69242 int sysctl_memory_failure_recovery __read_mostly = 1;
69243
69244-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69245+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69246
69247 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69248
69249@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
69250 si.si_signo = SIGBUS;
69251 si.si_errno = 0;
69252 si.si_code = BUS_MCEERR_AO;
69253- si.si_addr = (void *)addr;
69254+ si.si_addr = (void __user *)addr;
69255 #ifdef __ARCH_SI_TRAPNO
69256 si.si_trapno = trapno;
69257 #endif
69258@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69259 }
69260
69261 nr_pages = 1 << compound_trans_order(hpage);
69262- atomic_long_add(nr_pages, &mce_bad_pages);
69263+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69264
69265 /*
69266 * We need/can do nothing about count=0 pages.
69267@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69268 if (!PageHWPoison(hpage)
69269 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69270 || (p != hpage && TestSetPageHWPoison(hpage))) {
69271- atomic_long_sub(nr_pages, &mce_bad_pages);
69272+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69273 return 0;
69274 }
69275 set_page_hwpoison_huge_page(hpage);
69276@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69277 }
69278 if (hwpoison_filter(p)) {
69279 if (TestClearPageHWPoison(p))
69280- atomic_long_sub(nr_pages, &mce_bad_pages);
69281+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69282 unlock_page(hpage);
69283 put_page(hpage);
69284 return 0;
69285@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
69286 return 0;
69287 }
69288 if (TestClearPageHWPoison(p))
69289- atomic_long_sub(nr_pages, &mce_bad_pages);
69290+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69291 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69292 return 0;
69293 }
69294@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
69295 */
69296 if (TestClearPageHWPoison(page)) {
69297 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69298- atomic_long_sub(nr_pages, &mce_bad_pages);
69299+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69300 freeit = 1;
69301 if (PageHuge(page))
69302 clear_page_hwpoison_huge_page(page);
69303@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69304 }
69305 done:
69306 if (!PageHWPoison(hpage))
69307- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69308+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69309 set_page_hwpoison_huge_page(hpage);
69310 dequeue_hwpoisoned_huge_page(hpage);
69311 /* keep elevated page count for bad page */
69312@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
69313 return ret;
69314
69315 done:
69316- atomic_long_add(1, &mce_bad_pages);
69317+ atomic_long_add_unchecked(1, &mce_bad_pages);
69318 SetPageHWPoison(page);
69319 /* keep elevated page count for bad page */
69320 return ret;
69321diff --git a/mm/memory.c b/mm/memory.c
69322index fa2f04e..a8a40c8 100644
69323--- a/mm/memory.c
69324+++ b/mm/memory.c
69325@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69326 return;
69327
69328 pmd = pmd_offset(pud, start);
69329+
69330+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69331 pud_clear(pud);
69332 pmd_free_tlb(tlb, pmd, start);
69333+#endif
69334+
69335 }
69336
69337 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69338@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69339 if (end - 1 > ceiling - 1)
69340 return;
69341
69342+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69343 pud = pud_offset(pgd, start);
69344 pgd_clear(pgd);
69345 pud_free_tlb(tlb, pud, start);
69346+#endif
69347+
69348 }
69349
69350 /*
69351@@ -1585,12 +1592,6 @@ no_page_table:
69352 return page;
69353 }
69354
69355-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69356-{
69357- return stack_guard_page_start(vma, addr) ||
69358- stack_guard_page_end(vma, addr+PAGE_SIZE);
69359-}
69360-
69361 /**
69362 * __get_user_pages() - pin user pages in memory
69363 * @tsk: task_struct of target task
69364@@ -1663,10 +1664,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69365 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69366 i = 0;
69367
69368- do {
69369+ while (nr_pages) {
69370 struct vm_area_struct *vma;
69371
69372- vma = find_extend_vma(mm, start);
69373+ vma = find_vma(mm, start);
69374 if (!vma && in_gate_area(mm, start)) {
69375 unsigned long pg = start & PAGE_MASK;
69376 pgd_t *pgd;
69377@@ -1714,7 +1715,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69378 goto next_page;
69379 }
69380
69381- if (!vma ||
69382+ if (!vma || start < vma->vm_start ||
69383 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69384 !(vm_flags & vma->vm_flags))
69385 return i ? : -EFAULT;
69386@@ -1741,11 +1742,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69387 int ret;
69388 unsigned int fault_flags = 0;
69389
69390- /* For mlock, just skip the stack guard page. */
69391- if (foll_flags & FOLL_MLOCK) {
69392- if (stack_guard_page(vma, start))
69393- goto next_page;
69394- }
69395 if (foll_flags & FOLL_WRITE)
69396 fault_flags |= FAULT_FLAG_WRITE;
69397 if (nonblocking)
69398@@ -1819,7 +1815,7 @@ next_page:
69399 start += PAGE_SIZE;
69400 nr_pages--;
69401 } while (nr_pages && start < vma->vm_end);
69402- } while (nr_pages);
69403+ }
69404 return i;
69405 }
69406 EXPORT_SYMBOL(__get_user_pages);
69407@@ -2026,6 +2022,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69408 page_add_file_rmap(page);
69409 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69410
69411+#ifdef CONFIG_PAX_SEGMEXEC
69412+ pax_mirror_file_pte(vma, addr, page, ptl);
69413+#endif
69414+
69415 retval = 0;
69416 pte_unmap_unlock(pte, ptl);
69417 return retval;
69418@@ -2060,10 +2060,22 @@ out:
69419 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69420 struct page *page)
69421 {
69422+
69423+#ifdef CONFIG_PAX_SEGMEXEC
69424+ struct vm_area_struct *vma_m;
69425+#endif
69426+
69427 if (addr < vma->vm_start || addr >= vma->vm_end)
69428 return -EFAULT;
69429 if (!page_count(page))
69430 return -EINVAL;
69431+
69432+#ifdef CONFIG_PAX_SEGMEXEC
69433+ vma_m = pax_find_mirror_vma(vma);
69434+ if (vma_m)
69435+ vma_m->vm_flags |= VM_INSERTPAGE;
69436+#endif
69437+
69438 vma->vm_flags |= VM_INSERTPAGE;
69439 return insert_page(vma, addr, page, vma->vm_page_prot);
69440 }
69441@@ -2149,6 +2161,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69442 unsigned long pfn)
69443 {
69444 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69445+ BUG_ON(vma->vm_mirror);
69446
69447 if (addr < vma->vm_start || addr >= vma->vm_end)
69448 return -EFAULT;
69449@@ -2464,6 +2477,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69450 copy_user_highpage(dst, src, va, vma);
69451 }
69452
69453+#ifdef CONFIG_PAX_SEGMEXEC
69454+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69455+{
69456+ struct mm_struct *mm = vma->vm_mm;
69457+ spinlock_t *ptl;
69458+ pte_t *pte, entry;
69459+
69460+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69461+ entry = *pte;
69462+ if (!pte_present(entry)) {
69463+ if (!pte_none(entry)) {
69464+ BUG_ON(pte_file(entry));
69465+ free_swap_and_cache(pte_to_swp_entry(entry));
69466+ pte_clear_not_present_full(mm, address, pte, 0);
69467+ }
69468+ } else {
69469+ struct page *page;
69470+
69471+ flush_cache_page(vma, address, pte_pfn(entry));
69472+ entry = ptep_clear_flush(vma, address, pte);
69473+ BUG_ON(pte_dirty(entry));
69474+ page = vm_normal_page(vma, address, entry);
69475+ if (page) {
69476+ update_hiwater_rss(mm);
69477+ if (PageAnon(page))
69478+ dec_mm_counter_fast(mm, MM_ANONPAGES);
69479+ else
69480+ dec_mm_counter_fast(mm, MM_FILEPAGES);
69481+ page_remove_rmap(page);
69482+ page_cache_release(page);
69483+ }
69484+ }
69485+ pte_unmap_unlock(pte, ptl);
69486+}
69487+
69488+/* PaX: if vma is mirrored, synchronize the mirror's PTE
69489+ *
69490+ * the ptl of the lower mapped page is held on entry and is not released on exit
69491+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69492+ */
69493+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69494+{
69495+ struct mm_struct *mm = vma->vm_mm;
69496+ unsigned long address_m;
69497+ spinlock_t *ptl_m;
69498+ struct vm_area_struct *vma_m;
69499+ pmd_t *pmd_m;
69500+ pte_t *pte_m, entry_m;
69501+
69502+ BUG_ON(!page_m || !PageAnon(page_m));
69503+
69504+ vma_m = pax_find_mirror_vma(vma);
69505+ if (!vma_m)
69506+ return;
69507+
69508+ BUG_ON(!PageLocked(page_m));
69509+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69510+ address_m = address + SEGMEXEC_TASK_SIZE;
69511+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69512+ pte_m = pte_offset_map(pmd_m, address_m);
69513+ ptl_m = pte_lockptr(mm, pmd_m);
69514+ if (ptl != ptl_m) {
69515+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69516+ if (!pte_none(*pte_m))
69517+ goto out;
69518+ }
69519+
69520+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69521+ page_cache_get(page_m);
69522+ page_add_anon_rmap(page_m, vma_m, address_m);
69523+ inc_mm_counter_fast(mm, MM_ANONPAGES);
69524+ set_pte_at(mm, address_m, pte_m, entry_m);
69525+ update_mmu_cache(vma_m, address_m, entry_m);
69526+out:
69527+ if (ptl != ptl_m)
69528+ spin_unlock(ptl_m);
69529+ pte_unmap(pte_m);
69530+ unlock_page(page_m);
69531+}
69532+
69533+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69534+{
69535+ struct mm_struct *mm = vma->vm_mm;
69536+ unsigned long address_m;
69537+ spinlock_t *ptl_m;
69538+ struct vm_area_struct *vma_m;
69539+ pmd_t *pmd_m;
69540+ pte_t *pte_m, entry_m;
69541+
69542+ BUG_ON(!page_m || PageAnon(page_m));
69543+
69544+ vma_m = pax_find_mirror_vma(vma);
69545+ if (!vma_m)
69546+ return;
69547+
69548+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69549+ address_m = address + SEGMEXEC_TASK_SIZE;
69550+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69551+ pte_m = pte_offset_map(pmd_m, address_m);
69552+ ptl_m = pte_lockptr(mm, pmd_m);
69553+ if (ptl != ptl_m) {
69554+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69555+ if (!pte_none(*pte_m))
69556+ goto out;
69557+ }
69558+
69559+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69560+ page_cache_get(page_m);
69561+ page_add_file_rmap(page_m);
69562+ inc_mm_counter_fast(mm, MM_FILEPAGES);
69563+ set_pte_at(mm, address_m, pte_m, entry_m);
69564+ update_mmu_cache(vma_m, address_m, entry_m);
69565+out:
69566+ if (ptl != ptl_m)
69567+ spin_unlock(ptl_m);
69568+ pte_unmap(pte_m);
69569+}
69570+
69571+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
69572+{
69573+ struct mm_struct *mm = vma->vm_mm;
69574+ unsigned long address_m;
69575+ spinlock_t *ptl_m;
69576+ struct vm_area_struct *vma_m;
69577+ pmd_t *pmd_m;
69578+ pte_t *pte_m, entry_m;
69579+
69580+ vma_m = pax_find_mirror_vma(vma);
69581+ if (!vma_m)
69582+ return;
69583+
69584+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69585+ address_m = address + SEGMEXEC_TASK_SIZE;
69586+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69587+ pte_m = pte_offset_map(pmd_m, address_m);
69588+ ptl_m = pte_lockptr(mm, pmd_m);
69589+ if (ptl != ptl_m) {
69590+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69591+ if (!pte_none(*pte_m))
69592+ goto out;
69593+ }
69594+
69595+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
69596+ set_pte_at(mm, address_m, pte_m, entry_m);
69597+out:
69598+ if (ptl != ptl_m)
69599+ spin_unlock(ptl_m);
69600+ pte_unmap(pte_m);
69601+}
69602+
69603+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
69604+{
69605+ struct page *page_m;
69606+ pte_t entry;
69607+
69608+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
69609+ goto out;
69610+
69611+ entry = *pte;
69612+ page_m = vm_normal_page(vma, address, entry);
69613+ if (!page_m)
69614+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
69615+ else if (PageAnon(page_m)) {
69616+ if (pax_find_mirror_vma(vma)) {
69617+ pte_unmap_unlock(pte, ptl);
69618+ lock_page(page_m);
69619+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
69620+ if (pte_same(entry, *pte))
69621+ pax_mirror_anon_pte(vma, address, page_m, ptl);
69622+ else
69623+ unlock_page(page_m);
69624+ }
69625+ } else
69626+ pax_mirror_file_pte(vma, address, page_m, ptl);
69627+
69628+out:
69629+ pte_unmap_unlock(pte, ptl);
69630+}
69631+#endif
69632+
69633 /*
69634 * This routine handles present pages, when users try to write
69635 * to a shared page. It is done by copying the page to a new address
69636@@ -2675,6 +2868,12 @@ gotten:
69637 */
69638 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69639 if (likely(pte_same(*page_table, orig_pte))) {
69640+
69641+#ifdef CONFIG_PAX_SEGMEXEC
69642+ if (pax_find_mirror_vma(vma))
69643+ BUG_ON(!trylock_page(new_page));
69644+#endif
69645+
69646 if (old_page) {
69647 if (!PageAnon(old_page)) {
69648 dec_mm_counter_fast(mm, MM_FILEPAGES);
69649@@ -2726,6 +2925,10 @@ gotten:
69650 page_remove_rmap(old_page);
69651 }
69652
69653+#ifdef CONFIG_PAX_SEGMEXEC
69654+ pax_mirror_anon_pte(vma, address, new_page, ptl);
69655+#endif
69656+
69657 /* Free the old page.. */
69658 new_page = old_page;
69659 ret |= VM_FAULT_WRITE;
69660@@ -3005,6 +3208,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69661 swap_free(entry);
69662 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
69663 try_to_free_swap(page);
69664+
69665+#ifdef CONFIG_PAX_SEGMEXEC
69666+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
69667+#endif
69668+
69669 unlock_page(page);
69670 if (swapcache) {
69671 /*
69672@@ -3028,6 +3236,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69673
69674 /* No need to invalidate - it was non-present before */
69675 update_mmu_cache(vma, address, page_table);
69676+
69677+#ifdef CONFIG_PAX_SEGMEXEC
69678+ pax_mirror_anon_pte(vma, address, page, ptl);
69679+#endif
69680+
69681 unlock:
69682 pte_unmap_unlock(page_table, ptl);
69683 out:
69684@@ -3047,40 +3260,6 @@ out_release:
69685 }
69686
69687 /*
69688- * This is like a special single-page "expand_{down|up}wards()",
69689- * except we must first make sure that 'address{-|+}PAGE_SIZE'
69690- * doesn't hit another vma.
69691- */
69692-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
69693-{
69694- address &= PAGE_MASK;
69695- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
69696- struct vm_area_struct *prev = vma->vm_prev;
69697-
69698- /*
69699- * Is there a mapping abutting this one below?
69700- *
69701- * That's only ok if it's the same stack mapping
69702- * that has gotten split..
69703- */
69704- if (prev && prev->vm_end == address)
69705- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
69706-
69707- expand_downwards(vma, address - PAGE_SIZE);
69708- }
69709- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
69710- struct vm_area_struct *next = vma->vm_next;
69711-
69712- /* As VM_GROWSDOWN but s/below/above/ */
69713- if (next && next->vm_start == address + PAGE_SIZE)
69714- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
69715-
69716- expand_upwards(vma, address + PAGE_SIZE);
69717- }
69718- return 0;
69719-}
69720-
69721-/*
69722 * We enter with non-exclusive mmap_sem (to exclude vma changes,
69723 * but allow concurrent faults), and pte mapped but not yet locked.
69724 * We return with mmap_sem still held, but pte unmapped and unlocked.
69725@@ -3089,27 +3268,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69726 unsigned long address, pte_t *page_table, pmd_t *pmd,
69727 unsigned int flags)
69728 {
69729- struct page *page;
69730+ struct page *page = NULL;
69731 spinlock_t *ptl;
69732 pte_t entry;
69733
69734- pte_unmap(page_table);
69735-
69736- /* Check if we need to add a guard page to the stack */
69737- if (check_stack_guard_page(vma, address) < 0)
69738- return VM_FAULT_SIGBUS;
69739-
69740- /* Use the zero-page for reads */
69741 if (!(flags & FAULT_FLAG_WRITE)) {
69742 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
69743 vma->vm_page_prot));
69744- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69745+ ptl = pte_lockptr(mm, pmd);
69746+ spin_lock(ptl);
69747 if (!pte_none(*page_table))
69748 goto unlock;
69749 goto setpte;
69750 }
69751
69752 /* Allocate our own private page. */
69753+ pte_unmap(page_table);
69754+
69755 if (unlikely(anon_vma_prepare(vma)))
69756 goto oom;
69757 page = alloc_zeroed_user_highpage_movable(vma, address);
69758@@ -3128,6 +3303,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69759 if (!pte_none(*page_table))
69760 goto release;
69761
69762+#ifdef CONFIG_PAX_SEGMEXEC
69763+ if (pax_find_mirror_vma(vma))
69764+ BUG_ON(!trylock_page(page));
69765+#endif
69766+
69767 inc_mm_counter_fast(mm, MM_ANONPAGES);
69768 page_add_new_anon_rmap(page, vma, address);
69769 setpte:
69770@@ -3135,6 +3315,12 @@ setpte:
69771
69772 /* No need to invalidate - it was non-present before */
69773 update_mmu_cache(vma, address, page_table);
69774+
69775+#ifdef CONFIG_PAX_SEGMEXEC
69776+ if (page)
69777+ pax_mirror_anon_pte(vma, address, page, ptl);
69778+#endif
69779+
69780 unlock:
69781 pte_unmap_unlock(page_table, ptl);
69782 return 0;
69783@@ -3278,6 +3464,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69784 */
69785 /* Only go through if we didn't race with anybody else... */
69786 if (likely(pte_same(*page_table, orig_pte))) {
69787+
69788+#ifdef CONFIG_PAX_SEGMEXEC
69789+ if (anon && pax_find_mirror_vma(vma))
69790+ BUG_ON(!trylock_page(page));
69791+#endif
69792+
69793 flush_icache_page(vma, page);
69794 entry = mk_pte(page, vma->vm_page_prot);
69795 if (flags & FAULT_FLAG_WRITE)
69796@@ -3297,6 +3489,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69797
69798 /* no need to invalidate: a not-present page won't be cached */
69799 update_mmu_cache(vma, address, page_table);
69800+
69801+#ifdef CONFIG_PAX_SEGMEXEC
69802+ if (anon)
69803+ pax_mirror_anon_pte(vma, address, page, ptl);
69804+ else
69805+ pax_mirror_file_pte(vma, address, page, ptl);
69806+#endif
69807+
69808 } else {
69809 if (cow_page)
69810 mem_cgroup_uncharge_page(cow_page);
69811@@ -3450,6 +3650,12 @@ int handle_pte_fault(struct mm_struct *mm,
69812 if (flags & FAULT_FLAG_WRITE)
69813 flush_tlb_fix_spurious_fault(vma, address);
69814 }
69815+
69816+#ifdef CONFIG_PAX_SEGMEXEC
69817+ pax_mirror_pte(vma, address, pte, pmd, ptl);
69818+ return 0;
69819+#endif
69820+
69821 unlock:
69822 pte_unmap_unlock(pte, ptl);
69823 return 0;
69824@@ -3466,6 +3672,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69825 pmd_t *pmd;
69826 pte_t *pte;
69827
69828+#ifdef CONFIG_PAX_SEGMEXEC
69829+ struct vm_area_struct *vma_m;
69830+#endif
69831+
69832 __set_current_state(TASK_RUNNING);
69833
69834 count_vm_event(PGFAULT);
69835@@ -3477,6 +3687,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69836 if (unlikely(is_vm_hugetlb_page(vma)))
69837 return hugetlb_fault(mm, vma, address, flags);
69838
69839+#ifdef CONFIG_PAX_SEGMEXEC
69840+ vma_m = pax_find_mirror_vma(vma);
69841+ if (vma_m) {
69842+ unsigned long address_m;
69843+ pgd_t *pgd_m;
69844+ pud_t *pud_m;
69845+ pmd_t *pmd_m;
69846+
69847+ if (vma->vm_start > vma_m->vm_start) {
69848+ address_m = address;
69849+ address -= SEGMEXEC_TASK_SIZE;
69850+ vma = vma_m;
69851+ } else
69852+ address_m = address + SEGMEXEC_TASK_SIZE;
69853+
69854+ pgd_m = pgd_offset(mm, address_m);
69855+ pud_m = pud_alloc(mm, pgd_m, address_m);
69856+ if (!pud_m)
69857+ return VM_FAULT_OOM;
69858+ pmd_m = pmd_alloc(mm, pud_m, address_m);
69859+ if (!pmd_m)
69860+ return VM_FAULT_OOM;
69861+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
69862+ return VM_FAULT_OOM;
69863+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
69864+ }
69865+#endif
69866+
69867 pgd = pgd_offset(mm, address);
69868 pud = pud_alloc(mm, pgd, address);
69869 if (!pud)
69870@@ -3506,7 +3744,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69871 * run pte_offset_map on the pmd, if an huge pmd could
69872 * materialize from under us from a different thread.
69873 */
69874- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
69875+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69876 return VM_FAULT_OOM;
69877 /* if an huge pmd materialized from under us just retry later */
69878 if (unlikely(pmd_trans_huge(*pmd)))
69879@@ -3610,7 +3848,7 @@ static int __init gate_vma_init(void)
69880 gate_vma.vm_start = FIXADDR_USER_START;
69881 gate_vma.vm_end = FIXADDR_USER_END;
69882 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
69883- gate_vma.vm_page_prot = __P101;
69884+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
69885 /*
69886 * Make sure the vDSO gets into every core dump.
69887 * Dumping its contents makes post-mortem fully interpretable later
69888diff --git a/mm/mempolicy.c b/mm/mempolicy.c
69889index 47296fe..5c3d263 100644
69890--- a/mm/mempolicy.c
69891+++ b/mm/mempolicy.c
69892@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69893 unsigned long vmstart;
69894 unsigned long vmend;
69895
69896+#ifdef CONFIG_PAX_SEGMEXEC
69897+ struct vm_area_struct *vma_m;
69898+#endif
69899+
69900 vma = find_vma(mm, start);
69901 if (!vma || vma->vm_start > start)
69902 return -EFAULT;
69903@@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69904 err = policy_vma(vma, new_pol);
69905 if (err)
69906 goto out;
69907+
69908+#ifdef CONFIG_PAX_SEGMEXEC
69909+ vma_m = pax_find_mirror_vma(vma);
69910+ if (vma_m) {
69911+ err = policy_vma(vma_m, new_pol);
69912+ if (err)
69913+ goto out;
69914+ }
69915+#endif
69916+
69917 }
69918
69919 out:
69920@@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
69921
69922 if (end < start)
69923 return -EINVAL;
69924+
69925+#ifdef CONFIG_PAX_SEGMEXEC
69926+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69927+ if (end > SEGMEXEC_TASK_SIZE)
69928+ return -EINVAL;
69929+ } else
69930+#endif
69931+
69932+ if (end > TASK_SIZE)
69933+ return -EINVAL;
69934+
69935 if (end == start)
69936 return 0;
69937
69938@@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
69939 if (!mm)
69940 goto out;
69941
69942+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69943+ if (mm != current->mm &&
69944+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
69945+ err = -EPERM;
69946+ goto out;
69947+ }
69948+#endif
69949+
69950 /*
69951 * Check if this process has the right to modify the specified
69952 * process. The right exists if the process has administrative
69953@@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
69954 rcu_read_lock();
69955 tcred = __task_cred(task);
69956 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
69957- cred->uid != tcred->suid && cred->uid != tcred->uid &&
69958- !capable(CAP_SYS_NICE)) {
69959+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
69960 rcu_read_unlock();
69961 err = -EPERM;
69962 goto out;
69963diff --git a/mm/migrate.c b/mm/migrate.c
69964index 1503b6b..156c672 100644
69965--- a/mm/migrate.c
69966+++ b/mm/migrate.c
69967@@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
69968 if (!mm)
69969 return -EINVAL;
69970
69971+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69972+ if (mm != current->mm &&
69973+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
69974+ err = -EPERM;
69975+ goto out;
69976+ }
69977+#endif
69978+
69979 /*
69980 * Check if this process has the right to modify the specified
69981 * process. The right exists if the process has administrative
69982@@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
69983 rcu_read_lock();
69984 tcred = __task_cred(task);
69985 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
69986- cred->uid != tcred->suid && cred->uid != tcred->uid &&
69987- !capable(CAP_SYS_NICE)) {
69988+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
69989 rcu_read_unlock();
69990 err = -EPERM;
69991 goto out;
69992diff --git a/mm/mlock.c b/mm/mlock.c
69993index ef726e8..13e0901 100644
69994--- a/mm/mlock.c
69995+++ b/mm/mlock.c
69996@@ -13,6 +13,7 @@
69997 #include <linux/pagemap.h>
69998 #include <linux/mempolicy.h>
69999 #include <linux/syscalls.h>
70000+#include <linux/security.h>
70001 #include <linux/sched.h>
70002 #include <linux/export.h>
70003 #include <linux/rmap.h>
70004@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70005 return -EINVAL;
70006 if (end == start)
70007 return 0;
70008+ if (end > TASK_SIZE)
70009+ return -EINVAL;
70010+
70011 vma = find_vma(current->mm, start);
70012 if (!vma || vma->vm_start > start)
70013 return -ENOMEM;
70014@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70015 for (nstart = start ; ; ) {
70016 vm_flags_t newflags;
70017
70018+#ifdef CONFIG_PAX_SEGMEXEC
70019+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70020+ break;
70021+#endif
70022+
70023 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70024
70025 newflags = vma->vm_flags | VM_LOCKED;
70026@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70027 lock_limit >>= PAGE_SHIFT;
70028
70029 /* check against resource limits */
70030+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70031 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70032 error = do_mlock(start, len, 1);
70033 up_write(&current->mm->mmap_sem);
70034@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70035 static int do_mlockall(int flags)
70036 {
70037 struct vm_area_struct * vma, * prev = NULL;
70038- unsigned int def_flags = 0;
70039
70040 if (flags & MCL_FUTURE)
70041- def_flags = VM_LOCKED;
70042- current->mm->def_flags = def_flags;
70043+ current->mm->def_flags |= VM_LOCKED;
70044+ else
70045+ current->mm->def_flags &= ~VM_LOCKED;
70046 if (flags == MCL_FUTURE)
70047 goto out;
70048
70049 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70050 vm_flags_t newflags;
70051
70052+#ifdef CONFIG_PAX_SEGMEXEC
70053+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70054+ break;
70055+#endif
70056+
70057+ BUG_ON(vma->vm_end > TASK_SIZE);
70058 newflags = vma->vm_flags | VM_LOCKED;
70059 if (!(flags & MCL_CURRENT))
70060 newflags &= ~VM_LOCKED;
70061@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70062 lock_limit >>= PAGE_SHIFT;
70063
70064 ret = -ENOMEM;
70065+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70066 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70067 capable(CAP_IPC_LOCK))
70068 ret = do_mlockall(flags);
70069diff --git a/mm/mmap.c b/mm/mmap.c
70070index da15a79..2e3d9ff 100644
70071--- a/mm/mmap.c
70072+++ b/mm/mmap.c
70073@@ -46,6 +46,16 @@
70074 #define arch_rebalance_pgtables(addr, len) (addr)
70075 #endif
70076
70077+static inline void verify_mm_writelocked(struct mm_struct *mm)
70078+{
70079+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70080+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70081+ up_read(&mm->mmap_sem);
70082+ BUG();
70083+ }
70084+#endif
70085+}
70086+
70087 static void unmap_region(struct mm_struct *mm,
70088 struct vm_area_struct *vma, struct vm_area_struct *prev,
70089 unsigned long start, unsigned long end);
70090@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70091 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70092 *
70093 */
70094-pgprot_t protection_map[16] = {
70095+pgprot_t protection_map[16] __read_only = {
70096 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70097 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70098 };
70099
70100-pgprot_t vm_get_page_prot(unsigned long vm_flags)
70101+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70102 {
70103- return __pgprot(pgprot_val(protection_map[vm_flags &
70104+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70105 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70106 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70107+
70108+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70109+ if (!(__supported_pte_mask & _PAGE_NX) &&
70110+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70111+ (vm_flags & (VM_READ | VM_WRITE)))
70112+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70113+#endif
70114+
70115+ return prot;
70116 }
70117 EXPORT_SYMBOL(vm_get_page_prot);
70118
70119 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70120 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70121 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70122+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70123 /*
70124 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70125 * other variables. It can be updated by several CPUs frequently.
70126@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70127 struct vm_area_struct *next = vma->vm_next;
70128
70129 might_sleep();
70130+ BUG_ON(vma->vm_mirror);
70131 if (vma->vm_ops && vma->vm_ops->close)
70132 vma->vm_ops->close(vma);
70133 if (vma->vm_file) {
70134@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70135 * not page aligned -Ram Gupta
70136 */
70137 rlim = rlimit(RLIMIT_DATA);
70138+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70139 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70140 (mm->end_data - mm->start_data) > rlim)
70141 goto out;
70142@@ -689,6 +711,12 @@ static int
70143 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70144 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70145 {
70146+
70147+#ifdef CONFIG_PAX_SEGMEXEC
70148+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70149+ return 0;
70150+#endif
70151+
70152 if (is_mergeable_vma(vma, file, vm_flags) &&
70153 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70154 if (vma->vm_pgoff == vm_pgoff)
70155@@ -708,6 +736,12 @@ static int
70156 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70157 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70158 {
70159+
70160+#ifdef CONFIG_PAX_SEGMEXEC
70161+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70162+ return 0;
70163+#endif
70164+
70165 if (is_mergeable_vma(vma, file, vm_flags) &&
70166 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70167 pgoff_t vm_pglen;
70168@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70169 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70170 struct vm_area_struct *prev, unsigned long addr,
70171 unsigned long end, unsigned long vm_flags,
70172- struct anon_vma *anon_vma, struct file *file,
70173+ struct anon_vma *anon_vma, struct file *file,
70174 pgoff_t pgoff, struct mempolicy *policy)
70175 {
70176 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70177 struct vm_area_struct *area, *next;
70178 int err;
70179
70180+#ifdef CONFIG_PAX_SEGMEXEC
70181+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70182+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70183+
70184+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70185+#endif
70186+
70187 /*
70188 * We later require that vma->vm_flags == vm_flags,
70189 * so this tests vma->vm_flags & VM_SPECIAL, too.
70190@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70191 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70192 next = next->vm_next;
70193
70194+#ifdef CONFIG_PAX_SEGMEXEC
70195+ if (prev)
70196+ prev_m = pax_find_mirror_vma(prev);
70197+ if (area)
70198+ area_m = pax_find_mirror_vma(area);
70199+ if (next)
70200+ next_m = pax_find_mirror_vma(next);
70201+#endif
70202+
70203 /*
70204 * Can it merge with the predecessor?
70205 */
70206@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70207 /* cases 1, 6 */
70208 err = vma_adjust(prev, prev->vm_start,
70209 next->vm_end, prev->vm_pgoff, NULL);
70210- } else /* cases 2, 5, 7 */
70211+
70212+#ifdef CONFIG_PAX_SEGMEXEC
70213+ if (!err && prev_m)
70214+ err = vma_adjust(prev_m, prev_m->vm_start,
70215+ next_m->vm_end, prev_m->vm_pgoff, NULL);
70216+#endif
70217+
70218+ } else { /* cases 2, 5, 7 */
70219 err = vma_adjust(prev, prev->vm_start,
70220 end, prev->vm_pgoff, NULL);
70221+
70222+#ifdef CONFIG_PAX_SEGMEXEC
70223+ if (!err && prev_m)
70224+ err = vma_adjust(prev_m, prev_m->vm_start,
70225+ end_m, prev_m->vm_pgoff, NULL);
70226+#endif
70227+
70228+ }
70229 if (err)
70230 return NULL;
70231 khugepaged_enter_vma_merge(prev);
70232@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70233 mpol_equal(policy, vma_policy(next)) &&
70234 can_vma_merge_before(next, vm_flags,
70235 anon_vma, file, pgoff+pglen)) {
70236- if (prev && addr < prev->vm_end) /* case 4 */
70237+ if (prev && addr < prev->vm_end) { /* case 4 */
70238 err = vma_adjust(prev, prev->vm_start,
70239 addr, prev->vm_pgoff, NULL);
70240- else /* cases 3, 8 */
70241+
70242+#ifdef CONFIG_PAX_SEGMEXEC
70243+ if (!err && prev_m)
70244+ err = vma_adjust(prev_m, prev_m->vm_start,
70245+ addr_m, prev_m->vm_pgoff, NULL);
70246+#endif
70247+
70248+ } else { /* cases 3, 8 */
70249 err = vma_adjust(area, addr, next->vm_end,
70250 next->vm_pgoff - pglen, NULL);
70251+
70252+#ifdef CONFIG_PAX_SEGMEXEC
70253+ if (!err && area_m)
70254+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
70255+ next_m->vm_pgoff - pglen, NULL);
70256+#endif
70257+
70258+ }
70259 if (err)
70260 return NULL;
70261 khugepaged_enter_vma_merge(area);
70262@@ -921,14 +1001,11 @@ none:
70263 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70264 struct file *file, long pages)
70265 {
70266- const unsigned long stack_flags
70267- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70268-
70269 if (file) {
70270 mm->shared_vm += pages;
70271 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70272 mm->exec_vm += pages;
70273- } else if (flags & stack_flags)
70274+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70275 mm->stack_vm += pages;
70276 if (flags & (VM_RESERVED|VM_IO))
70277 mm->reserved_vm += pages;
70278@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70279 * (the exception is when the underlying filesystem is noexec
70280 * mounted, in which case we dont add PROT_EXEC.)
70281 */
70282- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70283+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70284 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70285 prot |= PROT_EXEC;
70286
70287@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70288 /* Obtain the address to map to. we verify (or select) it and ensure
70289 * that it represents a valid section of the address space.
70290 */
70291- addr = get_unmapped_area(file, addr, len, pgoff, flags);
70292+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70293 if (addr & ~PAGE_MASK)
70294 return addr;
70295
70296@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70297 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70298 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70299
70300+#ifdef CONFIG_PAX_MPROTECT
70301+ if (mm->pax_flags & MF_PAX_MPROTECT) {
70302+#ifndef CONFIG_PAX_MPROTECT_COMPAT
70303+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70304+ gr_log_rwxmmap(file);
70305+
70306+#ifdef CONFIG_PAX_EMUPLT
70307+ vm_flags &= ~VM_EXEC;
70308+#else
70309+ return -EPERM;
70310+#endif
70311+
70312+ }
70313+
70314+ if (!(vm_flags & VM_EXEC))
70315+ vm_flags &= ~VM_MAYEXEC;
70316+#else
70317+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70318+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70319+#endif
70320+ else
70321+ vm_flags &= ~VM_MAYWRITE;
70322+ }
70323+#endif
70324+
70325+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70326+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70327+ vm_flags &= ~VM_PAGEEXEC;
70328+#endif
70329+
70330 if (flags & MAP_LOCKED)
70331 if (!can_do_mlock())
70332 return -EPERM;
70333@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70334 locked += mm->locked_vm;
70335 lock_limit = rlimit(RLIMIT_MEMLOCK);
70336 lock_limit >>= PAGE_SHIFT;
70337+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70338 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70339 return -EAGAIN;
70340 }
70341@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70342 if (error)
70343 return error;
70344
70345+ if (!gr_acl_handle_mmap(file, prot))
70346+ return -EACCES;
70347+
70348 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70349 }
70350 EXPORT_SYMBOL(do_mmap_pgoff);
70351@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70352 vm_flags_t vm_flags = vma->vm_flags;
70353
70354 /* If it was private or non-writable, the write bit is already clear */
70355- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70356+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70357 return 0;
70358
70359 /* The backer wishes to know when pages are first written to? */
70360@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70361 unsigned long charged = 0;
70362 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70363
70364+#ifdef CONFIG_PAX_SEGMEXEC
70365+ struct vm_area_struct *vma_m = NULL;
70366+#endif
70367+
70368+ /*
70369+ * mm->mmap_sem is required to protect against another thread
70370+ * changing the mappings in case we sleep.
70371+ */
70372+ verify_mm_writelocked(mm);
70373+
70374 /* Clear old maps */
70375 error = -ENOMEM;
70376-munmap_back:
70377 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70378 if (vma && vma->vm_start < addr + len) {
70379 if (do_munmap(mm, addr, len))
70380 return -ENOMEM;
70381- goto munmap_back;
70382+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70383+ BUG_ON(vma && vma->vm_start < addr + len);
70384 }
70385
70386 /* Check against address space limit. */
70387@@ -1258,6 +1379,16 @@ munmap_back:
70388 goto unacct_error;
70389 }
70390
70391+#ifdef CONFIG_PAX_SEGMEXEC
70392+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70393+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70394+ if (!vma_m) {
70395+ error = -ENOMEM;
70396+ goto free_vma;
70397+ }
70398+ }
70399+#endif
70400+
70401 vma->vm_mm = mm;
70402 vma->vm_start = addr;
70403 vma->vm_end = addr + len;
70404@@ -1282,6 +1413,19 @@ munmap_back:
70405 error = file->f_op->mmap(file, vma);
70406 if (error)
70407 goto unmap_and_free_vma;
70408+
70409+#ifdef CONFIG_PAX_SEGMEXEC
70410+ if (vma_m && (vm_flags & VM_EXECUTABLE))
70411+ added_exe_file_vma(mm);
70412+#endif
70413+
70414+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70415+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70416+ vma->vm_flags |= VM_PAGEEXEC;
70417+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70418+ }
70419+#endif
70420+
70421 if (vm_flags & VM_EXECUTABLE)
70422 added_exe_file_vma(mm);
70423
70424@@ -1319,6 +1463,11 @@ munmap_back:
70425 vma_link(mm, vma, prev, rb_link, rb_parent);
70426 file = vma->vm_file;
70427
70428+#ifdef CONFIG_PAX_SEGMEXEC
70429+ if (vma_m)
70430+ BUG_ON(pax_mirror_vma(vma_m, vma));
70431+#endif
70432+
70433 /* Once vma denies write, undo our temporary denial count */
70434 if (correct_wcount)
70435 atomic_inc(&inode->i_writecount);
70436@@ -1327,6 +1476,7 @@ out:
70437
70438 mm->total_vm += len >> PAGE_SHIFT;
70439 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70440+ track_exec_limit(mm, addr, addr + len, vm_flags);
70441 if (vm_flags & VM_LOCKED) {
70442 if (!mlock_vma_pages_range(vma, addr, addr + len))
70443 mm->locked_vm += (len >> PAGE_SHIFT);
70444@@ -1344,6 +1494,12 @@ unmap_and_free_vma:
70445 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70446 charged = 0;
70447 free_vma:
70448+
70449+#ifdef CONFIG_PAX_SEGMEXEC
70450+ if (vma_m)
70451+ kmem_cache_free(vm_area_cachep, vma_m);
70452+#endif
70453+
70454 kmem_cache_free(vm_area_cachep, vma);
70455 unacct_error:
70456 if (charged)
70457@@ -1351,6 +1507,44 @@ unacct_error:
70458 return error;
70459 }
70460
70461+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70462+{
70463+ if (!vma) {
70464+#ifdef CONFIG_STACK_GROWSUP
70465+ if (addr > sysctl_heap_stack_gap)
70466+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70467+ else
70468+ vma = find_vma(current->mm, 0);
70469+ if (vma && (vma->vm_flags & VM_GROWSUP))
70470+ return false;
70471+#endif
70472+ return true;
70473+ }
70474+
70475+ if (addr + len > vma->vm_start)
70476+ return false;
70477+
70478+ if (vma->vm_flags & VM_GROWSDOWN)
70479+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70480+#ifdef CONFIG_STACK_GROWSUP
70481+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70482+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70483+#endif
70484+
70485+ return true;
70486+}
70487+
70488+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70489+{
70490+ if (vma->vm_start < len)
70491+ return -ENOMEM;
70492+ if (!(vma->vm_flags & VM_GROWSDOWN))
70493+ return vma->vm_start - len;
70494+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
70495+ return vma->vm_start - len - sysctl_heap_stack_gap;
70496+ return -ENOMEM;
70497+}
70498+
70499 /* Get an address range which is currently unmapped.
70500 * For shmat() with addr=0.
70501 *
70502@@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70503 if (flags & MAP_FIXED)
70504 return addr;
70505
70506+#ifdef CONFIG_PAX_RANDMMAP
70507+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70508+#endif
70509+
70510 if (addr) {
70511 addr = PAGE_ALIGN(addr);
70512- vma = find_vma(mm, addr);
70513- if (TASK_SIZE - len >= addr &&
70514- (!vma || addr + len <= vma->vm_start))
70515- return addr;
70516+ if (TASK_SIZE - len >= addr) {
70517+ vma = find_vma(mm, addr);
70518+ if (check_heap_stack_gap(vma, addr, len))
70519+ return addr;
70520+ }
70521 }
70522 if (len > mm->cached_hole_size) {
70523- start_addr = addr = mm->free_area_cache;
70524+ start_addr = addr = mm->free_area_cache;
70525 } else {
70526- start_addr = addr = TASK_UNMAPPED_BASE;
70527- mm->cached_hole_size = 0;
70528+ start_addr = addr = mm->mmap_base;
70529+ mm->cached_hole_size = 0;
70530 }
70531
70532 full_search:
70533@@ -1399,34 +1598,40 @@ full_search:
70534 * Start a new search - just in case we missed
70535 * some holes.
70536 */
70537- if (start_addr != TASK_UNMAPPED_BASE) {
70538- addr = TASK_UNMAPPED_BASE;
70539- start_addr = addr;
70540+ if (start_addr != mm->mmap_base) {
70541+ start_addr = addr = mm->mmap_base;
70542 mm->cached_hole_size = 0;
70543 goto full_search;
70544 }
70545 return -ENOMEM;
70546 }
70547- if (!vma || addr + len <= vma->vm_start) {
70548- /*
70549- * Remember the place where we stopped the search:
70550- */
70551- mm->free_area_cache = addr + len;
70552- return addr;
70553- }
70554+ if (check_heap_stack_gap(vma, addr, len))
70555+ break;
70556 if (addr + mm->cached_hole_size < vma->vm_start)
70557 mm->cached_hole_size = vma->vm_start - addr;
70558 addr = vma->vm_end;
70559 }
70560+
70561+ /*
70562+ * Remember the place where we stopped the search:
70563+ */
70564+ mm->free_area_cache = addr + len;
70565+ return addr;
70566 }
70567 #endif
70568
70569 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
70570 {
70571+
70572+#ifdef CONFIG_PAX_SEGMEXEC
70573+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70574+ return;
70575+#endif
70576+
70577 /*
70578 * Is this a new hole at the lowest possible address?
70579 */
70580- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
70581+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
70582 mm->free_area_cache = addr;
70583 mm->cached_hole_size = ~0UL;
70584 }
70585@@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70586 {
70587 struct vm_area_struct *vma;
70588 struct mm_struct *mm = current->mm;
70589- unsigned long addr = addr0;
70590+ unsigned long base = mm->mmap_base, addr = addr0;
70591
70592 /* requested length too big for entire address space */
70593 if (len > TASK_SIZE)
70594@@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70595 if (flags & MAP_FIXED)
70596 return addr;
70597
70598+#ifdef CONFIG_PAX_RANDMMAP
70599+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70600+#endif
70601+
70602 /* requesting a specific address */
70603 if (addr) {
70604 addr = PAGE_ALIGN(addr);
70605- vma = find_vma(mm, addr);
70606- if (TASK_SIZE - len >= addr &&
70607- (!vma || addr + len <= vma->vm_start))
70608- return addr;
70609+ if (TASK_SIZE - len >= addr) {
70610+ vma = find_vma(mm, addr);
70611+ if (check_heap_stack_gap(vma, addr, len))
70612+ return addr;
70613+ }
70614 }
70615
70616 /* check if free_area_cache is useful for us */
70617@@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70618 /* make sure it can fit in the remaining address space */
70619 if (addr > len) {
70620 vma = find_vma(mm, addr-len);
70621- if (!vma || addr <= vma->vm_start)
70622+ if (check_heap_stack_gap(vma, addr - len, len))
70623 /* remember the address as a hint for next time */
70624 return (mm->free_area_cache = addr-len);
70625 }
70626@@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70627 * return with success:
70628 */
70629 vma = find_vma(mm, addr);
70630- if (!vma || addr+len <= vma->vm_start)
70631+ if (check_heap_stack_gap(vma, addr, len))
70632 /* remember the address as a hint for next time */
70633 return (mm->free_area_cache = addr);
70634
70635@@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70636 mm->cached_hole_size = vma->vm_start - addr;
70637
70638 /* try just below the current vma->vm_start */
70639- addr = vma->vm_start-len;
70640- } while (len < vma->vm_start);
70641+ addr = skip_heap_stack_gap(vma, len);
70642+ } while (!IS_ERR_VALUE(addr));
70643
70644 bottomup:
70645 /*
70646@@ -1510,13 +1720,21 @@ bottomup:
70647 * can happen with large stack limits and large mmap()
70648 * allocations.
70649 */
70650+ mm->mmap_base = TASK_UNMAPPED_BASE;
70651+
70652+#ifdef CONFIG_PAX_RANDMMAP
70653+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70654+ mm->mmap_base += mm->delta_mmap;
70655+#endif
70656+
70657+ mm->free_area_cache = mm->mmap_base;
70658 mm->cached_hole_size = ~0UL;
70659- mm->free_area_cache = TASK_UNMAPPED_BASE;
70660 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
70661 /*
70662 * Restore the topdown base:
70663 */
70664- mm->free_area_cache = mm->mmap_base;
70665+ mm->mmap_base = base;
70666+ mm->free_area_cache = base;
70667 mm->cached_hole_size = ~0UL;
70668
70669 return addr;
70670@@ -1525,6 +1743,12 @@ bottomup:
70671
70672 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70673 {
70674+
70675+#ifdef CONFIG_PAX_SEGMEXEC
70676+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70677+ return;
70678+#endif
70679+
70680 /*
70681 * Is this a new hole at the highest possible address?
70682 */
70683@@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70684 mm->free_area_cache = addr;
70685
70686 /* dont allow allocations above current base */
70687- if (mm->free_area_cache > mm->mmap_base)
70688+ if (mm->free_area_cache > mm->mmap_base) {
70689 mm->free_area_cache = mm->mmap_base;
70690+ mm->cached_hole_size = ~0UL;
70691+ }
70692 }
70693
70694 unsigned long
70695@@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
70696 return vma;
70697 }
70698
70699+#ifdef CONFIG_PAX_SEGMEXEC
70700+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
70701+{
70702+ struct vm_area_struct *vma_m;
70703+
70704+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
70705+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
70706+ BUG_ON(vma->vm_mirror);
70707+ return NULL;
70708+ }
70709+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
70710+ vma_m = vma->vm_mirror;
70711+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
70712+ BUG_ON(vma->vm_file != vma_m->vm_file);
70713+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
70714+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
70715+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
70716+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
70717+ return vma_m;
70718+}
70719+#endif
70720+
70721 /*
70722 * Verify that the stack growth is acceptable and
70723 * update accounting. This is shared with both the
70724@@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70725 return -ENOMEM;
70726
70727 /* Stack limit test */
70728+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
70729 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
70730 return -ENOMEM;
70731
70732@@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70733 locked = mm->locked_vm + grow;
70734 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
70735 limit >>= PAGE_SHIFT;
70736+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70737 if (locked > limit && !capable(CAP_IPC_LOCK))
70738 return -ENOMEM;
70739 }
70740@@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70741 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
70742 * vma is the last one with address > vma->vm_end. Have to extend vma.
70743 */
70744+#ifndef CONFIG_IA64
70745+static
70746+#endif
70747 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70748 {
70749 int error;
70750+ bool locknext;
70751
70752 if (!(vma->vm_flags & VM_GROWSUP))
70753 return -EFAULT;
70754
70755+ /* Also guard against wrapping around to address 0. */
70756+ if (address < PAGE_ALIGN(address+1))
70757+ address = PAGE_ALIGN(address+1);
70758+ else
70759+ return -ENOMEM;
70760+
70761 /*
70762 * We must make sure the anon_vma is allocated
70763 * so that the anon_vma locking is not a noop.
70764 */
70765 if (unlikely(anon_vma_prepare(vma)))
70766 return -ENOMEM;
70767+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
70768+ if (locknext && anon_vma_prepare(vma->vm_next))
70769+ return -ENOMEM;
70770 vma_lock_anon_vma(vma);
70771+ if (locknext)
70772+ vma_lock_anon_vma(vma->vm_next);
70773
70774 /*
70775 * vma->vm_start/vm_end cannot change under us because the caller
70776 * is required to hold the mmap_sem in read mode. We need the
70777- * anon_vma lock to serialize against concurrent expand_stacks.
70778- * Also guard against wrapping around to address 0.
70779+ * anon_vma locks to serialize against concurrent expand_stacks
70780+ * and expand_upwards.
70781 */
70782- if (address < PAGE_ALIGN(address+4))
70783- address = PAGE_ALIGN(address+4);
70784- else {
70785- vma_unlock_anon_vma(vma);
70786- return -ENOMEM;
70787- }
70788 error = 0;
70789
70790 /* Somebody else might have raced and expanded it already */
70791- if (address > vma->vm_end) {
70792+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
70793+ error = -ENOMEM;
70794+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
70795 unsigned long size, grow;
70796
70797 size = address - vma->vm_start;
70798@@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70799 }
70800 }
70801 }
70802+ if (locknext)
70803+ vma_unlock_anon_vma(vma->vm_next);
70804 vma_unlock_anon_vma(vma);
70805 khugepaged_enter_vma_merge(vma);
70806 return error;
70807@@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
70808 unsigned long address)
70809 {
70810 int error;
70811+ bool lockprev = false;
70812+ struct vm_area_struct *prev;
70813
70814 /*
70815 * We must make sure the anon_vma is allocated
70816@@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
70817 if (error)
70818 return error;
70819
70820+ prev = vma->vm_prev;
70821+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
70822+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
70823+#endif
70824+ if (lockprev && anon_vma_prepare(prev))
70825+ return -ENOMEM;
70826+ if (lockprev)
70827+ vma_lock_anon_vma(prev);
70828+
70829 vma_lock_anon_vma(vma);
70830
70831 /*
70832@@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
70833 */
70834
70835 /* Somebody else might have raced and expanded it already */
70836- if (address < vma->vm_start) {
70837+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
70838+ error = -ENOMEM;
70839+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
70840 unsigned long size, grow;
70841
70842+#ifdef CONFIG_PAX_SEGMEXEC
70843+ struct vm_area_struct *vma_m;
70844+
70845+ vma_m = pax_find_mirror_vma(vma);
70846+#endif
70847+
70848 size = vma->vm_end - address;
70849 grow = (vma->vm_start - address) >> PAGE_SHIFT;
70850
70851@@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
70852 if (!error) {
70853 vma->vm_start = address;
70854 vma->vm_pgoff -= grow;
70855+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
70856+
70857+#ifdef CONFIG_PAX_SEGMEXEC
70858+ if (vma_m) {
70859+ vma_m->vm_start -= grow << PAGE_SHIFT;
70860+ vma_m->vm_pgoff -= grow;
70861+ }
70862+#endif
70863+
70864 perf_event_mmap(vma);
70865 }
70866 }
70867 }
70868 vma_unlock_anon_vma(vma);
70869+ if (lockprev)
70870+ vma_unlock_anon_vma(prev);
70871 khugepaged_enter_vma_merge(vma);
70872 return error;
70873 }
70874@@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
70875 do {
70876 long nrpages = vma_pages(vma);
70877
70878+#ifdef CONFIG_PAX_SEGMEXEC
70879+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
70880+ vma = remove_vma(vma);
70881+ continue;
70882+ }
70883+#endif
70884+
70885 mm->total_vm -= nrpages;
70886 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
70887 vma = remove_vma(vma);
70888@@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
70889 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
70890 vma->vm_prev = NULL;
70891 do {
70892+
70893+#ifdef CONFIG_PAX_SEGMEXEC
70894+ if (vma->vm_mirror) {
70895+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
70896+ vma->vm_mirror->vm_mirror = NULL;
70897+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
70898+ vma->vm_mirror = NULL;
70899+ }
70900+#endif
70901+
70902 rb_erase(&vma->vm_rb, &mm->mm_rb);
70903 mm->map_count--;
70904 tail_vma = vma;
70905@@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
70906 struct vm_area_struct *new;
70907 int err = -ENOMEM;
70908
70909+#ifdef CONFIG_PAX_SEGMEXEC
70910+ struct vm_area_struct *vma_m, *new_m = NULL;
70911+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
70912+#endif
70913+
70914 if (is_vm_hugetlb_page(vma) && (addr &
70915 ~(huge_page_mask(hstate_vma(vma)))))
70916 return -EINVAL;
70917
70918+#ifdef CONFIG_PAX_SEGMEXEC
70919+ vma_m = pax_find_mirror_vma(vma);
70920+#endif
70921+
70922 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70923 if (!new)
70924 goto out_err;
70925
70926+#ifdef CONFIG_PAX_SEGMEXEC
70927+ if (vma_m) {
70928+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70929+ if (!new_m) {
70930+ kmem_cache_free(vm_area_cachep, new);
70931+ goto out_err;
70932+ }
70933+ }
70934+#endif
70935+
70936 /* most fields are the same, copy all, and then fixup */
70937 *new = *vma;
70938
70939@@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
70940 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
70941 }
70942
70943+#ifdef CONFIG_PAX_SEGMEXEC
70944+ if (vma_m) {
70945+ *new_m = *vma_m;
70946+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
70947+ new_m->vm_mirror = new;
70948+ new->vm_mirror = new_m;
70949+
70950+ if (new_below)
70951+ new_m->vm_end = addr_m;
70952+ else {
70953+ new_m->vm_start = addr_m;
70954+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
70955+ }
70956+ }
70957+#endif
70958+
70959 pol = mpol_dup(vma_policy(vma));
70960 if (IS_ERR(pol)) {
70961 err = PTR_ERR(pol);
70962@@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
70963 else
70964 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
70965
70966+#ifdef CONFIG_PAX_SEGMEXEC
70967+ if (!err && vma_m) {
70968+ if (anon_vma_clone(new_m, vma_m))
70969+ goto out_free_mpol;
70970+
70971+ mpol_get(pol);
70972+ vma_set_policy(new_m, pol);
70973+
70974+ if (new_m->vm_file) {
70975+ get_file(new_m->vm_file);
70976+ if (vma_m->vm_flags & VM_EXECUTABLE)
70977+ added_exe_file_vma(mm);
70978+ }
70979+
70980+ if (new_m->vm_ops && new_m->vm_ops->open)
70981+ new_m->vm_ops->open(new_m);
70982+
70983+ if (new_below)
70984+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
70985+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
70986+ else
70987+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
70988+
70989+ if (err) {
70990+ if (new_m->vm_ops && new_m->vm_ops->close)
70991+ new_m->vm_ops->close(new_m);
70992+ if (new_m->vm_file) {
70993+ if (vma_m->vm_flags & VM_EXECUTABLE)
70994+ removed_exe_file_vma(mm);
70995+ fput(new_m->vm_file);
70996+ }
70997+ mpol_put(pol);
70998+ }
70999+ }
71000+#endif
71001+
71002 /* Success. */
71003 if (!err)
71004 return 0;
71005@@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71006 removed_exe_file_vma(mm);
71007 fput(new->vm_file);
71008 }
71009- unlink_anon_vmas(new);
71010 out_free_mpol:
71011 mpol_put(pol);
71012 out_free_vma:
71013+
71014+#ifdef CONFIG_PAX_SEGMEXEC
71015+ if (new_m) {
71016+ unlink_anon_vmas(new_m);
71017+ kmem_cache_free(vm_area_cachep, new_m);
71018+ }
71019+#endif
71020+
71021+ unlink_anon_vmas(new);
71022 kmem_cache_free(vm_area_cachep, new);
71023 out_err:
71024 return err;
71025@@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71026 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71027 unsigned long addr, int new_below)
71028 {
71029+
71030+#ifdef CONFIG_PAX_SEGMEXEC
71031+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71032+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71033+ if (mm->map_count >= sysctl_max_map_count-1)
71034+ return -ENOMEM;
71035+ } else
71036+#endif
71037+
71038 if (mm->map_count >= sysctl_max_map_count)
71039 return -ENOMEM;
71040
71041@@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71042 * work. This now handles partial unmappings.
71043 * Jeremy Fitzhardinge <jeremy@goop.org>
71044 */
71045+#ifdef CONFIG_PAX_SEGMEXEC
71046 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71047 {
71048+ int ret = __do_munmap(mm, start, len);
71049+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71050+ return ret;
71051+
71052+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71053+}
71054+
71055+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71056+#else
71057+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71058+#endif
71059+{
71060 unsigned long end;
71061 struct vm_area_struct *vma, *prev, *last;
71062
71063+ /*
71064+ * mm->mmap_sem is required to protect against another thread
71065+ * changing the mappings in case we sleep.
71066+ */
71067+ verify_mm_writelocked(mm);
71068+
71069 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71070 return -EINVAL;
71071
71072@@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71073 /* Fix up all other VM information */
71074 remove_vma_list(mm, vma);
71075
71076+ track_exec_limit(mm, start, end, 0UL);
71077+
71078 return 0;
71079 }
71080
71081@@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71082
71083 profile_munmap(addr);
71084
71085+#ifdef CONFIG_PAX_SEGMEXEC
71086+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71087+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71088+ return -EINVAL;
71089+#endif
71090+
71091 down_write(&mm->mmap_sem);
71092 ret = do_munmap(mm, addr, len);
71093 up_write(&mm->mmap_sem);
71094 return ret;
71095 }
71096
71097-static inline void verify_mm_writelocked(struct mm_struct *mm)
71098-{
71099-#ifdef CONFIG_DEBUG_VM
71100- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71101- WARN_ON(1);
71102- up_read(&mm->mmap_sem);
71103- }
71104-#endif
71105-}
71106-
71107 /*
71108 * this is really a simplified "do_mmap". it only handles
71109 * anonymous maps. eventually we may be able to do some
71110@@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71111 struct rb_node ** rb_link, * rb_parent;
71112 pgoff_t pgoff = addr >> PAGE_SHIFT;
71113 int error;
71114+ unsigned long charged;
71115
71116 len = PAGE_ALIGN(len);
71117 if (!len)
71118@@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71119
71120 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71121
71122+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71123+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71124+ flags &= ~VM_EXEC;
71125+
71126+#ifdef CONFIG_PAX_MPROTECT
71127+ if (mm->pax_flags & MF_PAX_MPROTECT)
71128+ flags &= ~VM_MAYEXEC;
71129+#endif
71130+
71131+ }
71132+#endif
71133+
71134 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71135 if (error & ~PAGE_MASK)
71136 return error;
71137
71138+ charged = len >> PAGE_SHIFT;
71139+
71140 /*
71141 * mlock MCL_FUTURE?
71142 */
71143 if (mm->def_flags & VM_LOCKED) {
71144 unsigned long locked, lock_limit;
71145- locked = len >> PAGE_SHIFT;
71146+ locked = charged;
71147 locked += mm->locked_vm;
71148 lock_limit = rlimit(RLIMIT_MEMLOCK);
71149 lock_limit >>= PAGE_SHIFT;
71150@@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71151 /*
71152 * Clear old maps. this also does some error checking for us
71153 */
71154- munmap_back:
71155 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71156 if (vma && vma->vm_start < addr + len) {
71157 if (do_munmap(mm, addr, len))
71158 return -ENOMEM;
71159- goto munmap_back;
71160+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71161+ BUG_ON(vma && vma->vm_start < addr + len);
71162 }
71163
71164 /* Check against address space limits *after* clearing old maps... */
71165- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71166+ if (!may_expand_vm(mm, charged))
71167 return -ENOMEM;
71168
71169 if (mm->map_count > sysctl_max_map_count)
71170 return -ENOMEM;
71171
71172- if (security_vm_enough_memory(len >> PAGE_SHIFT))
71173+ if (security_vm_enough_memory(charged))
71174 return -ENOMEM;
71175
71176 /* Can we just expand an old private anonymous mapping? */
71177@@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71178 */
71179 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71180 if (!vma) {
71181- vm_unacct_memory(len >> PAGE_SHIFT);
71182+ vm_unacct_memory(charged);
71183 return -ENOMEM;
71184 }
71185
71186@@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71187 vma_link(mm, vma, prev, rb_link, rb_parent);
71188 out:
71189 perf_event_mmap(vma);
71190- mm->total_vm += len >> PAGE_SHIFT;
71191+ mm->total_vm += charged;
71192 if (flags & VM_LOCKED) {
71193 if (!mlock_vma_pages_range(vma, addr, addr + len))
71194- mm->locked_vm += (len >> PAGE_SHIFT);
71195+ mm->locked_vm += charged;
71196 }
71197+ track_exec_limit(mm, addr, addr + len, flags);
71198 return addr;
71199 }
71200
71201@@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
71202 * Walk the list again, actually closing and freeing it,
71203 * with preemption enabled, without holding any MM locks.
71204 */
71205- while (vma)
71206+ while (vma) {
71207+ vma->vm_mirror = NULL;
71208 vma = remove_vma(vma);
71209+ }
71210
71211 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71212 }
71213@@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71214 struct vm_area_struct * __vma, * prev;
71215 struct rb_node ** rb_link, * rb_parent;
71216
71217+#ifdef CONFIG_PAX_SEGMEXEC
71218+ struct vm_area_struct *vma_m = NULL;
71219+#endif
71220+
71221+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71222+ return -EPERM;
71223+
71224 /*
71225 * The vm_pgoff of a purely anonymous vma should be irrelevant
71226 * until its first write fault, when page's anon_vma and index
71227@@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71228 if ((vma->vm_flags & VM_ACCOUNT) &&
71229 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71230 return -ENOMEM;
71231+
71232+#ifdef CONFIG_PAX_SEGMEXEC
71233+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71234+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71235+ if (!vma_m)
71236+ return -ENOMEM;
71237+ }
71238+#endif
71239+
71240 vma_link(mm, vma, prev, rb_link, rb_parent);
71241+
71242+#ifdef CONFIG_PAX_SEGMEXEC
71243+ if (vma_m)
71244+ BUG_ON(pax_mirror_vma(vma_m, vma));
71245+#endif
71246+
71247 return 0;
71248 }
71249
71250@@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71251 struct mempolicy *pol;
71252 bool faulted_in_anon_vma = true;
71253
71254+ BUG_ON(vma->vm_mirror);
71255+
71256 /*
71257 * If anonymous vma has not yet been faulted, update new pgoff
71258 * to match new location, to increase its chance of merging.
71259@@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71260 return NULL;
71261 }
71262
71263+#ifdef CONFIG_PAX_SEGMEXEC
71264+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71265+{
71266+ struct vm_area_struct *prev_m;
71267+ struct rb_node **rb_link_m, *rb_parent_m;
71268+ struct mempolicy *pol_m;
71269+
71270+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71271+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71272+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71273+ *vma_m = *vma;
71274+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71275+ if (anon_vma_clone(vma_m, vma))
71276+ return -ENOMEM;
71277+ pol_m = vma_policy(vma_m);
71278+ mpol_get(pol_m);
71279+ vma_set_policy(vma_m, pol_m);
71280+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71281+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71282+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71283+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71284+ if (vma_m->vm_file)
71285+ get_file(vma_m->vm_file);
71286+ if (vma_m->vm_ops && vma_m->vm_ops->open)
71287+ vma_m->vm_ops->open(vma_m);
71288+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71289+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71290+ vma_m->vm_mirror = vma;
71291+ vma->vm_mirror = vma_m;
71292+ return 0;
71293+}
71294+#endif
71295+
71296 /*
71297 * Return true if the calling process may expand its vm space by the passed
71298 * number of pages
71299@@ -2392,7 +2882,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71300 unsigned long lim;
71301
71302 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71303-
71304+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71305 if (cur + npages > lim)
71306 return 0;
71307 return 1;
71308@@ -2463,6 +2953,22 @@ int install_special_mapping(struct mm_struct *mm,
71309 vma->vm_start = addr;
71310 vma->vm_end = addr + len;
71311
71312+#ifdef CONFIG_PAX_MPROTECT
71313+ if (mm->pax_flags & MF_PAX_MPROTECT) {
71314+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71315+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71316+ return -EPERM;
71317+ if (!(vm_flags & VM_EXEC))
71318+ vm_flags &= ~VM_MAYEXEC;
71319+#else
71320+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71321+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71322+#endif
71323+ else
71324+ vm_flags &= ~VM_MAYWRITE;
71325+ }
71326+#endif
71327+
71328 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71329 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71330
71331diff --git a/mm/mprotect.c b/mm/mprotect.c
71332index f437d05..e3763f6 100644
71333--- a/mm/mprotect.c
71334+++ b/mm/mprotect.c
71335@@ -23,10 +23,16 @@
71336 #include <linux/mmu_notifier.h>
71337 #include <linux/migrate.h>
71338 #include <linux/perf_event.h>
71339+
71340+#ifdef CONFIG_PAX_MPROTECT
71341+#include <linux/elf.h>
71342+#endif
71343+
71344 #include <asm/uaccess.h>
71345 #include <asm/pgtable.h>
71346 #include <asm/cacheflush.h>
71347 #include <asm/tlbflush.h>
71348+#include <asm/mmu_context.h>
71349
71350 #ifndef pgprot_modify
71351 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71352@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
71353 flush_tlb_range(vma, start, end);
71354 }
71355
71356+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71357+/* called while holding the mmap semaphor for writing except stack expansion */
71358+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71359+{
71360+ unsigned long oldlimit, newlimit = 0UL;
71361+
71362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71363+ return;
71364+
71365+ spin_lock(&mm->page_table_lock);
71366+ oldlimit = mm->context.user_cs_limit;
71367+ if ((prot & VM_EXEC) && oldlimit < end)
71368+ /* USER_CS limit moved up */
71369+ newlimit = end;
71370+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71371+ /* USER_CS limit moved down */
71372+ newlimit = start;
71373+
71374+ if (newlimit) {
71375+ mm->context.user_cs_limit = newlimit;
71376+
71377+#ifdef CONFIG_SMP
71378+ wmb();
71379+ cpus_clear(mm->context.cpu_user_cs_mask);
71380+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71381+#endif
71382+
71383+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71384+ }
71385+ spin_unlock(&mm->page_table_lock);
71386+ if (newlimit == end) {
71387+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
71388+
71389+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
71390+ if (is_vm_hugetlb_page(vma))
71391+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71392+ else
71393+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71394+ }
71395+}
71396+#endif
71397+
71398 int
71399 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71400 unsigned long start, unsigned long end, unsigned long newflags)
71401@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71402 int error;
71403 int dirty_accountable = 0;
71404
71405+#ifdef CONFIG_PAX_SEGMEXEC
71406+ struct vm_area_struct *vma_m = NULL;
71407+ unsigned long start_m, end_m;
71408+
71409+ start_m = start + SEGMEXEC_TASK_SIZE;
71410+ end_m = end + SEGMEXEC_TASK_SIZE;
71411+#endif
71412+
71413 if (newflags == oldflags) {
71414 *pprev = vma;
71415 return 0;
71416 }
71417
71418+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71419+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71420+
71421+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71422+ return -ENOMEM;
71423+
71424+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71425+ return -ENOMEM;
71426+ }
71427+
71428 /*
71429 * If we make a private mapping writable we increase our commit;
71430 * but (without finer accounting) cannot reduce our commit if we
71431@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71432 }
71433 }
71434
71435+#ifdef CONFIG_PAX_SEGMEXEC
71436+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71437+ if (start != vma->vm_start) {
71438+ error = split_vma(mm, vma, start, 1);
71439+ if (error)
71440+ goto fail;
71441+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71442+ *pprev = (*pprev)->vm_next;
71443+ }
71444+
71445+ if (end != vma->vm_end) {
71446+ error = split_vma(mm, vma, end, 0);
71447+ if (error)
71448+ goto fail;
71449+ }
71450+
71451+ if (pax_find_mirror_vma(vma)) {
71452+ error = __do_munmap(mm, start_m, end_m - start_m);
71453+ if (error)
71454+ goto fail;
71455+ } else {
71456+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71457+ if (!vma_m) {
71458+ error = -ENOMEM;
71459+ goto fail;
71460+ }
71461+ vma->vm_flags = newflags;
71462+ error = pax_mirror_vma(vma_m, vma);
71463+ if (error) {
71464+ vma->vm_flags = oldflags;
71465+ goto fail;
71466+ }
71467+ }
71468+ }
71469+#endif
71470+
71471 /*
71472 * First try to merge with previous and/or next vma.
71473 */
71474@@ -204,9 +306,21 @@ success:
71475 * vm_flags and vm_page_prot are protected by the mmap_sem
71476 * held in write mode.
71477 */
71478+
71479+#ifdef CONFIG_PAX_SEGMEXEC
71480+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71481+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71482+#endif
71483+
71484 vma->vm_flags = newflags;
71485+
71486+#ifdef CONFIG_PAX_MPROTECT
71487+ if (mm->binfmt && mm->binfmt->handle_mprotect)
71488+ mm->binfmt->handle_mprotect(vma, newflags);
71489+#endif
71490+
71491 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71492- vm_get_page_prot(newflags));
71493+ vm_get_page_prot(vma->vm_flags));
71494
71495 if (vma_wants_writenotify(vma)) {
71496 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71497@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71498 end = start + len;
71499 if (end <= start)
71500 return -ENOMEM;
71501+
71502+#ifdef CONFIG_PAX_SEGMEXEC
71503+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71504+ if (end > SEGMEXEC_TASK_SIZE)
71505+ return -EINVAL;
71506+ } else
71507+#endif
71508+
71509+ if (end > TASK_SIZE)
71510+ return -EINVAL;
71511+
71512 if (!arch_validate_prot(prot))
71513 return -EINVAL;
71514
71515@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71516 /*
71517 * Does the application expect PROT_READ to imply PROT_EXEC:
71518 */
71519- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71520+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71521 prot |= PROT_EXEC;
71522
71523 vm_flags = calc_vm_prot_bits(prot);
71524@@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71525 if (start > vma->vm_start)
71526 prev = vma;
71527
71528+#ifdef CONFIG_PAX_MPROTECT
71529+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71530+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
71531+#endif
71532+
71533 for (nstart = start ; ; ) {
71534 unsigned long newflags;
71535
71536@@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71537
71538 /* newflags >> 4 shift VM_MAY% in place of VM_% */
71539 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
71540+ if (prot & (PROT_WRITE | PROT_EXEC))
71541+ gr_log_rwxmprotect(vma->vm_file);
71542+
71543+ error = -EACCES;
71544+ goto out;
71545+ }
71546+
71547+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
71548 error = -EACCES;
71549 goto out;
71550 }
71551@@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71552 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
71553 if (error)
71554 goto out;
71555+
71556+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
71557+
71558 nstart = tmp;
71559
71560 if (nstart < prev->vm_end)
71561diff --git a/mm/mremap.c b/mm/mremap.c
71562index 87bb839..c3bfadb 100644
71563--- a/mm/mremap.c
71564+++ b/mm/mremap.c
71565@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
71566 continue;
71567 pte = ptep_get_and_clear(mm, old_addr, old_pte);
71568 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
71569+
71570+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71571+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
71572+ pte = pte_exprotect(pte);
71573+#endif
71574+
71575 set_pte_at(mm, new_addr, new_pte, pte);
71576 }
71577
71578@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
71579 if (is_vm_hugetlb_page(vma))
71580 goto Einval;
71581
71582+#ifdef CONFIG_PAX_SEGMEXEC
71583+ if (pax_find_mirror_vma(vma))
71584+ goto Einval;
71585+#endif
71586+
71587 /* We can't remap across vm area boundaries */
71588 if (old_len > vma->vm_end - addr)
71589 goto Efault;
71590@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
71591 unsigned long ret = -EINVAL;
71592 unsigned long charged = 0;
71593 unsigned long map_flags;
71594+ unsigned long pax_task_size = TASK_SIZE;
71595
71596 if (new_addr & ~PAGE_MASK)
71597 goto out;
71598
71599- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
71600+#ifdef CONFIG_PAX_SEGMEXEC
71601+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
71602+ pax_task_size = SEGMEXEC_TASK_SIZE;
71603+#endif
71604+
71605+ pax_task_size -= PAGE_SIZE;
71606+
71607+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
71608 goto out;
71609
71610 /* Check if the location we're moving into overlaps the
71611 * old location at all, and fail if it does.
71612 */
71613- if ((new_addr <= addr) && (new_addr+new_len) > addr)
71614- goto out;
71615-
71616- if ((addr <= new_addr) && (addr+old_len) > new_addr)
71617+ if (addr + old_len > new_addr && new_addr + new_len > addr)
71618 goto out;
71619
71620 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71621@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
71622 struct vm_area_struct *vma;
71623 unsigned long ret = -EINVAL;
71624 unsigned long charged = 0;
71625+ unsigned long pax_task_size = TASK_SIZE;
71626
71627 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
71628 goto out;
71629@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
71630 if (!new_len)
71631 goto out;
71632
71633+#ifdef CONFIG_PAX_SEGMEXEC
71634+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
71635+ pax_task_size = SEGMEXEC_TASK_SIZE;
71636+#endif
71637+
71638+ pax_task_size -= PAGE_SIZE;
71639+
71640+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
71641+ old_len > pax_task_size || addr > pax_task_size-old_len)
71642+ goto out;
71643+
71644 if (flags & MREMAP_FIXED) {
71645 if (flags & MREMAP_MAYMOVE)
71646 ret = mremap_to(addr, old_len, new_addr, new_len);
71647@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
71648 addr + new_len);
71649 }
71650 ret = addr;
71651+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
71652 goto out;
71653 }
71654 }
71655@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
71656 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71657 if (ret)
71658 goto out;
71659+
71660+ map_flags = vma->vm_flags;
71661 ret = move_vma(vma, addr, old_len, new_len, new_addr);
71662+ if (!(ret & ~PAGE_MASK)) {
71663+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
71664+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
71665+ }
71666 }
71667 out:
71668 if (ret & ~PAGE_MASK)
71669diff --git a/mm/nommu.c b/mm/nommu.c
71670index f59e170..34e2a2b 100644
71671--- a/mm/nommu.c
71672+++ b/mm/nommu.c
71673@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
71674 int sysctl_overcommit_ratio = 50; /* default is 50% */
71675 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
71676 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
71677-int heap_stack_gap = 0;
71678
71679 atomic_long_t mmap_pages_allocated;
71680
71681@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
71682 EXPORT_SYMBOL(find_vma);
71683
71684 /*
71685- * find a VMA
71686- * - we don't extend stack VMAs under NOMMU conditions
71687- */
71688-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
71689-{
71690- return find_vma(mm, addr);
71691-}
71692-
71693-/*
71694 * expand a stack to a given address
71695 * - not supported under NOMMU conditions
71696 */
71697@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71698
71699 /* most fields are the same, copy all, and then fixup */
71700 *new = *vma;
71701+ INIT_LIST_HEAD(&new->anon_vma_chain);
71702 *region = *vma->vm_region;
71703 new->vm_region = region;
71704
71705diff --git a/mm/page_alloc.c b/mm/page_alloc.c
71706index a13ded1..b949d15 100644
71707--- a/mm/page_alloc.c
71708+++ b/mm/page_alloc.c
71709@@ -335,7 +335,7 @@ out:
71710 * This usage means that zero-order pages may not be compound.
71711 */
71712
71713-static void free_compound_page(struct page *page)
71714+void free_compound_page(struct page *page)
71715 {
71716 __free_pages_ok(page, compound_order(page));
71717 }
71718@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71719 int i;
71720 int bad = 0;
71721
71722+#ifdef CONFIG_PAX_MEMORY_SANITIZE
71723+ unsigned long index = 1UL << order;
71724+#endif
71725+
71726 trace_mm_page_free(page, order);
71727 kmemcheck_free_shadow(page, order);
71728
71729@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71730 debug_check_no_obj_freed(page_address(page),
71731 PAGE_SIZE << order);
71732 }
71733+
71734+#ifdef CONFIG_PAX_MEMORY_SANITIZE
71735+ for (; index; --index)
71736+ sanitize_highpage(page + index - 1);
71737+#endif
71738+
71739 arch_free_page(page, order);
71740 kernel_map_pages(page, 1 << order, 0);
71741
71742@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
71743 arch_alloc_page(page, order);
71744 kernel_map_pages(page, 1 << order, 1);
71745
71746+#ifndef CONFIG_PAX_MEMORY_SANITIZE
71747 if (gfp_flags & __GFP_ZERO)
71748 prep_zero_page(page, order, gfp_flags);
71749+#endif
71750
71751 if (order && (gfp_flags & __GFP_COMP))
71752 prep_compound_page(page, order);
71753@@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
71754 unsigned long pfn;
71755
71756 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
71757+#ifdef CONFIG_X86_32
71758+ /* boot failures in VMware 8 on 32bit vanilla since
71759+ this change */
71760+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
71761+#else
71762 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
71763+#endif
71764 return 1;
71765 }
71766 return 0;
71767diff --git a/mm/percpu.c b/mm/percpu.c
71768index f47af91..7eeef99 100644
71769--- a/mm/percpu.c
71770+++ b/mm/percpu.c
71771@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
71772 static unsigned int pcpu_high_unit_cpu __read_mostly;
71773
71774 /* the address of the first chunk which starts with the kernel static area */
71775-void *pcpu_base_addr __read_mostly;
71776+void *pcpu_base_addr __read_only;
71777 EXPORT_SYMBOL_GPL(pcpu_base_addr);
71778
71779 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
71780diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
71781index c20ff48..137702a 100644
71782--- a/mm/process_vm_access.c
71783+++ b/mm/process_vm_access.c
71784@@ -13,6 +13,7 @@
71785 #include <linux/uio.h>
71786 #include <linux/sched.h>
71787 #include <linux/highmem.h>
71788+#include <linux/security.h>
71789 #include <linux/ptrace.h>
71790 #include <linux/slab.h>
71791 #include <linux/syscalls.h>
71792@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71793 size_t iov_l_curr_offset = 0;
71794 ssize_t iov_len;
71795
71796+ return -ENOSYS; // PaX: until properly audited
71797+
71798 /*
71799 * Work out how many pages of struct pages we're going to need
71800 * when eventually calling get_user_pages
71801 */
71802 for (i = 0; i < riovcnt; i++) {
71803 iov_len = rvec[i].iov_len;
71804- if (iov_len > 0) {
71805- nr_pages_iov = ((unsigned long)rvec[i].iov_base
71806- + iov_len)
71807- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
71808- / PAGE_SIZE + 1;
71809- nr_pages = max(nr_pages, nr_pages_iov);
71810- }
71811+ if (iov_len <= 0)
71812+ continue;
71813+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
71814+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
71815+ nr_pages = max(nr_pages, nr_pages_iov);
71816 }
71817
71818 if (nr_pages == 0)
71819@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71820 goto free_proc_pages;
71821 }
71822
71823+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
71824+ rc = -EPERM;
71825+ goto put_task_struct;
71826+ }
71827+
71828 mm = mm_access(task, PTRACE_MODE_ATTACH);
71829 if (!mm || IS_ERR(mm)) {
71830 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
71831diff --git a/mm/rmap.c b/mm/rmap.c
71832index c8454e0..b04f3a2 100644
71833--- a/mm/rmap.c
71834+++ b/mm/rmap.c
71835@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71836 struct anon_vma *anon_vma = vma->anon_vma;
71837 struct anon_vma_chain *avc;
71838
71839+#ifdef CONFIG_PAX_SEGMEXEC
71840+ struct anon_vma_chain *avc_m = NULL;
71841+#endif
71842+
71843 might_sleep();
71844 if (unlikely(!anon_vma)) {
71845 struct mm_struct *mm = vma->vm_mm;
71846@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71847 if (!avc)
71848 goto out_enomem;
71849
71850+#ifdef CONFIG_PAX_SEGMEXEC
71851+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
71852+ if (!avc_m)
71853+ goto out_enomem_free_avc;
71854+#endif
71855+
71856 anon_vma = find_mergeable_anon_vma(vma);
71857 allocated = NULL;
71858 if (!anon_vma) {
71859@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71860 /* page_table_lock to protect against threads */
71861 spin_lock(&mm->page_table_lock);
71862 if (likely(!vma->anon_vma)) {
71863+
71864+#ifdef CONFIG_PAX_SEGMEXEC
71865+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
71866+
71867+ if (vma_m) {
71868+ BUG_ON(vma_m->anon_vma);
71869+ vma_m->anon_vma = anon_vma;
71870+ avc_m->anon_vma = anon_vma;
71871+ avc_m->vma = vma;
71872+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
71873+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
71874+ avc_m = NULL;
71875+ }
71876+#endif
71877+
71878 vma->anon_vma = anon_vma;
71879 avc->anon_vma = anon_vma;
71880 avc->vma = vma;
71881@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71882
71883 if (unlikely(allocated))
71884 put_anon_vma(allocated);
71885+
71886+#ifdef CONFIG_PAX_SEGMEXEC
71887+ if (unlikely(avc_m))
71888+ anon_vma_chain_free(avc_m);
71889+#endif
71890+
71891 if (unlikely(avc))
71892 anon_vma_chain_free(avc);
71893 }
71894 return 0;
71895
71896 out_enomem_free_avc:
71897+
71898+#ifdef CONFIG_PAX_SEGMEXEC
71899+ if (avc_m)
71900+ anon_vma_chain_free(avc_m);
71901+#endif
71902+
71903 anon_vma_chain_free(avc);
71904 out_enomem:
71905 return -ENOMEM;
71906@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
71907 * Attach the anon_vmas from src to dst.
71908 * Returns 0 on success, -ENOMEM on failure.
71909 */
71910-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
71911+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
71912 {
71913 struct anon_vma_chain *avc, *pavc;
71914 struct anon_vma *root = NULL;
71915@@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
71916 * the corresponding VMA in the parent process is attached to.
71917 * Returns 0 on success, non-zero on failure.
71918 */
71919-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
71920+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
71921 {
71922 struct anon_vma_chain *avc;
71923 struct anon_vma *anon_vma;
71924diff --git a/mm/shmem.c b/mm/shmem.c
71925index 269d049..a9d2b50 100644
71926--- a/mm/shmem.c
71927+++ b/mm/shmem.c
71928@@ -31,7 +31,7 @@
71929 #include <linux/export.h>
71930 #include <linux/swap.h>
71931
71932-static struct vfsmount *shm_mnt;
71933+struct vfsmount *shm_mnt;
71934
71935 #ifdef CONFIG_SHMEM
71936 /*
71937@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
71938 #define BOGO_DIRENT_SIZE 20
71939
71940 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
71941-#define SHORT_SYMLINK_LEN 128
71942+#define SHORT_SYMLINK_LEN 64
71943
71944 struct shmem_xattr {
71945 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
71946@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
71947 int err = -ENOMEM;
71948
71949 /* Round up to L1_CACHE_BYTES to resist false sharing */
71950- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
71951- L1_CACHE_BYTES), GFP_KERNEL);
71952+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
71953 if (!sbinfo)
71954 return -ENOMEM;
71955
71956diff --git a/mm/slab.c b/mm/slab.c
71957index f0bd785..348b96a 100644
71958--- a/mm/slab.c
71959+++ b/mm/slab.c
71960@@ -153,7 +153,7 @@
71961
71962 /* Legal flag mask for kmem_cache_create(). */
71963 #if DEBUG
71964-# define CREATE_MASK (SLAB_RED_ZONE | \
71965+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
71966 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
71967 SLAB_CACHE_DMA | \
71968 SLAB_STORE_USER | \
71969@@ -161,7 +161,7 @@
71970 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
71971 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
71972 #else
71973-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
71974+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
71975 SLAB_CACHE_DMA | \
71976 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
71977 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
71978@@ -290,7 +290,7 @@ struct kmem_list3 {
71979 * Need this for bootstrapping a per node allocator.
71980 */
71981 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
71982-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
71983+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
71984 #define CACHE_CACHE 0
71985 #define SIZE_AC MAX_NUMNODES
71986 #define SIZE_L3 (2 * MAX_NUMNODES)
71987@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
71988 if ((x)->max_freeable < i) \
71989 (x)->max_freeable = i; \
71990 } while (0)
71991-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
71992-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
71993-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
71994-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
71995+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
71996+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
71997+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
71998+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
71999 #else
72000 #define STATS_INC_ACTIVE(x) do { } while (0)
72001 #define STATS_DEC_ACTIVE(x) do { } while (0)
72002@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72003 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72004 */
72005 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72006- const struct slab *slab, void *obj)
72007+ const struct slab *slab, const void *obj)
72008 {
72009 u32 offset = (obj - slab->s_mem);
72010 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72011@@ -568,7 +568,7 @@ struct cache_names {
72012 static struct cache_names __initdata cache_names[] = {
72013 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72014 #include <linux/kmalloc_sizes.h>
72015- {NULL,}
72016+ {NULL}
72017 #undef CACHE
72018 };
72019
72020@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72021 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72022 sizes[INDEX_AC].cs_size,
72023 ARCH_KMALLOC_MINALIGN,
72024- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72025+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72026 NULL);
72027
72028 if (INDEX_AC != INDEX_L3) {
72029@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72030 kmem_cache_create(names[INDEX_L3].name,
72031 sizes[INDEX_L3].cs_size,
72032 ARCH_KMALLOC_MINALIGN,
72033- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72034+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72035 NULL);
72036 }
72037
72038@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72039 sizes->cs_cachep = kmem_cache_create(names->name,
72040 sizes->cs_size,
72041 ARCH_KMALLOC_MINALIGN,
72042- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72043+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72044 NULL);
72045 }
72046 #ifdef CONFIG_ZONE_DMA
72047@@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
72048 }
72049 /* cpu stats */
72050 {
72051- unsigned long allochit = atomic_read(&cachep->allochit);
72052- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72053- unsigned long freehit = atomic_read(&cachep->freehit);
72054- unsigned long freemiss = atomic_read(&cachep->freemiss);
72055+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72056+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72057+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72058+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72059
72060 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72061 allochit, allocmiss, freehit, freemiss);
72062@@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
72063 {
72064 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72065 #ifdef CONFIG_DEBUG_SLAB_LEAK
72066- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72067+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72068 #endif
72069 return 0;
72070 }
72071 module_init(slab_proc_init);
72072 #endif
72073
72074+void check_object_size(const void *ptr, unsigned long n, bool to)
72075+{
72076+
72077+#ifdef CONFIG_PAX_USERCOPY
72078+ struct page *page;
72079+ struct kmem_cache *cachep = NULL;
72080+ struct slab *slabp;
72081+ unsigned int objnr;
72082+ unsigned long offset;
72083+ const char *type;
72084+
72085+ if (!n)
72086+ return;
72087+
72088+ type = "<null>";
72089+ if (ZERO_OR_NULL_PTR(ptr))
72090+ goto report;
72091+
72092+ if (!virt_addr_valid(ptr))
72093+ return;
72094+
72095+ page = virt_to_head_page(ptr);
72096+
72097+ type = "<process stack>";
72098+ if (!PageSlab(page)) {
72099+ if (object_is_on_stack(ptr, n) == -1)
72100+ goto report;
72101+ return;
72102+ }
72103+
72104+ cachep = page_get_cache(page);
72105+ type = cachep->name;
72106+ if (!(cachep->flags & SLAB_USERCOPY))
72107+ goto report;
72108+
72109+ slabp = page_get_slab(page);
72110+ objnr = obj_to_index(cachep, slabp, ptr);
72111+ BUG_ON(objnr >= cachep->num);
72112+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72113+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72114+ return;
72115+
72116+report:
72117+ pax_report_usercopy(ptr, n, to, type);
72118+#endif
72119+
72120+}
72121+EXPORT_SYMBOL(check_object_size);
72122+
72123 /**
72124 * ksize - get the actual amount of memory allocated for a given object
72125 * @objp: Pointer to the object
72126diff --git a/mm/slob.c b/mm/slob.c
72127index 8105be4..e045f96 100644
72128--- a/mm/slob.c
72129+++ b/mm/slob.c
72130@@ -29,7 +29,7 @@
72131 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72132 * alloc_pages() directly, allocating compound pages so the page order
72133 * does not have to be separately tracked, and also stores the exact
72134- * allocation size in page->private so that it can be used to accurately
72135+ * allocation size in slob_page->size so that it can be used to accurately
72136 * provide ksize(). These objects are detected in kfree() because slob_page()
72137 * is false for them.
72138 *
72139@@ -58,6 +58,7 @@
72140 */
72141
72142 #include <linux/kernel.h>
72143+#include <linux/sched.h>
72144 #include <linux/slab.h>
72145 #include <linux/mm.h>
72146 #include <linux/swap.h> /* struct reclaim_state */
72147@@ -102,7 +103,8 @@ struct slob_page {
72148 unsigned long flags; /* mandatory */
72149 atomic_t _count; /* mandatory */
72150 slobidx_t units; /* free units left in page */
72151- unsigned long pad[2];
72152+ unsigned long pad[1];
72153+ unsigned long size; /* size when >=PAGE_SIZE */
72154 slob_t *free; /* first free slob_t in page */
72155 struct list_head list; /* linked list of free pages */
72156 };
72157@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72158 */
72159 static inline int is_slob_page(struct slob_page *sp)
72160 {
72161- return PageSlab((struct page *)sp);
72162+ return PageSlab((struct page *)sp) && !sp->size;
72163 }
72164
72165 static inline void set_slob_page(struct slob_page *sp)
72166@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72167
72168 static inline struct slob_page *slob_page(const void *addr)
72169 {
72170- return (struct slob_page *)virt_to_page(addr);
72171+ return (struct slob_page *)virt_to_head_page(addr);
72172 }
72173
72174 /*
72175@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72176 /*
72177 * Return the size of a slob block.
72178 */
72179-static slobidx_t slob_units(slob_t *s)
72180+static slobidx_t slob_units(const slob_t *s)
72181 {
72182 if (s->units > 0)
72183 return s->units;
72184@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72185 /*
72186 * Return the next free slob block pointer after this one.
72187 */
72188-static slob_t *slob_next(slob_t *s)
72189+static slob_t *slob_next(const slob_t *s)
72190 {
72191 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72192 slobidx_t next;
72193@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72194 /*
72195 * Returns true if s is the last free block in its page.
72196 */
72197-static int slob_last(slob_t *s)
72198+static int slob_last(const slob_t *s)
72199 {
72200 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72201 }
72202@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72203 if (!page)
72204 return NULL;
72205
72206+ set_slob_page(page);
72207 return page_address(page);
72208 }
72209
72210@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72211 if (!b)
72212 return NULL;
72213 sp = slob_page(b);
72214- set_slob_page(sp);
72215
72216 spin_lock_irqsave(&slob_lock, flags);
72217 sp->units = SLOB_UNITS(PAGE_SIZE);
72218 sp->free = b;
72219+ sp->size = 0;
72220 INIT_LIST_HEAD(&sp->list);
72221 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72222 set_slob_page_free(sp, slob_list);
72223@@ -476,10 +479,9 @@ out:
72224 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72225 */
72226
72227-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72228+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72229 {
72230- unsigned int *m;
72231- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72232+ slob_t *m;
72233 void *ret;
72234
72235 gfp &= gfp_allowed_mask;
72236@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72237
72238 if (!m)
72239 return NULL;
72240- *m = size;
72241+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72242+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72243+ m[0].units = size;
72244+ m[1].units = align;
72245 ret = (void *)m + align;
72246
72247 trace_kmalloc_node(_RET_IP_, ret,
72248@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72249 gfp |= __GFP_COMP;
72250 ret = slob_new_pages(gfp, order, node);
72251 if (ret) {
72252- struct page *page;
72253- page = virt_to_page(ret);
72254- page->private = size;
72255+ struct slob_page *sp;
72256+ sp = slob_page(ret);
72257+ sp->size = size;
72258 }
72259
72260 trace_kmalloc_node(_RET_IP_, ret,
72261 size, PAGE_SIZE << order, gfp, node);
72262 }
72263
72264- kmemleak_alloc(ret, size, 1, gfp);
72265+ return ret;
72266+}
72267+
72268+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72269+{
72270+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72271+ void *ret = __kmalloc_node_align(size, gfp, node, align);
72272+
72273+ if (!ZERO_OR_NULL_PTR(ret))
72274+ kmemleak_alloc(ret, size, 1, gfp);
72275 return ret;
72276 }
72277 EXPORT_SYMBOL(__kmalloc_node);
72278@@ -533,13 +547,92 @@ void kfree(const void *block)
72279 sp = slob_page(block);
72280 if (is_slob_page(sp)) {
72281 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72282- unsigned int *m = (unsigned int *)(block - align);
72283- slob_free(m, *m + align);
72284- } else
72285+ slob_t *m = (slob_t *)(block - align);
72286+ slob_free(m, m[0].units + align);
72287+ } else {
72288+ clear_slob_page(sp);
72289+ free_slob_page(sp);
72290+ sp->size = 0;
72291 put_page(&sp->page);
72292+ }
72293 }
72294 EXPORT_SYMBOL(kfree);
72295
72296+void check_object_size(const void *ptr, unsigned long n, bool to)
72297+{
72298+
72299+#ifdef CONFIG_PAX_USERCOPY
72300+ struct slob_page *sp;
72301+ const slob_t *free;
72302+ const void *base;
72303+ unsigned long flags;
72304+ const char *type;
72305+
72306+ if (!n)
72307+ return;
72308+
72309+ type = "<null>";
72310+ if (ZERO_OR_NULL_PTR(ptr))
72311+ goto report;
72312+
72313+ if (!virt_addr_valid(ptr))
72314+ return;
72315+
72316+ type = "<process stack>";
72317+ sp = slob_page(ptr);
72318+ if (!PageSlab((struct page *)sp)) {
72319+ if (object_is_on_stack(ptr, n) == -1)
72320+ goto report;
72321+ return;
72322+ }
72323+
72324+ type = "<slob>";
72325+ if (sp->size) {
72326+ base = page_address(&sp->page);
72327+ if (base <= ptr && n <= sp->size - (ptr - base))
72328+ return;
72329+ goto report;
72330+ }
72331+
72332+ /* some tricky double walking to find the chunk */
72333+ spin_lock_irqsave(&slob_lock, flags);
72334+ base = (void *)((unsigned long)ptr & PAGE_MASK);
72335+ free = sp->free;
72336+
72337+ while (!slob_last(free) && (void *)free <= ptr) {
72338+ base = free + slob_units(free);
72339+ free = slob_next(free);
72340+ }
72341+
72342+ while (base < (void *)free) {
72343+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72344+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
72345+ int offset;
72346+
72347+ if (ptr < base + align)
72348+ break;
72349+
72350+ offset = ptr - base - align;
72351+ if (offset >= m) {
72352+ base += size;
72353+ continue;
72354+ }
72355+
72356+ if (n > m - offset)
72357+ break;
72358+
72359+ spin_unlock_irqrestore(&slob_lock, flags);
72360+ return;
72361+ }
72362+
72363+ spin_unlock_irqrestore(&slob_lock, flags);
72364+report:
72365+ pax_report_usercopy(ptr, n, to, type);
72366+#endif
72367+
72368+}
72369+EXPORT_SYMBOL(check_object_size);
72370+
72371 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72372 size_t ksize(const void *block)
72373 {
72374@@ -552,10 +645,10 @@ size_t ksize(const void *block)
72375 sp = slob_page(block);
72376 if (is_slob_page(sp)) {
72377 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72378- unsigned int *m = (unsigned int *)(block - align);
72379- return SLOB_UNITS(*m) * SLOB_UNIT;
72380+ slob_t *m = (slob_t *)(block - align);
72381+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72382 } else
72383- return sp->page.private;
72384+ return sp->size;
72385 }
72386 EXPORT_SYMBOL(ksize);
72387
72388@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72389 {
72390 struct kmem_cache *c;
72391
72392+#ifdef CONFIG_PAX_USERCOPY
72393+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
72394+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72395+#else
72396 c = slob_alloc(sizeof(struct kmem_cache),
72397 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72398+#endif
72399
72400 if (c) {
72401 c->name = name;
72402@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72403
72404 lockdep_trace_alloc(flags);
72405
72406+#ifdef CONFIG_PAX_USERCOPY
72407+ b = __kmalloc_node_align(c->size, flags, node, c->align);
72408+#else
72409 if (c->size < PAGE_SIZE) {
72410 b = slob_alloc(c->size, flags, c->align, node);
72411 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72412 SLOB_UNITS(c->size) * SLOB_UNIT,
72413 flags, node);
72414 } else {
72415+ struct slob_page *sp;
72416+
72417 b = slob_new_pages(flags, get_order(c->size), node);
72418+ sp = slob_page(b);
72419+ sp->size = c->size;
72420 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72421 PAGE_SIZE << get_order(c->size),
72422 flags, node);
72423 }
72424+#endif
72425
72426 if (c->ctor)
72427 c->ctor(b);
72428@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72429
72430 static void __kmem_cache_free(void *b, int size)
72431 {
72432- if (size < PAGE_SIZE)
72433+ struct slob_page *sp = slob_page(b);
72434+
72435+ if (is_slob_page(sp))
72436 slob_free(b, size);
72437- else
72438+ else {
72439+ clear_slob_page(sp);
72440+ free_slob_page(sp);
72441+ sp->size = 0;
72442 slob_free_pages(b, get_order(size));
72443+ }
72444 }
72445
72446 static void kmem_rcu_free(struct rcu_head *head)
72447@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72448
72449 void kmem_cache_free(struct kmem_cache *c, void *b)
72450 {
72451+ int size = c->size;
72452+
72453+#ifdef CONFIG_PAX_USERCOPY
72454+ if (size + c->align < PAGE_SIZE) {
72455+ size += c->align;
72456+ b -= c->align;
72457+ }
72458+#endif
72459+
72460 kmemleak_free_recursive(b, c->flags);
72461 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72462 struct slob_rcu *slob_rcu;
72463- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72464- slob_rcu->size = c->size;
72465+ slob_rcu = b + (size - sizeof(struct slob_rcu));
72466+ slob_rcu->size = size;
72467 call_rcu(&slob_rcu->head, kmem_rcu_free);
72468 } else {
72469- __kmem_cache_free(b, c->size);
72470+ __kmem_cache_free(b, size);
72471 }
72472
72473+#ifdef CONFIG_PAX_USERCOPY
72474+ trace_kfree(_RET_IP_, b);
72475+#else
72476 trace_kmem_cache_free(_RET_IP_, b);
72477+#endif
72478+
72479 }
72480 EXPORT_SYMBOL(kmem_cache_free);
72481
72482diff --git a/mm/slub.c b/mm/slub.c
72483index 4907563..e3d7905 100644
72484--- a/mm/slub.c
72485+++ b/mm/slub.c
72486@@ -208,7 +208,7 @@ struct track {
72487
72488 enum track_item { TRACK_ALLOC, TRACK_FREE };
72489
72490-#ifdef CONFIG_SYSFS
72491+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72492 static int sysfs_slab_add(struct kmem_cache *);
72493 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72494 static void sysfs_slab_remove(struct kmem_cache *);
72495@@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
72496 if (!t->addr)
72497 return;
72498
72499- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72500+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72501 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72502 #ifdef CONFIG_STACKTRACE
72503 {
72504@@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72505
72506 page = virt_to_head_page(x);
72507
72508+ BUG_ON(!PageSlab(page));
72509+
72510 slab_free(s, page, x, _RET_IP_);
72511
72512 trace_kmem_cache_free(_RET_IP_, x);
72513@@ -2604,7 +2606,7 @@ static int slub_min_objects;
72514 * Merge control. If this is set then no merging of slab caches will occur.
72515 * (Could be removed. This was introduced to pacify the merge skeptics.)
72516 */
72517-static int slub_nomerge;
72518+static int slub_nomerge = 1;
72519
72520 /*
72521 * Calculate the order of allocation given an slab object size.
72522@@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72523 else
72524 s->cpu_partial = 30;
72525
72526- s->refcount = 1;
72527+ atomic_set(&s->refcount, 1);
72528 #ifdef CONFIG_NUMA
72529 s->remote_node_defrag_ratio = 1000;
72530 #endif
72531@@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72532 void kmem_cache_destroy(struct kmem_cache *s)
72533 {
72534 down_write(&slub_lock);
72535- s->refcount--;
72536- if (!s->refcount) {
72537+ if (atomic_dec_and_test(&s->refcount)) {
72538 list_del(&s->list);
72539 up_write(&slub_lock);
72540 if (kmem_cache_close(s)) {
72541@@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
72542 EXPORT_SYMBOL(__kmalloc_node);
72543 #endif
72544
72545+void check_object_size(const void *ptr, unsigned long n, bool to)
72546+{
72547+
72548+#ifdef CONFIG_PAX_USERCOPY
72549+ struct page *page;
72550+ struct kmem_cache *s = NULL;
72551+ unsigned long offset;
72552+ const char *type;
72553+
72554+ if (!n)
72555+ return;
72556+
72557+ type = "<null>";
72558+ if (ZERO_OR_NULL_PTR(ptr))
72559+ goto report;
72560+
72561+ if (!virt_addr_valid(ptr))
72562+ return;
72563+
72564+ page = virt_to_head_page(ptr);
72565+
72566+ type = "<process stack>";
72567+ if (!PageSlab(page)) {
72568+ if (object_is_on_stack(ptr, n) == -1)
72569+ goto report;
72570+ return;
72571+ }
72572+
72573+ s = page->slab;
72574+ type = s->name;
72575+ if (!(s->flags & SLAB_USERCOPY))
72576+ goto report;
72577+
72578+ offset = (ptr - page_address(page)) % s->size;
72579+ if (offset <= s->objsize && n <= s->objsize - offset)
72580+ return;
72581+
72582+report:
72583+ pax_report_usercopy(ptr, n, to, type);
72584+#endif
72585+
72586+}
72587+EXPORT_SYMBOL(check_object_size);
72588+
72589 size_t ksize(const void *object)
72590 {
72591 struct page *page;
72592@@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
72593 int node;
72594
72595 list_add(&s->list, &slab_caches);
72596- s->refcount = -1;
72597+ atomic_set(&s->refcount, -1);
72598
72599 for_each_node_state(node, N_NORMAL_MEMORY) {
72600 struct kmem_cache_node *n = get_node(s, node);
72601@@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
72602
72603 /* Caches that are not of the two-to-the-power-of size */
72604 if (KMALLOC_MIN_SIZE <= 32) {
72605- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
72606+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
72607 caches++;
72608 }
72609
72610 if (KMALLOC_MIN_SIZE <= 64) {
72611- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
72612+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
72613 caches++;
72614 }
72615
72616 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
72617- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
72618+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
72619 caches++;
72620 }
72621
72622@@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
72623 /*
72624 * We may have set a slab to be unmergeable during bootstrap.
72625 */
72626- if (s->refcount < 0)
72627+ if (atomic_read(&s->refcount) < 0)
72628 return 1;
72629
72630 return 0;
72631@@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72632 down_write(&slub_lock);
72633 s = find_mergeable(size, align, flags, name, ctor);
72634 if (s) {
72635- s->refcount++;
72636+ atomic_inc(&s->refcount);
72637 /*
72638 * Adjust the object sizes so that we clear
72639 * the complete object on kzalloc.
72640@@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72641 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
72642
72643 if (sysfs_slab_alias(s, name)) {
72644- s->refcount--;
72645+ atomic_dec(&s->refcount);
72646 goto err;
72647 }
72648 up_write(&slub_lock);
72649@@ -4041,7 +4086,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
72650 }
72651 #endif
72652
72653-#ifdef CONFIG_SYSFS
72654+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72655 static int count_inuse(struct page *page)
72656 {
72657 return page->inuse;
72658@@ -4428,12 +4473,12 @@ static void resiliency_test(void)
72659 validate_slab_cache(kmalloc_caches[9]);
72660 }
72661 #else
72662-#ifdef CONFIG_SYSFS
72663+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72664 static void resiliency_test(void) {};
72665 #endif
72666 #endif
72667
72668-#ifdef CONFIG_SYSFS
72669+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72670 enum slab_stat_type {
72671 SL_ALL, /* All slabs */
72672 SL_PARTIAL, /* Only partially allocated slabs */
72673@@ -4676,7 +4721,7 @@ SLAB_ATTR_RO(ctor);
72674
72675 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
72676 {
72677- return sprintf(buf, "%d\n", s->refcount - 1);
72678+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
72679 }
72680 SLAB_ATTR_RO(aliases);
72681
72682@@ -5243,6 +5288,7 @@ static char *create_unique_id(struct kmem_cache *s)
72683 return name;
72684 }
72685
72686+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72687 static int sysfs_slab_add(struct kmem_cache *s)
72688 {
72689 int err;
72690@@ -5305,6 +5351,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
72691 kobject_del(&s->kobj);
72692 kobject_put(&s->kobj);
72693 }
72694+#endif
72695
72696 /*
72697 * Need to buffer aliases during bootup until sysfs becomes
72698@@ -5318,6 +5365,7 @@ struct saved_alias {
72699
72700 static struct saved_alias *alias_list;
72701
72702+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72703 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72704 {
72705 struct saved_alias *al;
72706@@ -5340,6 +5388,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72707 alias_list = al;
72708 return 0;
72709 }
72710+#endif
72711
72712 static int __init slab_sysfs_init(void)
72713 {
72714diff --git a/mm/swap.c b/mm/swap.c
72715index 14380e9..e244704 100644
72716--- a/mm/swap.c
72717+++ b/mm/swap.c
72718@@ -30,6 +30,7 @@
72719 #include <linux/backing-dev.h>
72720 #include <linux/memcontrol.h>
72721 #include <linux/gfp.h>
72722+#include <linux/hugetlb.h>
72723
72724 #include "internal.h"
72725
72726@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
72727
72728 __page_cache_release(page);
72729 dtor = get_compound_page_dtor(page);
72730+ if (!PageHuge(page))
72731+ BUG_ON(dtor != free_compound_page);
72732 (*dtor)(page);
72733 }
72734
72735diff --git a/mm/swapfile.c b/mm/swapfile.c
72736index d999f09..e00270a 100644
72737--- a/mm/swapfile.c
72738+++ b/mm/swapfile.c
72739@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
72740
72741 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
72742 /* Activity counter to indicate that a swapon or swapoff has occurred */
72743-static atomic_t proc_poll_event = ATOMIC_INIT(0);
72744+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
72745
72746 static inline unsigned char swap_count(unsigned char ent)
72747 {
72748@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
72749 }
72750 filp_close(swap_file, NULL);
72751 err = 0;
72752- atomic_inc(&proc_poll_event);
72753+ atomic_inc_unchecked(&proc_poll_event);
72754 wake_up_interruptible(&proc_poll_wait);
72755
72756 out_dput:
72757@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
72758
72759 poll_wait(file, &proc_poll_wait, wait);
72760
72761- if (seq->poll_event != atomic_read(&proc_poll_event)) {
72762- seq->poll_event = atomic_read(&proc_poll_event);
72763+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
72764+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72765 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
72766 }
72767
72768@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
72769 return ret;
72770
72771 seq = file->private_data;
72772- seq->poll_event = atomic_read(&proc_poll_event);
72773+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72774 return 0;
72775 }
72776
72777@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
72778 (p->flags & SWP_DISCARDABLE) ? "D" : "");
72779
72780 mutex_unlock(&swapon_mutex);
72781- atomic_inc(&proc_poll_event);
72782+ atomic_inc_unchecked(&proc_poll_event);
72783 wake_up_interruptible(&proc_poll_wait);
72784
72785 if (S_ISREG(inode->i_mode))
72786diff --git a/mm/util.c b/mm/util.c
72787index 136ac4f..5117eef 100644
72788--- a/mm/util.c
72789+++ b/mm/util.c
72790@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
72791 * allocated buffer. Use this if you don't want to free the buffer immediately
72792 * like, for example, with RCU.
72793 */
72794+#undef __krealloc
72795 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
72796 {
72797 void *ret;
72798@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
72799 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
72800 * %NULL pointer, the object pointed to is freed.
72801 */
72802+#undef krealloc
72803 void *krealloc(const void *p, size_t new_size, gfp_t flags)
72804 {
72805 void *ret;
72806@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
72807 void arch_pick_mmap_layout(struct mm_struct *mm)
72808 {
72809 mm->mmap_base = TASK_UNMAPPED_BASE;
72810+
72811+#ifdef CONFIG_PAX_RANDMMAP
72812+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72813+ mm->mmap_base += mm->delta_mmap;
72814+#endif
72815+
72816 mm->get_unmapped_area = arch_get_unmapped_area;
72817 mm->unmap_area = arch_unmap_area;
72818 }
72819diff --git a/mm/vmalloc.c b/mm/vmalloc.c
72820index 86ce9a5..0fa4d89 100644
72821--- a/mm/vmalloc.c
72822+++ b/mm/vmalloc.c
72823@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
72824
72825 pte = pte_offset_kernel(pmd, addr);
72826 do {
72827- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72828- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72829+
72830+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72831+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
72832+ BUG_ON(!pte_exec(*pte));
72833+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
72834+ continue;
72835+ }
72836+#endif
72837+
72838+ {
72839+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72840+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72841+ }
72842 } while (pte++, addr += PAGE_SIZE, addr != end);
72843 }
72844
72845@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72846 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
72847 {
72848 pte_t *pte;
72849+ int ret = -ENOMEM;
72850
72851 /*
72852 * nr is a running index into the array which helps higher level
72853@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72854 pte = pte_alloc_kernel(pmd, addr);
72855 if (!pte)
72856 return -ENOMEM;
72857+
72858+ pax_open_kernel();
72859 do {
72860 struct page *page = pages[*nr];
72861
72862- if (WARN_ON(!pte_none(*pte)))
72863- return -EBUSY;
72864- if (WARN_ON(!page))
72865- return -ENOMEM;
72866+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72867+ if (pgprot_val(prot) & _PAGE_NX)
72868+#endif
72869+
72870+ if (WARN_ON(!pte_none(*pte))) {
72871+ ret = -EBUSY;
72872+ goto out;
72873+ }
72874+ if (WARN_ON(!page)) {
72875+ ret = -ENOMEM;
72876+ goto out;
72877+ }
72878 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
72879 (*nr)++;
72880 } while (pte++, addr += PAGE_SIZE, addr != end);
72881- return 0;
72882+ ret = 0;
72883+out:
72884+ pax_close_kernel();
72885+ return ret;
72886 }
72887
72888 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
72889@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
72890 * and fall back on vmalloc() if that fails. Others
72891 * just put it in the vmalloc space.
72892 */
72893-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
72894+#ifdef CONFIG_MODULES
72895+#ifdef MODULES_VADDR
72896 unsigned long addr = (unsigned long)x;
72897 if (addr >= MODULES_VADDR && addr < MODULES_END)
72898 return 1;
72899 #endif
72900+
72901+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72902+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
72903+ return 1;
72904+#endif
72905+
72906+#endif
72907+
72908 return is_vmalloc_addr(x);
72909 }
72910
72911@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
72912
72913 if (!pgd_none(*pgd)) {
72914 pud_t *pud = pud_offset(pgd, addr);
72915+#ifdef CONFIG_X86
72916+ if (!pud_large(*pud))
72917+#endif
72918 if (!pud_none(*pud)) {
72919 pmd_t *pmd = pmd_offset(pud, addr);
72920+#ifdef CONFIG_X86
72921+ if (!pmd_large(*pmd))
72922+#endif
72923 if (!pmd_none(*pmd)) {
72924 pte_t *ptep, pte;
72925
72926@@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
72927 struct vm_struct *area;
72928
72929 BUG_ON(in_interrupt());
72930+
72931+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72932+ if (flags & VM_KERNEXEC) {
72933+ if (start != VMALLOC_START || end != VMALLOC_END)
72934+ return NULL;
72935+ start = (unsigned long)MODULES_EXEC_VADDR;
72936+ end = (unsigned long)MODULES_EXEC_END;
72937+ }
72938+#endif
72939+
72940 if (flags & VM_IOREMAP) {
72941 int bit = fls(size);
72942
72943@@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
72944 if (count > totalram_pages)
72945 return NULL;
72946
72947+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72948+ if (!(pgprot_val(prot) & _PAGE_NX))
72949+ flags |= VM_KERNEXEC;
72950+#endif
72951+
72952 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
72953 __builtin_return_address(0));
72954 if (!area)
72955@@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
72956 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
72957 goto fail;
72958
72959+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72960+ if (!(pgprot_val(prot) & _PAGE_NX))
72961+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
72962+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
72963+ else
72964+#endif
72965+
72966 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
72967 start, end, node, gfp_mask, caller);
72968 if (!area)
72969@@ -1704,6 +1766,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
72970 gfp_mask, prot, node, caller);
72971 }
72972
72973+#undef __vmalloc
72974 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
72975 {
72976 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
72977@@ -1727,6 +1790,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
72978 * For tight control over page level allocator and protection flags
72979 * use __vmalloc() instead.
72980 */
72981+#undef vmalloc
72982 void *vmalloc(unsigned long size)
72983 {
72984 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
72985@@ -1743,6 +1807,7 @@ EXPORT_SYMBOL(vmalloc);
72986 * For tight control over page level allocator and protection flags
72987 * use __vmalloc() instead.
72988 */
72989+#undef vzalloc
72990 void *vzalloc(unsigned long size)
72991 {
72992 return __vmalloc_node_flags(size, -1,
72993@@ -1757,6 +1822,7 @@ EXPORT_SYMBOL(vzalloc);
72994 * The resulting memory area is zeroed so it can be mapped to userspace
72995 * without leaking data.
72996 */
72997+#undef vmalloc_user
72998 void *vmalloc_user(unsigned long size)
72999 {
73000 struct vm_struct *area;
73001@@ -1784,6 +1850,7 @@ EXPORT_SYMBOL(vmalloc_user);
73002 * For tight control over page level allocator and protection flags
73003 * use __vmalloc() instead.
73004 */
73005+#undef vmalloc_node
73006 void *vmalloc_node(unsigned long size, int node)
73007 {
73008 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73009@@ -1803,6 +1870,7 @@ EXPORT_SYMBOL(vmalloc_node);
73010 * For tight control over page level allocator and protection flags
73011 * use __vmalloc_node() instead.
73012 */
73013+#undef vzalloc_node
73014 void *vzalloc_node(unsigned long size, int node)
73015 {
73016 return __vmalloc_node_flags(size, node,
73017@@ -1825,10 +1893,10 @@ EXPORT_SYMBOL(vzalloc_node);
73018 * For tight control over page level allocator and protection flags
73019 * use __vmalloc() instead.
73020 */
73021-
73022+#undef vmalloc_exec
73023 void *vmalloc_exec(unsigned long size)
73024 {
73025- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73026+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73027 -1, __builtin_return_address(0));
73028 }
73029
73030@@ -1847,6 +1915,7 @@ void *vmalloc_exec(unsigned long size)
73031 * Allocate enough 32bit PA addressable pages to cover @size from the
73032 * page level allocator and map them into contiguous kernel virtual space.
73033 */
73034+#undef vmalloc_32
73035 void *vmalloc_32(unsigned long size)
73036 {
73037 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73038@@ -1861,6 +1930,7 @@ EXPORT_SYMBOL(vmalloc_32);
73039 * The resulting memory area is 32bit addressable and zeroed so it can be
73040 * mapped to userspace without leaking data.
73041 */
73042+#undef vmalloc_32_user
73043 void *vmalloc_32_user(unsigned long size)
73044 {
73045 struct vm_struct *area;
73046@@ -2123,6 +2193,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73047 unsigned long uaddr = vma->vm_start;
73048 unsigned long usize = vma->vm_end - vma->vm_start;
73049
73050+ BUG_ON(vma->vm_mirror);
73051+
73052 if ((PAGE_SIZE-1) & (unsigned long)addr)
73053 return -EINVAL;
73054
73055diff --git a/mm/vmstat.c b/mm/vmstat.c
73056index f600557..1459fc8 100644
73057--- a/mm/vmstat.c
73058+++ b/mm/vmstat.c
73059@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73060 *
73061 * vm_stat contains the global counters
73062 */
73063-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73064+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73065 EXPORT_SYMBOL(vm_stat);
73066
73067 #ifdef CONFIG_SMP
73068@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73069 v = p->vm_stat_diff[i];
73070 p->vm_stat_diff[i] = 0;
73071 local_irq_restore(flags);
73072- atomic_long_add(v, &zone->vm_stat[i]);
73073+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73074 global_diff[i] += v;
73075 #ifdef CONFIG_NUMA
73076 /* 3 seconds idle till flush */
73077@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73078
73079 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73080 if (global_diff[i])
73081- atomic_long_add(global_diff[i], &vm_stat[i]);
73082+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73083 }
73084
73085 #endif
73086@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73087 start_cpu_timer(cpu);
73088 #endif
73089 #ifdef CONFIG_PROC_FS
73090- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73091- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73092- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73093- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73094+ {
73095+ mode_t gr_mode = S_IRUGO;
73096+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73097+ gr_mode = S_IRUSR;
73098+#endif
73099+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73100+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73101+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73102+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73103+#else
73104+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73105+#endif
73106+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73107+ }
73108 #endif
73109 return 0;
73110 }
73111diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73112index efea35b..9c8dd0b 100644
73113--- a/net/8021q/vlan.c
73114+++ b/net/8021q/vlan.c
73115@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73116 err = -EPERM;
73117 if (!capable(CAP_NET_ADMIN))
73118 break;
73119- if ((args.u.name_type >= 0) &&
73120- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73121+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73122 struct vlan_net *vn;
73123
73124 vn = net_generic(net, vlan_net_id);
73125diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73126index fccae26..e7ece2f 100644
73127--- a/net/9p/trans_fd.c
73128+++ b/net/9p/trans_fd.c
73129@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73130 oldfs = get_fs();
73131 set_fs(get_ds());
73132 /* The cast to a user pointer is valid due to the set_fs() */
73133- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73134+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73135 set_fs(oldfs);
73136
73137 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73138diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73139index 876fbe8..8bbea9f 100644
73140--- a/net/atm/atm_misc.c
73141+++ b/net/atm/atm_misc.c
73142@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73143 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73144 return 1;
73145 atm_return(vcc, truesize);
73146- atomic_inc(&vcc->stats->rx_drop);
73147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73148 return 0;
73149 }
73150 EXPORT_SYMBOL(atm_charge);
73151@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73152 }
73153 }
73154 atm_return(vcc, guess);
73155- atomic_inc(&vcc->stats->rx_drop);
73156+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73157 return NULL;
73158 }
73159 EXPORT_SYMBOL(atm_alloc_charge);
73160@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73161
73162 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73163 {
73164-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73165+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73166 __SONET_ITEMS
73167 #undef __HANDLE_ITEM
73168 }
73169@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73170
73171 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73172 {
73173-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73174+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73175 __SONET_ITEMS
73176 #undef __HANDLE_ITEM
73177 }
73178diff --git a/net/atm/lec.h b/net/atm/lec.h
73179index dfc0719..47c5322 100644
73180--- a/net/atm/lec.h
73181+++ b/net/atm/lec.h
73182@@ -48,7 +48,7 @@ struct lane2_ops {
73183 const u8 *tlvs, u32 sizeoftlvs);
73184 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73185 const u8 *tlvs, u32 sizeoftlvs);
73186-};
73187+} __no_const;
73188
73189 /*
73190 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73191diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73192index 0919a88..a23d54e 100644
73193--- a/net/atm/mpc.h
73194+++ b/net/atm/mpc.h
73195@@ -33,7 +33,7 @@ struct mpoa_client {
73196 struct mpc_parameters parameters; /* parameters for this client */
73197
73198 const struct net_device_ops *old_ops;
73199- struct net_device_ops new_ops;
73200+ net_device_ops_no_const new_ops;
73201 };
73202
73203
73204diff --git a/net/atm/proc.c b/net/atm/proc.c
73205index 0d020de..011c7bb 100644
73206--- a/net/atm/proc.c
73207+++ b/net/atm/proc.c
73208@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73209 const struct k_atm_aal_stats *stats)
73210 {
73211 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73212- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73213- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73214- atomic_read(&stats->rx_drop));
73215+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73216+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73217+ atomic_read_unchecked(&stats->rx_drop));
73218 }
73219
73220 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73221diff --git a/net/atm/resources.c b/net/atm/resources.c
73222index 23f45ce..c748f1a 100644
73223--- a/net/atm/resources.c
73224+++ b/net/atm/resources.c
73225@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73226 static void copy_aal_stats(struct k_atm_aal_stats *from,
73227 struct atm_aal_stats *to)
73228 {
73229-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73230+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73231 __AAL_STAT_ITEMS
73232 #undef __HANDLE_ITEM
73233 }
73234@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73235 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73236 struct atm_aal_stats *to)
73237 {
73238-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73239+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73240 __AAL_STAT_ITEMS
73241 #undef __HANDLE_ITEM
73242 }
73243diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73244index 3512e25..2b33401 100644
73245--- a/net/batman-adv/bat_iv_ogm.c
73246+++ b/net/batman-adv/bat_iv_ogm.c
73247@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73248
73249 /* change sequence number to network order */
73250 batman_ogm_packet->seqno =
73251- htonl((uint32_t)atomic_read(&hard_iface->seqno));
73252+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73253
73254 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73255 batman_ogm_packet->tt_crc = htons((uint16_t)
73256@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73257 else
73258 batman_ogm_packet->gw_flags = NO_FLAGS;
73259
73260- atomic_inc(&hard_iface->seqno);
73261+ atomic_inc_unchecked(&hard_iface->seqno);
73262
73263 slide_own_bcast_window(hard_iface);
73264 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73265@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
73266 return;
73267
73268 /* could be changed by schedule_own_packet() */
73269- if_incoming_seqno = atomic_read(&if_incoming->seqno);
73270+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73271
73272 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73273
73274diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73275index 7704df4..beb4e16 100644
73276--- a/net/batman-adv/hard-interface.c
73277+++ b/net/batman-adv/hard-interface.c
73278@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73279 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73280 dev_add_pack(&hard_iface->batman_adv_ptype);
73281
73282- atomic_set(&hard_iface->seqno, 1);
73283- atomic_set(&hard_iface->frag_seqno, 1);
73284+ atomic_set_unchecked(&hard_iface->seqno, 1);
73285+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73286 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73287 hard_iface->net_dev->name);
73288
73289diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73290index 987c75a..20d6f36 100644
73291--- a/net/batman-adv/soft-interface.c
73292+++ b/net/batman-adv/soft-interface.c
73293@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73294
73295 /* set broadcast sequence number */
73296 bcast_packet->seqno =
73297- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73298+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73299
73300 add_bcast_packet_to_list(bat_priv, skb, 1);
73301
73302@@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
73303 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73304
73305 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73306- atomic_set(&bat_priv->bcast_seqno, 1);
73307+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73308 atomic_set(&bat_priv->ttvn, 0);
73309 atomic_set(&bat_priv->tt_local_changes, 0);
73310 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73311diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73312index e9eb043..d174eeb 100644
73313--- a/net/batman-adv/types.h
73314+++ b/net/batman-adv/types.h
73315@@ -38,8 +38,8 @@ struct hard_iface {
73316 int16_t if_num;
73317 char if_status;
73318 struct net_device *net_dev;
73319- atomic_t seqno;
73320- atomic_t frag_seqno;
73321+ atomic_unchecked_t seqno;
73322+ atomic_unchecked_t frag_seqno;
73323 unsigned char *packet_buff;
73324 int packet_len;
73325 struct kobject *hardif_obj;
73326@@ -154,7 +154,7 @@ struct bat_priv {
73327 atomic_t orig_interval; /* uint */
73328 atomic_t hop_penalty; /* uint */
73329 atomic_t log_level; /* uint */
73330- atomic_t bcast_seqno;
73331+ atomic_unchecked_t bcast_seqno;
73332 atomic_t bcast_queue_left;
73333 atomic_t batman_queue_left;
73334 atomic_t ttvn; /* translation table version number */
73335diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73336index 07d1c1d..7e9bea9 100644
73337--- a/net/batman-adv/unicast.c
73338+++ b/net/batman-adv/unicast.c
73339@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73340 frag1->flags = UNI_FRAG_HEAD | large_tail;
73341 frag2->flags = large_tail;
73342
73343- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73344+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73345 frag1->seqno = htons(seqno - 1);
73346 frag2->seqno = htons(seqno);
73347
73348diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73349index 07bc69e..21e76b1 100644
73350--- a/net/bluetooth/hci_conn.c
73351+++ b/net/bluetooth/hci_conn.c
73352@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73353 memset(&cp, 0, sizeof(cp));
73354
73355 cp.handle = cpu_to_le16(conn->handle);
73356- memcpy(cp.ltk, ltk, sizeof(ltk));
73357+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73358
73359 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73360 }
73361diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73362index 32d338c..d24bcdb 100644
73363--- a/net/bluetooth/l2cap_core.c
73364+++ b/net/bluetooth/l2cap_core.c
73365@@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73366 break;
73367
73368 case L2CAP_CONF_RFC:
73369- if (olen == sizeof(rfc))
73370- memcpy(&rfc, (void *)val, olen);
73371+ if (olen != sizeof(rfc))
73372+ break;
73373+
73374+ memcpy(&rfc, (void *)val, olen);
73375
73376 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73377 rfc.mode != chan->mode)
73378@@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73379
73380 switch (type) {
73381 case L2CAP_CONF_RFC:
73382- if (olen == sizeof(rfc))
73383- memcpy(&rfc, (void *)val, olen);
73384+ if (olen != sizeof(rfc))
73385+ break;
73386+
73387+ memcpy(&rfc, (void *)val, olen);
73388 goto done;
73389 }
73390 }
73391diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73392index 5fe2ff3..10968b5 100644
73393--- a/net/bridge/netfilter/ebtables.c
73394+++ b/net/bridge/netfilter/ebtables.c
73395@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73396 tmp.valid_hooks = t->table->valid_hooks;
73397 }
73398 mutex_unlock(&ebt_mutex);
73399- if (copy_to_user(user, &tmp, *len) != 0){
73400+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73401 BUGPRINT("c2u Didn't work\n");
73402 ret = -EFAULT;
73403 break;
73404diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
73405index a97d97a..6f679ed 100644
73406--- a/net/caif/caif_socket.c
73407+++ b/net/caif/caif_socket.c
73408@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
73409 #ifdef CONFIG_DEBUG_FS
73410 struct debug_fs_counter {
73411 atomic_t caif_nr_socks;
73412- atomic_t caif_sock_create;
73413- atomic_t num_connect_req;
73414- atomic_t num_connect_resp;
73415- atomic_t num_connect_fail_resp;
73416- atomic_t num_disconnect;
73417- atomic_t num_remote_shutdown_ind;
73418- atomic_t num_tx_flow_off_ind;
73419- atomic_t num_tx_flow_on_ind;
73420- atomic_t num_rx_flow_off;
73421- atomic_t num_rx_flow_on;
73422+ atomic_unchecked_t caif_sock_create;
73423+ atomic_unchecked_t num_connect_req;
73424+ atomic_unchecked_t num_connect_resp;
73425+ atomic_unchecked_t num_connect_fail_resp;
73426+ atomic_unchecked_t num_disconnect;
73427+ atomic_unchecked_t num_remote_shutdown_ind;
73428+ atomic_unchecked_t num_tx_flow_off_ind;
73429+ atomic_unchecked_t num_tx_flow_on_ind;
73430+ atomic_unchecked_t num_rx_flow_off;
73431+ atomic_unchecked_t num_rx_flow_on;
73432 };
73433 static struct debug_fs_counter cnt;
73434 #define dbfs_atomic_inc(v) atomic_inc_return(v)
73435+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
73436 #define dbfs_atomic_dec(v) atomic_dec_return(v)
73437 #else
73438 #define dbfs_atomic_inc(v) 0
73439@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73440 atomic_read(&cf_sk->sk.sk_rmem_alloc),
73441 sk_rcvbuf_lowwater(cf_sk));
73442 set_rx_flow_off(cf_sk);
73443- dbfs_atomic_inc(&cnt.num_rx_flow_off);
73444+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73445 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73446 }
73447
73448@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73449 set_rx_flow_off(cf_sk);
73450 if (net_ratelimit())
73451 pr_debug("sending flow OFF due to rmem_schedule\n");
73452- dbfs_atomic_inc(&cnt.num_rx_flow_off);
73453+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73454 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73455 }
73456 skb->dev = NULL;
73457@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
73458 switch (flow) {
73459 case CAIF_CTRLCMD_FLOW_ON_IND:
73460 /* OK from modem to start sending again */
73461- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
73462+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
73463 set_tx_flow_on(cf_sk);
73464 cf_sk->sk.sk_state_change(&cf_sk->sk);
73465 break;
73466
73467 case CAIF_CTRLCMD_FLOW_OFF_IND:
73468 /* Modem asks us to shut up */
73469- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
73470+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
73471 set_tx_flow_off(cf_sk);
73472 cf_sk->sk.sk_state_change(&cf_sk->sk);
73473 break;
73474@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73475 /* We're now connected */
73476 caif_client_register_refcnt(&cf_sk->layer,
73477 cfsk_hold, cfsk_put);
73478- dbfs_atomic_inc(&cnt.num_connect_resp);
73479+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
73480 cf_sk->sk.sk_state = CAIF_CONNECTED;
73481 set_tx_flow_on(cf_sk);
73482 cf_sk->sk.sk_state_change(&cf_sk->sk);
73483@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73484
73485 case CAIF_CTRLCMD_INIT_FAIL_RSP:
73486 /* Connect request failed */
73487- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
73488+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
73489 cf_sk->sk.sk_err = ECONNREFUSED;
73490 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
73491 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73492@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73493
73494 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
73495 /* Modem has closed this connection, or device is down. */
73496- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
73497+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
73498 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73499 cf_sk->sk.sk_err = ECONNRESET;
73500 set_rx_flow_on(cf_sk);
73501@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
73502 return;
73503
73504 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
73505- dbfs_atomic_inc(&cnt.num_rx_flow_on);
73506+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
73507 set_rx_flow_on(cf_sk);
73508 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
73509 }
73510@@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
73511 /*ifindex = id of the interface.*/
73512 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
73513
73514- dbfs_atomic_inc(&cnt.num_connect_req);
73515+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
73516 cf_sk->layer.receive = caif_sktrecv_cb;
73517
73518 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
73519@@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
73520 spin_unlock_bh(&sk->sk_receive_queue.lock);
73521 sock->sk = NULL;
73522
73523- dbfs_atomic_inc(&cnt.num_disconnect);
73524+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
73525
73526 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
73527 if (cf_sk->debugfs_socket_dir != NULL)
73528@@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
73529 cf_sk->conn_req.protocol = protocol;
73530 /* Increase the number of sockets created. */
73531 dbfs_atomic_inc(&cnt.caif_nr_socks);
73532- num = dbfs_atomic_inc(&cnt.caif_sock_create);
73533+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
73534 #ifdef CONFIG_DEBUG_FS
73535 if (!IS_ERR(debugfsdir)) {
73536
73537diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73538index 5cf5222..6f704ad 100644
73539--- a/net/caif/cfctrl.c
73540+++ b/net/caif/cfctrl.c
73541@@ -9,6 +9,7 @@
73542 #include <linux/stddef.h>
73543 #include <linux/spinlock.h>
73544 #include <linux/slab.h>
73545+#include <linux/sched.h>
73546 #include <net/caif/caif_layer.h>
73547 #include <net/caif/cfpkt.h>
73548 #include <net/caif/cfctrl.h>
73549@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73550 memset(&dev_info, 0, sizeof(dev_info));
73551 dev_info.id = 0xff;
73552 cfsrvl_init(&this->serv, 0, &dev_info, false);
73553- atomic_set(&this->req_seq_no, 1);
73554- atomic_set(&this->rsp_seq_no, 1);
73555+ atomic_set_unchecked(&this->req_seq_no, 1);
73556+ atomic_set_unchecked(&this->rsp_seq_no, 1);
73557 this->serv.layer.receive = cfctrl_recv;
73558 sprintf(this->serv.layer.name, "ctrl");
73559 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73560@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73561 struct cfctrl_request_info *req)
73562 {
73563 spin_lock_bh(&ctrl->info_list_lock);
73564- atomic_inc(&ctrl->req_seq_no);
73565- req->sequence_no = atomic_read(&ctrl->req_seq_no);
73566+ atomic_inc_unchecked(&ctrl->req_seq_no);
73567+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73568 list_add_tail(&req->list, &ctrl->list);
73569 spin_unlock_bh(&ctrl->info_list_lock);
73570 }
73571@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73572 if (p != first)
73573 pr_warn("Requests are not received in order\n");
73574
73575- atomic_set(&ctrl->rsp_seq_no,
73576+ atomic_set_unchecked(&ctrl->rsp_seq_no,
73577 p->sequence_no);
73578 list_del(&p->list);
73579 goto out;
73580diff --git a/net/can/gw.c b/net/can/gw.c
73581index 3d79b12..8de85fa 100644
73582--- a/net/can/gw.c
73583+++ b/net/can/gw.c
73584@@ -96,7 +96,7 @@ struct cf_mod {
73585 struct {
73586 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73587 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73588- } csumfunc;
73589+ } __no_const csumfunc;
73590 };
73591
73592
73593diff --git a/net/compat.c b/net/compat.c
73594index 6def90e..c6992fa 100644
73595--- a/net/compat.c
73596+++ b/net/compat.c
73597@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73598 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73599 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73600 return -EFAULT;
73601- kmsg->msg_name = compat_ptr(tmp1);
73602- kmsg->msg_iov = compat_ptr(tmp2);
73603- kmsg->msg_control = compat_ptr(tmp3);
73604+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73605+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73606+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73607 return 0;
73608 }
73609
73610@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73611
73612 if (kern_msg->msg_namelen) {
73613 if (mode == VERIFY_READ) {
73614- int err = move_addr_to_kernel(kern_msg->msg_name,
73615+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73616 kern_msg->msg_namelen,
73617 kern_address);
73618 if (err < 0)
73619@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73620 kern_msg->msg_name = NULL;
73621
73622 tot_len = iov_from_user_compat_to_kern(kern_iov,
73623- (struct compat_iovec __user *)kern_msg->msg_iov,
73624+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
73625 kern_msg->msg_iovlen);
73626 if (tot_len >= 0)
73627 kern_msg->msg_iov = kern_iov;
73628@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73629
73630 #define CMSG_COMPAT_FIRSTHDR(msg) \
73631 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73632- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73633+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73634 (struct compat_cmsghdr __user *)NULL)
73635
73636 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73637 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73638 (ucmlen) <= (unsigned long) \
73639 ((mhdr)->msg_controllen - \
73640- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73641+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73642
73643 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73644 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73645 {
73646 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73647- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73648+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73649 msg->msg_controllen)
73650 return NULL;
73651 return (struct compat_cmsghdr __user *)ptr;
73652@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73653 {
73654 struct compat_timeval ctv;
73655 struct compat_timespec cts[3];
73656- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73657+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73658 struct compat_cmsghdr cmhdr;
73659 int cmlen;
73660
73661@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73662
73663 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
73664 {
73665- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73666+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73667 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
73668 int fdnum = scm->fp->count;
73669 struct file **fp = scm->fp->fp;
73670@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
73671 return -EFAULT;
73672 old_fs = get_fs();
73673 set_fs(KERNEL_DS);
73674- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
73675+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
73676 set_fs(old_fs);
73677
73678 return err;
73679@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
73680 len = sizeof(ktime);
73681 old_fs = get_fs();
73682 set_fs(KERNEL_DS);
73683- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
73684+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
73685 set_fs(old_fs);
73686
73687 if (!err) {
73688@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73689 case MCAST_JOIN_GROUP:
73690 case MCAST_LEAVE_GROUP:
73691 {
73692- struct compat_group_req __user *gr32 = (void *)optval;
73693+ struct compat_group_req __user *gr32 = (void __user *)optval;
73694 struct group_req __user *kgr =
73695 compat_alloc_user_space(sizeof(struct group_req));
73696 u32 interface;
73697@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73698 case MCAST_BLOCK_SOURCE:
73699 case MCAST_UNBLOCK_SOURCE:
73700 {
73701- struct compat_group_source_req __user *gsr32 = (void *)optval;
73702+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
73703 struct group_source_req __user *kgsr = compat_alloc_user_space(
73704 sizeof(struct group_source_req));
73705 u32 interface;
73706@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73707 }
73708 case MCAST_MSFILTER:
73709 {
73710- struct compat_group_filter __user *gf32 = (void *)optval;
73711+ struct compat_group_filter __user *gf32 = (void __user *)optval;
73712 struct group_filter __user *kgf;
73713 u32 interface, fmode, numsrc;
73714
73715@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
73716 char __user *optval, int __user *optlen,
73717 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
73718 {
73719- struct compat_group_filter __user *gf32 = (void *)optval;
73720+ struct compat_group_filter __user *gf32 = (void __user *)optval;
73721 struct group_filter __user *kgf;
73722 int __user *koptlen;
73723 u32 interface, fmode, numsrc;
73724diff --git a/net/core/datagram.c b/net/core/datagram.c
73725index 68bbf9f..5ef0d12 100644
73726--- a/net/core/datagram.c
73727+++ b/net/core/datagram.c
73728@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
73729 }
73730
73731 kfree_skb(skb);
73732- atomic_inc(&sk->sk_drops);
73733+ atomic_inc_unchecked(&sk->sk_drops);
73734 sk_mem_reclaim_partial(sk);
73735
73736 return err;
73737diff --git a/net/core/dev.c b/net/core/dev.c
73738index 6ca32f6..c7e9bbd 100644
73739--- a/net/core/dev.c
73740+++ b/net/core/dev.c
73741@@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
73742 if (no_module && capable(CAP_NET_ADMIN))
73743 no_module = request_module("netdev-%s", name);
73744 if (no_module && capable(CAP_SYS_MODULE)) {
73745+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73746+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
73747+#else
73748 if (!request_module("%s", name))
73749 pr_err("Loading kernel module for a network device "
73750 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
73751 "instead\n", name);
73752+#endif
73753 }
73754 }
73755 EXPORT_SYMBOL(dev_load);
73756@@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73757 {
73758 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
73759 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
73760- atomic_long_inc(&dev->rx_dropped);
73761+ atomic_long_inc_unchecked(&dev->rx_dropped);
73762 kfree_skb(skb);
73763 return NET_RX_DROP;
73764 }
73765@@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73766 nf_reset(skb);
73767
73768 if (unlikely(!is_skb_forwardable(dev, skb))) {
73769- atomic_long_inc(&dev->rx_dropped);
73770+ atomic_long_inc_unchecked(&dev->rx_dropped);
73771 kfree_skb(skb);
73772 return NET_RX_DROP;
73773 }
73774@@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
73775
73776 struct dev_gso_cb {
73777 void (*destructor)(struct sk_buff *skb);
73778-};
73779+} __no_const;
73780
73781 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
73782
73783@@ -2913,7 +2917,7 @@ enqueue:
73784
73785 local_irq_restore(flags);
73786
73787- atomic_long_inc(&skb->dev->rx_dropped);
73788+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73789 kfree_skb(skb);
73790 return NET_RX_DROP;
73791 }
73792@@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
73793 }
73794 EXPORT_SYMBOL(netif_rx_ni);
73795
73796-static void net_tx_action(struct softirq_action *h)
73797+static void net_tx_action(void)
73798 {
73799 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73800
73801@@ -3273,7 +3277,7 @@ ncls:
73802 if (pt_prev) {
73803 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
73804 } else {
73805- atomic_long_inc(&skb->dev->rx_dropped);
73806+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73807 kfree_skb(skb);
73808 /* Jamal, now you will not able to escape explaining
73809 * me how you were going to use this. :-)
73810@@ -3832,7 +3836,7 @@ void netif_napi_del(struct napi_struct *napi)
73811 }
73812 EXPORT_SYMBOL(netif_napi_del);
73813
73814-static void net_rx_action(struct softirq_action *h)
73815+static void net_rx_action(void)
73816 {
73817 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73818 unsigned long time_limit = jiffies + 2;
73819@@ -5889,7 +5893,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
73820 } else {
73821 netdev_stats_to_stats64(storage, &dev->stats);
73822 }
73823- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
73824+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
73825 return storage;
73826 }
73827 EXPORT_SYMBOL(dev_get_stats);
73828diff --git a/net/core/flow.c b/net/core/flow.c
73829index e318c7e..168b1d0 100644
73830--- a/net/core/flow.c
73831+++ b/net/core/flow.c
73832@@ -61,7 +61,7 @@ struct flow_cache {
73833 struct timer_list rnd_timer;
73834 };
73835
73836-atomic_t flow_cache_genid = ATOMIC_INIT(0);
73837+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
73838 EXPORT_SYMBOL(flow_cache_genid);
73839 static struct flow_cache flow_cache_global;
73840 static struct kmem_cache *flow_cachep __read_mostly;
73841@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
73842
73843 static int flow_entry_valid(struct flow_cache_entry *fle)
73844 {
73845- if (atomic_read(&flow_cache_genid) != fle->genid)
73846+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
73847 return 0;
73848 if (fle->object && !fle->object->ops->check(fle->object))
73849 return 0;
73850@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
73851 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
73852 fcp->hash_count++;
73853 }
73854- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
73855+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
73856 flo = fle->object;
73857 if (!flo)
73858 goto ret_object;
73859@@ -280,7 +280,7 @@ nocache:
73860 }
73861 flo = resolver(net, key, family, dir, flo, ctx);
73862 if (fle) {
73863- fle->genid = atomic_read(&flow_cache_genid);
73864+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
73865 if (!IS_ERR(flo))
73866 fle->object = flo;
73867 else
73868diff --git a/net/core/iovec.c b/net/core/iovec.c
73869index c40f27e..7f49254 100644
73870--- a/net/core/iovec.c
73871+++ b/net/core/iovec.c
73872@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73873 if (m->msg_namelen) {
73874 if (mode == VERIFY_READ) {
73875 void __user *namep;
73876- namep = (void __user __force *) m->msg_name;
73877+ namep = (void __force_user *) m->msg_name;
73878 err = move_addr_to_kernel(namep, m->msg_namelen,
73879 address);
73880 if (err < 0)
73881@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73882 }
73883
73884 size = m->msg_iovlen * sizeof(struct iovec);
73885- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
73886+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
73887 return -EFAULT;
73888
73889 m->msg_iov = iov;
73890diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
73891index f965dce..92c792a 100644
73892--- a/net/core/rtnetlink.c
73893+++ b/net/core/rtnetlink.c
73894@@ -57,7 +57,7 @@ struct rtnl_link {
73895 rtnl_doit_func doit;
73896 rtnl_dumpit_func dumpit;
73897 rtnl_calcit_func calcit;
73898-};
73899+} __no_const;
73900
73901 static DEFINE_MUTEX(rtnl_mutex);
73902
73903diff --git a/net/core/scm.c b/net/core/scm.c
73904index ff52ad0..aff1c0f 100644
73905--- a/net/core/scm.c
73906+++ b/net/core/scm.c
73907@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
73908 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73909 {
73910 struct cmsghdr __user *cm
73911- = (__force struct cmsghdr __user *)msg->msg_control;
73912+ = (struct cmsghdr __force_user *)msg->msg_control;
73913 struct cmsghdr cmhdr;
73914 int cmlen = CMSG_LEN(len);
73915 int err;
73916@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73917 err = -EFAULT;
73918 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
73919 goto out;
73920- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
73921+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
73922 goto out;
73923 cmlen = CMSG_SPACE(len);
73924 if (msg->msg_controllen < cmlen)
73925@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
73926 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
73927 {
73928 struct cmsghdr __user *cm
73929- = (__force struct cmsghdr __user*)msg->msg_control;
73930+ = (struct cmsghdr __force_user *)msg->msg_control;
73931
73932 int fdmax = 0;
73933 int fdnum = scm->fp->count;
73934@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
73935 if (fdnum < fdmax)
73936 fdmax = fdnum;
73937
73938- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
73939+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
73940 i++, cmfptr++)
73941 {
73942 int new_fd;
73943diff --git a/net/core/sock.c b/net/core/sock.c
73944index 02f8dfe..86dfd4a 100644
73945--- a/net/core/sock.c
73946+++ b/net/core/sock.c
73947@@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73948 struct sk_buff_head *list = &sk->sk_receive_queue;
73949
73950 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
73951- atomic_inc(&sk->sk_drops);
73952+ atomic_inc_unchecked(&sk->sk_drops);
73953 trace_sock_rcvqueue_full(sk, skb);
73954 return -ENOMEM;
73955 }
73956@@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73957 return err;
73958
73959 if (!sk_rmem_schedule(sk, skb->truesize)) {
73960- atomic_inc(&sk->sk_drops);
73961+ atomic_inc_unchecked(&sk->sk_drops);
73962 return -ENOBUFS;
73963 }
73964
73965@@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73966 skb_dst_force(skb);
73967
73968 spin_lock_irqsave(&list->lock, flags);
73969- skb->dropcount = atomic_read(&sk->sk_drops);
73970+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
73971 __skb_queue_tail(list, skb);
73972 spin_unlock_irqrestore(&list->lock, flags);
73973
73974@@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
73975 skb->dev = NULL;
73976
73977 if (sk_rcvqueues_full(sk, skb)) {
73978- atomic_inc(&sk->sk_drops);
73979+ atomic_inc_unchecked(&sk->sk_drops);
73980 goto discard_and_relse;
73981 }
73982 if (nested)
73983@@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
73984 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
73985 } else if (sk_add_backlog(sk, skb)) {
73986 bh_unlock_sock(sk);
73987- atomic_inc(&sk->sk_drops);
73988+ atomic_inc_unchecked(&sk->sk_drops);
73989 goto discard_and_relse;
73990 }
73991
73992@@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73993 if (len > sizeof(peercred))
73994 len = sizeof(peercred);
73995 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
73996- if (copy_to_user(optval, &peercred, len))
73997+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
73998 return -EFAULT;
73999 goto lenout;
74000 }
74001@@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74002 return -ENOTCONN;
74003 if (lv < len)
74004 return -EINVAL;
74005- if (copy_to_user(optval, address, len))
74006+ if (len > sizeof(address) || copy_to_user(optval, address, len))
74007 return -EFAULT;
74008 goto lenout;
74009 }
74010@@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74011
74012 if (len > lv)
74013 len = lv;
74014- if (copy_to_user(optval, &v, len))
74015+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
74016 return -EFAULT;
74017 lenout:
74018 if (put_user(len, optlen))
74019@@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74020 */
74021 smp_wmb();
74022 atomic_set(&sk->sk_refcnt, 1);
74023- atomic_set(&sk->sk_drops, 0);
74024+ atomic_set_unchecked(&sk->sk_drops, 0);
74025 }
74026 EXPORT_SYMBOL(sock_init_data);
74027
74028diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74029index b9868e1..849f809 100644
74030--- a/net/core/sock_diag.c
74031+++ b/net/core/sock_diag.c
74032@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74033
74034 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74035 {
74036+#ifndef CONFIG_GRKERNSEC_HIDESYM
74037 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74038 cookie[1] != INET_DIAG_NOCOOKIE) &&
74039 ((u32)(unsigned long)sk != cookie[0] ||
74040 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74041 return -ESTALE;
74042 else
74043+#endif
74044 return 0;
74045 }
74046 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74047
74048 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74049 {
74050+#ifdef CONFIG_GRKERNSEC_HIDESYM
74051+ cookie[0] = 0;
74052+ cookie[1] = 0;
74053+#else
74054 cookie[0] = (u32)(unsigned long)sk;
74055 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74056+#endif
74057 }
74058 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74059
74060diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74061index 02e75d1..9a57a7c 100644
74062--- a/net/decnet/sysctl_net_decnet.c
74063+++ b/net/decnet/sysctl_net_decnet.c
74064@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74065
74066 if (len > *lenp) len = *lenp;
74067
74068- if (copy_to_user(buffer, addr, len))
74069+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
74070 return -EFAULT;
74071
74072 *lenp = len;
74073@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74074
74075 if (len > *lenp) len = *lenp;
74076
74077- if (copy_to_user(buffer, devname, len))
74078+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
74079 return -EFAULT;
74080
74081 *lenp = len;
74082diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74083index 39a2d29..f39c0fe 100644
74084--- a/net/econet/Kconfig
74085+++ b/net/econet/Kconfig
74086@@ -4,7 +4,7 @@
74087
74088 config ECONET
74089 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74090- depends on EXPERIMENTAL && INET
74091+ depends on EXPERIMENTAL && INET && BROKEN
74092 ---help---
74093 Econet is a fairly old and slow networking protocol mainly used by
74094 Acorn computers to access file and print servers. It uses native
74095diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74096index 92fc5f6..b790d91 100644
74097--- a/net/ipv4/fib_frontend.c
74098+++ b/net/ipv4/fib_frontend.c
74099@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74100 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74101 fib_sync_up(dev);
74102 #endif
74103- atomic_inc(&net->ipv4.dev_addr_genid);
74104+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74105 rt_cache_flush(dev_net(dev), -1);
74106 break;
74107 case NETDEV_DOWN:
74108 fib_del_ifaddr(ifa, NULL);
74109- atomic_inc(&net->ipv4.dev_addr_genid);
74110+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74111 if (ifa->ifa_dev->ifa_list == NULL) {
74112 /* Last address was deleted from this interface.
74113 * Disable IP.
74114@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74115 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74116 fib_sync_up(dev);
74117 #endif
74118- atomic_inc(&net->ipv4.dev_addr_genid);
74119+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74120 rt_cache_flush(dev_net(dev), -1);
74121 break;
74122 case NETDEV_DOWN:
74123diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74124index 80106d8..232e898 100644
74125--- a/net/ipv4/fib_semantics.c
74126+++ b/net/ipv4/fib_semantics.c
74127@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74128 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74129 nh->nh_gw,
74130 nh->nh_parent->fib_scope);
74131- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74132+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74133
74134 return nh->nh_saddr;
74135 }
74136diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74137index 984ec65..97ac518 100644
74138--- a/net/ipv4/inet_hashtables.c
74139+++ b/net/ipv4/inet_hashtables.c
74140@@ -18,12 +18,15 @@
74141 #include <linux/sched.h>
74142 #include <linux/slab.h>
74143 #include <linux/wait.h>
74144+#include <linux/security.h>
74145
74146 #include <net/inet_connection_sock.h>
74147 #include <net/inet_hashtables.h>
74148 #include <net/secure_seq.h>
74149 #include <net/ip.h>
74150
74151+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74152+
74153 /*
74154 * Allocate and initialize a new local port bind bucket.
74155 * The bindhash mutex for snum's hash chain must be held here.
74156@@ -530,6 +533,8 @@ ok:
74157 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74158 spin_unlock(&head->lock);
74159
74160+ gr_update_task_in_ip_table(current, inet_sk(sk));
74161+
74162 if (tw) {
74163 inet_twsk_deschedule(tw, death_row);
74164 while (twrefcnt) {
74165diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74166index d4d61b6..b81aec8 100644
74167--- a/net/ipv4/inetpeer.c
74168+++ b/net/ipv4/inetpeer.c
74169@@ -487,8 +487,8 @@ relookup:
74170 if (p) {
74171 p->daddr = *daddr;
74172 atomic_set(&p->refcnt, 1);
74173- atomic_set(&p->rid, 0);
74174- atomic_set(&p->ip_id_count,
74175+ atomic_set_unchecked(&p->rid, 0);
74176+ atomic_set_unchecked(&p->ip_id_count,
74177 (daddr->family == AF_INET) ?
74178 secure_ip_id(daddr->addr.a4) :
74179 secure_ipv6_id(daddr->addr.a6));
74180diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74181index 1f23a57..7180dfe 100644
74182--- a/net/ipv4/ip_fragment.c
74183+++ b/net/ipv4/ip_fragment.c
74184@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74185 return 0;
74186
74187 start = qp->rid;
74188- end = atomic_inc_return(&peer->rid);
74189+ end = atomic_inc_return_unchecked(&peer->rid);
74190 qp->rid = end;
74191
74192 rc = qp->q.fragments && (end - start) > max;
74193diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74194index 8aa87c1..35c3248 100644
74195--- a/net/ipv4/ip_sockglue.c
74196+++ b/net/ipv4/ip_sockglue.c
74197@@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74198 len = min_t(unsigned int, len, opt->optlen);
74199 if (put_user(len, optlen))
74200 return -EFAULT;
74201- if (copy_to_user(optval, opt->__data, len))
74202+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74203+ copy_to_user(optval, opt->__data, len))
74204 return -EFAULT;
74205 return 0;
74206 }
74207@@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74208 if (sk->sk_type != SOCK_STREAM)
74209 return -ENOPROTOOPT;
74210
74211- msg.msg_control = optval;
74212+ msg.msg_control = (void __force_kernel *)optval;
74213 msg.msg_controllen = len;
74214 msg.msg_flags = flags;
74215
74216diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74217index 6e412a6..6640538 100644
74218--- a/net/ipv4/ipconfig.c
74219+++ b/net/ipv4/ipconfig.c
74220@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74221
74222 mm_segment_t oldfs = get_fs();
74223 set_fs(get_ds());
74224- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74225+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74226 set_fs(oldfs);
74227 return res;
74228 }
74229@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74230
74231 mm_segment_t oldfs = get_fs();
74232 set_fs(get_ds());
74233- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74234+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74235 set_fs(oldfs);
74236 return res;
74237 }
74238@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74239
74240 mm_segment_t oldfs = get_fs();
74241 set_fs(get_ds());
74242- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74243+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74244 set_fs(oldfs);
74245 return res;
74246 }
74247diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74248index 2133c30..5c4b40b 100644
74249--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
74250+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74251@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
74252
74253 *len = 0;
74254
74255- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
74256+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
74257 if (*octets == NULL)
74258 return 0;
74259
74260diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74261index b072386..abdebcf 100644
74262--- a/net/ipv4/ping.c
74263+++ b/net/ipv4/ping.c
74264@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74265 sk_rmem_alloc_get(sp),
74266 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74267 atomic_read(&sp->sk_refcnt), sp,
74268- atomic_read(&sp->sk_drops), len);
74269+ atomic_read_unchecked(&sp->sk_drops), len);
74270 }
74271
74272 static int ping_seq_show(struct seq_file *seq, void *v)
74273diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74274index 3ccda5a..3c1e61d 100644
74275--- a/net/ipv4/raw.c
74276+++ b/net/ipv4/raw.c
74277@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74278 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74279 {
74280 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74281- atomic_inc(&sk->sk_drops);
74282+ atomic_inc_unchecked(&sk->sk_drops);
74283 kfree_skb(skb);
74284 return NET_RX_DROP;
74285 }
74286@@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
74287
74288 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74289 {
74290+ struct icmp_filter filter;
74291+
74292 if (optlen > sizeof(struct icmp_filter))
74293 optlen = sizeof(struct icmp_filter);
74294- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74295+ if (copy_from_user(&filter, optval, optlen))
74296 return -EFAULT;
74297+ raw_sk(sk)->filter = filter;
74298 return 0;
74299 }
74300
74301 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74302 {
74303 int len, ret = -EFAULT;
74304+ struct icmp_filter filter;
74305
74306 if (get_user(len, optlen))
74307 goto out;
74308@@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74309 if (len > sizeof(struct icmp_filter))
74310 len = sizeof(struct icmp_filter);
74311 ret = -EFAULT;
74312- if (put_user(len, optlen) ||
74313- copy_to_user(optval, &raw_sk(sk)->filter, len))
74314+ filter = raw_sk(sk)->filter;
74315+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74316 goto out;
74317 ret = 0;
74318 out: return ret;
74319@@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74320 sk_wmem_alloc_get(sp),
74321 sk_rmem_alloc_get(sp),
74322 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74323- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74324+ atomic_read(&sp->sk_refcnt),
74325+#ifdef CONFIG_GRKERNSEC_HIDESYM
74326+ NULL,
74327+#else
74328+ sp,
74329+#endif
74330+ atomic_read_unchecked(&sp->sk_drops));
74331 }
74332
74333 static int raw_seq_show(struct seq_file *seq, void *v)
74334diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74335index 0197747..7adb0dc 100644
74336--- a/net/ipv4/route.c
74337+++ b/net/ipv4/route.c
74338@@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74339
74340 static inline int rt_genid(struct net *net)
74341 {
74342- return atomic_read(&net->ipv4.rt_genid);
74343+ return atomic_read_unchecked(&net->ipv4.rt_genid);
74344 }
74345
74346 #ifdef CONFIG_PROC_FS
74347@@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
74348 unsigned char shuffle;
74349
74350 get_random_bytes(&shuffle, sizeof(shuffle));
74351- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74352+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74353 inetpeer_invalidate_tree(AF_INET);
74354 }
74355
74356@@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
74357 error = rt->dst.error;
74358 if (peer) {
74359 inet_peer_refcheck(rt->peer);
74360- id = atomic_read(&peer->ip_id_count) & 0xffff;
74361+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74362 if (peer->tcp_ts_stamp) {
74363 ts = peer->tcp_ts;
74364 tsage = get_seconds() - peer->tcp_ts_stamp;
74365diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74366index fd54c5f..96d6407 100644
74367--- a/net/ipv4/tcp_ipv4.c
74368+++ b/net/ipv4/tcp_ipv4.c
74369@@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
74370 int sysctl_tcp_low_latency __read_mostly;
74371 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74372
74373+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74374+extern int grsec_enable_blackhole;
74375+#endif
74376
74377 #ifdef CONFIG_TCP_MD5SIG
74378 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
74379@@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74380 return 0;
74381
74382 reset:
74383+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74384+ if (!grsec_enable_blackhole)
74385+#endif
74386 tcp_v4_send_reset(rsk, skb);
74387 discard:
74388 kfree_skb(skb);
74389@@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74390 TCP_SKB_CB(skb)->sacked = 0;
74391
74392 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74393- if (!sk)
74394+ if (!sk) {
74395+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74396+ ret = 1;
74397+#endif
74398 goto no_tcp_socket;
74399-
74400+ }
74401 process:
74402- if (sk->sk_state == TCP_TIME_WAIT)
74403+ if (sk->sk_state == TCP_TIME_WAIT) {
74404+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74405+ ret = 2;
74406+#endif
74407 goto do_time_wait;
74408+ }
74409
74410 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74411 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74412@@ -1755,6 +1768,10 @@ no_tcp_socket:
74413 bad_packet:
74414 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74415 } else {
74416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74417+ if (!grsec_enable_blackhole || (ret == 1 &&
74418+ (skb->dev->flags & IFF_LOOPBACK)))
74419+#endif
74420 tcp_v4_send_reset(NULL, skb);
74421 }
74422
74423@@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74424 0, /* non standard timer */
74425 0, /* open_requests have no inode */
74426 atomic_read(&sk->sk_refcnt),
74427+#ifdef CONFIG_GRKERNSEC_HIDESYM
74428+ NULL,
74429+#else
74430 req,
74431+#endif
74432 len);
74433 }
74434
74435@@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74436 sock_i_uid(sk),
74437 icsk->icsk_probes_out,
74438 sock_i_ino(sk),
74439- atomic_read(&sk->sk_refcnt), sk,
74440+ atomic_read(&sk->sk_refcnt),
74441+#ifdef CONFIG_GRKERNSEC_HIDESYM
74442+ NULL,
74443+#else
74444+ sk,
74445+#endif
74446 jiffies_to_clock_t(icsk->icsk_rto),
74447 jiffies_to_clock_t(icsk->icsk_ack.ato),
74448 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74449@@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74450 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74451 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74452 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74453- atomic_read(&tw->tw_refcnt), tw, len);
74454+ atomic_read(&tw->tw_refcnt),
74455+#ifdef CONFIG_GRKERNSEC_HIDESYM
74456+ NULL,
74457+#else
74458+ tw,
74459+#endif
74460+ len);
74461 }
74462
74463 #define TMPSZ 150
74464diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74465index 550e755..25721b3 100644
74466--- a/net/ipv4/tcp_minisocks.c
74467+++ b/net/ipv4/tcp_minisocks.c
74468@@ -27,6 +27,10 @@
74469 #include <net/inet_common.h>
74470 #include <net/xfrm.h>
74471
74472+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74473+extern int grsec_enable_blackhole;
74474+#endif
74475+
74476 int sysctl_tcp_syncookies __read_mostly = 1;
74477 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74478
74479@@ -753,6 +757,10 @@ listen_overflow:
74480
74481 embryonic_reset:
74482 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74483+
74484+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74485+ if (!grsec_enable_blackhole)
74486+#endif
74487 if (!(flg & TCP_FLAG_RST))
74488 req->rsk_ops->send_reset(sk, skb);
74489
74490diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74491index 85ee7eb..53277ab 100644
74492--- a/net/ipv4/tcp_probe.c
74493+++ b/net/ipv4/tcp_probe.c
74494@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74495 if (cnt + width >= len)
74496 break;
74497
74498- if (copy_to_user(buf + cnt, tbuf, width))
74499+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74500 return -EFAULT;
74501 cnt += width;
74502 }
74503diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74504index cd2e072..1fffee2 100644
74505--- a/net/ipv4/tcp_timer.c
74506+++ b/net/ipv4/tcp_timer.c
74507@@ -22,6 +22,10 @@
74508 #include <linux/gfp.h>
74509 #include <net/tcp.h>
74510
74511+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74512+extern int grsec_lastack_retries;
74513+#endif
74514+
74515 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74516 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74517 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74518@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74519 }
74520 }
74521
74522+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74523+ if ((sk->sk_state == TCP_LAST_ACK) &&
74524+ (grsec_lastack_retries > 0) &&
74525+ (grsec_lastack_retries < retry_until))
74526+ retry_until = grsec_lastack_retries;
74527+#endif
74528+
74529 if (retransmits_timed_out(sk, retry_until,
74530 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74531 /* Has it gone just too far? */
74532diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74533index 5d075b5..d907d5f 100644
74534--- a/net/ipv4/udp.c
74535+++ b/net/ipv4/udp.c
74536@@ -86,6 +86,7 @@
74537 #include <linux/types.h>
74538 #include <linux/fcntl.h>
74539 #include <linux/module.h>
74540+#include <linux/security.h>
74541 #include <linux/socket.h>
74542 #include <linux/sockios.h>
74543 #include <linux/igmp.h>
74544@@ -108,6 +109,10 @@
74545 #include <trace/events/udp.h>
74546 #include "udp_impl.h"
74547
74548+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74549+extern int grsec_enable_blackhole;
74550+#endif
74551+
74552 struct udp_table udp_table __read_mostly;
74553 EXPORT_SYMBOL(udp_table);
74554
74555@@ -566,6 +571,9 @@ found:
74556 return s;
74557 }
74558
74559+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74560+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74561+
74562 /*
74563 * This routine is called by the ICMP module when it gets some
74564 * sort of error condition. If err < 0 then the socket should
74565@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74566 dport = usin->sin_port;
74567 if (dport == 0)
74568 return -EINVAL;
74569+
74570+ err = gr_search_udp_sendmsg(sk, usin);
74571+ if (err)
74572+ return err;
74573 } else {
74574 if (sk->sk_state != TCP_ESTABLISHED)
74575 return -EDESTADDRREQ;
74576+
74577+ err = gr_search_udp_sendmsg(sk, NULL);
74578+ if (err)
74579+ return err;
74580+
74581 daddr = inet->inet_daddr;
74582 dport = inet->inet_dport;
74583 /* Open fast path for connected socket.
74584@@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
74585 udp_lib_checksum_complete(skb)) {
74586 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74587 IS_UDPLITE(sk));
74588- atomic_inc(&sk->sk_drops);
74589+ atomic_inc_unchecked(&sk->sk_drops);
74590 __skb_unlink(skb, rcvq);
74591 __skb_queue_tail(&list_kill, skb);
74592 }
74593@@ -1186,6 +1203,10 @@ try_again:
74594 if (!skb)
74595 goto out;
74596
74597+ err = gr_search_udp_recvmsg(sk, skb);
74598+ if (err)
74599+ goto out_free;
74600+
74601 ulen = skb->len - sizeof(struct udphdr);
74602 copied = len;
74603 if (copied > ulen)
74604@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74605
74606 drop:
74607 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74608- atomic_inc(&sk->sk_drops);
74609+ atomic_inc_unchecked(&sk->sk_drops);
74610 kfree_skb(skb);
74611 return -1;
74612 }
74613@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74614 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74615
74616 if (!skb1) {
74617- atomic_inc(&sk->sk_drops);
74618+ atomic_inc_unchecked(&sk->sk_drops);
74619 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74620 IS_UDPLITE(sk));
74621 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74622@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74623 goto csum_error;
74624
74625 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74626+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74627+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74628+#endif
74629 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74630
74631 /*
74632@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74633 sk_wmem_alloc_get(sp),
74634 sk_rmem_alloc_get(sp),
74635 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74636- atomic_read(&sp->sk_refcnt), sp,
74637- atomic_read(&sp->sk_drops), len);
74638+ atomic_read(&sp->sk_refcnt),
74639+#ifdef CONFIG_GRKERNSEC_HIDESYM
74640+ NULL,
74641+#else
74642+ sp,
74643+#endif
74644+ atomic_read_unchecked(&sp->sk_drops), len);
74645 }
74646
74647 int udp4_seq_show(struct seq_file *seq, void *v)
74648diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74649index 6b8ebc5..1d624f4 100644
74650--- a/net/ipv6/addrconf.c
74651+++ b/net/ipv6/addrconf.c
74652@@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74653 p.iph.ihl = 5;
74654 p.iph.protocol = IPPROTO_IPV6;
74655 p.iph.ttl = 64;
74656- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74657+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74658
74659 if (ops->ndo_do_ioctl) {
74660 mm_segment_t oldfs = get_fs();
74661diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
74662index 02dd203..e03fcc9 100644
74663--- a/net/ipv6/inet6_connection_sock.c
74664+++ b/net/ipv6/inet6_connection_sock.c
74665@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
74666 #ifdef CONFIG_XFRM
74667 {
74668 struct rt6_info *rt = (struct rt6_info *)dst;
74669- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
74670+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
74671 }
74672 #endif
74673 }
74674@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
74675 #ifdef CONFIG_XFRM
74676 if (dst) {
74677 struct rt6_info *rt = (struct rt6_info *)dst;
74678- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
74679+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
74680 __sk_dst_reset(sk);
74681 dst = NULL;
74682 }
74683diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
74684index 18a2719..779f36a 100644
74685--- a/net/ipv6/ipv6_sockglue.c
74686+++ b/net/ipv6/ipv6_sockglue.c
74687@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
74688 if (sk->sk_type != SOCK_STREAM)
74689 return -ENOPROTOOPT;
74690
74691- msg.msg_control = optval;
74692+ msg.msg_control = (void __force_kernel *)optval;
74693 msg.msg_controllen = len;
74694 msg.msg_flags = flags;
74695
74696diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
74697index d02f7e4..2d2a0f1 100644
74698--- a/net/ipv6/raw.c
74699+++ b/net/ipv6/raw.c
74700@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
74701 {
74702 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
74703 skb_checksum_complete(skb)) {
74704- atomic_inc(&sk->sk_drops);
74705+ atomic_inc_unchecked(&sk->sk_drops);
74706 kfree_skb(skb);
74707 return NET_RX_DROP;
74708 }
74709@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74710 struct raw6_sock *rp = raw6_sk(sk);
74711
74712 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
74713- atomic_inc(&sk->sk_drops);
74714+ atomic_inc_unchecked(&sk->sk_drops);
74715 kfree_skb(skb);
74716 return NET_RX_DROP;
74717 }
74718@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74719
74720 if (inet->hdrincl) {
74721 if (skb_checksum_complete(skb)) {
74722- atomic_inc(&sk->sk_drops);
74723+ atomic_inc_unchecked(&sk->sk_drops);
74724 kfree_skb(skb);
74725 return NET_RX_DROP;
74726 }
74727@@ -602,7 +602,7 @@ out:
74728 return err;
74729 }
74730
74731-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
74732+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
74733 struct flowi6 *fl6, struct dst_entry **dstp,
74734 unsigned int flags)
74735 {
74736@@ -912,12 +912,15 @@ do_confirm:
74737 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
74738 char __user *optval, int optlen)
74739 {
74740+ struct icmp6_filter filter;
74741+
74742 switch (optname) {
74743 case ICMPV6_FILTER:
74744 if (optlen > sizeof(struct icmp6_filter))
74745 optlen = sizeof(struct icmp6_filter);
74746- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
74747+ if (copy_from_user(&filter, optval, optlen))
74748 return -EFAULT;
74749+ raw6_sk(sk)->filter = filter;
74750 return 0;
74751 default:
74752 return -ENOPROTOOPT;
74753@@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74754 char __user *optval, int __user *optlen)
74755 {
74756 int len;
74757+ struct icmp6_filter filter;
74758
74759 switch (optname) {
74760 case ICMPV6_FILTER:
74761@@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74762 len = sizeof(struct icmp6_filter);
74763 if (put_user(len, optlen))
74764 return -EFAULT;
74765- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
74766+ filter = raw6_sk(sk)->filter;
74767+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
74768 return -EFAULT;
74769 return 0;
74770 default:
74771@@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74772 0, 0L, 0,
74773 sock_i_uid(sp), 0,
74774 sock_i_ino(sp),
74775- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74776+ atomic_read(&sp->sk_refcnt),
74777+#ifdef CONFIG_GRKERNSEC_HIDESYM
74778+ NULL,
74779+#else
74780+ sp,
74781+#endif
74782+ atomic_read_unchecked(&sp->sk_drops));
74783 }
74784
74785 static int raw6_seq_show(struct seq_file *seq, void *v)
74786diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
74787index 3edd05a..63aad01 100644
74788--- a/net/ipv6/tcp_ipv6.c
74789+++ b/net/ipv6/tcp_ipv6.c
74790@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
74791 }
74792 #endif
74793
74794+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74795+extern int grsec_enable_blackhole;
74796+#endif
74797+
74798 static void tcp_v6_hash(struct sock *sk)
74799 {
74800 if (sk->sk_state != TCP_CLOSE) {
74801@@ -1650,6 +1654,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
74802 return 0;
74803
74804 reset:
74805+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74806+ if (!grsec_enable_blackhole)
74807+#endif
74808 tcp_v6_send_reset(sk, skb);
74809 discard:
74810 if (opt_skb)
74811@@ -1729,12 +1736,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
74812 TCP_SKB_CB(skb)->sacked = 0;
74813
74814 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74815- if (!sk)
74816+ if (!sk) {
74817+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74818+ ret = 1;
74819+#endif
74820 goto no_tcp_socket;
74821+ }
74822
74823 process:
74824- if (sk->sk_state == TCP_TIME_WAIT)
74825+ if (sk->sk_state == TCP_TIME_WAIT) {
74826+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74827+ ret = 2;
74828+#endif
74829 goto do_time_wait;
74830+ }
74831
74832 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
74833 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74834@@ -1782,6 +1797,10 @@ no_tcp_socket:
74835 bad_packet:
74836 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74837 } else {
74838+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74839+ if (!grsec_enable_blackhole || (ret == 1 &&
74840+ (skb->dev->flags & IFF_LOOPBACK)))
74841+#endif
74842 tcp_v6_send_reset(NULL, skb);
74843 }
74844
74845@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
74846 uid,
74847 0, /* non standard timer */
74848 0, /* open_requests have no inode */
74849- 0, req);
74850+ 0,
74851+#ifdef CONFIG_GRKERNSEC_HIDESYM
74852+ NULL
74853+#else
74854+ req
74855+#endif
74856+ );
74857 }
74858
74859 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74860@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74861 sock_i_uid(sp),
74862 icsk->icsk_probes_out,
74863 sock_i_ino(sp),
74864- atomic_read(&sp->sk_refcnt), sp,
74865+ atomic_read(&sp->sk_refcnt),
74866+#ifdef CONFIG_GRKERNSEC_HIDESYM
74867+ NULL,
74868+#else
74869+ sp,
74870+#endif
74871 jiffies_to_clock_t(icsk->icsk_rto),
74872 jiffies_to_clock_t(icsk->icsk_ack.ato),
74873 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
74874@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
74875 dest->s6_addr32[2], dest->s6_addr32[3], destp,
74876 tw->tw_substate, 0, 0,
74877 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74878- atomic_read(&tw->tw_refcnt), tw);
74879+ atomic_read(&tw->tw_refcnt),
74880+#ifdef CONFIG_GRKERNSEC_HIDESYM
74881+ NULL
74882+#else
74883+ tw
74884+#endif
74885+ );
74886 }
74887
74888 static int tcp6_seq_show(struct seq_file *seq, void *v)
74889diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
74890index 4f96b5c..75543d7 100644
74891--- a/net/ipv6/udp.c
74892+++ b/net/ipv6/udp.c
74893@@ -50,6 +50,10 @@
74894 #include <linux/seq_file.h>
74895 #include "udp_impl.h"
74896
74897+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74898+extern int grsec_enable_blackhole;
74899+#endif
74900+
74901 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
74902 {
74903 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
74904@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
74905
74906 return 0;
74907 drop:
74908- atomic_inc(&sk->sk_drops);
74909+ atomic_inc_unchecked(&sk->sk_drops);
74910 drop_no_sk_drops_inc:
74911 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74912 kfree_skb(skb);
74913@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74914 continue;
74915 }
74916 drop:
74917- atomic_inc(&sk->sk_drops);
74918+ atomic_inc_unchecked(&sk->sk_drops);
74919 UDP6_INC_STATS_BH(sock_net(sk),
74920 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
74921 UDP6_INC_STATS_BH(sock_net(sk),
74922@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74923 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
74924 proto == IPPROTO_UDPLITE);
74925
74926+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74927+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74928+#endif
74929 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
74930
74931 kfree_skb(skb);
74932@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74933 if (!sock_owned_by_user(sk))
74934 udpv6_queue_rcv_skb(sk, skb);
74935 else if (sk_add_backlog(sk, skb)) {
74936- atomic_inc(&sk->sk_drops);
74937+ atomic_inc_unchecked(&sk->sk_drops);
74938 bh_unlock_sock(sk);
74939 sock_put(sk);
74940 goto discard;
74941@@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
74942 0, 0L, 0,
74943 sock_i_uid(sp), 0,
74944 sock_i_ino(sp),
74945- atomic_read(&sp->sk_refcnt), sp,
74946- atomic_read(&sp->sk_drops));
74947+ atomic_read(&sp->sk_refcnt),
74948+#ifdef CONFIG_GRKERNSEC_HIDESYM
74949+ NULL,
74950+#else
74951+ sp,
74952+#endif
74953+ atomic_read_unchecked(&sp->sk_drops));
74954 }
74955
74956 int udp6_seq_show(struct seq_file *seq, void *v)
74957diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
74958index 253695d..9481ce8 100644
74959--- a/net/irda/ircomm/ircomm_tty.c
74960+++ b/net/irda/ircomm/ircomm_tty.c
74961@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74962 add_wait_queue(&self->open_wait, &wait);
74963
74964 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
74965- __FILE__,__LINE__, tty->driver->name, self->open_count );
74966+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74967
74968 /* As far as I can see, we protect open_count - Jean II */
74969 spin_lock_irqsave(&self->spinlock, flags);
74970 if (!tty_hung_up_p(filp)) {
74971 extra_count = 1;
74972- self->open_count--;
74973+ local_dec(&self->open_count);
74974 }
74975 spin_unlock_irqrestore(&self->spinlock, flags);
74976- self->blocked_open++;
74977+ local_inc(&self->blocked_open);
74978
74979 while (1) {
74980 if (tty->termios->c_cflag & CBAUD) {
74981@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74982 }
74983
74984 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
74985- __FILE__,__LINE__, tty->driver->name, self->open_count );
74986+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74987
74988 schedule();
74989 }
74990@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74991 if (extra_count) {
74992 /* ++ is not atomic, so this should be protected - Jean II */
74993 spin_lock_irqsave(&self->spinlock, flags);
74994- self->open_count++;
74995+ local_inc(&self->open_count);
74996 spin_unlock_irqrestore(&self->spinlock, flags);
74997 }
74998- self->blocked_open--;
74999+ local_dec(&self->blocked_open);
75000
75001 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75002- __FILE__,__LINE__, tty->driver->name, self->open_count);
75003+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75004
75005 if (!retval)
75006 self->flags |= ASYNC_NORMAL_ACTIVE;
75007@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75008 }
75009 /* ++ is not atomic, so this should be protected - Jean II */
75010 spin_lock_irqsave(&self->spinlock, flags);
75011- self->open_count++;
75012+ local_inc(&self->open_count);
75013
75014 tty->driver_data = self;
75015 self->tty = tty;
75016 spin_unlock_irqrestore(&self->spinlock, flags);
75017
75018 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75019- self->line, self->open_count);
75020+ self->line, local_read(&self->open_count));
75021
75022 /* Not really used by us, but lets do it anyway */
75023 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75024@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75025 return;
75026 }
75027
75028- if ((tty->count == 1) && (self->open_count != 1)) {
75029+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75030 /*
75031 * Uh, oh. tty->count is 1, which means that the tty
75032 * structure will be freed. state->count should always
75033@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75034 */
75035 IRDA_DEBUG(0, "%s(), bad serial port count; "
75036 "tty->count is 1, state->count is %d\n", __func__ ,
75037- self->open_count);
75038- self->open_count = 1;
75039+ local_read(&self->open_count));
75040+ local_set(&self->open_count, 1);
75041 }
75042
75043- if (--self->open_count < 0) {
75044+ if (local_dec_return(&self->open_count) < 0) {
75045 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75046- __func__, self->line, self->open_count);
75047- self->open_count = 0;
75048+ __func__, self->line, local_read(&self->open_count));
75049+ local_set(&self->open_count, 0);
75050 }
75051- if (self->open_count) {
75052+ if (local_read(&self->open_count)) {
75053 spin_unlock_irqrestore(&self->spinlock, flags);
75054
75055 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75056@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75057 tty->closing = 0;
75058 self->tty = NULL;
75059
75060- if (self->blocked_open) {
75061+ if (local_read(&self->blocked_open)) {
75062 if (self->close_delay)
75063 schedule_timeout_interruptible(self->close_delay);
75064 wake_up_interruptible(&self->open_wait);
75065@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75066 spin_lock_irqsave(&self->spinlock, flags);
75067 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75068 self->tty = NULL;
75069- self->open_count = 0;
75070+ local_set(&self->open_count, 0);
75071 spin_unlock_irqrestore(&self->spinlock, flags);
75072
75073 wake_up_interruptible(&self->open_wait);
75074@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75075 seq_putc(m, '\n');
75076
75077 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75078- seq_printf(m, "Open count: %d\n", self->open_count);
75079+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75080 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75081 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75082
75083diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75084index d5c5b8f..33beff0 100644
75085--- a/net/iucv/af_iucv.c
75086+++ b/net/iucv/af_iucv.c
75087@@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
75088
75089 write_lock_bh(&iucv_sk_list.lock);
75090
75091- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75092+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75093 while (__iucv_get_sock_by_name(name)) {
75094 sprintf(name, "%08x",
75095- atomic_inc_return(&iucv_sk_list.autobind_name));
75096+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75097 }
75098
75099 write_unlock_bh(&iucv_sk_list.lock);
75100diff --git a/net/key/af_key.c b/net/key/af_key.c
75101index 11dbb22..c20f667 100644
75102--- a/net/key/af_key.c
75103+++ b/net/key/af_key.c
75104@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75105 static u32 get_acqseq(void)
75106 {
75107 u32 res;
75108- static atomic_t acqseq;
75109+ static atomic_unchecked_t acqseq;
75110
75111 do {
75112- res = atomic_inc_return(&acqseq);
75113+ res = atomic_inc_return_unchecked(&acqseq);
75114 } while (!res);
75115 return res;
75116 }
75117diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75118index 2f0642d..e5c6fba 100644
75119--- a/net/mac80211/ieee80211_i.h
75120+++ b/net/mac80211/ieee80211_i.h
75121@@ -28,6 +28,7 @@
75122 #include <net/ieee80211_radiotap.h>
75123 #include <net/cfg80211.h>
75124 #include <net/mac80211.h>
75125+#include <asm/local.h>
75126 #include "key.h"
75127 #include "sta_info.h"
75128
75129@@ -781,7 +782,7 @@ struct ieee80211_local {
75130 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75131 spinlock_t queue_stop_reason_lock;
75132
75133- int open_count;
75134+ local_t open_count;
75135 int monitors, cooked_mntrs;
75136 /* number of interfaces with corresponding FIF_ flags */
75137 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75138diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75139index 8e2137b..2974283 100644
75140--- a/net/mac80211/iface.c
75141+++ b/net/mac80211/iface.c
75142@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75143 break;
75144 }
75145
75146- if (local->open_count == 0) {
75147+ if (local_read(&local->open_count) == 0) {
75148 res = drv_start(local);
75149 if (res)
75150 goto err_del_bss;
75151@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75152 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75153
75154 if (!is_valid_ether_addr(dev->dev_addr)) {
75155- if (!local->open_count)
75156+ if (!local_read(&local->open_count))
75157 drv_stop(local);
75158 return -EADDRNOTAVAIL;
75159 }
75160@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75161 mutex_unlock(&local->mtx);
75162
75163 if (coming_up)
75164- local->open_count++;
75165+ local_inc(&local->open_count);
75166
75167 if (hw_reconf_flags)
75168 ieee80211_hw_config(local, hw_reconf_flags);
75169@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75170 err_del_interface:
75171 drv_remove_interface(local, sdata);
75172 err_stop:
75173- if (!local->open_count)
75174+ if (!local_read(&local->open_count))
75175 drv_stop(local);
75176 err_del_bss:
75177 sdata->bss = NULL;
75178@@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75179 }
75180
75181 if (going_down)
75182- local->open_count--;
75183+ local_dec(&local->open_count);
75184
75185 switch (sdata->vif.type) {
75186 case NL80211_IFTYPE_AP_VLAN:
75187@@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75188
75189 ieee80211_recalc_ps(local, -1);
75190
75191- if (local->open_count == 0) {
75192+ if (local_read(&local->open_count) == 0) {
75193 if (local->ops->napi_poll)
75194 napi_disable(&local->napi);
75195 ieee80211_clear_tx_pending(local);
75196diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75197index b142bd4..a651749 100644
75198--- a/net/mac80211/main.c
75199+++ b/net/mac80211/main.c
75200@@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75201 local->hw.conf.power_level = power;
75202 }
75203
75204- if (changed && local->open_count) {
75205+ if (changed && local_read(&local->open_count)) {
75206 ret = drv_config(local, changed);
75207 /*
75208 * Goal:
75209diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75210index 596efaf..8f1911f 100644
75211--- a/net/mac80211/pm.c
75212+++ b/net/mac80211/pm.c
75213@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75214 struct ieee80211_sub_if_data *sdata;
75215 struct sta_info *sta;
75216
75217- if (!local->open_count)
75218+ if (!local_read(&local->open_count))
75219 goto suspend;
75220
75221 ieee80211_scan_cancel(local);
75222@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75223 cancel_work_sync(&local->dynamic_ps_enable_work);
75224 del_timer_sync(&local->dynamic_ps_timer);
75225
75226- local->wowlan = wowlan && local->open_count;
75227+ local->wowlan = wowlan && local_read(&local->open_count);
75228 if (local->wowlan) {
75229 int err = drv_suspend(local, wowlan);
75230 if (err < 0) {
75231@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75232 }
75233
75234 /* stop hardware - this must stop RX */
75235- if (local->open_count)
75236+ if (local_read(&local->open_count))
75237 ieee80211_stop_device(local);
75238
75239 suspend:
75240diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75241index f9b8e81..bb89b46 100644
75242--- a/net/mac80211/rate.c
75243+++ b/net/mac80211/rate.c
75244@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75245
75246 ASSERT_RTNL();
75247
75248- if (local->open_count)
75249+ if (local_read(&local->open_count))
75250 return -EBUSY;
75251
75252 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75253diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75254index c97a065..ff61928 100644
75255--- a/net/mac80211/rc80211_pid_debugfs.c
75256+++ b/net/mac80211/rc80211_pid_debugfs.c
75257@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75258
75259 spin_unlock_irqrestore(&events->lock, status);
75260
75261- if (copy_to_user(buf, pb, p))
75262+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75263 return -EFAULT;
75264
75265 return p;
75266diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75267index 9919892..8c49803 100644
75268--- a/net/mac80211/util.c
75269+++ b/net/mac80211/util.c
75270@@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75271 }
75272 #endif
75273 /* everything else happens only if HW was up & running */
75274- if (!local->open_count)
75275+ if (!local_read(&local->open_count))
75276 goto wake_up;
75277
75278 /*
75279diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75280index f8ac4ef..b02560b 100644
75281--- a/net/netfilter/Kconfig
75282+++ b/net/netfilter/Kconfig
75283@@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
75284
75285 To compile it as a module, choose M here. If unsure, say N.
75286
75287+config NETFILTER_XT_MATCH_GRADM
75288+ tristate '"gradm" match support'
75289+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75290+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75291+ ---help---
75292+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
75293+ It is useful when iptables rules are applied early on bootup to
75294+ prevent connections to the machine (except from a trusted host)
75295+ while the RBAC system is disabled.
75296+
75297 config NETFILTER_XT_MATCH_HASHLIMIT
75298 tristate '"hashlimit" match support'
75299 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75300diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75301index 40f4c3d..0d5dd6b 100644
75302--- a/net/netfilter/Makefile
75303+++ b/net/netfilter/Makefile
75304@@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75305 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75306 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75307 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75308+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75309 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75310 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75311 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75312diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75313index 29fa5ba..8debc79 100644
75314--- a/net/netfilter/ipvs/ip_vs_conn.c
75315+++ b/net/netfilter/ipvs/ip_vs_conn.c
75316@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75317 /* Increase the refcnt counter of the dest */
75318 atomic_inc(&dest->refcnt);
75319
75320- conn_flags = atomic_read(&dest->conn_flags);
75321+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
75322 if (cp->protocol != IPPROTO_UDP)
75323 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75324 /* Bind with the destination and its corresponding transmitter */
75325@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75326 atomic_set(&cp->refcnt, 1);
75327
75328 atomic_set(&cp->n_control, 0);
75329- atomic_set(&cp->in_pkts, 0);
75330+ atomic_set_unchecked(&cp->in_pkts, 0);
75331
75332 atomic_inc(&ipvs->conn_count);
75333 if (flags & IP_VS_CONN_F_NO_CPORT)
75334@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75335
75336 /* Don't drop the entry if its number of incoming packets is not
75337 located in [0, 8] */
75338- i = atomic_read(&cp->in_pkts);
75339+ i = atomic_read_unchecked(&cp->in_pkts);
75340 if (i > 8 || i < 0) return 0;
75341
75342 if (!todrop_rate[i]) return 0;
75343diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75344index 2555816..31492d9 100644
75345--- a/net/netfilter/ipvs/ip_vs_core.c
75346+++ b/net/netfilter/ipvs/ip_vs_core.c
75347@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75348 ret = cp->packet_xmit(skb, cp, pd->pp);
75349 /* do not touch skb anymore */
75350
75351- atomic_inc(&cp->in_pkts);
75352+ atomic_inc_unchecked(&cp->in_pkts);
75353 ip_vs_conn_put(cp);
75354 return ret;
75355 }
75356@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75357 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75358 pkts = sysctl_sync_threshold(ipvs);
75359 else
75360- pkts = atomic_add_return(1, &cp->in_pkts);
75361+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75362
75363 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75364 cp->protocol == IPPROTO_SCTP) {
75365diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75366index b3afe18..08ec940 100644
75367--- a/net/netfilter/ipvs/ip_vs_ctl.c
75368+++ b/net/netfilter/ipvs/ip_vs_ctl.c
75369@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75370 ip_vs_rs_hash(ipvs, dest);
75371 write_unlock_bh(&ipvs->rs_lock);
75372 }
75373- atomic_set(&dest->conn_flags, conn_flags);
75374+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
75375
75376 /* bind the service */
75377 if (!dest->svc) {
75378@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75379 " %-7s %-6d %-10d %-10d\n",
75380 &dest->addr.in6,
75381 ntohs(dest->port),
75382- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75383+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75384 atomic_read(&dest->weight),
75385 atomic_read(&dest->activeconns),
75386 atomic_read(&dest->inactconns));
75387@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75388 "%-7s %-6d %-10d %-10d\n",
75389 ntohl(dest->addr.ip),
75390 ntohs(dest->port),
75391- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75392+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75393 atomic_read(&dest->weight),
75394 atomic_read(&dest->activeconns),
75395 atomic_read(&dest->inactconns));
75396@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75397
75398 entry.addr = dest->addr.ip;
75399 entry.port = dest->port;
75400- entry.conn_flags = atomic_read(&dest->conn_flags);
75401+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75402 entry.weight = atomic_read(&dest->weight);
75403 entry.u_threshold = dest->u_threshold;
75404 entry.l_threshold = dest->l_threshold;
75405@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75406 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75407
75408 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75409- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75410+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75411 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75412 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75413 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75414diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75415index 8a0d6d6..90ec197 100644
75416--- a/net/netfilter/ipvs/ip_vs_sync.c
75417+++ b/net/netfilter/ipvs/ip_vs_sync.c
75418@@ -649,7 +649,7 @@ control:
75419 * i.e only increment in_pkts for Templates.
75420 */
75421 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75422- int pkts = atomic_add_return(1, &cp->in_pkts);
75423+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75424
75425 if (pkts % sysctl_sync_period(ipvs) != 1)
75426 return;
75427@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75428
75429 if (opt)
75430 memcpy(&cp->in_seq, opt, sizeof(*opt));
75431- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75432+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75433 cp->state = state;
75434 cp->old_state = cp->state;
75435 /*
75436diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75437index 7fd66de..e6fb361 100644
75438--- a/net/netfilter/ipvs/ip_vs_xmit.c
75439+++ b/net/netfilter/ipvs/ip_vs_xmit.c
75440@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75441 else
75442 rc = NF_ACCEPT;
75443 /* do not touch skb anymore */
75444- atomic_inc(&cp->in_pkts);
75445+ atomic_inc_unchecked(&cp->in_pkts);
75446 goto out;
75447 }
75448
75449@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75450 else
75451 rc = NF_ACCEPT;
75452 /* do not touch skb anymore */
75453- atomic_inc(&cp->in_pkts);
75454+ atomic_inc_unchecked(&cp->in_pkts);
75455 goto out;
75456 }
75457
75458diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75459index 66b2c54..c7884e3 100644
75460--- a/net/netfilter/nfnetlink_log.c
75461+++ b/net/netfilter/nfnetlink_log.c
75462@@ -70,7 +70,7 @@ struct nfulnl_instance {
75463 };
75464
75465 static DEFINE_SPINLOCK(instances_lock);
75466-static atomic_t global_seq;
75467+static atomic_unchecked_t global_seq;
75468
75469 #define INSTANCE_BUCKETS 16
75470 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75471@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75472 /* global sequence number */
75473 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75474 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75475- htonl(atomic_inc_return(&global_seq)));
75476+ htonl(atomic_inc_return_unchecked(&global_seq)));
75477
75478 if (data_len) {
75479 struct nlattr *nla;
75480diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75481new file mode 100644
75482index 0000000..6905327
75483--- /dev/null
75484+++ b/net/netfilter/xt_gradm.c
75485@@ -0,0 +1,51 @@
75486+/*
75487+ * gradm match for netfilter
75488