]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.1.10-201201201822.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.10-201201201822.patch
CommitLineData
47d7767f
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index d6e6724..a024ce8 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 7c8f52a..371cd76 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=. missing-syscalls
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1087,6 +1130,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1102,7 +1146,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,7 +1242,7 @@ distclean: mrproper
314 @find $(srctree) $(RCS_FIND_IGNORE) \
315 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
316 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
317- -o -name '.*.rej' -o -size 0 \
318+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
319 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
320 -type f -print | xargs rm -f
321
322@@ -1360,6 +1404,7 @@ PHONY += $(module-dirs) modules
323 $(module-dirs): crmodverdir $(objtree)/Module.symvers
324 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
325
326+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
327 modules: $(module-dirs)
328 @$(kecho) ' Building modules, stage 2.';
329 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
330@@ -1486,17 +1531,19 @@ else
331 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
332 endif
333
334-%.s: %.c prepare scripts FORCE
335+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
336+%.s: %.c gcc-plugins prepare scripts FORCE
337 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
338 %.i: %.c prepare scripts FORCE
339 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
340-%.o: %.c prepare scripts FORCE
341+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
342+%.o: %.c gcc-plugins prepare scripts FORCE
343 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
344 %.lst: %.c prepare scripts FORCE
345 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
346-%.s: %.S prepare scripts FORCE
347+%.s: %.S gcc-plugins prepare scripts FORCE
348 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
349-%.o: %.S prepare scripts FORCE
350+%.o: %.S gcc-plugins prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 %.symtypes: %.c prepare scripts FORCE
353 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
354@@ -1506,11 +1553,13 @@ endif
355 $(cmd_crmodverdir)
356 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
357 $(build)=$(build-dir)
358-%/: prepare scripts FORCE
359+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
360+%/: gcc-plugins prepare scripts FORCE
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364-%.ko: prepare scripts FORCE
365+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
366+%.ko: gcc-plugins prepare scripts FORCE
367 $(cmd_crmodverdir)
368 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
369 $(build)=$(build-dir) $(@:.ko=.o)
370diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
371index da5449e..7418343 100644
372--- a/arch/alpha/include/asm/elf.h
373+++ b/arch/alpha/include/asm/elf.h
374@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
375
376 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
377
378+#ifdef CONFIG_PAX_ASLR
379+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
380+
381+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
382+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
383+#endif
384+
385 /* $0 is set by ld.so to a pointer to a function which might be
386 registered using atexit. This provides a mean for the dynamic
387 linker to call DT_FINI functions for shared libraries that have
388diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
389index de98a73..bd4f1f8 100644
390--- a/arch/alpha/include/asm/pgtable.h
391+++ b/arch/alpha/include/asm/pgtable.h
392@@ -101,6 +101,17 @@ struct vm_area_struct;
393 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
394 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
396+
397+#ifdef CONFIG_PAX_PAGEEXEC
398+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
399+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
401+#else
402+# define PAGE_SHARED_NOEXEC PAGE_SHARED
403+# define PAGE_COPY_NOEXEC PAGE_COPY
404+# define PAGE_READONLY_NOEXEC PAGE_READONLY
405+#endif
406+
407 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
408
409 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
410diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
411index 2fd00b7..cfd5069 100644
412--- a/arch/alpha/kernel/module.c
413+++ b/arch/alpha/kernel/module.c
414@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
415
416 /* The small sections were sorted to the end of the segment.
417 The following should definitely cover them. */
418- gp = (u64)me->module_core + me->core_size - 0x8000;
419+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
420 got = sechdrs[me->arch.gotsecindex].sh_addr;
421
422 for (i = 0; i < n; i++) {
423diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
424index 01e8715..be0e80f 100644
425--- a/arch/alpha/kernel/osf_sys.c
426+++ b/arch/alpha/kernel/osf_sys.c
427@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
428 /* At this point: (!vma || addr < vma->vm_end). */
429 if (limit - len < addr)
430 return -ENOMEM;
431- if (!vma || addr + len <= vma->vm_start)
432+ if (check_heap_stack_gap(vma, addr, len))
433 return addr;
434 addr = vma->vm_end;
435 vma = vma->vm_next;
436@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
437 merely specific addresses, but regions of memory -- perhaps
438 this feature should be incorporated into all ports? */
439
440+#ifdef CONFIG_PAX_RANDMMAP
441+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
442+#endif
443+
444 if (addr) {
445 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
446 if (addr != (unsigned long) -ENOMEM)
447@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
448 }
449
450 /* Next, try allocating at TASK_UNMAPPED_BASE. */
451- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
452- len, limit);
453+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
454+
455 if (addr != (unsigned long) -ENOMEM)
456 return addr;
457
458diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
459index fadd5f8..904e73a 100644
460--- a/arch/alpha/mm/fault.c
461+++ b/arch/alpha/mm/fault.c
462@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
463 __reload_thread(pcb);
464 }
465
466+#ifdef CONFIG_PAX_PAGEEXEC
467+/*
468+ * PaX: decide what to do with offenders (regs->pc = fault address)
469+ *
470+ * returns 1 when task should be killed
471+ * 2 when patched PLT trampoline was detected
472+ * 3 when unpatched PLT trampoline was detected
473+ */
474+static int pax_handle_fetch_fault(struct pt_regs *regs)
475+{
476+
477+#ifdef CONFIG_PAX_EMUPLT
478+ int err;
479+
480+ do { /* PaX: patched PLT emulation #1 */
481+ unsigned int ldah, ldq, jmp;
482+
483+ err = get_user(ldah, (unsigned int *)regs->pc);
484+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
485+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
486+
487+ if (err)
488+ break;
489+
490+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
491+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
492+ jmp == 0x6BFB0000U)
493+ {
494+ unsigned long r27, addr;
495+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
496+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
497+
498+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
499+ err = get_user(r27, (unsigned long *)addr);
500+ if (err)
501+ break;
502+
503+ regs->r27 = r27;
504+ regs->pc = r27;
505+ return 2;
506+ }
507+ } while (0);
508+
509+ do { /* PaX: patched PLT emulation #2 */
510+ unsigned int ldah, lda, br;
511+
512+ err = get_user(ldah, (unsigned int *)regs->pc);
513+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
514+ err |= get_user(br, (unsigned int *)(regs->pc+8));
515+
516+ if (err)
517+ break;
518+
519+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
521+ (br & 0xFFE00000U) == 0xC3E00000U)
522+ {
523+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
524+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
526+
527+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
529+ return 2;
530+ }
531+ } while (0);
532+
533+ do { /* PaX: unpatched PLT emulation */
534+ unsigned int br;
535+
536+ err = get_user(br, (unsigned int *)regs->pc);
537+
538+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
539+ unsigned int br2, ldq, nop, jmp;
540+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
541+
542+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
543+ err = get_user(br2, (unsigned int *)addr);
544+ err |= get_user(ldq, (unsigned int *)(addr+4));
545+ err |= get_user(nop, (unsigned int *)(addr+8));
546+ err |= get_user(jmp, (unsigned int *)(addr+12));
547+ err |= get_user(resolver, (unsigned long *)(addr+16));
548+
549+ if (err)
550+ break;
551+
552+ if (br2 == 0xC3600000U &&
553+ ldq == 0xA77B000CU &&
554+ nop == 0x47FF041FU &&
555+ jmp == 0x6B7B0000U)
556+ {
557+ regs->r28 = regs->pc+4;
558+ regs->r27 = addr+16;
559+ regs->pc = resolver;
560+ return 3;
561+ }
562+ }
563+ } while (0);
564+#endif
565+
566+ return 1;
567+}
568+
569+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
570+{
571+ unsigned long i;
572+
573+ printk(KERN_ERR "PAX: bytes at PC: ");
574+ for (i = 0; i < 5; i++) {
575+ unsigned int c;
576+ if (get_user(c, (unsigned int *)pc+i))
577+ printk(KERN_CONT "???????? ");
578+ else
579+ printk(KERN_CONT "%08x ", c);
580+ }
581+ printk("\n");
582+}
583+#endif
584
585 /*
586 * This routine handles page faults. It determines the address,
587@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
588 good_area:
589 si_code = SEGV_ACCERR;
590 if (cause < 0) {
591- if (!(vma->vm_flags & VM_EXEC))
592+ if (!(vma->vm_flags & VM_EXEC)) {
593+
594+#ifdef CONFIG_PAX_PAGEEXEC
595+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
596+ goto bad_area;
597+
598+ up_read(&mm->mmap_sem);
599+ switch (pax_handle_fetch_fault(regs)) {
600+
601+#ifdef CONFIG_PAX_EMUPLT
602+ case 2:
603+ case 3:
604+ return;
605+#endif
606+
607+ }
608+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
609+ do_group_exit(SIGKILL);
610+#else
611 goto bad_area;
612+#endif
613+
614+ }
615 } else if (!cause) {
616 /* Allow reads even for write-only mappings */
617 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
618diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
619index 86976d0..8a57797 100644
620--- a/arch/arm/include/asm/atomic.h
621+++ b/arch/arm/include/asm/atomic.h
622@@ -239,6 +239,14 @@ typedef struct {
623 u64 __aligned(8) counter;
624 } atomic64_t;
625
626+#ifdef CONFIG_PAX_REFCOUNT
627+typedef struct {
628+ u64 __aligned(8) counter;
629+} atomic64_unchecked_t;
630+#else
631+typedef atomic64_t atomic64_unchecked_t;
632+#endif
633+
634 #define ATOMIC64_INIT(i) { (i) }
635
636 static inline u64 atomic64_read(atomic64_t *v)
637diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
638index 0e9ce8d..6ef1e03 100644
639--- a/arch/arm/include/asm/elf.h
640+++ b/arch/arm/include/asm/elf.h
641@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647+
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
650+
651+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
653+#endif
654
655 /* When the program starts, a1 contains a pointer to a function to be
656 registered with atexit, as per the SVR4 ABI. A value of 0 means we
657@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
658 extern void elf_set_personality(const struct elf32_hdr *);
659 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
660
661-struct mm_struct;
662-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
663-#define arch_randomize_brk arch_randomize_brk
664-
665 extern int vectors_user_mapping(void);
666 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
667 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
668diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
669index e51b1e8..32a3113 100644
670--- a/arch/arm/include/asm/kmap_types.h
671+++ b/arch/arm/include/asm/kmap_types.h
672@@ -21,6 +21,7 @@ enum km_type {
673 KM_L1_CACHE,
674 KM_L2_CACHE,
675 KM_KDB,
676+ KM_CLEARPAGE,
677 KM_TYPE_NR
678 };
679
680diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
681index b293616..96310e5 100644
682--- a/arch/arm/include/asm/uaccess.h
683+++ b/arch/arm/include/asm/uaccess.h
684@@ -22,6 +22,8 @@
685 #define VERIFY_READ 0
686 #define VERIFY_WRITE 1
687
688+extern void check_object_size(const void *ptr, unsigned long n, bool to);
689+
690 /*
691 * The exception table consists of pairs of addresses: the first is the
692 * address of an instruction that is allowed to fault, and the second is
693@@ -387,8 +389,23 @@ do { \
694
695
696 #ifdef CONFIG_MMU
697-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
698-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
699+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
700+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
701+
702+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
703+{
704+ if (!__builtin_constant_p(n))
705+ check_object_size(to, n, false);
706+ return ___copy_from_user(to, from, n);
707+}
708+
709+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
710+{
711+ if (!__builtin_constant_p(n))
712+ check_object_size(from, n, true);
713+ return ___copy_to_user(to, from, n);
714+}
715+
716 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
717 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
718 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
719@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
720
721 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
722 {
723+ if ((long)n < 0)
724+ return n;
725+
726 if (access_ok(VERIFY_READ, from, n))
727 n = __copy_from_user(to, from, n);
728 else /* security hole - plug it */
729@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
730
731 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
732 {
733+ if ((long)n < 0)
734+ return n;
735+
736 if (access_ok(VERIFY_WRITE, to, n))
737 n = __copy_to_user(to, from, n);
738 return n;
739diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
740index aeef960..2966009 100644
741--- a/arch/arm/kernel/armksyms.c
742+++ b/arch/arm/kernel/armksyms.c
743@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
744 #ifdef CONFIG_MMU
745 EXPORT_SYMBOL(copy_page);
746
747-EXPORT_SYMBOL(__copy_from_user);
748-EXPORT_SYMBOL(__copy_to_user);
749+EXPORT_SYMBOL(___copy_from_user);
750+EXPORT_SYMBOL(___copy_to_user);
751 EXPORT_SYMBOL(__clear_user);
752
753 EXPORT_SYMBOL(__get_user_1);
754diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
755index c9d11ea..5078081 100644
756--- a/arch/arm/kernel/process.c
757+++ b/arch/arm/kernel/process.c
758@@ -28,7 +28,6 @@
759 #include <linux/tick.h>
760 #include <linux/utsname.h>
761 #include <linux/uaccess.h>
762-#include <linux/random.h>
763 #include <linux/hw_breakpoint.h>
764 #include <linux/cpuidle.h>
765
766@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
767 return 0;
768 }
769
770-unsigned long arch_randomize_brk(struct mm_struct *mm)
771-{
772- unsigned long range_end = mm->brk + 0x02000000;
773- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
774-}
775-
776 #ifdef CONFIG_MMU
777 /*
778 * The vectors page is always readable from user space for the
779diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
780index bc9f9da..c75d826 100644
781--- a/arch/arm/kernel/traps.c
782+++ b/arch/arm/kernel/traps.c
783@@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
784
785 static DEFINE_SPINLOCK(die_lock);
786
787+extern void gr_handle_kernel_exploit(void);
788+
789 /*
790 * This function is protected against re-entrancy.
791 */
792@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err)
793 panic("Fatal exception in interrupt");
794 if (panic_on_oops)
795 panic("Fatal exception");
796+
797+ gr_handle_kernel_exploit();
798+
799 if (ret != NOTIFY_STOP)
800 do_exit(SIGSEGV);
801 }
802diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
803index 66a477a..bee61d3 100644
804--- a/arch/arm/lib/copy_from_user.S
805+++ b/arch/arm/lib/copy_from_user.S
806@@ -16,7 +16,7 @@
807 /*
808 * Prototype:
809 *
810- * size_t __copy_from_user(void *to, const void *from, size_t n)
811+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
812 *
813 * Purpose:
814 *
815@@ -84,11 +84,11 @@
816
817 .text
818
819-ENTRY(__copy_from_user)
820+ENTRY(___copy_from_user)
821
822 #include "copy_template.S"
823
824-ENDPROC(__copy_from_user)
825+ENDPROC(___copy_from_user)
826
827 .pushsection .fixup,"ax"
828 .align 0
829diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
830index d066df6..df28194 100644
831--- a/arch/arm/lib/copy_to_user.S
832+++ b/arch/arm/lib/copy_to_user.S
833@@ -16,7 +16,7 @@
834 /*
835 * Prototype:
836 *
837- * size_t __copy_to_user(void *to, const void *from, size_t n)
838+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
839 *
840 * Purpose:
841 *
842@@ -88,11 +88,11 @@
843 .text
844
845 ENTRY(__copy_to_user_std)
846-WEAK(__copy_to_user)
847+WEAK(___copy_to_user)
848
849 #include "copy_template.S"
850
851-ENDPROC(__copy_to_user)
852+ENDPROC(___copy_to_user)
853 ENDPROC(__copy_to_user_std)
854
855 .pushsection .fixup,"ax"
856diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
857index d0ece2a..5ae2f39 100644
858--- a/arch/arm/lib/uaccess.S
859+++ b/arch/arm/lib/uaccess.S
860@@ -20,7 +20,7 @@
861
862 #define PAGE_SHIFT 12
863
864-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
865+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
866 * Purpose : copy a block to user memory from kernel memory
867 * Params : to - user memory
868 * : from - kernel memory
869@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
870 sub r2, r2, ip
871 b .Lc2u_dest_aligned
872
873-ENTRY(__copy_to_user)
874+ENTRY(___copy_to_user)
875 stmfd sp!, {r2, r4 - r7, lr}
876 cmp r2, #4
877 blt .Lc2u_not_enough
878@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
879 ldrgtb r3, [r1], #0
880 USER( T(strgtb) r3, [r0], #1) @ May fault
881 b .Lc2u_finished
882-ENDPROC(__copy_to_user)
883+ENDPROC(___copy_to_user)
884
885 .pushsection .fixup,"ax"
886 .align 0
887 9001: ldmfd sp!, {r0, r4 - r7, pc}
888 .popsection
889
890-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
891+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
892 * Purpose : copy a block from user memory to kernel memory
893 * Params : to - kernel memory
894 * : from - user memory
895@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
896 sub r2, r2, ip
897 b .Lcfu_dest_aligned
898
899-ENTRY(__copy_from_user)
900+ENTRY(___copy_from_user)
901 stmfd sp!, {r0, r2, r4 - r7, lr}
902 cmp r2, #4
903 blt .Lcfu_not_enough
904@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
905 USER( T(ldrgtb) r3, [r1], #1) @ May fault
906 strgtb r3, [r0], #1
907 b .Lcfu_finished
908-ENDPROC(__copy_from_user)
909+ENDPROC(___copy_from_user)
910
911 .pushsection .fixup,"ax"
912 .align 0
913diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
914index 8b9b136..70d5100 100644
915--- a/arch/arm/lib/uaccess_with_memcpy.c
916+++ b/arch/arm/lib/uaccess_with_memcpy.c
917@@ -103,7 +103,7 @@ out:
918 }
919
920 unsigned long
921-__copy_to_user(void __user *to, const void *from, unsigned long n)
922+___copy_to_user(void __user *to, const void *from, unsigned long n)
923 {
924 /*
925 * This test is stubbed out of the main function above to keep
926diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
927index 2b2d51c..0127490 100644
928--- a/arch/arm/mach-ux500/mbox-db5500.c
929+++ b/arch/arm/mach-ux500/mbox-db5500.c
930@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
931 return sprintf(buf, "0x%X\n", mbox_value);
932 }
933
934-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
936
937 static int mbox_show(struct seq_file *s, void *data)
938 {
939diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
940index 3b5ea68..42fc9af 100644
941--- a/arch/arm/mm/fault.c
942+++ b/arch/arm/mm/fault.c
943@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
944 }
945 #endif
946
947+#ifdef CONFIG_PAX_PAGEEXEC
948+ if (fsr & FSR_LNX_PF) {
949+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
950+ do_group_exit(SIGKILL);
951+ }
952+#endif
953+
954 tsk->thread.address = addr;
955 tsk->thread.error_code = fsr;
956 tsk->thread.trap_no = 14;
957@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
958 }
959 #endif /* CONFIG_MMU */
960
961+#ifdef CONFIG_PAX_PAGEEXEC
962+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
963+{
964+ long i;
965+
966+ printk(KERN_ERR "PAX: bytes at PC: ");
967+ for (i = 0; i < 20; i++) {
968+ unsigned char c;
969+ if (get_user(c, (__force unsigned char __user *)pc+i))
970+ printk(KERN_CONT "?? ");
971+ else
972+ printk(KERN_CONT "%02x ", c);
973+ }
974+ printk("\n");
975+
976+ printk(KERN_ERR "PAX: bytes at SP-4: ");
977+ for (i = -1; i < 20; i++) {
978+ unsigned long c;
979+ if (get_user(c, (__force unsigned long __user *)sp+i))
980+ printk(KERN_CONT "???????? ");
981+ else
982+ printk(KERN_CONT "%08lx ", c);
983+ }
984+ printk("\n");
985+}
986+#endif
987+
988 /*
989 * First Level Translation Fault Handler
990 *
991diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
992index 74be05f..f605b8c 100644
993--- a/arch/arm/mm/mmap.c
994+++ b/arch/arm/mm/mmap.c
995@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
996 if (len > TASK_SIZE)
997 return -ENOMEM;
998
999+#ifdef CONFIG_PAX_RANDMMAP
1000+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1001+#endif
1002+
1003 if (addr) {
1004 if (do_align)
1005 addr = COLOUR_ALIGN(addr, pgoff);
1006@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1007 addr = PAGE_ALIGN(addr);
1008
1009 vma = find_vma(mm, addr);
1010- if (TASK_SIZE - len >= addr &&
1011- (!vma || addr + len <= vma->vm_start))
1012+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1013 return addr;
1014 }
1015 if (len > mm->cached_hole_size) {
1016- start_addr = addr = mm->free_area_cache;
1017+ start_addr = addr = mm->free_area_cache;
1018 } else {
1019- start_addr = addr = TASK_UNMAPPED_BASE;
1020- mm->cached_hole_size = 0;
1021+ start_addr = addr = mm->mmap_base;
1022+ mm->cached_hole_size = 0;
1023 }
1024 /* 8 bits of randomness in 20 address space bits */
1025 if ((current->flags & PF_RANDOMIZE) &&
1026@@ -100,14 +103,14 @@ full_search:
1027 * Start a new search - just in case we missed
1028 * some holes.
1029 */
1030- if (start_addr != TASK_UNMAPPED_BASE) {
1031- start_addr = addr = TASK_UNMAPPED_BASE;
1032+ if (start_addr != mm->mmap_base) {
1033+ start_addr = addr = mm->mmap_base;
1034 mm->cached_hole_size = 0;
1035 goto full_search;
1036 }
1037 return -ENOMEM;
1038 }
1039- if (!vma || addr + len <= vma->vm_start) {
1040+ if (check_heap_stack_gap(vma, addr, len)) {
1041 /*
1042 * Remember the place where we stopped the search:
1043 */
1044diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1045index 3b3159b..425ea94 100644
1046--- a/arch/avr32/include/asm/elf.h
1047+++ b/arch/avr32/include/asm/elf.h
1048@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1049 the loader. We need to make sure that it is out of the way of the program
1050 that it will "exec", and that there is sufficient room for the brk. */
1051
1052-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1053+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1054
1055+#ifdef CONFIG_PAX_ASLR
1056+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1057+
1058+#define PAX_DELTA_MMAP_LEN 15
1059+#define PAX_DELTA_STACK_LEN 15
1060+#endif
1061
1062 /* This yields a mask that user programs can use to figure out what
1063 instruction set this CPU supports. This could be done in user space,
1064diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1065index b7f5c68..556135c 100644
1066--- a/arch/avr32/include/asm/kmap_types.h
1067+++ b/arch/avr32/include/asm/kmap_types.h
1068@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1069 D(11) KM_IRQ1,
1070 D(12) KM_SOFTIRQ0,
1071 D(13) KM_SOFTIRQ1,
1072-D(14) KM_TYPE_NR
1073+D(14) KM_CLEARPAGE,
1074+D(15) KM_TYPE_NR
1075 };
1076
1077 #undef D
1078diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1079index f7040a1..db9f300 100644
1080--- a/arch/avr32/mm/fault.c
1081+++ b/arch/avr32/mm/fault.c
1082@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1083
1084 int exception_trace = 1;
1085
1086+#ifdef CONFIG_PAX_PAGEEXEC
1087+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1088+{
1089+ unsigned long i;
1090+
1091+ printk(KERN_ERR "PAX: bytes at PC: ");
1092+ for (i = 0; i < 20; i++) {
1093+ unsigned char c;
1094+ if (get_user(c, (unsigned char *)pc+i))
1095+ printk(KERN_CONT "???????? ");
1096+ else
1097+ printk(KERN_CONT "%02x ", c);
1098+ }
1099+ printk("\n");
1100+}
1101+#endif
1102+
1103 /*
1104 * This routine handles page faults. It determines the address and the
1105 * problem, and then passes it off to one of the appropriate routines.
1106@@ -156,6 +173,16 @@ bad_area:
1107 up_read(&mm->mmap_sem);
1108
1109 if (user_mode(regs)) {
1110+
1111+#ifdef CONFIG_PAX_PAGEEXEC
1112+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1113+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1114+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1115+ do_group_exit(SIGKILL);
1116+ }
1117+ }
1118+#endif
1119+
1120 if (exception_trace && printk_ratelimit())
1121 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1122 "sp %08lx ecr %lu\n",
1123diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1124index f8e16b2..c73ff79 100644
1125--- a/arch/frv/include/asm/kmap_types.h
1126+++ b/arch/frv/include/asm/kmap_types.h
1127@@ -23,6 +23,7 @@ enum km_type {
1128 KM_IRQ1,
1129 KM_SOFTIRQ0,
1130 KM_SOFTIRQ1,
1131+ KM_CLEARPAGE,
1132 KM_TYPE_NR
1133 };
1134
1135diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1136index 385fd30..6c3d97e 100644
1137--- a/arch/frv/mm/elf-fdpic.c
1138+++ b/arch/frv/mm/elf-fdpic.c
1139@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1140 if (addr) {
1141 addr = PAGE_ALIGN(addr);
1142 vma = find_vma(current->mm, addr);
1143- if (TASK_SIZE - len >= addr &&
1144- (!vma || addr + len <= vma->vm_start))
1145+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1146 goto success;
1147 }
1148
1149@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1150 for (; vma; vma = vma->vm_next) {
1151 if (addr > limit)
1152 break;
1153- if (addr + len <= vma->vm_start)
1154+ if (check_heap_stack_gap(vma, addr, len))
1155 goto success;
1156 addr = vma->vm_end;
1157 }
1158@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1159 for (; vma; vma = vma->vm_next) {
1160 if (addr > limit)
1161 break;
1162- if (addr + len <= vma->vm_start)
1163+ if (check_heap_stack_gap(vma, addr, len))
1164 goto success;
1165 addr = vma->vm_end;
1166 }
1167diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1168index b5298eb..67c6e62 100644
1169--- a/arch/ia64/include/asm/elf.h
1170+++ b/arch/ia64/include/asm/elf.h
1171@@ -42,6 +42,13 @@
1172 */
1173 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1174
1175+#ifdef CONFIG_PAX_ASLR
1176+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1177+
1178+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1180+#endif
1181+
1182 #define PT_IA_64_UNWIND 0x70000001
1183
1184 /* IA-64 relocations: */
1185diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1186index 1a97af3..7529d31 100644
1187--- a/arch/ia64/include/asm/pgtable.h
1188+++ b/arch/ia64/include/asm/pgtable.h
1189@@ -12,7 +12,7 @@
1190 * David Mosberger-Tang <davidm@hpl.hp.com>
1191 */
1192
1193-
1194+#include <linux/const.h>
1195 #include <asm/mman.h>
1196 #include <asm/page.h>
1197 #include <asm/processor.h>
1198@@ -143,6 +143,17 @@
1199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1202+
1203+#ifdef CONFIG_PAX_PAGEEXEC
1204+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1205+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1207+#else
1208+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1209+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1210+# define PAGE_COPY_NOEXEC PAGE_COPY
1211+#endif
1212+
1213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1216diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1217index b77768d..e0795eb 100644
1218--- a/arch/ia64/include/asm/spinlock.h
1219+++ b/arch/ia64/include/asm/spinlock.h
1220@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1222
1223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1224- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1225+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1226 }
1227
1228 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1229diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1230index 449c8c0..432a3d2 100644
1231--- a/arch/ia64/include/asm/uaccess.h
1232+++ b/arch/ia64/include/asm/uaccess.h
1233@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1234 const void *__cu_from = (from); \
1235 long __cu_len = (n); \
1236 \
1237- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1238+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1240 __cu_len; \
1241 })
1242@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1243 long __cu_len = (n); \
1244 \
1245 __chk_user_ptr(__cu_from); \
1246- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1247+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1249 __cu_len; \
1250 })
1251diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1252index 24603be..948052d 100644
1253--- a/arch/ia64/kernel/module.c
1254+++ b/arch/ia64/kernel/module.c
1255@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1256 void
1257 module_free (struct module *mod, void *module_region)
1258 {
1259- if (mod && mod->arch.init_unw_table &&
1260- module_region == mod->module_init) {
1261+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1262 unw_remove_unwind_table(mod->arch.init_unw_table);
1263 mod->arch.init_unw_table = NULL;
1264 }
1265@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1266 }
1267
1268 static inline int
1269+in_init_rx (const struct module *mod, uint64_t addr)
1270+{
1271+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1272+}
1273+
1274+static inline int
1275+in_init_rw (const struct module *mod, uint64_t addr)
1276+{
1277+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1278+}
1279+
1280+static inline int
1281 in_init (const struct module *mod, uint64_t addr)
1282 {
1283- return addr - (uint64_t) mod->module_init < mod->init_size;
1284+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1285+}
1286+
1287+static inline int
1288+in_core_rx (const struct module *mod, uint64_t addr)
1289+{
1290+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1291+}
1292+
1293+static inline int
1294+in_core_rw (const struct module *mod, uint64_t addr)
1295+{
1296+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1297 }
1298
1299 static inline int
1300 in_core (const struct module *mod, uint64_t addr)
1301 {
1302- return addr - (uint64_t) mod->module_core < mod->core_size;
1303+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1304 }
1305
1306 static inline int
1307@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1308 break;
1309
1310 case RV_BDREL:
1311- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1312+ if (in_init_rx(mod, val))
1313+ val -= (uint64_t) mod->module_init_rx;
1314+ else if (in_init_rw(mod, val))
1315+ val -= (uint64_t) mod->module_init_rw;
1316+ else if (in_core_rx(mod, val))
1317+ val -= (uint64_t) mod->module_core_rx;
1318+ else if (in_core_rw(mod, val))
1319+ val -= (uint64_t) mod->module_core_rw;
1320 break;
1321
1322 case RV_LTV:
1323@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1324 * addresses have been selected...
1325 */
1326 uint64_t gp;
1327- if (mod->core_size > MAX_LTOFF)
1328+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1329 /*
1330 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1331 * at the end of the module.
1332 */
1333- gp = mod->core_size - MAX_LTOFF / 2;
1334+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1335 else
1336- gp = mod->core_size / 2;
1337- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1338+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1339+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1340 mod->arch.gp = gp;
1341 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1342 }
1343diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1344index 609d500..7dde2a8 100644
1345--- a/arch/ia64/kernel/sys_ia64.c
1346+++ b/arch/ia64/kernel/sys_ia64.c
1347@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1348 if (REGION_NUMBER(addr) == RGN_HPAGE)
1349 addr = 0;
1350 #endif
1351+
1352+#ifdef CONFIG_PAX_RANDMMAP
1353+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1354+ addr = mm->free_area_cache;
1355+ else
1356+#endif
1357+
1358 if (!addr)
1359 addr = mm->free_area_cache;
1360
1361@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1362 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1363 /* At this point: (!vma || addr < vma->vm_end). */
1364 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1365- if (start_addr != TASK_UNMAPPED_BASE) {
1366+ if (start_addr != mm->mmap_base) {
1367 /* Start a new search --- just in case we missed some holes. */
1368- addr = TASK_UNMAPPED_BASE;
1369+ addr = mm->mmap_base;
1370 goto full_search;
1371 }
1372 return -ENOMEM;
1373 }
1374- if (!vma || addr + len <= vma->vm_start) {
1375+ if (check_heap_stack_gap(vma, addr, len)) {
1376 /* Remember the address where we stopped this search: */
1377 mm->free_area_cache = addr + len;
1378 return addr;
1379diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1380index 53c0ba0..2accdde 100644
1381--- a/arch/ia64/kernel/vmlinux.lds.S
1382+++ b/arch/ia64/kernel/vmlinux.lds.S
1383@@ -199,7 +199,7 @@ SECTIONS {
1384 /* Per-cpu data: */
1385 . = ALIGN(PERCPU_PAGE_SIZE);
1386 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1387- __phys_per_cpu_start = __per_cpu_load;
1388+ __phys_per_cpu_start = per_cpu_load;
1389 /*
1390 * ensure percpu data fits
1391 * into percpu page size
1392diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1393index 20b3593..1ce77f0 100644
1394--- a/arch/ia64/mm/fault.c
1395+++ b/arch/ia64/mm/fault.c
1396@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1397 return pte_present(pte);
1398 }
1399
1400+#ifdef CONFIG_PAX_PAGEEXEC
1401+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1402+{
1403+ unsigned long i;
1404+
1405+ printk(KERN_ERR "PAX: bytes at PC: ");
1406+ for (i = 0; i < 8; i++) {
1407+ unsigned int c;
1408+ if (get_user(c, (unsigned int *)pc+i))
1409+ printk(KERN_CONT "???????? ");
1410+ else
1411+ printk(KERN_CONT "%08x ", c);
1412+ }
1413+ printk("\n");
1414+}
1415+#endif
1416+
1417 void __kprobes
1418 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1419 {
1420@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1421 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1422 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1423
1424- if ((vma->vm_flags & mask) != mask)
1425+ if ((vma->vm_flags & mask) != mask) {
1426+
1427+#ifdef CONFIG_PAX_PAGEEXEC
1428+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1429+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1430+ goto bad_area;
1431+
1432+ up_read(&mm->mmap_sem);
1433+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1434+ do_group_exit(SIGKILL);
1435+ }
1436+#endif
1437+
1438 goto bad_area;
1439
1440+ }
1441+
1442 /*
1443 * If for any reason at all we couldn't handle the fault, make
1444 * sure we exit gracefully rather than endlessly redo the
1445diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1446index 5ca674b..e0e1b70 100644
1447--- a/arch/ia64/mm/hugetlbpage.c
1448+++ b/arch/ia64/mm/hugetlbpage.c
1449@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1450 /* At this point: (!vmm || addr < vmm->vm_end). */
1451 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1452 return -ENOMEM;
1453- if (!vmm || (addr + len) <= vmm->vm_start)
1454+ if (check_heap_stack_gap(vmm, addr, len))
1455 return addr;
1456 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1457 }
1458diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1459index 00cb0e2..2ad8024 100644
1460--- a/arch/ia64/mm/init.c
1461+++ b/arch/ia64/mm/init.c
1462@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1463 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1464 vma->vm_end = vma->vm_start + PAGE_SIZE;
1465 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1466+
1467+#ifdef CONFIG_PAX_PAGEEXEC
1468+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1469+ vma->vm_flags &= ~VM_EXEC;
1470+
1471+#ifdef CONFIG_PAX_MPROTECT
1472+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1473+ vma->vm_flags &= ~VM_MAYEXEC;
1474+#endif
1475+
1476+ }
1477+#endif
1478+
1479 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1480 down_write(&current->mm->mmap_sem);
1481 if (insert_vm_struct(current->mm, vma)) {
1482diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1483index 82abd15..d95ae5d 100644
1484--- a/arch/m32r/lib/usercopy.c
1485+++ b/arch/m32r/lib/usercopy.c
1486@@ -14,6 +14,9 @@
1487 unsigned long
1488 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1489 {
1490+ if ((long)n < 0)
1491+ return n;
1492+
1493 prefetch(from);
1494 if (access_ok(VERIFY_WRITE, to, n))
1495 __copy_user(to,from,n);
1496@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1497 unsigned long
1498 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1499 {
1500+ if ((long)n < 0)
1501+ return n;
1502+
1503 prefetchw(to);
1504 if (access_ok(VERIFY_READ, from, n))
1505 __copy_user_zeroing(to,from,n);
1506diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1507index 455c0ac..ad65fbe 100644
1508--- a/arch/mips/include/asm/elf.h
1509+++ b/arch/mips/include/asm/elf.h
1510@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1511 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1512 #endif
1513
1514+#ifdef CONFIG_PAX_ASLR
1515+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1516+
1517+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1519+#endif
1520+
1521 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1522 struct linux_binprm;
1523 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1524 int uses_interp);
1525
1526-struct mm_struct;
1527-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1528-#define arch_randomize_brk arch_randomize_brk
1529-
1530 #endif /* _ASM_ELF_H */
1531diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1532index e59cd1a..8e329d6 100644
1533--- a/arch/mips/include/asm/page.h
1534+++ b/arch/mips/include/asm/page.h
1535@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1536 #ifdef CONFIG_CPU_MIPS32
1537 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1538 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1539- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1540+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1541 #else
1542 typedef struct { unsigned long long pte; } pte_t;
1543 #define pte_val(x) ((x).pte)
1544diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1545index 6018c80..7c37203 100644
1546--- a/arch/mips/include/asm/system.h
1547+++ b/arch/mips/include/asm/system.h
1548@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1549 */
1550 #define __ARCH_WANT_UNLOCKED_CTXSW
1551
1552-extern unsigned long arch_align_stack(unsigned long sp);
1553+#define arch_align_stack(x) ((x) & ~0xfUL)
1554
1555 #endif /* _ASM_SYSTEM_H */
1556diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1557index 9fdd8bc..4bd7f1a 100644
1558--- a/arch/mips/kernel/binfmt_elfn32.c
1559+++ b/arch/mips/kernel/binfmt_elfn32.c
1560@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1561 #undef ELF_ET_DYN_BASE
1562 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1563
1564+#ifdef CONFIG_PAX_ASLR
1565+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1566+
1567+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1569+#endif
1570+
1571 #include <asm/processor.h>
1572 #include <linux/module.h>
1573 #include <linux/elfcore.h>
1574diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1575index ff44823..97f8906 100644
1576--- a/arch/mips/kernel/binfmt_elfo32.c
1577+++ b/arch/mips/kernel/binfmt_elfo32.c
1578@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1579 #undef ELF_ET_DYN_BASE
1580 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1581
1582+#ifdef CONFIG_PAX_ASLR
1583+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1584+
1585+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1587+#endif
1588+
1589 #include <asm/processor.h>
1590
1591 /*
1592diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1593index b30cb25..454c0a9 100644
1594--- a/arch/mips/kernel/process.c
1595+++ b/arch/mips/kernel/process.c
1596@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1597 out:
1598 return pc;
1599 }
1600-
1601-/*
1602- * Don't forget that the stack pointer must be aligned on a 8 bytes
1603- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1604- */
1605-unsigned long arch_align_stack(unsigned long sp)
1606-{
1607- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1608- sp -= get_random_int() & ~PAGE_MASK;
1609-
1610- return sp & ALMASK;
1611-}
1612diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1613index 937cf33..adb39bb 100644
1614--- a/arch/mips/mm/fault.c
1615+++ b/arch/mips/mm/fault.c
1616@@ -28,6 +28,23 @@
1617 #include <asm/highmem.h> /* For VMALLOC_END */
1618 #include <linux/kdebug.h>
1619
1620+#ifdef CONFIG_PAX_PAGEEXEC
1621+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1622+{
1623+ unsigned long i;
1624+
1625+ printk(KERN_ERR "PAX: bytes at PC: ");
1626+ for (i = 0; i < 5; i++) {
1627+ unsigned int c;
1628+ if (get_user(c, (unsigned int *)pc+i))
1629+ printk(KERN_CONT "???????? ");
1630+ else
1631+ printk(KERN_CONT "%08x ", c);
1632+ }
1633+ printk("\n");
1634+}
1635+#endif
1636+
1637 /*
1638 * This routine handles page faults. It determines the address,
1639 * and the problem, and then passes it off to one of the appropriate
1640diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1641index 302d779..7d35bf8 100644
1642--- a/arch/mips/mm/mmap.c
1643+++ b/arch/mips/mm/mmap.c
1644@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1645 do_color_align = 1;
1646
1647 /* requesting a specific address */
1648+
1649+#ifdef CONFIG_PAX_RANDMMAP
1650+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1651+#endif
1652+
1653 if (addr) {
1654 if (do_color_align)
1655 addr = COLOUR_ALIGN(addr, pgoff);
1656@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1657 addr = PAGE_ALIGN(addr);
1658
1659 vma = find_vma(mm, addr);
1660- if (TASK_SIZE - len >= addr &&
1661- (!vma || addr + len <= vma->vm_start))
1662+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1663 return addr;
1664 }
1665
1666@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1667 /* At this point: (!vma || addr < vma->vm_end). */
1668 if (TASK_SIZE - len < addr)
1669 return -ENOMEM;
1670- if (!vma || addr + len <= vma->vm_start)
1671+ if (check_heap_stack_gap(vma, addr, len))
1672 return addr;
1673 addr = vma->vm_end;
1674 if (do_color_align)
1675@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1676 /* make sure it can fit in the remaining address space */
1677 if (likely(addr > len)) {
1678 vma = find_vma(mm, addr - len);
1679- if (!vma || addr <= vma->vm_start) {
1680+ if (check_heap_stack_gap(vma, addr - len, len)) {
1681 /* cache the address as a hint for next time */
1682 return mm->free_area_cache = addr - len;
1683 }
1684@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1685 * return with success:
1686 */
1687 vma = find_vma(mm, addr);
1688- if (likely(!vma || addr + len <= vma->vm_start)) {
1689+ if (check_heap_stack_gap(vmm, addr, len)) {
1690 /* cache the address as a hint for next time */
1691 return mm->free_area_cache = addr;
1692 }
1693@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1694 mm->unmap_area = arch_unmap_area_topdown;
1695 }
1696 }
1697-
1698-static inline unsigned long brk_rnd(void)
1699-{
1700- unsigned long rnd = get_random_int();
1701-
1702- rnd = rnd << PAGE_SHIFT;
1703- /* 8MB for 32bit, 256MB for 64bit */
1704- if (TASK_IS_32BIT_ADDR)
1705- rnd = rnd & 0x7ffffful;
1706- else
1707- rnd = rnd & 0xffffffful;
1708-
1709- return rnd;
1710-}
1711-
1712-unsigned long arch_randomize_brk(struct mm_struct *mm)
1713-{
1714- unsigned long base = mm->brk;
1715- unsigned long ret;
1716-
1717- ret = PAGE_ALIGN(base + brk_rnd());
1718-
1719- if (ret < mm->brk)
1720- return mm->brk;
1721-
1722- return ret;
1723-}
1724diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1725index 19f6cb1..6c78cf2 100644
1726--- a/arch/parisc/include/asm/elf.h
1727+++ b/arch/parisc/include/asm/elf.h
1728@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1729
1730 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1731
1732+#ifdef CONFIG_PAX_ASLR
1733+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1734+
1735+#define PAX_DELTA_MMAP_LEN 16
1736+#define PAX_DELTA_STACK_LEN 16
1737+#endif
1738+
1739 /* This yields a mask that user programs can use to figure out what
1740 instruction set this CPU supports. This could be done in user space,
1741 but it's not easy, and we've already done it here. */
1742diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1743index 22dadeb..f6c2be4 100644
1744--- a/arch/parisc/include/asm/pgtable.h
1745+++ b/arch/parisc/include/asm/pgtable.h
1746@@ -210,6 +210,17 @@ struct vm_area_struct;
1747 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1748 #define PAGE_COPY PAGE_EXECREAD
1749 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1750+
1751+#ifdef CONFIG_PAX_PAGEEXEC
1752+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1753+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1755+#else
1756+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1757+# define PAGE_COPY_NOEXEC PAGE_COPY
1758+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1759+#endif
1760+
1761 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1762 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1763 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1764diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1765index 5e34ccf..672bc9c 100644
1766--- a/arch/parisc/kernel/module.c
1767+++ b/arch/parisc/kernel/module.c
1768@@ -98,16 +98,38 @@
1769
1770 /* three functions to determine where in the module core
1771 * or init pieces the location is */
1772+static inline int in_init_rx(struct module *me, void *loc)
1773+{
1774+ return (loc >= me->module_init_rx &&
1775+ loc < (me->module_init_rx + me->init_size_rx));
1776+}
1777+
1778+static inline int in_init_rw(struct module *me, void *loc)
1779+{
1780+ return (loc >= me->module_init_rw &&
1781+ loc < (me->module_init_rw + me->init_size_rw));
1782+}
1783+
1784 static inline int in_init(struct module *me, void *loc)
1785 {
1786- return (loc >= me->module_init &&
1787- loc <= (me->module_init + me->init_size));
1788+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1789+}
1790+
1791+static inline int in_core_rx(struct module *me, void *loc)
1792+{
1793+ return (loc >= me->module_core_rx &&
1794+ loc < (me->module_core_rx + me->core_size_rx));
1795+}
1796+
1797+static inline int in_core_rw(struct module *me, void *loc)
1798+{
1799+ return (loc >= me->module_core_rw &&
1800+ loc < (me->module_core_rw + me->core_size_rw));
1801 }
1802
1803 static inline int in_core(struct module *me, void *loc)
1804 {
1805- return (loc >= me->module_core &&
1806- loc <= (me->module_core + me->core_size));
1807+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1808 }
1809
1810 static inline int in_local(struct module *me, void *loc)
1811@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1812 }
1813
1814 /* align things a bit */
1815- me->core_size = ALIGN(me->core_size, 16);
1816- me->arch.got_offset = me->core_size;
1817- me->core_size += gots * sizeof(struct got_entry);
1818+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1819+ me->arch.got_offset = me->core_size_rw;
1820+ me->core_size_rw += gots * sizeof(struct got_entry);
1821
1822- me->core_size = ALIGN(me->core_size, 16);
1823- me->arch.fdesc_offset = me->core_size;
1824- me->core_size += fdescs * sizeof(Elf_Fdesc);
1825+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1826+ me->arch.fdesc_offset = me->core_size_rw;
1827+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1828
1829 me->arch.got_max = gots;
1830 me->arch.fdesc_max = fdescs;
1831@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1832
1833 BUG_ON(value == 0);
1834
1835- got = me->module_core + me->arch.got_offset;
1836+ got = me->module_core_rw + me->arch.got_offset;
1837 for (i = 0; got[i].addr; i++)
1838 if (got[i].addr == value)
1839 goto out;
1840@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1841 #ifdef CONFIG_64BIT
1842 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1843 {
1844- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1845+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1846
1847 if (!value) {
1848 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1849@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1850
1851 /* Create new one */
1852 fdesc->addr = value;
1853- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1854+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1855 return (Elf_Addr)fdesc;
1856 }
1857 #endif /* CONFIG_64BIT */
1858@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1859
1860 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1861 end = table + sechdrs[me->arch.unwind_section].sh_size;
1862- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1863+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1864
1865 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1866 me->arch.unwind_section, table, end, gp);
1867diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1868index c9b9322..02d8940 100644
1869--- a/arch/parisc/kernel/sys_parisc.c
1870+++ b/arch/parisc/kernel/sys_parisc.c
1871@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1872 /* At this point: (!vma || addr < vma->vm_end). */
1873 if (TASK_SIZE - len < addr)
1874 return -ENOMEM;
1875- if (!vma || addr + len <= vma->vm_start)
1876+ if (check_heap_stack_gap(vma, addr, len))
1877 return addr;
1878 addr = vma->vm_end;
1879 }
1880@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1881 /* At this point: (!vma || addr < vma->vm_end). */
1882 if (TASK_SIZE - len < addr)
1883 return -ENOMEM;
1884- if (!vma || addr + len <= vma->vm_start)
1885+ if (check_heap_stack_gap(vma, addr, len))
1886 return addr;
1887 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1888 if (addr < vma->vm_end) /* handle wraparound */
1889@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1890 if (flags & MAP_FIXED)
1891 return addr;
1892 if (!addr)
1893- addr = TASK_UNMAPPED_BASE;
1894+ addr = current->mm->mmap_base;
1895
1896 if (filp) {
1897 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1898diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1899index f19e660..414fe24 100644
1900--- a/arch/parisc/kernel/traps.c
1901+++ b/arch/parisc/kernel/traps.c
1902@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1903
1904 down_read(&current->mm->mmap_sem);
1905 vma = find_vma(current->mm,regs->iaoq[0]);
1906- if (vma && (regs->iaoq[0] >= vma->vm_start)
1907- && (vma->vm_flags & VM_EXEC)) {
1908-
1909+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1910 fault_address = regs->iaoq[0];
1911 fault_space = regs->iasq[0];
1912
1913diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1914index 18162ce..94de376 100644
1915--- a/arch/parisc/mm/fault.c
1916+++ b/arch/parisc/mm/fault.c
1917@@ -15,6 +15,7 @@
1918 #include <linux/sched.h>
1919 #include <linux/interrupt.h>
1920 #include <linux/module.h>
1921+#include <linux/unistd.h>
1922
1923 #include <asm/uaccess.h>
1924 #include <asm/traps.h>
1925@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1926 static unsigned long
1927 parisc_acctyp(unsigned long code, unsigned int inst)
1928 {
1929- if (code == 6 || code == 16)
1930+ if (code == 6 || code == 7 || code == 16)
1931 return VM_EXEC;
1932
1933 switch (inst & 0xf0000000) {
1934@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1935 }
1936 #endif
1937
1938+#ifdef CONFIG_PAX_PAGEEXEC
1939+/*
1940+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1941+ *
1942+ * returns 1 when task should be killed
1943+ * 2 when rt_sigreturn trampoline was detected
1944+ * 3 when unpatched PLT trampoline was detected
1945+ */
1946+static int pax_handle_fetch_fault(struct pt_regs *regs)
1947+{
1948+
1949+#ifdef CONFIG_PAX_EMUPLT
1950+ int err;
1951+
1952+ do { /* PaX: unpatched PLT emulation */
1953+ unsigned int bl, depwi;
1954+
1955+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1956+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1957+
1958+ if (err)
1959+ break;
1960+
1961+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1962+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1963+
1964+ err = get_user(ldw, (unsigned int *)addr);
1965+ err |= get_user(bv, (unsigned int *)(addr+4));
1966+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1967+
1968+ if (err)
1969+ break;
1970+
1971+ if (ldw == 0x0E801096U &&
1972+ bv == 0xEAC0C000U &&
1973+ ldw2 == 0x0E881095U)
1974+ {
1975+ unsigned int resolver, map;
1976+
1977+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1978+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1979+ if (err)
1980+ break;
1981+
1982+ regs->gr[20] = instruction_pointer(regs)+8;
1983+ regs->gr[21] = map;
1984+ regs->gr[22] = resolver;
1985+ regs->iaoq[0] = resolver | 3UL;
1986+ regs->iaoq[1] = regs->iaoq[0] + 4;
1987+ return 3;
1988+ }
1989+ }
1990+ } while (0);
1991+#endif
1992+
1993+#ifdef CONFIG_PAX_EMUTRAMP
1994+
1995+#ifndef CONFIG_PAX_EMUSIGRT
1996+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1997+ return 1;
1998+#endif
1999+
2000+ do { /* PaX: rt_sigreturn emulation */
2001+ unsigned int ldi1, ldi2, bel, nop;
2002+
2003+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2004+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2005+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2006+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2007+
2008+ if (err)
2009+ break;
2010+
2011+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2012+ ldi2 == 0x3414015AU &&
2013+ bel == 0xE4008200U &&
2014+ nop == 0x08000240U)
2015+ {
2016+ regs->gr[25] = (ldi1 & 2) >> 1;
2017+ regs->gr[20] = __NR_rt_sigreturn;
2018+ regs->gr[31] = regs->iaoq[1] + 16;
2019+ regs->sr[0] = regs->iasq[1];
2020+ regs->iaoq[0] = 0x100UL;
2021+ regs->iaoq[1] = regs->iaoq[0] + 4;
2022+ regs->iasq[0] = regs->sr[2];
2023+ regs->iasq[1] = regs->sr[2];
2024+ return 2;
2025+ }
2026+ } while (0);
2027+#endif
2028+
2029+ return 1;
2030+}
2031+
2032+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2033+{
2034+ unsigned long i;
2035+
2036+ printk(KERN_ERR "PAX: bytes at PC: ");
2037+ for (i = 0; i < 5; i++) {
2038+ unsigned int c;
2039+ if (get_user(c, (unsigned int *)pc+i))
2040+ printk(KERN_CONT "???????? ");
2041+ else
2042+ printk(KERN_CONT "%08x ", c);
2043+ }
2044+ printk("\n");
2045+}
2046+#endif
2047+
2048 int fixup_exception(struct pt_regs *regs)
2049 {
2050 const struct exception_table_entry *fix;
2051@@ -192,8 +303,33 @@ good_area:
2052
2053 acc_type = parisc_acctyp(code,regs->iir);
2054
2055- if ((vma->vm_flags & acc_type) != acc_type)
2056+ if ((vma->vm_flags & acc_type) != acc_type) {
2057+
2058+#ifdef CONFIG_PAX_PAGEEXEC
2059+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2060+ (address & ~3UL) == instruction_pointer(regs))
2061+ {
2062+ up_read(&mm->mmap_sem);
2063+ switch (pax_handle_fetch_fault(regs)) {
2064+
2065+#ifdef CONFIG_PAX_EMUPLT
2066+ case 3:
2067+ return;
2068+#endif
2069+
2070+#ifdef CONFIG_PAX_EMUTRAMP
2071+ case 2:
2072+ return;
2073+#endif
2074+
2075+ }
2076+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2077+ do_group_exit(SIGKILL);
2078+ }
2079+#endif
2080+
2081 goto bad_area;
2082+ }
2083
2084 /*
2085 * If for any reason at all we couldn't handle the fault, make
2086diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2087index 3bf9cca..e7457d0 100644
2088--- a/arch/powerpc/include/asm/elf.h
2089+++ b/arch/powerpc/include/asm/elf.h
2090@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2091 the loader. We need to make sure that it is out of the way of the program
2092 that it will "exec", and that there is sufficient room for the brk. */
2093
2094-extern unsigned long randomize_et_dyn(unsigned long base);
2095-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2096+#define ELF_ET_DYN_BASE (0x20000000)
2097+
2098+#ifdef CONFIG_PAX_ASLR
2099+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2100+
2101+#ifdef __powerpc64__
2102+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2103+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2104+#else
2105+#define PAX_DELTA_MMAP_LEN 15
2106+#define PAX_DELTA_STACK_LEN 15
2107+#endif
2108+#endif
2109
2110 /*
2111 * Our registers are always unsigned longs, whether we're a 32 bit
2112@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2113 (0x7ff >> (PAGE_SHIFT - 12)) : \
2114 (0x3ffff >> (PAGE_SHIFT - 12)))
2115
2116-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2117-#define arch_randomize_brk arch_randomize_brk
2118-
2119 #endif /* __KERNEL__ */
2120
2121 /*
2122diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2123index bca8fdc..61e9580 100644
2124--- a/arch/powerpc/include/asm/kmap_types.h
2125+++ b/arch/powerpc/include/asm/kmap_types.h
2126@@ -27,6 +27,7 @@ enum km_type {
2127 KM_PPC_SYNC_PAGE,
2128 KM_PPC_SYNC_ICACHE,
2129 KM_KDB,
2130+ KM_CLEARPAGE,
2131 KM_TYPE_NR
2132 };
2133
2134diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2135index d4a7f64..451de1c 100644
2136--- a/arch/powerpc/include/asm/mman.h
2137+++ b/arch/powerpc/include/asm/mman.h
2138@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2139 }
2140 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2141
2142-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2143+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2144 {
2145 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2146 }
2147diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2148index 2cd664e..1d2e8a7 100644
2149--- a/arch/powerpc/include/asm/page.h
2150+++ b/arch/powerpc/include/asm/page.h
2151@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2152 * and needs to be executable. This means the whole heap ends
2153 * up being executable.
2154 */
2155-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2156- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2157+#define VM_DATA_DEFAULT_FLAGS32 \
2158+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2159+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2160
2161 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2162 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2163@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2164 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2165 #endif
2166
2167+#define ktla_ktva(addr) (addr)
2168+#define ktva_ktla(addr) (addr)
2169+
2170 #ifndef __ASSEMBLY__
2171
2172 #undef STRICT_MM_TYPECHECKS
2173diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2174index 9356262..ea96148 100644
2175--- a/arch/powerpc/include/asm/page_64.h
2176+++ b/arch/powerpc/include/asm/page_64.h
2177@@ -155,15 +155,18 @@ do { \
2178 * stack by default, so in the absence of a PT_GNU_STACK program header
2179 * we turn execute permission off.
2180 */
2181-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2182- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2183+#define VM_STACK_DEFAULT_FLAGS32 \
2184+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2185+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2186
2187 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2188 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2189
2190+#ifndef CONFIG_PAX_PAGEEXEC
2191 #define VM_STACK_DEFAULT_FLAGS \
2192 (is_32bit_task() ? \
2193 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2194+#endif
2195
2196 #include <asm-generic/getorder.h>
2197
2198diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2199index 88b0bd9..e32bc67 100644
2200--- a/arch/powerpc/include/asm/pgtable.h
2201+++ b/arch/powerpc/include/asm/pgtable.h
2202@@ -2,6 +2,7 @@
2203 #define _ASM_POWERPC_PGTABLE_H
2204 #ifdef __KERNEL__
2205
2206+#include <linux/const.h>
2207 #ifndef __ASSEMBLY__
2208 #include <asm/processor.h> /* For TASK_SIZE */
2209 #include <asm/mmu.h>
2210diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2211index 4aad413..85d86bf 100644
2212--- a/arch/powerpc/include/asm/pte-hash32.h
2213+++ b/arch/powerpc/include/asm/pte-hash32.h
2214@@ -21,6 +21,7 @@
2215 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2216 #define _PAGE_USER 0x004 /* usermode access allowed */
2217 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2218+#define _PAGE_EXEC _PAGE_GUARDED
2219 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2220 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2221 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2222diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2223index 559da19..7e5835c 100644
2224--- a/arch/powerpc/include/asm/reg.h
2225+++ b/arch/powerpc/include/asm/reg.h
2226@@ -212,6 +212,7 @@
2227 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2228 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2229 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2230+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2231 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2232 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2233 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2234diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2235index e30a13d..2b7d994 100644
2236--- a/arch/powerpc/include/asm/system.h
2237+++ b/arch/powerpc/include/asm/system.h
2238@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2239 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2240 #endif
2241
2242-extern unsigned long arch_align_stack(unsigned long sp);
2243+#define arch_align_stack(x) ((x) & ~0xfUL)
2244
2245 /* Used in very early kernel initialization. */
2246 extern unsigned long reloc_offset(void);
2247diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2248index bd0fb84..a42a14b 100644
2249--- a/arch/powerpc/include/asm/uaccess.h
2250+++ b/arch/powerpc/include/asm/uaccess.h
2251@@ -13,6 +13,8 @@
2252 #define VERIFY_READ 0
2253 #define VERIFY_WRITE 1
2254
2255+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2256+
2257 /*
2258 * The fs value determines whether argument validity checking should be
2259 * performed or not. If get_fs() == USER_DS, checking is performed, with
2260@@ -327,52 +329,6 @@ do { \
2261 extern unsigned long __copy_tofrom_user(void __user *to,
2262 const void __user *from, unsigned long size);
2263
2264-#ifndef __powerpc64__
2265-
2266-static inline unsigned long copy_from_user(void *to,
2267- const void __user *from, unsigned long n)
2268-{
2269- unsigned long over;
2270-
2271- if (access_ok(VERIFY_READ, from, n))
2272- return __copy_tofrom_user((__force void __user *)to, from, n);
2273- if ((unsigned long)from < TASK_SIZE) {
2274- over = (unsigned long)from + n - TASK_SIZE;
2275- return __copy_tofrom_user((__force void __user *)to, from,
2276- n - over) + over;
2277- }
2278- return n;
2279-}
2280-
2281-static inline unsigned long copy_to_user(void __user *to,
2282- const void *from, unsigned long n)
2283-{
2284- unsigned long over;
2285-
2286- if (access_ok(VERIFY_WRITE, to, n))
2287- return __copy_tofrom_user(to, (__force void __user *)from, n);
2288- if ((unsigned long)to < TASK_SIZE) {
2289- over = (unsigned long)to + n - TASK_SIZE;
2290- return __copy_tofrom_user(to, (__force void __user *)from,
2291- n - over) + over;
2292- }
2293- return n;
2294-}
2295-
2296-#else /* __powerpc64__ */
2297-
2298-#define __copy_in_user(to, from, size) \
2299- __copy_tofrom_user((to), (from), (size))
2300-
2301-extern unsigned long copy_from_user(void *to, const void __user *from,
2302- unsigned long n);
2303-extern unsigned long copy_to_user(void __user *to, const void *from,
2304- unsigned long n);
2305-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2306- unsigned long n);
2307-
2308-#endif /* __powerpc64__ */
2309-
2310 static inline unsigned long __copy_from_user_inatomic(void *to,
2311 const void __user *from, unsigned long n)
2312 {
2313@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2314 if (ret == 0)
2315 return 0;
2316 }
2317+
2318+ if (!__builtin_constant_p(n))
2319+ check_object_size(to, n, false);
2320+
2321 return __copy_tofrom_user((__force void __user *)to, from, n);
2322 }
2323
2324@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2325 if (ret == 0)
2326 return 0;
2327 }
2328+
2329+ if (!__builtin_constant_p(n))
2330+ check_object_size(from, n, true);
2331+
2332 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2333 }
2334
2335@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2336 return __copy_to_user_inatomic(to, from, size);
2337 }
2338
2339+#ifndef __powerpc64__
2340+
2341+static inline unsigned long __must_check copy_from_user(void *to,
2342+ const void __user *from, unsigned long n)
2343+{
2344+ unsigned long over;
2345+
2346+ if ((long)n < 0)
2347+ return n;
2348+
2349+ if (access_ok(VERIFY_READ, from, n)) {
2350+ if (!__builtin_constant_p(n))
2351+ check_object_size(to, n, false);
2352+ return __copy_tofrom_user((__force void __user *)to, from, n);
2353+ }
2354+ if ((unsigned long)from < TASK_SIZE) {
2355+ over = (unsigned long)from + n - TASK_SIZE;
2356+ if (!__builtin_constant_p(n - over))
2357+ check_object_size(to, n - over, false);
2358+ return __copy_tofrom_user((__force void __user *)to, from,
2359+ n - over) + over;
2360+ }
2361+ return n;
2362+}
2363+
2364+static inline unsigned long __must_check copy_to_user(void __user *to,
2365+ const void *from, unsigned long n)
2366+{
2367+ unsigned long over;
2368+
2369+ if ((long)n < 0)
2370+ return n;
2371+
2372+ if (access_ok(VERIFY_WRITE, to, n)) {
2373+ if (!__builtin_constant_p(n))
2374+ check_object_size(from, n, true);
2375+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2376+ }
2377+ if ((unsigned long)to < TASK_SIZE) {
2378+ over = (unsigned long)to + n - TASK_SIZE;
2379+ if (!__builtin_constant_p(n))
2380+ check_object_size(from, n - over, true);
2381+ return __copy_tofrom_user(to, (__force void __user *)from,
2382+ n - over) + over;
2383+ }
2384+ return n;
2385+}
2386+
2387+#else /* __powerpc64__ */
2388+
2389+#define __copy_in_user(to, from, size) \
2390+ __copy_tofrom_user((to), (from), (size))
2391+
2392+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2393+{
2394+ if ((long)n < 0 || n > INT_MAX)
2395+ return n;
2396+
2397+ if (!__builtin_constant_p(n))
2398+ check_object_size(to, n, false);
2399+
2400+ if (likely(access_ok(VERIFY_READ, from, n)))
2401+ n = __copy_from_user(to, from, n);
2402+ else
2403+ memset(to, 0, n);
2404+ return n;
2405+}
2406+
2407+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2408+{
2409+ if ((long)n < 0 || n > INT_MAX)
2410+ return n;
2411+
2412+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2413+ if (!__builtin_constant_p(n))
2414+ check_object_size(from, n, true);
2415+ n = __copy_to_user(to, from, n);
2416+ }
2417+ return n;
2418+}
2419+
2420+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2421+ unsigned long n);
2422+
2423+#endif /* __powerpc64__ */
2424+
2425 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2426
2427 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2428diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2429index 429983c..7af363b 100644
2430--- a/arch/powerpc/kernel/exceptions-64e.S
2431+++ b/arch/powerpc/kernel/exceptions-64e.S
2432@@ -587,6 +587,7 @@ storage_fault_common:
2433 std r14,_DAR(r1)
2434 std r15,_DSISR(r1)
2435 addi r3,r1,STACK_FRAME_OVERHEAD
2436+ bl .save_nvgprs
2437 mr r4,r14
2438 mr r5,r15
2439 ld r14,PACA_EXGEN+EX_R14(r13)
2440@@ -596,8 +597,7 @@ storage_fault_common:
2441 cmpdi r3,0
2442 bne- 1f
2443 b .ret_from_except_lite
2444-1: bl .save_nvgprs
2445- mr r5,r3
2446+1: mr r5,r3
2447 addi r3,r1,STACK_FRAME_OVERHEAD
2448 ld r4,_DAR(r1)
2449 bl .bad_page_fault
2450diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2451index 41b02c7..05e76fb 100644
2452--- a/arch/powerpc/kernel/exceptions-64s.S
2453+++ b/arch/powerpc/kernel/exceptions-64s.S
2454@@ -1014,10 +1014,10 @@ handle_page_fault:
2455 11: ld r4,_DAR(r1)
2456 ld r5,_DSISR(r1)
2457 addi r3,r1,STACK_FRAME_OVERHEAD
2458+ bl .save_nvgprs
2459 bl .do_page_fault
2460 cmpdi r3,0
2461 beq+ 13f
2462- bl .save_nvgprs
2463 mr r5,r3
2464 addi r3,r1,STACK_FRAME_OVERHEAD
2465 lwz r4,_DAR(r1)
2466diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2467index 0b6d796..d760ddb 100644
2468--- a/arch/powerpc/kernel/module_32.c
2469+++ b/arch/powerpc/kernel/module_32.c
2470@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2471 me->arch.core_plt_section = i;
2472 }
2473 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2474- printk("Module doesn't contain .plt or .init.plt sections.\n");
2475+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2476 return -ENOEXEC;
2477 }
2478
2479@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2480
2481 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2482 /* Init, or core PLT? */
2483- if (location >= mod->module_core
2484- && location < mod->module_core + mod->core_size)
2485+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2486+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2487 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2488- else
2489+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2490+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2491 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2492+ else {
2493+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2494+ return ~0UL;
2495+ }
2496
2497 /* Find this entry, or if that fails, the next avail. entry */
2498 while (entry->jump[0]) {
2499diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2500index 8f53954..a704ad6 100644
2501--- a/arch/powerpc/kernel/process.c
2502+++ b/arch/powerpc/kernel/process.c
2503@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2504 * Lookup NIP late so we have the best change of getting the
2505 * above info out without failing
2506 */
2507- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2508- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2509+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2510+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2511 #endif
2512 show_stack(current, (unsigned long *) regs->gpr[1]);
2513 if (!user_mode(regs))
2514@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2515 newsp = stack[0];
2516 ip = stack[STACK_FRAME_LR_SAVE];
2517 if (!firstframe || ip != lr) {
2518- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2519+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2520 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2521 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2522- printk(" (%pS)",
2523+ printk(" (%pA)",
2524 (void *)current->ret_stack[curr_frame].ret);
2525 curr_frame--;
2526 }
2527@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2528 struct pt_regs *regs = (struct pt_regs *)
2529 (sp + STACK_FRAME_OVERHEAD);
2530 lr = regs->link;
2531- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2532+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2533 regs->trap, (void *)regs->nip, (void *)lr);
2534 firstframe = 1;
2535 }
2536@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2537 }
2538
2539 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2540-
2541-unsigned long arch_align_stack(unsigned long sp)
2542-{
2543- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2544- sp -= get_random_int() & ~PAGE_MASK;
2545- return sp & ~0xf;
2546-}
2547-
2548-static inline unsigned long brk_rnd(void)
2549-{
2550- unsigned long rnd = 0;
2551-
2552- /* 8MB for 32bit, 1GB for 64bit */
2553- if (is_32bit_task())
2554- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2555- else
2556- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2557-
2558- return rnd << PAGE_SHIFT;
2559-}
2560-
2561-unsigned long arch_randomize_brk(struct mm_struct *mm)
2562-{
2563- unsigned long base = mm->brk;
2564- unsigned long ret;
2565-
2566-#ifdef CONFIG_PPC_STD_MMU_64
2567- /*
2568- * If we are using 1TB segments and we are allowed to randomise
2569- * the heap, we can put it above 1TB so it is backed by a 1TB
2570- * segment. Otherwise the heap will be in the bottom 1TB
2571- * which always uses 256MB segments and this may result in a
2572- * performance penalty.
2573- */
2574- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2575- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2576-#endif
2577-
2578- ret = PAGE_ALIGN(base + brk_rnd());
2579-
2580- if (ret < mm->brk)
2581- return mm->brk;
2582-
2583- return ret;
2584-}
2585-
2586-unsigned long randomize_et_dyn(unsigned long base)
2587-{
2588- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2589-
2590- if (ret < base)
2591- return base;
2592-
2593- return ret;
2594-}
2595diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2596index 78b76dc..7f232ef 100644
2597--- a/arch/powerpc/kernel/signal_32.c
2598+++ b/arch/powerpc/kernel/signal_32.c
2599@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2600 /* Save user registers on the stack */
2601 frame = &rt_sf->uc.uc_mcontext;
2602 addr = frame;
2603- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2604+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2605 if (save_user_regs(regs, frame, 0, 1))
2606 goto badframe;
2607 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2608diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2609index e91c736..742ec06 100644
2610--- a/arch/powerpc/kernel/signal_64.c
2611+++ b/arch/powerpc/kernel/signal_64.c
2612@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2613 current->thread.fpscr.val = 0;
2614
2615 /* Set up to return from userspace. */
2616- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2617+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2618 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2619 } else {
2620 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2621diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2622index f19d977..8ac286e 100644
2623--- a/arch/powerpc/kernel/traps.c
2624+++ b/arch/powerpc/kernel/traps.c
2625@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2626 static inline void pmac_backlight_unblank(void) { }
2627 #endif
2628
2629+extern void gr_handle_kernel_exploit(void);
2630+
2631 int die(const char *str, struct pt_regs *regs, long err)
2632 {
2633 static struct {
2634@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2635 if (panic_on_oops)
2636 panic("Fatal exception");
2637
2638+ gr_handle_kernel_exploit();
2639+
2640 oops_exit();
2641 do_exit(err);
2642
2643diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2644index 142ab10..236e61a 100644
2645--- a/arch/powerpc/kernel/vdso.c
2646+++ b/arch/powerpc/kernel/vdso.c
2647@@ -36,6 +36,7 @@
2648 #include <asm/firmware.h>
2649 #include <asm/vdso.h>
2650 #include <asm/vdso_datapage.h>
2651+#include <asm/mman.h>
2652
2653 #include "setup.h"
2654
2655@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2656 vdso_base = VDSO32_MBASE;
2657 #endif
2658
2659- current->mm->context.vdso_base = 0;
2660+ current->mm->context.vdso_base = ~0UL;
2661
2662 /* vDSO has a problem and was disabled, just don't "enable" it for the
2663 * process
2664@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2665 vdso_base = get_unmapped_area(NULL, vdso_base,
2666 (vdso_pages << PAGE_SHIFT) +
2667 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2668- 0, 0);
2669+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2670 if (IS_ERR_VALUE(vdso_base)) {
2671 rc = vdso_base;
2672 goto fail_mmapsem;
2673diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2674index 5eea6f3..5d10396 100644
2675--- a/arch/powerpc/lib/usercopy_64.c
2676+++ b/arch/powerpc/lib/usercopy_64.c
2677@@ -9,22 +9,6 @@
2678 #include <linux/module.h>
2679 #include <asm/uaccess.h>
2680
2681-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2682-{
2683- if (likely(access_ok(VERIFY_READ, from, n)))
2684- n = __copy_from_user(to, from, n);
2685- else
2686- memset(to, 0, n);
2687- return n;
2688-}
2689-
2690-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2691-{
2692- if (likely(access_ok(VERIFY_WRITE, to, n)))
2693- n = __copy_to_user(to, from, n);
2694- return n;
2695-}
2696-
2697 unsigned long copy_in_user(void __user *to, const void __user *from,
2698 unsigned long n)
2699 {
2700@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2701 return n;
2702 }
2703
2704-EXPORT_SYMBOL(copy_from_user);
2705-EXPORT_SYMBOL(copy_to_user);
2706 EXPORT_SYMBOL(copy_in_user);
2707
2708diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2709index 5efe8c9..db9ceef 100644
2710--- a/arch/powerpc/mm/fault.c
2711+++ b/arch/powerpc/mm/fault.c
2712@@ -32,6 +32,10 @@
2713 #include <linux/perf_event.h>
2714 #include <linux/magic.h>
2715 #include <linux/ratelimit.h>
2716+#include <linux/slab.h>
2717+#include <linux/pagemap.h>
2718+#include <linux/compiler.h>
2719+#include <linux/unistd.h>
2720
2721 #include <asm/firmware.h>
2722 #include <asm/page.h>
2723@@ -43,6 +47,7 @@
2724 #include <asm/tlbflush.h>
2725 #include <asm/siginfo.h>
2726 #include <mm/mmu_decl.h>
2727+#include <asm/ptrace.h>
2728
2729 #ifdef CONFIG_KPROBES
2730 static inline int notify_page_fault(struct pt_regs *regs)
2731@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2732 }
2733 #endif
2734
2735+#ifdef CONFIG_PAX_PAGEEXEC
2736+/*
2737+ * PaX: decide what to do with offenders (regs->nip = fault address)
2738+ *
2739+ * returns 1 when task should be killed
2740+ */
2741+static int pax_handle_fetch_fault(struct pt_regs *regs)
2742+{
2743+ return 1;
2744+}
2745+
2746+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2747+{
2748+ unsigned long i;
2749+
2750+ printk(KERN_ERR "PAX: bytes at PC: ");
2751+ for (i = 0; i < 5; i++) {
2752+ unsigned int c;
2753+ if (get_user(c, (unsigned int __user *)pc+i))
2754+ printk(KERN_CONT "???????? ");
2755+ else
2756+ printk(KERN_CONT "%08x ", c);
2757+ }
2758+ printk("\n");
2759+}
2760+#endif
2761+
2762 /*
2763 * Check whether the instruction at regs->nip is a store using
2764 * an update addressing form which will update r1.
2765@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2766 * indicate errors in DSISR but can validly be set in SRR1.
2767 */
2768 if (trap == 0x400)
2769- error_code &= 0x48200000;
2770+ error_code &= 0x58200000;
2771 else
2772 is_write = error_code & DSISR_ISSTORE;
2773 #else
2774@@ -259,7 +291,7 @@ good_area:
2775 * "undefined". Of those that can be set, this is the only
2776 * one which seems bad.
2777 */
2778- if (error_code & 0x10000000)
2779+ if (error_code & DSISR_GUARDED)
2780 /* Guarded storage error. */
2781 goto bad_area;
2782 #endif /* CONFIG_8xx */
2783@@ -274,7 +306,7 @@ good_area:
2784 * processors use the same I/D cache coherency mechanism
2785 * as embedded.
2786 */
2787- if (error_code & DSISR_PROTFAULT)
2788+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2789 goto bad_area;
2790 #endif /* CONFIG_PPC_STD_MMU */
2791
2792@@ -343,6 +375,23 @@ bad_area:
2793 bad_area_nosemaphore:
2794 /* User mode accesses cause a SIGSEGV */
2795 if (user_mode(regs)) {
2796+
2797+#ifdef CONFIG_PAX_PAGEEXEC
2798+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2799+#ifdef CONFIG_PPC_STD_MMU
2800+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2801+#else
2802+ if (is_exec && regs->nip == address) {
2803+#endif
2804+ switch (pax_handle_fetch_fault(regs)) {
2805+ }
2806+
2807+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2808+ do_group_exit(SIGKILL);
2809+ }
2810+ }
2811+#endif
2812+
2813 _exception(SIGSEGV, regs, code, address);
2814 return 0;
2815 }
2816diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2817index 5a783d8..c23e14b 100644
2818--- a/arch/powerpc/mm/mmap_64.c
2819+++ b/arch/powerpc/mm/mmap_64.c
2820@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2821 */
2822 if (mmap_is_legacy()) {
2823 mm->mmap_base = TASK_UNMAPPED_BASE;
2824+
2825+#ifdef CONFIG_PAX_RANDMMAP
2826+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2827+ mm->mmap_base += mm->delta_mmap;
2828+#endif
2829+
2830 mm->get_unmapped_area = arch_get_unmapped_area;
2831 mm->unmap_area = arch_unmap_area;
2832 } else {
2833 mm->mmap_base = mmap_base();
2834+
2835+#ifdef CONFIG_PAX_RANDMMAP
2836+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2837+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2838+#endif
2839+
2840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2841 mm->unmap_area = arch_unmap_area_topdown;
2842 }
2843diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2844index ba51948..23009d9 100644
2845--- a/arch/powerpc/mm/slice.c
2846+++ b/arch/powerpc/mm/slice.c
2847@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2848 if ((mm->task_size - len) < addr)
2849 return 0;
2850 vma = find_vma(mm, addr);
2851- return (!vma || (addr + len) <= vma->vm_start);
2852+ return check_heap_stack_gap(vma, addr, len);
2853 }
2854
2855 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2856@@ -256,7 +256,7 @@ full_search:
2857 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2858 continue;
2859 }
2860- if (!vma || addr + len <= vma->vm_start) {
2861+ if (check_heap_stack_gap(vma, addr, len)) {
2862 /*
2863 * Remember the place where we stopped the search:
2864 */
2865@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2866 }
2867 }
2868
2869- addr = mm->mmap_base;
2870- while (addr > len) {
2871+ if (mm->mmap_base < len)
2872+ addr = -ENOMEM;
2873+ else
2874+ addr = mm->mmap_base - len;
2875+
2876+ while (!IS_ERR_VALUE(addr)) {
2877 /* Go down by chunk size */
2878- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2879+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2880
2881 /* Check for hit with different page size */
2882 mask = slice_range_to_mask(addr, len);
2883@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2884 * return with success:
2885 */
2886 vma = find_vma(mm, addr);
2887- if (!vma || (addr + len) <= vma->vm_start) {
2888+ if (check_heap_stack_gap(vma, addr, len)) {
2889 /* remember the address as a hint for next time */
2890 if (use_cache)
2891 mm->free_area_cache = addr;
2892@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2893 mm->cached_hole_size = vma->vm_start - addr;
2894
2895 /* try just below the current vma->vm_start */
2896- addr = vma->vm_start;
2897+ addr = skip_heap_stack_gap(vma, len);
2898 }
2899
2900 /*
2901@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2902 if (fixed && addr > (mm->task_size - len))
2903 return -EINVAL;
2904
2905+#ifdef CONFIG_PAX_RANDMMAP
2906+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2907+ addr = 0;
2908+#endif
2909+
2910 /* If hint, make sure it matches our alignment restrictions */
2911 if (!fixed && addr) {
2912 addr = _ALIGN_UP(addr, 1ul << pshift);
2913diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2914index 547f1a6..3fff354 100644
2915--- a/arch/s390/include/asm/elf.h
2916+++ b/arch/s390/include/asm/elf.h
2917@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2918 the loader. We need to make sure that it is out of the way of the program
2919 that it will "exec", and that there is sufficient room for the brk. */
2920
2921-extern unsigned long randomize_et_dyn(unsigned long base);
2922-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2923+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2924+
2925+#ifdef CONFIG_PAX_ASLR
2926+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2927+
2928+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2930+#endif
2931
2932 /* This yields a mask that user programs can use to figure out what
2933 instruction set this CPU supports. */
2934@@ -211,7 +217,4 @@ struct linux_binprm;
2935 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2936 int arch_setup_additional_pages(struct linux_binprm *, int);
2937
2938-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2939-#define arch_randomize_brk arch_randomize_brk
2940-
2941 #endif
2942diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2943index 6582f69..b69906f 100644
2944--- a/arch/s390/include/asm/system.h
2945+++ b/arch/s390/include/asm/system.h
2946@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command);
2947 extern void (*_machine_halt)(void);
2948 extern void (*_machine_power_off)(void);
2949
2950-extern unsigned long arch_align_stack(unsigned long sp);
2951+#define arch_align_stack(x) ((x) & ~0xfUL)
2952
2953 static inline int tprot(unsigned long addr)
2954 {
2955diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2956index 2b23885..e136e31 100644
2957--- a/arch/s390/include/asm/uaccess.h
2958+++ b/arch/s390/include/asm/uaccess.h
2959@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2960 copy_to_user(void __user *to, const void *from, unsigned long n)
2961 {
2962 might_fault();
2963+
2964+ if ((long)n < 0)
2965+ return n;
2966+
2967 if (access_ok(VERIFY_WRITE, to, n))
2968 n = __copy_to_user(to, from, n);
2969 return n;
2970@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2971 static inline unsigned long __must_check
2972 __copy_from_user(void *to, const void __user *from, unsigned long n)
2973 {
2974+ if ((long)n < 0)
2975+ return n;
2976+
2977 if (__builtin_constant_p(n) && (n <= 256))
2978 return uaccess.copy_from_user_small(n, from, to);
2979 else
2980@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2981 unsigned int sz = __compiletime_object_size(to);
2982
2983 might_fault();
2984+
2985+ if ((long)n < 0)
2986+ return n;
2987+
2988 if (unlikely(sz != -1 && sz < n)) {
2989 copy_from_user_overflow();
2990 return n;
2991diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2992index dfcb343..eda788a 100644
2993--- a/arch/s390/kernel/module.c
2994+++ b/arch/s390/kernel/module.c
2995@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2996
2997 /* Increase core size by size of got & plt and set start
2998 offsets for got and plt. */
2999- me->core_size = ALIGN(me->core_size, 4);
3000- me->arch.got_offset = me->core_size;
3001- me->core_size += me->arch.got_size;
3002- me->arch.plt_offset = me->core_size;
3003- me->core_size += me->arch.plt_size;
3004+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3005+ me->arch.got_offset = me->core_size_rw;
3006+ me->core_size_rw += me->arch.got_size;
3007+ me->arch.plt_offset = me->core_size_rx;
3008+ me->core_size_rx += me->arch.plt_size;
3009 return 0;
3010 }
3011
3012@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3013 if (info->got_initialized == 0) {
3014 Elf_Addr *gotent;
3015
3016- gotent = me->module_core + me->arch.got_offset +
3017+ gotent = me->module_core_rw + me->arch.got_offset +
3018 info->got_offset;
3019 *gotent = val;
3020 info->got_initialized = 1;
3021@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3022 else if (r_type == R_390_GOTENT ||
3023 r_type == R_390_GOTPLTENT)
3024 *(unsigned int *) loc =
3025- (val + (Elf_Addr) me->module_core - loc) >> 1;
3026+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3027 else if (r_type == R_390_GOT64 ||
3028 r_type == R_390_GOTPLT64)
3029 *(unsigned long *) loc = val;
3030@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3031 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3032 if (info->plt_initialized == 0) {
3033 unsigned int *ip;
3034- ip = me->module_core + me->arch.plt_offset +
3035+ ip = me->module_core_rx + me->arch.plt_offset +
3036 info->plt_offset;
3037 #ifndef CONFIG_64BIT
3038 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3039@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3040 val - loc + 0xffffUL < 0x1ffffeUL) ||
3041 (r_type == R_390_PLT32DBL &&
3042 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3043- val = (Elf_Addr) me->module_core +
3044+ val = (Elf_Addr) me->module_core_rx +
3045 me->arch.plt_offset +
3046 info->plt_offset;
3047 val += rela->r_addend - loc;
3048@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3049 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3050 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3051 val = val + rela->r_addend -
3052- ((Elf_Addr) me->module_core + me->arch.got_offset);
3053+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3054 if (r_type == R_390_GOTOFF16)
3055 *(unsigned short *) loc = val;
3056 else if (r_type == R_390_GOTOFF32)
3057@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3058 break;
3059 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3060 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3061- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3062+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3063 rela->r_addend - loc;
3064 if (r_type == R_390_GOTPC)
3065 *(unsigned int *) loc = val;
3066diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3067index 541a750..8739853 100644
3068--- a/arch/s390/kernel/process.c
3069+++ b/arch/s390/kernel/process.c
3070@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p)
3071 }
3072 return 0;
3073 }
3074-
3075-unsigned long arch_align_stack(unsigned long sp)
3076-{
3077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3078- sp -= get_random_int() & ~PAGE_MASK;
3079- return sp & ~0xf;
3080-}
3081-
3082-static inline unsigned long brk_rnd(void)
3083-{
3084- /* 8MB for 32bit, 1GB for 64bit */
3085- if (is_32bit_task())
3086- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3087- else
3088- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3089-}
3090-
3091-unsigned long arch_randomize_brk(struct mm_struct *mm)
3092-{
3093- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3094-
3095- if (ret < mm->brk)
3096- return mm->brk;
3097- return ret;
3098-}
3099-
3100-unsigned long randomize_et_dyn(unsigned long base)
3101-{
3102- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3103-
3104- if (!(current->flags & PF_RANDOMIZE))
3105- return base;
3106- if (ret < base)
3107- return base;
3108- return ret;
3109-}
3110diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3111index c9a9f7f..60d0315 100644
3112--- a/arch/s390/mm/mmap.c
3113+++ b/arch/s390/mm/mmap.c
3114@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3115 */
3116 if (mmap_is_legacy()) {
3117 mm->mmap_base = TASK_UNMAPPED_BASE;
3118+
3119+#ifdef CONFIG_PAX_RANDMMAP
3120+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3121+ mm->mmap_base += mm->delta_mmap;
3122+#endif
3123+
3124 mm->get_unmapped_area = arch_get_unmapped_area;
3125 mm->unmap_area = arch_unmap_area;
3126 } else {
3127 mm->mmap_base = mmap_base();
3128+
3129+#ifdef CONFIG_PAX_RANDMMAP
3130+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3131+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3132+#endif
3133+
3134 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3135 mm->unmap_area = arch_unmap_area_topdown;
3136 }
3137@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3138 */
3139 if (mmap_is_legacy()) {
3140 mm->mmap_base = TASK_UNMAPPED_BASE;
3141+
3142+#ifdef CONFIG_PAX_RANDMMAP
3143+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3144+ mm->mmap_base += mm->delta_mmap;
3145+#endif
3146+
3147 mm->get_unmapped_area = s390_get_unmapped_area;
3148 mm->unmap_area = arch_unmap_area;
3149 } else {
3150 mm->mmap_base = mmap_base();
3151+
3152+#ifdef CONFIG_PAX_RANDMMAP
3153+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3154+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3155+#endif
3156+
3157 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3158 mm->unmap_area = arch_unmap_area_topdown;
3159 }
3160diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3161index 589d5c7..669e274 100644
3162--- a/arch/score/include/asm/system.h
3163+++ b/arch/score/include/asm/system.h
3164@@ -17,7 +17,7 @@ do { \
3165 #define finish_arch_switch(prev) do {} while (0)
3166
3167 typedef void (*vi_handler_t)(void);
3168-extern unsigned long arch_align_stack(unsigned long sp);
3169+#define arch_align_stack(x) (x)
3170
3171 #define mb() barrier()
3172 #define rmb() barrier()
3173diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3174index 25d0803..d6c8e36 100644
3175--- a/arch/score/kernel/process.c
3176+++ b/arch/score/kernel/process.c
3177@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3178
3179 return task_pt_regs(task)->cp0_epc;
3180 }
3181-
3182-unsigned long arch_align_stack(unsigned long sp)
3183-{
3184- return sp;
3185-}
3186diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3187index afeb710..d1d1289 100644
3188--- a/arch/sh/mm/mmap.c
3189+++ b/arch/sh/mm/mmap.c
3190@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3191 addr = PAGE_ALIGN(addr);
3192
3193 vma = find_vma(mm, addr);
3194- if (TASK_SIZE - len >= addr &&
3195- (!vma || addr + len <= vma->vm_start))
3196+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3197 return addr;
3198 }
3199
3200@@ -106,7 +105,7 @@ full_search:
3201 }
3202 return -ENOMEM;
3203 }
3204- if (likely(!vma || addr + len <= vma->vm_start)) {
3205+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3206 /*
3207 * Remember the place where we stopped the search:
3208 */
3209@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3210 addr = PAGE_ALIGN(addr);
3211
3212 vma = find_vma(mm, addr);
3213- if (TASK_SIZE - len >= addr &&
3214- (!vma || addr + len <= vma->vm_start))
3215+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3216 return addr;
3217 }
3218
3219@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3220 /* make sure it can fit in the remaining address space */
3221 if (likely(addr > len)) {
3222 vma = find_vma(mm, addr-len);
3223- if (!vma || addr <= vma->vm_start) {
3224+ if (check_heap_stack_gap(vma, addr - len, len)) {
3225 /* remember the address as a hint for next time */
3226 return (mm->free_area_cache = addr-len);
3227 }
3228@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3229 if (unlikely(mm->mmap_base < len))
3230 goto bottomup;
3231
3232- addr = mm->mmap_base-len;
3233- if (do_colour_align)
3234- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3235+ addr = mm->mmap_base - len;
3236
3237 do {
3238+ if (do_colour_align)
3239+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3240 /*
3241 * Lookup failure means no vma is above this address,
3242 * else if new region fits below vma->vm_start,
3243 * return with success:
3244 */
3245 vma = find_vma(mm, addr);
3246- if (likely(!vma || addr+len <= vma->vm_start)) {
3247+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3248 /* remember the address as a hint for next time */
3249 return (mm->free_area_cache = addr);
3250 }
3251@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3252 mm->cached_hole_size = vma->vm_start - addr;
3253
3254 /* try just below the current vma->vm_start */
3255- addr = vma->vm_start-len;
3256- if (do_colour_align)
3257- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3258- } while (likely(len < vma->vm_start));
3259+ addr = skip_heap_stack_gap(vma, len);
3260+ } while (!IS_ERR_VALUE(addr));
3261
3262 bottomup:
3263 /*
3264diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3265index ad1fb5d..fc5315b 100644
3266--- a/arch/sparc/Makefile
3267+++ b/arch/sparc/Makefile
3268@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3269 # Export what is needed by arch/sparc/boot/Makefile
3270 export VMLINUX_INIT VMLINUX_MAIN
3271 VMLINUX_INIT := $(head-y) $(init-y)
3272-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3273+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3274 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3275 VMLINUX_MAIN += $(drivers-y) $(net-y)
3276
3277diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3278index 9f421df..b81fc12 100644
3279--- a/arch/sparc/include/asm/atomic_64.h
3280+++ b/arch/sparc/include/asm/atomic_64.h
3281@@ -14,18 +14,40 @@
3282 #define ATOMIC64_INIT(i) { (i) }
3283
3284 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3285+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3286+{
3287+ return v->counter;
3288+}
3289 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3290+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3291+{
3292+ return v->counter;
3293+}
3294
3295 #define atomic_set(v, i) (((v)->counter) = i)
3296+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3297+{
3298+ v->counter = i;
3299+}
3300 #define atomic64_set(v, i) (((v)->counter) = i)
3301+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3302+{
3303+ v->counter = i;
3304+}
3305
3306 extern void atomic_add(int, atomic_t *);
3307+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3308 extern void atomic64_add(long, atomic64_t *);
3309+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3310 extern void atomic_sub(int, atomic_t *);
3311+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3312 extern void atomic64_sub(long, atomic64_t *);
3313+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3314
3315 extern int atomic_add_ret(int, atomic_t *);
3316+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3317 extern long atomic64_add_ret(long, atomic64_t *);
3318+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3319 extern int atomic_sub_ret(int, atomic_t *);
3320 extern long atomic64_sub_ret(long, atomic64_t *);
3321
3322@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3323 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3324
3325 #define atomic_inc_return(v) atomic_add_ret(1, v)
3326+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3327+{
3328+ return atomic_add_ret_unchecked(1, v);
3329+}
3330 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3331+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3332+{
3333+ return atomic64_add_ret_unchecked(1, v);
3334+}
3335
3336 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3337 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3338
3339 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3340+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3341+{
3342+ return atomic_add_ret_unchecked(i, v);
3343+}
3344 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3345+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3346+{
3347+ return atomic64_add_ret_unchecked(i, v);
3348+}
3349
3350 /*
3351 * atomic_inc_and_test - increment and test
3352@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3353 * other cases.
3354 */
3355 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3356+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3357+{
3358+ return atomic_inc_return_unchecked(v) == 0;
3359+}
3360 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3361
3362 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3363@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3364 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3365
3366 #define atomic_inc(v) atomic_add(1, v)
3367+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3368+{
3369+ atomic_add_unchecked(1, v);
3370+}
3371 #define atomic64_inc(v) atomic64_add(1, v)
3372+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3373+{
3374+ atomic64_add_unchecked(1, v);
3375+}
3376
3377 #define atomic_dec(v) atomic_sub(1, v)
3378+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3379+{
3380+ atomic_sub_unchecked(1, v);
3381+}
3382 #define atomic64_dec(v) atomic64_sub(1, v)
3383+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3384+{
3385+ atomic64_sub_unchecked(1, v);
3386+}
3387
3388 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3389 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3390
3391 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3392+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3393+{
3394+ return cmpxchg(&v->counter, old, new);
3395+}
3396 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3397+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3398+{
3399+ return xchg(&v->counter, new);
3400+}
3401
3402 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3403 {
3404- int c, old;
3405+ int c, old, new;
3406 c = atomic_read(v);
3407 for (;;) {
3408- if (unlikely(c == (u)))
3409+ if (unlikely(c == u))
3410 break;
3411- old = atomic_cmpxchg((v), c, c + (a));
3412+
3413+ asm volatile("addcc %2, %0, %0\n"
3414+
3415+#ifdef CONFIG_PAX_REFCOUNT
3416+ "tvs %%icc, 6\n"
3417+#endif
3418+
3419+ : "=r" (new)
3420+ : "0" (c), "ir" (a)
3421+ : "cc");
3422+
3423+ old = atomic_cmpxchg(v, c, new);
3424 if (likely(old == c))
3425 break;
3426 c = old;
3427@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3428 #define atomic64_cmpxchg(v, o, n) \
3429 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3430 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3431+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3432+{
3433+ return xchg(&v->counter, new);
3434+}
3435
3436 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3437 {
3438- long c, old;
3439+ long c, old, new;
3440 c = atomic64_read(v);
3441 for (;;) {
3442- if (unlikely(c == (u)))
3443+ if (unlikely(c == u))
3444 break;
3445- old = atomic64_cmpxchg((v), c, c + (a));
3446+
3447+ asm volatile("addcc %2, %0, %0\n"
3448+
3449+#ifdef CONFIG_PAX_REFCOUNT
3450+ "tvs %%xcc, 6\n"
3451+#endif
3452+
3453+ : "=r" (new)
3454+ : "0" (c), "ir" (a)
3455+ : "cc");
3456+
3457+ old = atomic64_cmpxchg(v, c, new);
3458 if (likely(old == c))
3459 break;
3460 c = old;
3461 }
3462- return c != (u);
3463+ return c != u;
3464 }
3465
3466 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3467diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3468index 69358b5..17b4745 100644
3469--- a/arch/sparc/include/asm/cache.h
3470+++ b/arch/sparc/include/asm/cache.h
3471@@ -10,7 +10,7 @@
3472 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3473
3474 #define L1_CACHE_SHIFT 5
3475-#define L1_CACHE_BYTES 32
3476+#define L1_CACHE_BYTES 32UL
3477
3478 #ifdef CONFIG_SPARC32
3479 #define SMP_CACHE_BYTES_SHIFT 5
3480diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3481index 4269ca6..e3da77f 100644
3482--- a/arch/sparc/include/asm/elf_32.h
3483+++ b/arch/sparc/include/asm/elf_32.h
3484@@ -114,6 +114,13 @@ typedef struct {
3485
3486 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3487
3488+#ifdef CONFIG_PAX_ASLR
3489+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3490+
3491+#define PAX_DELTA_MMAP_LEN 16
3492+#define PAX_DELTA_STACK_LEN 16
3493+#endif
3494+
3495 /* This yields a mask that user programs can use to figure out what
3496 instruction set this cpu supports. This can NOT be done in userspace
3497 on Sparc. */
3498diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3499index 7df8b7f..4946269 100644
3500--- a/arch/sparc/include/asm/elf_64.h
3501+++ b/arch/sparc/include/asm/elf_64.h
3502@@ -180,6 +180,13 @@ typedef struct {
3503 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3504 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3505
3506+#ifdef CONFIG_PAX_ASLR
3507+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3508+
3509+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3510+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3511+#endif
3512+
3513 extern unsigned long sparc64_elf_hwcap;
3514 #define ELF_HWCAP sparc64_elf_hwcap
3515
3516diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3517index a790cc6..091ed94 100644
3518--- a/arch/sparc/include/asm/pgtable_32.h
3519+++ b/arch/sparc/include/asm/pgtable_32.h
3520@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3521 BTFIXUPDEF_INT(page_none)
3522 BTFIXUPDEF_INT(page_copy)
3523 BTFIXUPDEF_INT(page_readonly)
3524+
3525+#ifdef CONFIG_PAX_PAGEEXEC
3526+BTFIXUPDEF_INT(page_shared_noexec)
3527+BTFIXUPDEF_INT(page_copy_noexec)
3528+BTFIXUPDEF_INT(page_readonly_noexec)
3529+#endif
3530+
3531 BTFIXUPDEF_INT(page_kernel)
3532
3533 #define PMD_SHIFT SUN4C_PMD_SHIFT
3534@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3535 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3536 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3537
3538+#ifdef CONFIG_PAX_PAGEEXEC
3539+extern pgprot_t PAGE_SHARED_NOEXEC;
3540+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3541+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3542+#else
3543+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3544+# define PAGE_COPY_NOEXEC PAGE_COPY
3545+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3546+#endif
3547+
3548 extern unsigned long page_kernel;
3549
3550 #ifdef MODULE
3551diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3552index f6ae2b2..b03ffc7 100644
3553--- a/arch/sparc/include/asm/pgtsrmmu.h
3554+++ b/arch/sparc/include/asm/pgtsrmmu.h
3555@@ -115,6 +115,13 @@
3556 SRMMU_EXEC | SRMMU_REF)
3557 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3558 SRMMU_EXEC | SRMMU_REF)
3559+
3560+#ifdef CONFIG_PAX_PAGEEXEC
3561+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3562+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3564+#endif
3565+
3566 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3567 SRMMU_DIRTY | SRMMU_REF)
3568
3569diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3570index 9689176..63c18ea 100644
3571--- a/arch/sparc/include/asm/spinlock_64.h
3572+++ b/arch/sparc/include/asm/spinlock_64.h
3573@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3574
3575 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3576
3577-static void inline arch_read_lock(arch_rwlock_t *lock)
3578+static inline void arch_read_lock(arch_rwlock_t *lock)
3579 {
3580 unsigned long tmp1, tmp2;
3581
3582 __asm__ __volatile__ (
3583 "1: ldsw [%2], %0\n"
3584 " brlz,pn %0, 2f\n"
3585-"4: add %0, 1, %1\n"
3586+"4: addcc %0, 1, %1\n"
3587+
3588+#ifdef CONFIG_PAX_REFCOUNT
3589+" tvs %%icc, 6\n"
3590+#endif
3591+
3592 " cas [%2], %0, %1\n"
3593 " cmp %0, %1\n"
3594 " bne,pn %%icc, 1b\n"
3595@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3596 " .previous"
3597 : "=&r" (tmp1), "=&r" (tmp2)
3598 : "r" (lock)
3599- : "memory");
3600+ : "memory", "cc");
3601 }
3602
3603-static int inline arch_read_trylock(arch_rwlock_t *lock)
3604+static inline int arch_read_trylock(arch_rwlock_t *lock)
3605 {
3606 int tmp1, tmp2;
3607
3608@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3609 "1: ldsw [%2], %0\n"
3610 " brlz,a,pn %0, 2f\n"
3611 " mov 0, %0\n"
3612-" add %0, 1, %1\n"
3613+" addcc %0, 1, %1\n"
3614+
3615+#ifdef CONFIG_PAX_REFCOUNT
3616+" tvs %%icc, 6\n"
3617+#endif
3618+
3619 " cas [%2], %0, %1\n"
3620 " cmp %0, %1\n"
3621 " bne,pn %%icc, 1b\n"
3622@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3623 return tmp1;
3624 }
3625
3626-static void inline arch_read_unlock(arch_rwlock_t *lock)
3627+static inline void arch_read_unlock(arch_rwlock_t *lock)
3628 {
3629 unsigned long tmp1, tmp2;
3630
3631 __asm__ __volatile__(
3632 "1: lduw [%2], %0\n"
3633-" sub %0, 1, %1\n"
3634+" subcc %0, 1, %1\n"
3635+
3636+#ifdef CONFIG_PAX_REFCOUNT
3637+" tvs %%icc, 6\n"
3638+#endif
3639+
3640 " cas [%2], %0, %1\n"
3641 " cmp %0, %1\n"
3642 " bne,pn %%xcc, 1b\n"
3643@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3644 : "memory");
3645 }
3646
3647-static void inline arch_write_lock(arch_rwlock_t *lock)
3648+static inline void arch_write_lock(arch_rwlock_t *lock)
3649 {
3650 unsigned long mask, tmp1, tmp2;
3651
3652@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3653 : "memory");
3654 }
3655
3656-static void inline arch_write_unlock(arch_rwlock_t *lock)
3657+static inline void arch_write_unlock(arch_rwlock_t *lock)
3658 {
3659 __asm__ __volatile__(
3660 " stw %%g0, [%0]"
3661@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3662 : "memory");
3663 }
3664
3665-static int inline arch_write_trylock(arch_rwlock_t *lock)
3666+static inline int arch_write_trylock(arch_rwlock_t *lock)
3667 {
3668 unsigned long mask, tmp1, tmp2, result;
3669
3670diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3671index fa57532..e1a4c53 100644
3672--- a/arch/sparc/include/asm/thread_info_32.h
3673+++ b/arch/sparc/include/asm/thread_info_32.h
3674@@ -50,6 +50,8 @@ struct thread_info {
3675 unsigned long w_saved;
3676
3677 struct restart_block restart_block;
3678+
3679+ unsigned long lowest_stack;
3680 };
3681
3682 /*
3683diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3684index 60d86be..952dea1 100644
3685--- a/arch/sparc/include/asm/thread_info_64.h
3686+++ b/arch/sparc/include/asm/thread_info_64.h
3687@@ -63,6 +63,8 @@ struct thread_info {
3688 struct pt_regs *kern_una_regs;
3689 unsigned int kern_una_insn;
3690
3691+ unsigned long lowest_stack;
3692+
3693 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3694 };
3695
3696diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3697index e88fbe5..96b0ce5 100644
3698--- a/arch/sparc/include/asm/uaccess.h
3699+++ b/arch/sparc/include/asm/uaccess.h
3700@@ -1,5 +1,13 @@
3701 #ifndef ___ASM_SPARC_UACCESS_H
3702 #define ___ASM_SPARC_UACCESS_H
3703+
3704+#ifdef __KERNEL__
3705+#ifndef __ASSEMBLY__
3706+#include <linux/types.h>
3707+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3708+#endif
3709+#endif
3710+
3711 #if defined(__sparc__) && defined(__arch64__)
3712 #include <asm/uaccess_64.h>
3713 #else
3714diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3715index 8303ac4..07f333d 100644
3716--- a/arch/sparc/include/asm/uaccess_32.h
3717+++ b/arch/sparc/include/asm/uaccess_32.h
3718@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3719
3720 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3721 {
3722- if (n && __access_ok((unsigned long) to, n))
3723+ if ((long)n < 0)
3724+ return n;
3725+
3726+ if (n && __access_ok((unsigned long) to, n)) {
3727+ if (!__builtin_constant_p(n))
3728+ check_object_size(from, n, true);
3729 return __copy_user(to, (__force void __user *) from, n);
3730- else
3731+ } else
3732 return n;
3733 }
3734
3735 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3736 {
3737+ if ((long)n < 0)
3738+ return n;
3739+
3740+ if (!__builtin_constant_p(n))
3741+ check_object_size(from, n, true);
3742+
3743 return __copy_user(to, (__force void __user *) from, n);
3744 }
3745
3746 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3747 {
3748- if (n && __access_ok((unsigned long) from, n))
3749+ if ((long)n < 0)
3750+ return n;
3751+
3752+ if (n && __access_ok((unsigned long) from, n)) {
3753+ if (!__builtin_constant_p(n))
3754+ check_object_size(to, n, false);
3755 return __copy_user((__force void __user *) to, from, n);
3756- else
3757+ } else
3758 return n;
3759 }
3760
3761 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3762 {
3763+ if ((long)n < 0)
3764+ return n;
3765+
3766 return __copy_user((__force void __user *) to, from, n);
3767 }
3768
3769diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3770index 3e1449f..5293a0e 100644
3771--- a/arch/sparc/include/asm/uaccess_64.h
3772+++ b/arch/sparc/include/asm/uaccess_64.h
3773@@ -10,6 +10,7 @@
3774 #include <linux/compiler.h>
3775 #include <linux/string.h>
3776 #include <linux/thread_info.h>
3777+#include <linux/kernel.h>
3778 #include <asm/asi.h>
3779 #include <asm/system.h>
3780 #include <asm/spitfire.h>
3781@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3782 static inline unsigned long __must_check
3783 copy_from_user(void *to, const void __user *from, unsigned long size)
3784 {
3785- unsigned long ret = ___copy_from_user(to, from, size);
3786+ unsigned long ret;
3787
3788+ if ((long)size < 0 || size > INT_MAX)
3789+ return size;
3790+
3791+ if (!__builtin_constant_p(size))
3792+ check_object_size(to, size, false);
3793+
3794+ ret = ___copy_from_user(to, from, size);
3795 if (unlikely(ret))
3796 ret = copy_from_user_fixup(to, from, size);
3797
3798@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3799 static inline unsigned long __must_check
3800 copy_to_user(void __user *to, const void *from, unsigned long size)
3801 {
3802- unsigned long ret = ___copy_to_user(to, from, size);
3803+ unsigned long ret;
3804
3805+ if ((long)size < 0 || size > INT_MAX)
3806+ return size;
3807+
3808+ if (!__builtin_constant_p(size))
3809+ check_object_size(from, size, true);
3810+
3811+ ret = ___copy_to_user(to, from, size);
3812 if (unlikely(ret))
3813 ret = copy_to_user_fixup(to, from, size);
3814 return ret;
3815diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3816index cb85458..e063f17 100644
3817--- a/arch/sparc/kernel/Makefile
3818+++ b/arch/sparc/kernel/Makefile
3819@@ -3,7 +3,7 @@
3820 #
3821
3822 asflags-y := -ansi
3823-ccflags-y := -Werror
3824+#ccflags-y := -Werror
3825
3826 extra-y := head_$(BITS).o
3827 extra-y += init_task.o
3828diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3829index f793742..4d880af 100644
3830--- a/arch/sparc/kernel/process_32.c
3831+++ b/arch/sparc/kernel/process_32.c
3832@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3833 rw->ins[4], rw->ins[5],
3834 rw->ins[6],
3835 rw->ins[7]);
3836- printk("%pS\n", (void *) rw->ins[7]);
3837+ printk("%pA\n", (void *) rw->ins[7]);
3838 rw = (struct reg_window32 *) rw->ins[6];
3839 }
3840 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3841@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3842
3843 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3844 r->psr, r->pc, r->npc, r->y, print_tainted());
3845- printk("PC: <%pS>\n", (void *) r->pc);
3846+ printk("PC: <%pA>\n", (void *) r->pc);
3847 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3848 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3849 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3850 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3851 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3852 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3853- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3854+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3855
3856 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3857 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3858@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3859 rw = (struct reg_window32 *) fp;
3860 pc = rw->ins[7];
3861 printk("[%08lx : ", pc);
3862- printk("%pS ] ", (void *) pc);
3863+ printk("%pA ] ", (void *) pc);
3864 fp = rw->ins[6];
3865 } while (++count < 16);
3866 printk("\n");
3867diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3868index d959cd0..7b42812 100644
3869--- a/arch/sparc/kernel/process_64.c
3870+++ b/arch/sparc/kernel/process_64.c
3871@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3872 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3873 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3874 if (regs->tstate & TSTATE_PRIV)
3875- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3876+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3877 }
3878
3879 void show_regs(struct pt_regs *regs)
3880 {
3881 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3882 regs->tpc, regs->tnpc, regs->y, print_tainted());
3883- printk("TPC: <%pS>\n", (void *) regs->tpc);
3884+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3885 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3886 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3887 regs->u_regs[3]);
3888@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3889 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3890 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3891 regs->u_regs[15]);
3892- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3893+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3894 show_regwindow(regs);
3895 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3896 }
3897@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3898 ((tp && tp->task) ? tp->task->pid : -1));
3899
3900 if (gp->tstate & TSTATE_PRIV) {
3901- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3902+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3903 (void *) gp->tpc,
3904 (void *) gp->o7,
3905 (void *) gp->i7,
3906diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3907index 42b282f..28ce9f2 100644
3908--- a/arch/sparc/kernel/sys_sparc_32.c
3909+++ b/arch/sparc/kernel/sys_sparc_32.c
3910@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3911 if (ARCH_SUN4C && len > 0x20000000)
3912 return -ENOMEM;
3913 if (!addr)
3914- addr = TASK_UNMAPPED_BASE;
3915+ addr = current->mm->mmap_base;
3916
3917 if (flags & MAP_SHARED)
3918 addr = COLOUR_ALIGN(addr);
3919@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3920 }
3921 if (TASK_SIZE - PAGE_SIZE - len < addr)
3922 return -ENOMEM;
3923- if (!vmm || addr + len <= vmm->vm_start)
3924+ if (check_heap_stack_gap(vmm, addr, len))
3925 return addr;
3926 addr = vmm->vm_end;
3927 if (flags & MAP_SHARED)
3928diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3929index 908b47a..aa9e584 100644
3930--- a/arch/sparc/kernel/sys_sparc_64.c
3931+++ b/arch/sparc/kernel/sys_sparc_64.c
3932@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3933 /* We do not accept a shared mapping if it would violate
3934 * cache aliasing constraints.
3935 */
3936- if ((flags & MAP_SHARED) &&
3937+ if ((filp || (flags & MAP_SHARED)) &&
3938 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3939 return -EINVAL;
3940 return addr;
3941@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3942 if (filp || (flags & MAP_SHARED))
3943 do_color_align = 1;
3944
3945+#ifdef CONFIG_PAX_RANDMMAP
3946+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3947+#endif
3948+
3949 if (addr) {
3950 if (do_color_align)
3951 addr = COLOUR_ALIGN(addr, pgoff);
3952@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3953 addr = PAGE_ALIGN(addr);
3954
3955 vma = find_vma(mm, addr);
3956- if (task_size - len >= addr &&
3957- (!vma || addr + len <= vma->vm_start))
3958+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3959 return addr;
3960 }
3961
3962 if (len > mm->cached_hole_size) {
3963- start_addr = addr = mm->free_area_cache;
3964+ start_addr = addr = mm->free_area_cache;
3965 } else {
3966- start_addr = addr = TASK_UNMAPPED_BASE;
3967+ start_addr = addr = mm->mmap_base;
3968 mm->cached_hole_size = 0;
3969 }
3970
3971@@ -174,14 +177,14 @@ full_search:
3972 vma = find_vma(mm, VA_EXCLUDE_END);
3973 }
3974 if (unlikely(task_size < addr)) {
3975- if (start_addr != TASK_UNMAPPED_BASE) {
3976- start_addr = addr = TASK_UNMAPPED_BASE;
3977+ if (start_addr != mm->mmap_base) {
3978+ start_addr = addr = mm->mmap_base;
3979 mm->cached_hole_size = 0;
3980 goto full_search;
3981 }
3982 return -ENOMEM;
3983 }
3984- if (likely(!vma || addr + len <= vma->vm_start)) {
3985+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3986 /*
3987 * Remember the place where we stopped the search:
3988 */
3989@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3990 /* We do not accept a shared mapping if it would violate
3991 * cache aliasing constraints.
3992 */
3993- if ((flags & MAP_SHARED) &&
3994+ if ((filp || (flags & MAP_SHARED)) &&
3995 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3996 return -EINVAL;
3997 return addr;
3998@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3999 addr = PAGE_ALIGN(addr);
4000
4001 vma = find_vma(mm, addr);
4002- if (task_size - len >= addr &&
4003- (!vma || addr + len <= vma->vm_start))
4004+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4005 return addr;
4006 }
4007
4008@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4009 /* make sure it can fit in the remaining address space */
4010 if (likely(addr > len)) {
4011 vma = find_vma(mm, addr-len);
4012- if (!vma || addr <= vma->vm_start) {
4013+ if (check_heap_stack_gap(vma, addr - len, len)) {
4014 /* remember the address as a hint for next time */
4015 return (mm->free_area_cache = addr-len);
4016 }
4017@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4018 if (unlikely(mm->mmap_base < len))
4019 goto bottomup;
4020
4021- addr = mm->mmap_base-len;
4022- if (do_color_align)
4023- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4024+ addr = mm->mmap_base - len;
4025
4026 do {
4027+ if (do_color_align)
4028+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4029 /*
4030 * Lookup failure means no vma is above this address,
4031 * else if new region fits below vma->vm_start,
4032 * return with success:
4033 */
4034 vma = find_vma(mm, addr);
4035- if (likely(!vma || addr+len <= vma->vm_start)) {
4036+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4037 /* remember the address as a hint for next time */
4038 return (mm->free_area_cache = addr);
4039 }
4040@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4041 mm->cached_hole_size = vma->vm_start - addr;
4042
4043 /* try just below the current vma->vm_start */
4044- addr = vma->vm_start-len;
4045- if (do_color_align)
4046- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4047- } while (likely(len < vma->vm_start));
4048+ addr = skip_heap_stack_gap(vma, len);
4049+ } while (!IS_ERR_VALUE(addr));
4050
4051 bottomup:
4052 /*
4053@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4054 gap == RLIM_INFINITY ||
4055 sysctl_legacy_va_layout) {
4056 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4057+
4058+#ifdef CONFIG_PAX_RANDMMAP
4059+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4060+ mm->mmap_base += mm->delta_mmap;
4061+#endif
4062+
4063 mm->get_unmapped_area = arch_get_unmapped_area;
4064 mm->unmap_area = arch_unmap_area;
4065 } else {
4066@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4067 gap = (task_size / 6 * 5);
4068
4069 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4070+
4071+#ifdef CONFIG_PAX_RANDMMAP
4072+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4073+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4074+#endif
4075+
4076 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4077 mm->unmap_area = arch_unmap_area_topdown;
4078 }
4079diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4080index c0490c7..84959d1 100644
4081--- a/arch/sparc/kernel/traps_32.c
4082+++ b/arch/sparc/kernel/traps_32.c
4083@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
4084 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4085 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4086
4087+extern void gr_handle_kernel_exploit(void);
4088+
4089 void die_if_kernel(char *str, struct pt_regs *regs)
4090 {
4091 static int die_counter;
4092@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4093 count++ < 30 &&
4094 (((unsigned long) rw) >= PAGE_OFFSET) &&
4095 !(((unsigned long) rw) & 0x7)) {
4096- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4097+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4098 (void *) rw->ins[7]);
4099 rw = (struct reg_window32 *)rw->ins[6];
4100 }
4101 }
4102 printk("Instruction DUMP:");
4103 instruction_dump ((unsigned long *) regs->pc);
4104- if(regs->psr & PSR_PS)
4105+ if(regs->psr & PSR_PS) {
4106+ gr_handle_kernel_exploit();
4107 do_exit(SIGKILL);
4108+ }
4109 do_exit(SIGSEGV);
4110 }
4111
4112diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4113index 0cbdaa4..438e4c9 100644
4114--- a/arch/sparc/kernel/traps_64.c
4115+++ b/arch/sparc/kernel/traps_64.c
4116@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4117 i + 1,
4118 p->trapstack[i].tstate, p->trapstack[i].tpc,
4119 p->trapstack[i].tnpc, p->trapstack[i].tt);
4120- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4121+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4122 }
4123 }
4124
4125@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4126
4127 lvl -= 0x100;
4128 if (regs->tstate & TSTATE_PRIV) {
4129+
4130+#ifdef CONFIG_PAX_REFCOUNT
4131+ if (lvl == 6)
4132+ pax_report_refcount_overflow(regs);
4133+#endif
4134+
4135 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4136 die_if_kernel(buffer, regs);
4137 }
4138@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4139 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4140 {
4141 char buffer[32];
4142-
4143+
4144 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4145 0, lvl, SIGTRAP) == NOTIFY_STOP)
4146 return;
4147
4148+#ifdef CONFIG_PAX_REFCOUNT
4149+ if (lvl == 6)
4150+ pax_report_refcount_overflow(regs);
4151+#endif
4152+
4153 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4154
4155 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4156@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4157 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4158 printk("%s" "ERROR(%d): ",
4159 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4160- printk("TPC<%pS>\n", (void *) regs->tpc);
4161+ printk("TPC<%pA>\n", (void *) regs->tpc);
4162 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4163 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4164 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4165@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4166 smp_processor_id(),
4167 (type & 0x1) ? 'I' : 'D',
4168 regs->tpc);
4169- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4170+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4171 panic("Irrecoverable Cheetah+ parity error.");
4172 }
4173
4174@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4175 smp_processor_id(),
4176 (type & 0x1) ? 'I' : 'D',
4177 regs->tpc);
4178- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4179+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4180 }
4181
4182 struct sun4v_error_entry {
4183@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4184
4185 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4186 regs->tpc, tl);
4187- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4188+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4189 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4190- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4191+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4192 (void *) regs->u_regs[UREG_I7]);
4193 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4194 "pte[%lx] error[%lx]\n",
4195@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4196
4197 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4198 regs->tpc, tl);
4199- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4200+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4201 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4202- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4203+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4204 (void *) regs->u_regs[UREG_I7]);
4205 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4206 "pte[%lx] error[%lx]\n",
4207@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4208 fp = (unsigned long)sf->fp + STACK_BIAS;
4209 }
4210
4211- printk(" [%016lx] %pS\n", pc, (void *) pc);
4212+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4213 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4214 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4215 int index = tsk->curr_ret_stack;
4216 if (tsk->ret_stack && index >= graph) {
4217 pc = tsk->ret_stack[index - graph].ret;
4218- printk(" [%016lx] %pS\n", pc, (void *) pc);
4219+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4220 graph++;
4221 }
4222 }
4223@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4224 return (struct reg_window *) (fp + STACK_BIAS);
4225 }
4226
4227+extern void gr_handle_kernel_exploit(void);
4228+
4229 void die_if_kernel(char *str, struct pt_regs *regs)
4230 {
4231 static int die_counter;
4232@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4233 while (rw &&
4234 count++ < 30 &&
4235 kstack_valid(tp, (unsigned long) rw)) {
4236- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4237+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4238 (void *) rw->ins[7]);
4239
4240 rw = kernel_stack_up(rw);
4241@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4242 }
4243 user_instruction_dump ((unsigned int __user *) regs->tpc);
4244 }
4245- if (regs->tstate & TSTATE_PRIV)
4246+ if (regs->tstate & TSTATE_PRIV) {
4247+ gr_handle_kernel_exploit();
4248 do_exit(SIGKILL);
4249+ }
4250 do_exit(SIGSEGV);
4251 }
4252 EXPORT_SYMBOL(die_if_kernel);
4253diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4254index 76e4ac1..78f8bb1 100644
4255--- a/arch/sparc/kernel/unaligned_64.c
4256+++ b/arch/sparc/kernel/unaligned_64.c
4257@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4258 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4259
4260 if (__ratelimit(&ratelimit)) {
4261- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4262+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4263 regs->tpc, (void *) regs->tpc);
4264 }
4265 }
4266diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4267index a3fc437..fea9957 100644
4268--- a/arch/sparc/lib/Makefile
4269+++ b/arch/sparc/lib/Makefile
4270@@ -2,7 +2,7 @@
4271 #
4272
4273 asflags-y := -ansi -DST_DIV0=0x02
4274-ccflags-y := -Werror
4275+#ccflags-y := -Werror
4276
4277 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4278 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4279diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4280index 59186e0..f747d7a 100644
4281--- a/arch/sparc/lib/atomic_64.S
4282+++ b/arch/sparc/lib/atomic_64.S
4283@@ -18,7 +18,12 @@
4284 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4285 BACKOFF_SETUP(%o2)
4286 1: lduw [%o1], %g1
4287- add %g1, %o0, %g7
4288+ addcc %g1, %o0, %g7
4289+
4290+#ifdef CONFIG_PAX_REFCOUNT
4291+ tvs %icc, 6
4292+#endif
4293+
4294 cas [%o1], %g1, %g7
4295 cmp %g1, %g7
4296 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4297@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4298 2: BACKOFF_SPIN(%o2, %o3, 1b)
4299 .size atomic_add, .-atomic_add
4300
4301+ .globl atomic_add_unchecked
4302+ .type atomic_add_unchecked,#function
4303+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4304+ BACKOFF_SETUP(%o2)
4305+1: lduw [%o1], %g1
4306+ add %g1, %o0, %g7
4307+ cas [%o1], %g1, %g7
4308+ cmp %g1, %g7
4309+ bne,pn %icc, 2f
4310+ nop
4311+ retl
4312+ nop
4313+2: BACKOFF_SPIN(%o2, %o3, 1b)
4314+ .size atomic_add_unchecked, .-atomic_add_unchecked
4315+
4316 .globl atomic_sub
4317 .type atomic_sub,#function
4318 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4319 BACKOFF_SETUP(%o2)
4320 1: lduw [%o1], %g1
4321- sub %g1, %o0, %g7
4322+ subcc %g1, %o0, %g7
4323+
4324+#ifdef CONFIG_PAX_REFCOUNT
4325+ tvs %icc, 6
4326+#endif
4327+
4328 cas [%o1], %g1, %g7
4329 cmp %g1, %g7
4330 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4331@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4332 2: BACKOFF_SPIN(%o2, %o3, 1b)
4333 .size atomic_sub, .-atomic_sub
4334
4335+ .globl atomic_sub_unchecked
4336+ .type atomic_sub_unchecked,#function
4337+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4338+ BACKOFF_SETUP(%o2)
4339+1: lduw [%o1], %g1
4340+ sub %g1, %o0, %g7
4341+ cas [%o1], %g1, %g7
4342+ cmp %g1, %g7
4343+ bne,pn %icc, 2f
4344+ nop
4345+ retl
4346+ nop
4347+2: BACKOFF_SPIN(%o2, %o3, 1b)
4348+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4349+
4350 .globl atomic_add_ret
4351 .type atomic_add_ret,#function
4352 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4353 BACKOFF_SETUP(%o2)
4354 1: lduw [%o1], %g1
4355- add %g1, %o0, %g7
4356+ addcc %g1, %o0, %g7
4357+
4358+#ifdef CONFIG_PAX_REFCOUNT
4359+ tvs %icc, 6
4360+#endif
4361+
4362 cas [%o1], %g1, %g7
4363 cmp %g1, %g7
4364 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4365@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4366 2: BACKOFF_SPIN(%o2, %o3, 1b)
4367 .size atomic_add_ret, .-atomic_add_ret
4368
4369+ .globl atomic_add_ret_unchecked
4370+ .type atomic_add_ret_unchecked,#function
4371+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4372+ BACKOFF_SETUP(%o2)
4373+1: lduw [%o1], %g1
4374+ addcc %g1, %o0, %g7
4375+ cas [%o1], %g1, %g7
4376+ cmp %g1, %g7
4377+ bne,pn %icc, 2f
4378+ add %g7, %o0, %g7
4379+ sra %g7, 0, %o0
4380+ retl
4381+ nop
4382+2: BACKOFF_SPIN(%o2, %o3, 1b)
4383+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4384+
4385 .globl atomic_sub_ret
4386 .type atomic_sub_ret,#function
4387 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4388 BACKOFF_SETUP(%o2)
4389 1: lduw [%o1], %g1
4390- sub %g1, %o0, %g7
4391+ subcc %g1, %o0, %g7
4392+
4393+#ifdef CONFIG_PAX_REFCOUNT
4394+ tvs %icc, 6
4395+#endif
4396+
4397 cas [%o1], %g1, %g7
4398 cmp %g1, %g7
4399 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4400@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4401 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4402 BACKOFF_SETUP(%o2)
4403 1: ldx [%o1], %g1
4404- add %g1, %o0, %g7
4405+ addcc %g1, %o0, %g7
4406+
4407+#ifdef CONFIG_PAX_REFCOUNT
4408+ tvs %xcc, 6
4409+#endif
4410+
4411 casx [%o1], %g1, %g7
4412 cmp %g1, %g7
4413 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4414@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4415 2: BACKOFF_SPIN(%o2, %o3, 1b)
4416 .size atomic64_add, .-atomic64_add
4417
4418+ .globl atomic64_add_unchecked
4419+ .type atomic64_add_unchecked,#function
4420+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4421+ BACKOFF_SETUP(%o2)
4422+1: ldx [%o1], %g1
4423+ addcc %g1, %o0, %g7
4424+ casx [%o1], %g1, %g7
4425+ cmp %g1, %g7
4426+ bne,pn %xcc, 2f
4427+ nop
4428+ retl
4429+ nop
4430+2: BACKOFF_SPIN(%o2, %o3, 1b)
4431+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4432+
4433 .globl atomic64_sub
4434 .type atomic64_sub,#function
4435 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4436 BACKOFF_SETUP(%o2)
4437 1: ldx [%o1], %g1
4438- sub %g1, %o0, %g7
4439+ subcc %g1, %o0, %g7
4440+
4441+#ifdef CONFIG_PAX_REFCOUNT
4442+ tvs %xcc, 6
4443+#endif
4444+
4445 casx [%o1], %g1, %g7
4446 cmp %g1, %g7
4447 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4448@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4449 2: BACKOFF_SPIN(%o2, %o3, 1b)
4450 .size atomic64_sub, .-atomic64_sub
4451
4452+ .globl atomic64_sub_unchecked
4453+ .type atomic64_sub_unchecked,#function
4454+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4455+ BACKOFF_SETUP(%o2)
4456+1: ldx [%o1], %g1
4457+ subcc %g1, %o0, %g7
4458+ casx [%o1], %g1, %g7
4459+ cmp %g1, %g7
4460+ bne,pn %xcc, 2f
4461+ nop
4462+ retl
4463+ nop
4464+2: BACKOFF_SPIN(%o2, %o3, 1b)
4465+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4466+
4467 .globl atomic64_add_ret
4468 .type atomic64_add_ret,#function
4469 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4470 BACKOFF_SETUP(%o2)
4471 1: ldx [%o1], %g1
4472- add %g1, %o0, %g7
4473+ addcc %g1, %o0, %g7
4474+
4475+#ifdef CONFIG_PAX_REFCOUNT
4476+ tvs %xcc, 6
4477+#endif
4478+
4479 casx [%o1], %g1, %g7
4480 cmp %g1, %g7
4481 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4482@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4483 2: BACKOFF_SPIN(%o2, %o3, 1b)
4484 .size atomic64_add_ret, .-atomic64_add_ret
4485
4486+ .globl atomic64_add_ret_unchecked
4487+ .type atomic64_add_ret_unchecked,#function
4488+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4489+ BACKOFF_SETUP(%o2)
4490+1: ldx [%o1], %g1
4491+ addcc %g1, %o0, %g7
4492+ casx [%o1], %g1, %g7
4493+ cmp %g1, %g7
4494+ bne,pn %xcc, 2f
4495+ add %g7, %o0, %g7
4496+ mov %g7, %o0
4497+ retl
4498+ nop
4499+2: BACKOFF_SPIN(%o2, %o3, 1b)
4500+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4501+
4502 .globl atomic64_sub_ret
4503 .type atomic64_sub_ret,#function
4504 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4505 BACKOFF_SETUP(%o2)
4506 1: ldx [%o1], %g1
4507- sub %g1, %o0, %g7
4508+ subcc %g1, %o0, %g7
4509+
4510+#ifdef CONFIG_PAX_REFCOUNT
4511+ tvs %xcc, 6
4512+#endif
4513+
4514 casx [%o1], %g1, %g7
4515 cmp %g1, %g7
4516 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4517diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4518index 1b30bb3..b4a16c7 100644
4519--- a/arch/sparc/lib/ksyms.c
4520+++ b/arch/sparc/lib/ksyms.c
4521@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4522
4523 /* Atomic counter implementation. */
4524 EXPORT_SYMBOL(atomic_add);
4525+EXPORT_SYMBOL(atomic_add_unchecked);
4526 EXPORT_SYMBOL(atomic_add_ret);
4527+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4528 EXPORT_SYMBOL(atomic_sub);
4529+EXPORT_SYMBOL(atomic_sub_unchecked);
4530 EXPORT_SYMBOL(atomic_sub_ret);
4531 EXPORT_SYMBOL(atomic64_add);
4532+EXPORT_SYMBOL(atomic64_add_unchecked);
4533 EXPORT_SYMBOL(atomic64_add_ret);
4534+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4535 EXPORT_SYMBOL(atomic64_sub);
4536+EXPORT_SYMBOL(atomic64_sub_unchecked);
4537 EXPORT_SYMBOL(atomic64_sub_ret);
4538
4539 /* Atomic bit operations. */
4540diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4541index 301421c..e2535d1 100644
4542--- a/arch/sparc/mm/Makefile
4543+++ b/arch/sparc/mm/Makefile
4544@@ -2,7 +2,7 @@
4545 #
4546
4547 asflags-y := -ansi
4548-ccflags-y := -Werror
4549+#ccflags-y := -Werror
4550
4551 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4552 obj-y += fault_$(BITS).o
4553diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4554index aa1c1b1..f93e28f 100644
4555--- a/arch/sparc/mm/fault_32.c
4556+++ b/arch/sparc/mm/fault_32.c
4557@@ -22,6 +22,9 @@
4558 #include <linux/interrupt.h>
4559 #include <linux/module.h>
4560 #include <linux/kdebug.h>
4561+#include <linux/slab.h>
4562+#include <linux/pagemap.h>
4563+#include <linux/compiler.h>
4564
4565 #include <asm/system.h>
4566 #include <asm/page.h>
4567@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4568 return safe_compute_effective_address(regs, insn);
4569 }
4570
4571+#ifdef CONFIG_PAX_PAGEEXEC
4572+#ifdef CONFIG_PAX_DLRESOLVE
4573+static void pax_emuplt_close(struct vm_area_struct *vma)
4574+{
4575+ vma->vm_mm->call_dl_resolve = 0UL;
4576+}
4577+
4578+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4579+{
4580+ unsigned int *kaddr;
4581+
4582+ vmf->page = alloc_page(GFP_HIGHUSER);
4583+ if (!vmf->page)
4584+ return VM_FAULT_OOM;
4585+
4586+ kaddr = kmap(vmf->page);
4587+ memset(kaddr, 0, PAGE_SIZE);
4588+ kaddr[0] = 0x9DE3BFA8U; /* save */
4589+ flush_dcache_page(vmf->page);
4590+ kunmap(vmf->page);
4591+ return VM_FAULT_MAJOR;
4592+}
4593+
4594+static const struct vm_operations_struct pax_vm_ops = {
4595+ .close = pax_emuplt_close,
4596+ .fault = pax_emuplt_fault
4597+};
4598+
4599+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4600+{
4601+ int ret;
4602+
4603+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4604+ vma->vm_mm = current->mm;
4605+ vma->vm_start = addr;
4606+ vma->vm_end = addr + PAGE_SIZE;
4607+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4608+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4609+ vma->vm_ops = &pax_vm_ops;
4610+
4611+ ret = insert_vm_struct(current->mm, vma);
4612+ if (ret)
4613+ return ret;
4614+
4615+ ++current->mm->total_vm;
4616+ return 0;
4617+}
4618+#endif
4619+
4620+/*
4621+ * PaX: decide what to do with offenders (regs->pc = fault address)
4622+ *
4623+ * returns 1 when task should be killed
4624+ * 2 when patched PLT trampoline was detected
4625+ * 3 when unpatched PLT trampoline was detected
4626+ */
4627+static int pax_handle_fetch_fault(struct pt_regs *regs)
4628+{
4629+
4630+#ifdef CONFIG_PAX_EMUPLT
4631+ int err;
4632+
4633+ do { /* PaX: patched PLT emulation #1 */
4634+ unsigned int sethi1, sethi2, jmpl;
4635+
4636+ err = get_user(sethi1, (unsigned int *)regs->pc);
4637+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4638+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4645+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4646+ {
4647+ unsigned int addr;
4648+
4649+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4650+ addr = regs->u_regs[UREG_G1];
4651+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4652+ regs->pc = addr;
4653+ regs->npc = addr+4;
4654+ return 2;
4655+ }
4656+ } while (0);
4657+
4658+ { /* PaX: patched PLT emulation #2 */
4659+ unsigned int ba;
4660+
4661+ err = get_user(ba, (unsigned int *)regs->pc);
4662+
4663+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4664+ unsigned int addr;
4665+
4666+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4667+ regs->pc = addr;
4668+ regs->npc = addr+4;
4669+ return 2;
4670+ }
4671+ }
4672+
4673+ do { /* PaX: patched PLT emulation #3 */
4674+ unsigned int sethi, jmpl, nop;
4675+
4676+ err = get_user(sethi, (unsigned int *)regs->pc);
4677+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4678+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4679+
4680+ if (err)
4681+ break;
4682+
4683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4684+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4685+ nop == 0x01000000U)
4686+ {
4687+ unsigned int addr;
4688+
4689+ addr = (sethi & 0x003FFFFFU) << 10;
4690+ regs->u_regs[UREG_G1] = addr;
4691+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4692+ regs->pc = addr;
4693+ regs->npc = addr+4;
4694+ return 2;
4695+ }
4696+ } while (0);
4697+
4698+ do { /* PaX: unpatched PLT emulation step 1 */
4699+ unsigned int sethi, ba, nop;
4700+
4701+ err = get_user(sethi, (unsigned int *)regs->pc);
4702+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4703+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4704+
4705+ if (err)
4706+ break;
4707+
4708+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4709+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4710+ nop == 0x01000000U)
4711+ {
4712+ unsigned int addr, save, call;
4713+
4714+ if ((ba & 0xFFC00000U) == 0x30800000U)
4715+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4716+ else
4717+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4718+
4719+ err = get_user(save, (unsigned int *)addr);
4720+ err |= get_user(call, (unsigned int *)(addr+4));
4721+ err |= get_user(nop, (unsigned int *)(addr+8));
4722+ if (err)
4723+ break;
4724+
4725+#ifdef CONFIG_PAX_DLRESOLVE
4726+ if (save == 0x9DE3BFA8U &&
4727+ (call & 0xC0000000U) == 0x40000000U &&
4728+ nop == 0x01000000U)
4729+ {
4730+ struct vm_area_struct *vma;
4731+ unsigned long call_dl_resolve;
4732+
4733+ down_read(&current->mm->mmap_sem);
4734+ call_dl_resolve = current->mm->call_dl_resolve;
4735+ up_read(&current->mm->mmap_sem);
4736+ if (likely(call_dl_resolve))
4737+ goto emulate;
4738+
4739+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4740+
4741+ down_write(&current->mm->mmap_sem);
4742+ if (current->mm->call_dl_resolve) {
4743+ call_dl_resolve = current->mm->call_dl_resolve;
4744+ up_write(&current->mm->mmap_sem);
4745+ if (vma)
4746+ kmem_cache_free(vm_area_cachep, vma);
4747+ goto emulate;
4748+ }
4749+
4750+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752+ up_write(&current->mm->mmap_sem);
4753+ if (vma)
4754+ kmem_cache_free(vm_area_cachep, vma);
4755+ return 1;
4756+ }
4757+
4758+ if (pax_insert_vma(vma, call_dl_resolve)) {
4759+ up_write(&current->mm->mmap_sem);
4760+ kmem_cache_free(vm_area_cachep, vma);
4761+ return 1;
4762+ }
4763+
4764+ current->mm->call_dl_resolve = call_dl_resolve;
4765+ up_write(&current->mm->mmap_sem);
4766+
4767+emulate:
4768+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769+ regs->pc = call_dl_resolve;
4770+ regs->npc = addr+4;
4771+ return 3;
4772+ }
4773+#endif
4774+
4775+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776+ if ((save & 0xFFC00000U) == 0x05000000U &&
4777+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4778+ nop == 0x01000000U)
4779+ {
4780+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781+ regs->u_regs[UREG_G2] = addr + 4;
4782+ addr = (save & 0x003FFFFFU) << 10;
4783+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4784+ regs->pc = addr;
4785+ regs->npc = addr+4;
4786+ return 3;
4787+ }
4788+ }
4789+ } while (0);
4790+
4791+ do { /* PaX: unpatched PLT emulation step 2 */
4792+ unsigned int save, call, nop;
4793+
4794+ err = get_user(save, (unsigned int *)(regs->pc-4));
4795+ err |= get_user(call, (unsigned int *)regs->pc);
4796+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4797+ if (err)
4798+ break;
4799+
4800+ if (save == 0x9DE3BFA8U &&
4801+ (call & 0xC0000000U) == 0x40000000U &&
4802+ nop == 0x01000000U)
4803+ {
4804+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4805+
4806+ regs->u_regs[UREG_RETPC] = regs->pc;
4807+ regs->pc = dl_resolve;
4808+ regs->npc = dl_resolve+4;
4809+ return 3;
4810+ }
4811+ } while (0);
4812+#endif
4813+
4814+ return 1;
4815+}
4816+
4817+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4818+{
4819+ unsigned long i;
4820+
4821+ printk(KERN_ERR "PAX: bytes at PC: ");
4822+ for (i = 0; i < 8; i++) {
4823+ unsigned int c;
4824+ if (get_user(c, (unsigned int *)pc+i))
4825+ printk(KERN_CONT "???????? ");
4826+ else
4827+ printk(KERN_CONT "%08x ", c);
4828+ }
4829+ printk("\n");
4830+}
4831+#endif
4832+
4833 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4834 int text_fault)
4835 {
4836@@ -281,6 +546,24 @@ good_area:
4837 if(!(vma->vm_flags & VM_WRITE))
4838 goto bad_area;
4839 } else {
4840+
4841+#ifdef CONFIG_PAX_PAGEEXEC
4842+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4843+ up_read(&mm->mmap_sem);
4844+ switch (pax_handle_fetch_fault(regs)) {
4845+
4846+#ifdef CONFIG_PAX_EMUPLT
4847+ case 2:
4848+ case 3:
4849+ return;
4850+#endif
4851+
4852+ }
4853+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4854+ do_group_exit(SIGKILL);
4855+ }
4856+#endif
4857+
4858 /* Allow reads even for write-only mappings */
4859 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4860 goto bad_area;
4861diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4862index 504c062..6fcb9c6 100644
4863--- a/arch/sparc/mm/fault_64.c
4864+++ b/arch/sparc/mm/fault_64.c
4865@@ -21,6 +21,9 @@
4866 #include <linux/kprobes.h>
4867 #include <linux/kdebug.h>
4868 #include <linux/percpu.h>
4869+#include <linux/slab.h>
4870+#include <linux/pagemap.h>
4871+#include <linux/compiler.h>
4872
4873 #include <asm/page.h>
4874 #include <asm/pgtable.h>
4875@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4876 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4877 regs->tpc);
4878 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4879- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4880+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4881 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4882 dump_stack();
4883 unhandled_fault(regs->tpc, current, regs);
4884@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4885 show_regs(regs);
4886 }
4887
4888+#ifdef CONFIG_PAX_PAGEEXEC
4889+#ifdef CONFIG_PAX_DLRESOLVE
4890+static void pax_emuplt_close(struct vm_area_struct *vma)
4891+{
4892+ vma->vm_mm->call_dl_resolve = 0UL;
4893+}
4894+
4895+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4896+{
4897+ unsigned int *kaddr;
4898+
4899+ vmf->page = alloc_page(GFP_HIGHUSER);
4900+ if (!vmf->page)
4901+ return VM_FAULT_OOM;
4902+
4903+ kaddr = kmap(vmf->page);
4904+ memset(kaddr, 0, PAGE_SIZE);
4905+ kaddr[0] = 0x9DE3BFA8U; /* save */
4906+ flush_dcache_page(vmf->page);
4907+ kunmap(vmf->page);
4908+ return VM_FAULT_MAJOR;
4909+}
4910+
4911+static const struct vm_operations_struct pax_vm_ops = {
4912+ .close = pax_emuplt_close,
4913+ .fault = pax_emuplt_fault
4914+};
4915+
4916+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4917+{
4918+ int ret;
4919+
4920+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4921+ vma->vm_mm = current->mm;
4922+ vma->vm_start = addr;
4923+ vma->vm_end = addr + PAGE_SIZE;
4924+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4925+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4926+ vma->vm_ops = &pax_vm_ops;
4927+
4928+ ret = insert_vm_struct(current->mm, vma);
4929+ if (ret)
4930+ return ret;
4931+
4932+ ++current->mm->total_vm;
4933+ return 0;
4934+}
4935+#endif
4936+
4937+/*
4938+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4939+ *
4940+ * returns 1 when task should be killed
4941+ * 2 when patched PLT trampoline was detected
4942+ * 3 when unpatched PLT trampoline was detected
4943+ */
4944+static int pax_handle_fetch_fault(struct pt_regs *regs)
4945+{
4946+
4947+#ifdef CONFIG_PAX_EMUPLT
4948+ int err;
4949+
4950+ do { /* PaX: patched PLT emulation #1 */
4951+ unsigned int sethi1, sethi2, jmpl;
4952+
4953+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4954+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4955+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4956+
4957+ if (err)
4958+ break;
4959+
4960+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4961+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4962+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4963+ {
4964+ unsigned long addr;
4965+
4966+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4967+ addr = regs->u_regs[UREG_G1];
4968+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4969+
4970+ if (test_thread_flag(TIF_32BIT))
4971+ addr &= 0xFFFFFFFFUL;
4972+
4973+ regs->tpc = addr;
4974+ regs->tnpc = addr+4;
4975+ return 2;
4976+ }
4977+ } while (0);
4978+
4979+ { /* PaX: patched PLT emulation #2 */
4980+ unsigned int ba;
4981+
4982+ err = get_user(ba, (unsigned int *)regs->tpc);
4983+
4984+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4985+ unsigned long addr;
4986+
4987+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4988+
4989+ if (test_thread_flag(TIF_32BIT))
4990+ addr &= 0xFFFFFFFFUL;
4991+
4992+ regs->tpc = addr;
4993+ regs->tnpc = addr+4;
4994+ return 2;
4995+ }
4996+ }
4997+
4998+ do { /* PaX: patched PLT emulation #3 */
4999+ unsigned int sethi, jmpl, nop;
5000+
5001+ err = get_user(sethi, (unsigned int *)regs->tpc);
5002+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5003+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5004+
5005+ if (err)
5006+ break;
5007+
5008+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5009+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5010+ nop == 0x01000000U)
5011+ {
5012+ unsigned long addr;
5013+
5014+ addr = (sethi & 0x003FFFFFU) << 10;
5015+ regs->u_regs[UREG_G1] = addr;
5016+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5017+
5018+ if (test_thread_flag(TIF_32BIT))
5019+ addr &= 0xFFFFFFFFUL;
5020+
5021+ regs->tpc = addr;
5022+ regs->tnpc = addr+4;
5023+ return 2;
5024+ }
5025+ } while (0);
5026+
5027+ do { /* PaX: patched PLT emulation #4 */
5028+ unsigned int sethi, mov1, call, mov2;
5029+
5030+ err = get_user(sethi, (unsigned int *)regs->tpc);
5031+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5032+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5033+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5034+
5035+ if (err)
5036+ break;
5037+
5038+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5039+ mov1 == 0x8210000FU &&
5040+ (call & 0xC0000000U) == 0x40000000U &&
5041+ mov2 == 0x9E100001U)
5042+ {
5043+ unsigned long addr;
5044+
5045+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5046+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5047+
5048+ if (test_thread_flag(TIF_32BIT))
5049+ addr &= 0xFFFFFFFFUL;
5050+
5051+ regs->tpc = addr;
5052+ regs->tnpc = addr+4;
5053+ return 2;
5054+ }
5055+ } while (0);
5056+
5057+ do { /* PaX: patched PLT emulation #5 */
5058+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5059+
5060+ err = get_user(sethi, (unsigned int *)regs->tpc);
5061+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5062+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5063+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5064+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5065+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5066+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5067+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5068+
5069+ if (err)
5070+ break;
5071+
5072+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5074+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5075+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5076+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5077+ sllx == 0x83287020U &&
5078+ jmpl == 0x81C04005U &&
5079+ nop == 0x01000000U)
5080+ {
5081+ unsigned long addr;
5082+
5083+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5084+ regs->u_regs[UREG_G1] <<= 32;
5085+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5086+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5087+ regs->tpc = addr;
5088+ regs->tnpc = addr+4;
5089+ return 2;
5090+ }
5091+ } while (0);
5092+
5093+ do { /* PaX: patched PLT emulation #6 */
5094+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5095+
5096+ err = get_user(sethi, (unsigned int *)regs->tpc);
5097+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5098+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5099+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5100+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5101+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5102+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5103+
5104+ if (err)
5105+ break;
5106+
5107+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5109+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5110+ sllx == 0x83287020U &&
5111+ (or & 0xFFFFE000U) == 0x8A116000U &&
5112+ jmpl == 0x81C04005U &&
5113+ nop == 0x01000000U)
5114+ {
5115+ unsigned long addr;
5116+
5117+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5118+ regs->u_regs[UREG_G1] <<= 32;
5119+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5120+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5121+ regs->tpc = addr;
5122+ regs->tnpc = addr+4;
5123+ return 2;
5124+ }
5125+ } while (0);
5126+
5127+ do { /* PaX: unpatched PLT emulation step 1 */
5128+ unsigned int sethi, ba, nop;
5129+
5130+ err = get_user(sethi, (unsigned int *)regs->tpc);
5131+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5132+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5133+
5134+ if (err)
5135+ break;
5136+
5137+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5138+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5139+ nop == 0x01000000U)
5140+ {
5141+ unsigned long addr;
5142+ unsigned int save, call;
5143+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5144+
5145+ if ((ba & 0xFFC00000U) == 0x30800000U)
5146+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5147+ else
5148+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5149+
5150+ if (test_thread_flag(TIF_32BIT))
5151+ addr &= 0xFFFFFFFFUL;
5152+
5153+ err = get_user(save, (unsigned int *)addr);
5154+ err |= get_user(call, (unsigned int *)(addr+4));
5155+ err |= get_user(nop, (unsigned int *)(addr+8));
5156+ if (err)
5157+ break;
5158+
5159+#ifdef CONFIG_PAX_DLRESOLVE
5160+ if (save == 0x9DE3BFA8U &&
5161+ (call & 0xC0000000U) == 0x40000000U &&
5162+ nop == 0x01000000U)
5163+ {
5164+ struct vm_area_struct *vma;
5165+ unsigned long call_dl_resolve;
5166+
5167+ down_read(&current->mm->mmap_sem);
5168+ call_dl_resolve = current->mm->call_dl_resolve;
5169+ up_read(&current->mm->mmap_sem);
5170+ if (likely(call_dl_resolve))
5171+ goto emulate;
5172+
5173+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5174+
5175+ down_write(&current->mm->mmap_sem);
5176+ if (current->mm->call_dl_resolve) {
5177+ call_dl_resolve = current->mm->call_dl_resolve;
5178+ up_write(&current->mm->mmap_sem);
5179+ if (vma)
5180+ kmem_cache_free(vm_area_cachep, vma);
5181+ goto emulate;
5182+ }
5183+
5184+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5185+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5186+ up_write(&current->mm->mmap_sem);
5187+ if (vma)
5188+ kmem_cache_free(vm_area_cachep, vma);
5189+ return 1;
5190+ }
5191+
5192+ if (pax_insert_vma(vma, call_dl_resolve)) {
5193+ up_write(&current->mm->mmap_sem);
5194+ kmem_cache_free(vm_area_cachep, vma);
5195+ return 1;
5196+ }
5197+
5198+ current->mm->call_dl_resolve = call_dl_resolve;
5199+ up_write(&current->mm->mmap_sem);
5200+
5201+emulate:
5202+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5203+ regs->tpc = call_dl_resolve;
5204+ regs->tnpc = addr+4;
5205+ return 3;
5206+ }
5207+#endif
5208+
5209+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5210+ if ((save & 0xFFC00000U) == 0x05000000U &&
5211+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5212+ nop == 0x01000000U)
5213+ {
5214+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5215+ regs->u_regs[UREG_G2] = addr + 4;
5216+ addr = (save & 0x003FFFFFU) << 10;
5217+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5218+
5219+ if (test_thread_flag(TIF_32BIT))
5220+ addr &= 0xFFFFFFFFUL;
5221+
5222+ regs->tpc = addr;
5223+ regs->tnpc = addr+4;
5224+ return 3;
5225+ }
5226+
5227+ /* PaX: 64-bit PLT stub */
5228+ err = get_user(sethi1, (unsigned int *)addr);
5229+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5230+ err |= get_user(or1, (unsigned int *)(addr+8));
5231+ err |= get_user(or2, (unsigned int *)(addr+12));
5232+ err |= get_user(sllx, (unsigned int *)(addr+16));
5233+ err |= get_user(add, (unsigned int *)(addr+20));
5234+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5235+ err |= get_user(nop, (unsigned int *)(addr+28));
5236+ if (err)
5237+ break;
5238+
5239+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5240+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5241+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5242+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5243+ sllx == 0x89293020U &&
5244+ add == 0x8A010005U &&
5245+ jmpl == 0x89C14000U &&
5246+ nop == 0x01000000U)
5247+ {
5248+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5249+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5250+ regs->u_regs[UREG_G4] <<= 32;
5251+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5252+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5253+ regs->u_regs[UREG_G4] = addr + 24;
5254+ addr = regs->u_regs[UREG_G5];
5255+ regs->tpc = addr;
5256+ regs->tnpc = addr+4;
5257+ return 3;
5258+ }
5259+ }
5260+ } while (0);
5261+
5262+#ifdef CONFIG_PAX_DLRESOLVE
5263+ do { /* PaX: unpatched PLT emulation step 2 */
5264+ unsigned int save, call, nop;
5265+
5266+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5267+ err |= get_user(call, (unsigned int *)regs->tpc);
5268+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5269+ if (err)
5270+ break;
5271+
5272+ if (save == 0x9DE3BFA8U &&
5273+ (call & 0xC0000000U) == 0x40000000U &&
5274+ nop == 0x01000000U)
5275+ {
5276+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5277+
5278+ if (test_thread_flag(TIF_32BIT))
5279+ dl_resolve &= 0xFFFFFFFFUL;
5280+
5281+ regs->u_regs[UREG_RETPC] = regs->tpc;
5282+ regs->tpc = dl_resolve;
5283+ regs->tnpc = dl_resolve+4;
5284+ return 3;
5285+ }
5286+ } while (0);
5287+#endif
5288+
5289+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5290+ unsigned int sethi, ba, nop;
5291+
5292+ err = get_user(sethi, (unsigned int *)regs->tpc);
5293+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5294+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5295+
5296+ if (err)
5297+ break;
5298+
5299+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5300+ (ba & 0xFFF00000U) == 0x30600000U &&
5301+ nop == 0x01000000U)
5302+ {
5303+ unsigned long addr;
5304+
5305+ addr = (sethi & 0x003FFFFFU) << 10;
5306+ regs->u_regs[UREG_G1] = addr;
5307+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5308+
5309+ if (test_thread_flag(TIF_32BIT))
5310+ addr &= 0xFFFFFFFFUL;
5311+
5312+ regs->tpc = addr;
5313+ regs->tnpc = addr+4;
5314+ return 2;
5315+ }
5316+ } while (0);
5317+
5318+#endif
5319+
5320+ return 1;
5321+}
5322+
5323+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5324+{
5325+ unsigned long i;
5326+
5327+ printk(KERN_ERR "PAX: bytes at PC: ");
5328+ for (i = 0; i < 8; i++) {
5329+ unsigned int c;
5330+ if (get_user(c, (unsigned int *)pc+i))
5331+ printk(KERN_CONT "???????? ");
5332+ else
5333+ printk(KERN_CONT "%08x ", c);
5334+ }
5335+ printk("\n");
5336+}
5337+#endif
5338+
5339 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5340 {
5341 struct mm_struct *mm = current->mm;
5342@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5343 if (!vma)
5344 goto bad_area;
5345
5346+#ifdef CONFIG_PAX_PAGEEXEC
5347+ /* PaX: detect ITLB misses on non-exec pages */
5348+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5349+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5350+ {
5351+ if (address != regs->tpc)
5352+ goto good_area;
5353+
5354+ up_read(&mm->mmap_sem);
5355+ switch (pax_handle_fetch_fault(regs)) {
5356+
5357+#ifdef CONFIG_PAX_EMUPLT
5358+ case 2:
5359+ case 3:
5360+ return;
5361+#endif
5362+
5363+ }
5364+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5365+ do_group_exit(SIGKILL);
5366+ }
5367+#endif
5368+
5369 /* Pure DTLB misses do not tell us whether the fault causing
5370 * load/store/atomic was a write or not, it only says that there
5371 * was no match. So in such a case we (carefully) read the
5372diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5373index f4e9764..5682724 100644
5374--- a/arch/sparc/mm/hugetlbpage.c
5375+++ b/arch/sparc/mm/hugetlbpage.c
5376@@ -68,7 +68,7 @@ full_search:
5377 }
5378 return -ENOMEM;
5379 }
5380- if (likely(!vma || addr + len <= vma->vm_start)) {
5381+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5382 /*
5383 * Remember the place where we stopped the search:
5384 */
5385@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5386 /* make sure it can fit in the remaining address space */
5387 if (likely(addr > len)) {
5388 vma = find_vma(mm, addr-len);
5389- if (!vma || addr <= vma->vm_start) {
5390+ if (check_heap_stack_gap(vma, addr - len, len)) {
5391 /* remember the address as a hint for next time */
5392 return (mm->free_area_cache = addr-len);
5393 }
5394@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 if (unlikely(mm->mmap_base < len))
5396 goto bottomup;
5397
5398- addr = (mm->mmap_base-len) & HPAGE_MASK;
5399+ addr = mm->mmap_base - len;
5400
5401 do {
5402+ addr &= HPAGE_MASK;
5403 /*
5404 * Lookup failure means no vma is above this address,
5405 * else if new region fits below vma->vm_start,
5406 * return with success:
5407 */
5408 vma = find_vma(mm, addr);
5409- if (likely(!vma || addr+len <= vma->vm_start)) {
5410+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5411 /* remember the address as a hint for next time */
5412 return (mm->free_area_cache = addr);
5413 }
5414@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5415 mm->cached_hole_size = vma->vm_start - addr;
5416
5417 /* try just below the current vma->vm_start */
5418- addr = (vma->vm_start-len) & HPAGE_MASK;
5419- } while (likely(len < vma->vm_start));
5420+ addr = skip_heap_stack_gap(vma, len);
5421+ } while (!IS_ERR_VALUE(addr));
5422
5423 bottomup:
5424 /*
5425@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5426 if (addr) {
5427 addr = ALIGN(addr, HPAGE_SIZE);
5428 vma = find_vma(mm, addr);
5429- if (task_size - len >= addr &&
5430- (!vma || addr + len <= vma->vm_start))
5431+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5432 return addr;
5433 }
5434 if (mm->get_unmapped_area == arch_get_unmapped_area)
5435diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5436index 7b00de6..78239f4 100644
5437--- a/arch/sparc/mm/init_32.c
5438+++ b/arch/sparc/mm/init_32.c
5439@@ -316,6 +316,9 @@ extern void device_scan(void);
5440 pgprot_t PAGE_SHARED __read_mostly;
5441 EXPORT_SYMBOL(PAGE_SHARED);
5442
5443+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5444+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5445+
5446 void __init paging_init(void)
5447 {
5448 switch(sparc_cpu_model) {
5449@@ -344,17 +347,17 @@ void __init paging_init(void)
5450
5451 /* Initialize the protection map with non-constant, MMU dependent values. */
5452 protection_map[0] = PAGE_NONE;
5453- protection_map[1] = PAGE_READONLY;
5454- protection_map[2] = PAGE_COPY;
5455- protection_map[3] = PAGE_COPY;
5456+ protection_map[1] = PAGE_READONLY_NOEXEC;
5457+ protection_map[2] = PAGE_COPY_NOEXEC;
5458+ protection_map[3] = PAGE_COPY_NOEXEC;
5459 protection_map[4] = PAGE_READONLY;
5460 protection_map[5] = PAGE_READONLY;
5461 protection_map[6] = PAGE_COPY;
5462 protection_map[7] = PAGE_COPY;
5463 protection_map[8] = PAGE_NONE;
5464- protection_map[9] = PAGE_READONLY;
5465- protection_map[10] = PAGE_SHARED;
5466- protection_map[11] = PAGE_SHARED;
5467+ protection_map[9] = PAGE_READONLY_NOEXEC;
5468+ protection_map[10] = PAGE_SHARED_NOEXEC;
5469+ protection_map[11] = PAGE_SHARED_NOEXEC;
5470 protection_map[12] = PAGE_READONLY;
5471 protection_map[13] = PAGE_READONLY;
5472 protection_map[14] = PAGE_SHARED;
5473diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5474index cbef74e..c38fead 100644
5475--- a/arch/sparc/mm/srmmu.c
5476+++ b/arch/sparc/mm/srmmu.c
5477@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5478 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5479 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5480 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5481+
5482+#ifdef CONFIG_PAX_PAGEEXEC
5483+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5484+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5485+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5486+#endif
5487+
5488 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5489 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5490
5491diff --git a/arch/um/Makefile b/arch/um/Makefile
5492index c0f712c..3a5c4c9 100644
5493--- a/arch/um/Makefile
5494+++ b/arch/um/Makefile
5495@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5496 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5497 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5498
5499+ifdef CONSTIFY_PLUGIN
5500+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5501+endif
5502+
5503 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5504
5505 #This will adjust *FLAGS accordingly to the platform.
5506diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5507index 6c03acd..a5e0215 100644
5508--- a/arch/um/include/asm/kmap_types.h
5509+++ b/arch/um/include/asm/kmap_types.h
5510@@ -23,6 +23,7 @@ enum km_type {
5511 KM_IRQ1,
5512 KM_SOFTIRQ0,
5513 KM_SOFTIRQ1,
5514+ KM_CLEARPAGE,
5515 KM_TYPE_NR
5516 };
5517
5518diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5519index 4cc9b6c..02e5029 100644
5520--- a/arch/um/include/asm/page.h
5521+++ b/arch/um/include/asm/page.h
5522@@ -14,6 +14,9 @@
5523 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5524 #define PAGE_MASK (~(PAGE_SIZE-1))
5525
5526+#define ktla_ktva(addr) (addr)
5527+#define ktva_ktla(addr) (addr)
5528+
5529 #ifndef __ASSEMBLY__
5530
5531 struct page;
5532diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5533index 21c1ae7..4640aaa 100644
5534--- a/arch/um/kernel/process.c
5535+++ b/arch/um/kernel/process.c
5536@@ -404,22 +404,6 @@ int singlestepping(void * t)
5537 return 2;
5538 }
5539
5540-/*
5541- * Only x86 and x86_64 have an arch_align_stack().
5542- * All other arches have "#define arch_align_stack(x) (x)"
5543- * in their asm/system.h
5544- * As this is included in UML from asm-um/system-generic.h,
5545- * we can use it to behave as the subarch does.
5546- */
5547-#ifndef arch_align_stack
5548-unsigned long arch_align_stack(unsigned long sp)
5549-{
5550- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5551- sp -= get_random_int() % 8192;
5552- return sp & ~0xf;
5553-}
5554-#endif
5555-
5556 unsigned long get_wchan(struct task_struct *p)
5557 {
5558 unsigned long stack_page, sp, ip;
5559diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
5560index d1b93c4..ae1b7fd 100644
5561--- a/arch/um/sys-i386/shared/sysdep/system.h
5562+++ b/arch/um/sys-i386/shared/sysdep/system.h
5563@@ -17,7 +17,7 @@
5564 # define AT_VECTOR_SIZE_ARCH 1
5565 #endif
5566
5567-extern unsigned long arch_align_stack(unsigned long sp);
5568+#define arch_align_stack(x) ((x) & ~0xfUL)
5569
5570 void default_idle(void);
5571
5572diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
5573index 70ca357..728d1cc 100644
5574--- a/arch/um/sys-i386/syscalls.c
5575+++ b/arch/um/sys-i386/syscalls.c
5576@@ -11,6 +11,21 @@
5577 #include "asm/uaccess.h"
5578 #include "asm/unistd.h"
5579
5580+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5581+{
5582+ unsigned long pax_task_size = TASK_SIZE;
5583+
5584+#ifdef CONFIG_PAX_SEGMEXEC
5585+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5586+ pax_task_size = SEGMEXEC_TASK_SIZE;
5587+#endif
5588+
5589+ if (len > pax_task_size || addr > pax_task_size - len)
5590+ return -EINVAL;
5591+
5592+ return 0;
5593+}
5594+
5595 /*
5596 * The prototype on i386 is:
5597 *
5598diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
5599index d1b93c4..ae1b7fd 100644
5600--- a/arch/um/sys-x86_64/shared/sysdep/system.h
5601+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
5602@@ -17,7 +17,7 @@
5603 # define AT_VECTOR_SIZE_ARCH 1
5604 #endif
5605
5606-extern unsigned long arch_align_stack(unsigned long sp);
5607+#define arch_align_stack(x) ((x) & ~0xfUL)
5608
5609 void default_idle(void);
5610
5611diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5612index 6a47bb2..dc9a868 100644
5613--- a/arch/x86/Kconfig
5614+++ b/arch/x86/Kconfig
5615@@ -236,7 +236,7 @@ config X86_HT
5616
5617 config X86_32_LAZY_GS
5618 def_bool y
5619- depends on X86_32 && !CC_STACKPROTECTOR
5620+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5621
5622 config ARCH_HWEIGHT_CFLAGS
5623 string
5624@@ -1019,7 +1019,7 @@ choice
5625
5626 config NOHIGHMEM
5627 bool "off"
5628- depends on !X86_NUMAQ
5629+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5630 ---help---
5631 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5632 However, the address space of 32-bit x86 processors is only 4
5633@@ -1056,7 +1056,7 @@ config NOHIGHMEM
5634
5635 config HIGHMEM4G
5636 bool "4GB"
5637- depends on !X86_NUMAQ
5638+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5639 ---help---
5640 Select this if you have a 32-bit processor and between 1 and 4
5641 gigabytes of physical RAM.
5642@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
5643 hex
5644 default 0xB0000000 if VMSPLIT_3G_OPT
5645 default 0x80000000 if VMSPLIT_2G
5646- default 0x78000000 if VMSPLIT_2G_OPT
5647+ default 0x70000000 if VMSPLIT_2G_OPT
5648 default 0x40000000 if VMSPLIT_1G
5649 default 0xC0000000
5650 depends on X86_32
5651@@ -1484,6 +1484,7 @@ config SECCOMP
5652
5653 config CC_STACKPROTECTOR
5654 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5655+ depends on X86_64 || !PAX_MEMORY_UDEREF
5656 ---help---
5657 This option turns on the -fstack-protector GCC feature. This
5658 feature puts, at the beginning of functions, a canary value on
5659@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
5660 config PHYSICAL_START
5661 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5662 default "0x1000000"
5663+ range 0x400000 0x40000000
5664 ---help---
5665 This gives the physical address where the kernel is loaded.
5666
5667@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
5668 config PHYSICAL_ALIGN
5669 hex "Alignment value to which kernel should be aligned" if X86_32
5670 default "0x1000000"
5671+ range 0x400000 0x1000000 if PAX_KERNEXEC
5672 range 0x2000 0x1000000
5673 ---help---
5674 This value puts the alignment restrictions on physical address
5675@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
5676 Say N if you want to disable CPU hotplug.
5677
5678 config COMPAT_VDSO
5679- def_bool y
5680+ def_bool n
5681 prompt "Compat VDSO support"
5682 depends on X86_32 || IA32_EMULATION
5683+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5684 ---help---
5685 Map the 32-bit VDSO to the predictable old-style address too.
5686
5687diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5688index e3ca7e0..b30b28a 100644
5689--- a/arch/x86/Kconfig.cpu
5690+++ b/arch/x86/Kconfig.cpu
5691@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5692
5693 config X86_F00F_BUG
5694 def_bool y
5695- depends on M586MMX || M586TSC || M586 || M486 || M386
5696+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5697
5698 config X86_INVD_BUG
5699 def_bool y
5700@@ -365,7 +365,7 @@ config X86_POPAD_OK
5701
5702 config X86_ALIGNMENT_16
5703 def_bool y
5704- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5705+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5706
5707 config X86_INTEL_USERCOPY
5708 def_bool y
5709@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5710 # generates cmov.
5711 config X86_CMOV
5712 def_bool y
5713- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5714+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5715
5716 config X86_MINIMUM_CPU_FAMILY
5717 int
5718diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5719index c0f8a5c..6404f61 100644
5720--- a/arch/x86/Kconfig.debug
5721+++ b/arch/x86/Kconfig.debug
5722@@ -81,7 +81,7 @@ config X86_PTDUMP
5723 config DEBUG_RODATA
5724 bool "Write protect kernel read-only data structures"
5725 default y
5726- depends on DEBUG_KERNEL
5727+ depends on DEBUG_KERNEL && BROKEN
5728 ---help---
5729 Mark the kernel read-only data as write-protected in the pagetables,
5730 in order to catch accidental (and incorrect) writes to such const
5731@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5732
5733 config DEBUG_SET_MODULE_RONX
5734 bool "Set loadable kernel module data as NX and text as RO"
5735- depends on MODULES
5736+ depends on MODULES && BROKEN
5737 ---help---
5738 This option helps catch unintended modifications to loadable
5739 kernel module's text and read-only data. It also prevents execution
5740diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5741index b02e509..2631e48 100644
5742--- a/arch/x86/Makefile
5743+++ b/arch/x86/Makefile
5744@@ -46,6 +46,7 @@ else
5745 UTS_MACHINE := x86_64
5746 CHECKFLAGS += -D__x86_64__ -m64
5747
5748+ biarch := $(call cc-option,-m64)
5749 KBUILD_AFLAGS += -m64
5750 KBUILD_CFLAGS += -m64
5751
5752@@ -195,3 +196,12 @@ define archhelp
5753 echo ' FDARGS="..." arguments for the booted kernel'
5754 echo ' FDINITRD=file initrd for the booted kernel'
5755 endef
5756+
5757+define OLD_LD
5758+
5759+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5760+*** Please upgrade your binutils to 2.18 or newer
5761+endef
5762+
5763+archprepare:
5764+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5765diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5766index 95365a8..52f857b 100644
5767--- a/arch/x86/boot/Makefile
5768+++ b/arch/x86/boot/Makefile
5769@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5770 $(call cc-option, -fno-stack-protector) \
5771 $(call cc-option, -mpreferred-stack-boundary=2)
5772 KBUILD_CFLAGS += $(call cc-option, -m32)
5773+ifdef CONSTIFY_PLUGIN
5774+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5775+endif
5776 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5777 GCOV_PROFILE := n
5778
5779diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5780index 878e4b9..20537ab 100644
5781--- a/arch/x86/boot/bitops.h
5782+++ b/arch/x86/boot/bitops.h
5783@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5784 u8 v;
5785 const u32 *p = (const u32 *)addr;
5786
5787- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5788+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5789 return v;
5790 }
5791
5792@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5793
5794 static inline void set_bit(int nr, void *addr)
5795 {
5796- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5797+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5798 }
5799
5800 #endif /* BOOT_BITOPS_H */
5801diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5802index c7093bd..d4247ffe0 100644
5803--- a/arch/x86/boot/boot.h
5804+++ b/arch/x86/boot/boot.h
5805@@ -85,7 +85,7 @@ static inline void io_delay(void)
5806 static inline u16 ds(void)
5807 {
5808 u16 seg;
5809- asm("movw %%ds,%0" : "=rm" (seg));
5810+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5811 return seg;
5812 }
5813
5814@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5815 static inline int memcmp(const void *s1, const void *s2, size_t len)
5816 {
5817 u8 diff;
5818- asm("repe; cmpsb; setnz %0"
5819+ asm volatile("repe; cmpsb; setnz %0"
5820 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5821 return diff;
5822 }
5823diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5824index 09664ef..edc5d03 100644
5825--- a/arch/x86/boot/compressed/Makefile
5826+++ b/arch/x86/boot/compressed/Makefile
5827@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5828 KBUILD_CFLAGS += $(cflags-y)
5829 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5830 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5831+ifdef CONSTIFY_PLUGIN
5832+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5833+endif
5834
5835 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5836 GCOV_PROFILE := n
5837diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5838index 67a655a..b924059 100644
5839--- a/arch/x86/boot/compressed/head_32.S
5840+++ b/arch/x86/boot/compressed/head_32.S
5841@@ -76,7 +76,7 @@ ENTRY(startup_32)
5842 notl %eax
5843 andl %eax, %ebx
5844 #else
5845- movl $LOAD_PHYSICAL_ADDR, %ebx
5846+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5847 #endif
5848
5849 /* Target address to relocate to for decompression */
5850@@ -162,7 +162,7 @@ relocated:
5851 * and where it was actually loaded.
5852 */
5853 movl %ebp, %ebx
5854- subl $LOAD_PHYSICAL_ADDR, %ebx
5855+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5856 jz 2f /* Nothing to be done if loaded at compiled addr. */
5857 /*
5858 * Process relocations.
5859@@ -170,8 +170,7 @@ relocated:
5860
5861 1: subl $4, %edi
5862 movl (%edi), %ecx
5863- testl %ecx, %ecx
5864- jz 2f
5865+ jecxz 2f
5866 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5867 jmp 1b
5868 2:
5869diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5870index 35af09d..99c9676 100644
5871--- a/arch/x86/boot/compressed/head_64.S
5872+++ b/arch/x86/boot/compressed/head_64.S
5873@@ -91,7 +91,7 @@ ENTRY(startup_32)
5874 notl %eax
5875 andl %eax, %ebx
5876 #else
5877- movl $LOAD_PHYSICAL_ADDR, %ebx
5878+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5879 #endif
5880
5881 /* Target address to relocate to for decompression */
5882@@ -233,7 +233,7 @@ ENTRY(startup_64)
5883 notq %rax
5884 andq %rax, %rbp
5885 #else
5886- movq $LOAD_PHYSICAL_ADDR, %rbp
5887+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5888 #endif
5889
5890 /* Target address to relocate to for decompression */
5891diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5892index 3a19d04..7c1d55a 100644
5893--- a/arch/x86/boot/compressed/misc.c
5894+++ b/arch/x86/boot/compressed/misc.c
5895@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5896 case PT_LOAD:
5897 #ifdef CONFIG_RELOCATABLE
5898 dest = output;
5899- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5900+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5901 #else
5902 dest = (void *)(phdr->p_paddr);
5903 #endif
5904@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5905 error("Destination address too large");
5906 #endif
5907 #ifndef CONFIG_RELOCATABLE
5908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5910 error("Wrong destination address");
5911 #endif
5912
5913diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5914index 89bbf4e..869908e 100644
5915--- a/arch/x86/boot/compressed/relocs.c
5916+++ b/arch/x86/boot/compressed/relocs.c
5917@@ -13,8 +13,11 @@
5918
5919 static void die(char *fmt, ...);
5920
5921+#include "../../../../include/generated/autoconf.h"
5922+
5923 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5924 static Elf32_Ehdr ehdr;
5925+static Elf32_Phdr *phdr;
5926 static unsigned long reloc_count, reloc_idx;
5927 static unsigned long *relocs;
5928
5929@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5930 }
5931 }
5932
5933+static void read_phdrs(FILE *fp)
5934+{
5935+ unsigned int i;
5936+
5937+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5938+ if (!phdr) {
5939+ die("Unable to allocate %d program headers\n",
5940+ ehdr.e_phnum);
5941+ }
5942+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5943+ die("Seek to %d failed: %s\n",
5944+ ehdr.e_phoff, strerror(errno));
5945+ }
5946+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5947+ die("Cannot read ELF program headers: %s\n",
5948+ strerror(errno));
5949+ }
5950+ for(i = 0; i < ehdr.e_phnum; i++) {
5951+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5952+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5953+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5954+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5955+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5956+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5957+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5958+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5959+ }
5960+
5961+}
5962+
5963 static void read_shdrs(FILE *fp)
5964 {
5965- int i;
5966+ unsigned int i;
5967 Elf32_Shdr shdr;
5968
5969 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5970@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5971
5972 static void read_strtabs(FILE *fp)
5973 {
5974- int i;
5975+ unsigned int i;
5976 for (i = 0; i < ehdr.e_shnum; i++) {
5977 struct section *sec = &secs[i];
5978 if (sec->shdr.sh_type != SHT_STRTAB) {
5979@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5980
5981 static void read_symtabs(FILE *fp)
5982 {
5983- int i,j;
5984+ unsigned int i,j;
5985 for (i = 0; i < ehdr.e_shnum; i++) {
5986 struct section *sec = &secs[i];
5987 if (sec->shdr.sh_type != SHT_SYMTAB) {
5988@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5989
5990 static void read_relocs(FILE *fp)
5991 {
5992- int i,j;
5993+ unsigned int i,j;
5994+ uint32_t base;
5995+
5996 for (i = 0; i < ehdr.e_shnum; i++) {
5997 struct section *sec = &secs[i];
5998 if (sec->shdr.sh_type != SHT_REL) {
5999@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6000 die("Cannot read symbol table: %s\n",
6001 strerror(errno));
6002 }
6003+ base = 0;
6004+ for (j = 0; j < ehdr.e_phnum; j++) {
6005+ if (phdr[j].p_type != PT_LOAD )
6006+ continue;
6007+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6008+ continue;
6009+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6010+ break;
6011+ }
6012 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6013 Elf32_Rel *rel = &sec->reltab[j];
6014- rel->r_offset = elf32_to_cpu(rel->r_offset);
6015+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6016 rel->r_info = elf32_to_cpu(rel->r_info);
6017 }
6018 }
6019@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6020
6021 static void print_absolute_symbols(void)
6022 {
6023- int i;
6024+ unsigned int i;
6025 printf("Absolute symbols\n");
6026 printf(" Num: Value Size Type Bind Visibility Name\n");
6027 for (i = 0; i < ehdr.e_shnum; i++) {
6028 struct section *sec = &secs[i];
6029 char *sym_strtab;
6030 Elf32_Sym *sh_symtab;
6031- int j;
6032+ unsigned int j;
6033
6034 if (sec->shdr.sh_type != SHT_SYMTAB) {
6035 continue;
6036@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6037
6038 static void print_absolute_relocs(void)
6039 {
6040- int i, printed = 0;
6041+ unsigned int i, printed = 0;
6042
6043 for (i = 0; i < ehdr.e_shnum; i++) {
6044 struct section *sec = &secs[i];
6045 struct section *sec_applies, *sec_symtab;
6046 char *sym_strtab;
6047 Elf32_Sym *sh_symtab;
6048- int j;
6049+ unsigned int j;
6050 if (sec->shdr.sh_type != SHT_REL) {
6051 continue;
6052 }
6053@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6054
6055 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6056 {
6057- int i;
6058+ unsigned int i;
6059 /* Walk through the relocations */
6060 for (i = 0; i < ehdr.e_shnum; i++) {
6061 char *sym_strtab;
6062 Elf32_Sym *sh_symtab;
6063 struct section *sec_applies, *sec_symtab;
6064- int j;
6065+ unsigned int j;
6066 struct section *sec = &secs[i];
6067
6068 if (sec->shdr.sh_type != SHT_REL) {
6069@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6070 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6071 continue;
6072 }
6073+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6074+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6075+ continue;
6076+
6077+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6078+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6079+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6080+ continue;
6081+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6082+ continue;
6083+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6084+ continue;
6085+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6086+ continue;
6087+#endif
6088+
6089 switch (r_type) {
6090 case R_386_NONE:
6091 case R_386_PC32:
6092@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6093
6094 static void emit_relocs(int as_text)
6095 {
6096- int i;
6097+ unsigned int i;
6098 /* Count how many relocations I have and allocate space for them. */
6099 reloc_count = 0;
6100 walk_relocs(count_reloc);
6101@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6102 fname, strerror(errno));
6103 }
6104 read_ehdr(fp);
6105+ read_phdrs(fp);
6106 read_shdrs(fp);
6107 read_strtabs(fp);
6108 read_symtabs(fp);
6109diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6110index 4d3ff03..e4972ff 100644
6111--- a/arch/x86/boot/cpucheck.c
6112+++ b/arch/x86/boot/cpucheck.c
6113@@ -74,7 +74,7 @@ static int has_fpu(void)
6114 u16 fcw = -1, fsw = -1;
6115 u32 cr0;
6116
6117- asm("movl %%cr0,%0" : "=r" (cr0));
6118+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6119 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6120 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6121 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6122@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6123 {
6124 u32 f0, f1;
6125
6126- asm("pushfl ; "
6127+ asm volatile("pushfl ; "
6128 "pushfl ; "
6129 "popl %0 ; "
6130 "movl %0,%1 ; "
6131@@ -115,7 +115,7 @@ static void get_flags(void)
6132 set_bit(X86_FEATURE_FPU, cpu.flags);
6133
6134 if (has_eflag(X86_EFLAGS_ID)) {
6135- asm("cpuid"
6136+ asm volatile("cpuid"
6137 : "=a" (max_intel_level),
6138 "=b" (cpu_vendor[0]),
6139 "=d" (cpu_vendor[1]),
6140@@ -124,7 +124,7 @@ static void get_flags(void)
6141
6142 if (max_intel_level >= 0x00000001 &&
6143 max_intel_level <= 0x0000ffff) {
6144- asm("cpuid"
6145+ asm volatile("cpuid"
6146 : "=a" (tfms),
6147 "=c" (cpu.flags[4]),
6148 "=d" (cpu.flags[0])
6149@@ -136,7 +136,7 @@ static void get_flags(void)
6150 cpu.model += ((tfms >> 16) & 0xf) << 4;
6151 }
6152
6153- asm("cpuid"
6154+ asm volatile("cpuid"
6155 : "=a" (max_amd_level)
6156 : "a" (0x80000000)
6157 : "ebx", "ecx", "edx");
6158@@ -144,7 +144,7 @@ static void get_flags(void)
6159 if (max_amd_level >= 0x80000001 &&
6160 max_amd_level <= 0x8000ffff) {
6161 u32 eax = 0x80000001;
6162- asm("cpuid"
6163+ asm volatile("cpuid"
6164 : "+a" (eax),
6165 "=c" (cpu.flags[6]),
6166 "=d" (cpu.flags[1])
6167@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6168 u32 ecx = MSR_K7_HWCR;
6169 u32 eax, edx;
6170
6171- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6172+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6173 eax &= ~(1 << 15);
6174- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6175+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6176
6177 get_flags(); /* Make sure it really did something */
6178 err = check_flags();
6179@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6180 u32 ecx = MSR_VIA_FCR;
6181 u32 eax, edx;
6182
6183- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6184+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6185 eax |= (1<<1)|(1<<7);
6186- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6187+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6188
6189 set_bit(X86_FEATURE_CX8, cpu.flags);
6190 err = check_flags();
6191@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6192 u32 eax, edx;
6193 u32 level = 1;
6194
6195- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6196- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6197- asm("cpuid"
6198+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6199+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6200+ asm volatile("cpuid"
6201 : "+a" (level), "=d" (cpu.flags[0])
6202 : : "ecx", "ebx");
6203- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6204+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6205
6206 err = check_flags();
6207 }
6208diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6209index 93e689f..504ba09 100644
6210--- a/arch/x86/boot/header.S
6211+++ b/arch/x86/boot/header.S
6212@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6213 # single linked list of
6214 # struct setup_data
6215
6216-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6217+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6218
6219 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6220 #define VO_INIT_SIZE (VO__end - VO__text)
6221diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6222index db75d07..8e6d0af 100644
6223--- a/arch/x86/boot/memory.c
6224+++ b/arch/x86/boot/memory.c
6225@@ -19,7 +19,7 @@
6226
6227 static int detect_memory_e820(void)
6228 {
6229- int count = 0;
6230+ unsigned int count = 0;
6231 struct biosregs ireg, oreg;
6232 struct e820entry *desc = boot_params.e820_map;
6233 static struct e820entry buf; /* static so it is zeroed */
6234diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6235index 11e8c6e..fdbb1ed 100644
6236--- a/arch/x86/boot/video-vesa.c
6237+++ b/arch/x86/boot/video-vesa.c
6238@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6239
6240 boot_params.screen_info.vesapm_seg = oreg.es;
6241 boot_params.screen_info.vesapm_off = oreg.di;
6242+ boot_params.screen_info.vesapm_size = oreg.cx;
6243 }
6244
6245 /*
6246diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6247index 43eda28..5ab5fdb 100644
6248--- a/arch/x86/boot/video.c
6249+++ b/arch/x86/boot/video.c
6250@@ -96,7 +96,7 @@ static void store_mode_params(void)
6251 static unsigned int get_entry(void)
6252 {
6253 char entry_buf[4];
6254- int i, len = 0;
6255+ unsigned int i, len = 0;
6256 int key;
6257 unsigned int v;
6258
6259diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6260index 5b577d5..3c1fed4 100644
6261--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6262+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6263@@ -8,6 +8,8 @@
6264 * including this sentence is retained in full.
6265 */
6266
6267+#include <asm/alternative-asm.h>
6268+
6269 .extern crypto_ft_tab
6270 .extern crypto_it_tab
6271 .extern crypto_fl_tab
6272@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6273 je B192; \
6274 leaq 32(r9),r9;
6275
6276+#define ret pax_force_retaddr 0, 1; ret
6277+
6278 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6279 movq r1,r2; \
6280 movq r3,r4; \
6281diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6282index be6d9e3..21fbbca 100644
6283--- a/arch/x86/crypto/aesni-intel_asm.S
6284+++ b/arch/x86/crypto/aesni-intel_asm.S
6285@@ -31,6 +31,7 @@
6286
6287 #include <linux/linkage.h>
6288 #include <asm/inst.h>
6289+#include <asm/alternative-asm.h>
6290
6291 #ifdef __x86_64__
6292 .data
6293@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6294 pop %r14
6295 pop %r13
6296 pop %r12
6297+ pax_force_retaddr 0, 1
6298 ret
6299+ENDPROC(aesni_gcm_dec)
6300
6301
6302 /*****************************************************************************
6303@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6304 pop %r14
6305 pop %r13
6306 pop %r12
6307+ pax_force_retaddr 0, 1
6308 ret
6309+ENDPROC(aesni_gcm_enc)
6310
6311 #endif
6312
6313@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6314 pxor %xmm1, %xmm0
6315 movaps %xmm0, (TKEYP)
6316 add $0x10, TKEYP
6317+ pax_force_retaddr_bts
6318 ret
6319
6320 .align 4
6321@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6322 shufps $0b01001110, %xmm2, %xmm1
6323 movaps %xmm1, 0x10(TKEYP)
6324 add $0x20, TKEYP
6325+ pax_force_retaddr_bts
6326 ret
6327
6328 .align 4
6329@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6330
6331 movaps %xmm0, (TKEYP)
6332 add $0x10, TKEYP
6333+ pax_force_retaddr_bts
6334 ret
6335
6336 .align 4
6337@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6338 pxor %xmm1, %xmm2
6339 movaps %xmm2, (TKEYP)
6340 add $0x10, TKEYP
6341+ pax_force_retaddr_bts
6342 ret
6343
6344 /*
6345@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6346 #ifndef __x86_64__
6347 popl KEYP
6348 #endif
6349+ pax_force_retaddr 0, 1
6350 ret
6351+ENDPROC(aesni_set_key)
6352
6353 /*
6354 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6355@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6356 popl KLEN
6357 popl KEYP
6358 #endif
6359+ pax_force_retaddr 0, 1
6360 ret
6361+ENDPROC(aesni_enc)
6362
6363 /*
6364 * _aesni_enc1: internal ABI
6365@@ -1959,6 +1972,7 @@ _aesni_enc1:
6366 AESENC KEY STATE
6367 movaps 0x70(TKEYP), KEY
6368 AESENCLAST KEY STATE
6369+ pax_force_retaddr_bts
6370 ret
6371
6372 /*
6373@@ -2067,6 +2081,7 @@ _aesni_enc4:
6374 AESENCLAST KEY STATE2
6375 AESENCLAST KEY STATE3
6376 AESENCLAST KEY STATE4
6377+ pax_force_retaddr_bts
6378 ret
6379
6380 /*
6381@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6382 popl KLEN
6383 popl KEYP
6384 #endif
6385+ pax_force_retaddr 0, 1
6386 ret
6387+ENDPROC(aesni_dec)
6388
6389 /*
6390 * _aesni_dec1: internal ABI
6391@@ -2146,6 +2163,7 @@ _aesni_dec1:
6392 AESDEC KEY STATE
6393 movaps 0x70(TKEYP), KEY
6394 AESDECLAST KEY STATE
6395+ pax_force_retaddr_bts
6396 ret
6397
6398 /*
6399@@ -2254,6 +2272,7 @@ _aesni_dec4:
6400 AESDECLAST KEY STATE2
6401 AESDECLAST KEY STATE3
6402 AESDECLAST KEY STATE4
6403+ pax_force_retaddr_bts
6404 ret
6405
6406 /*
6407@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6408 popl KEYP
6409 popl LEN
6410 #endif
6411+ pax_force_retaddr 0, 1
6412 ret
6413+ENDPROC(aesni_ecb_enc)
6414
6415 /*
6416 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6417@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6418 popl KEYP
6419 popl LEN
6420 #endif
6421+ pax_force_retaddr 0, 1
6422 ret
6423+ENDPROC(aesni_ecb_dec)
6424
6425 /*
6426 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6427@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6428 popl LEN
6429 popl IVP
6430 #endif
6431+ pax_force_retaddr 0, 1
6432 ret
6433+ENDPROC(aesni_cbc_enc)
6434
6435 /*
6436 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6437@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6438 popl LEN
6439 popl IVP
6440 #endif
6441+ pax_force_retaddr 0, 1
6442 ret
6443+ENDPROC(aesni_cbc_dec)
6444
6445 #ifdef __x86_64__
6446 .align 16
6447@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6448 mov $1, TCTR_LOW
6449 MOVQ_R64_XMM TCTR_LOW INC
6450 MOVQ_R64_XMM CTR TCTR_LOW
6451+ pax_force_retaddr_bts
6452 ret
6453
6454 /*
6455@@ -2552,6 +2580,7 @@ _aesni_inc:
6456 .Linc_low:
6457 movaps CTR, IV
6458 PSHUFB_XMM BSWAP_MASK IV
6459+ pax_force_retaddr_bts
6460 ret
6461
6462 /*
6463@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6464 .Lctr_enc_ret:
6465 movups IV, (IVP)
6466 .Lctr_enc_just_ret:
6467+ pax_force_retaddr 0, 1
6468 ret
6469+ENDPROC(aesni_ctr_enc)
6470 #endif
6471diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6472index 6214a9b..1f4fc9a 100644
6473--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6474+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475@@ -1,3 +1,5 @@
6476+#include <asm/alternative-asm.h>
6477+
6478 # enter ECRYPT_encrypt_bytes
6479 .text
6480 .p2align 5
6481@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6482 add %r11,%rsp
6483 mov %rdi,%rax
6484 mov %rsi,%rdx
6485+ pax_force_retaddr 0, 1
6486 ret
6487 # bytesatleast65:
6488 ._bytesatleast65:
6489@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6490 add %r11,%rsp
6491 mov %rdi,%rax
6492 mov %rsi,%rdx
6493+ pax_force_retaddr
6494 ret
6495 # enter ECRYPT_ivsetup
6496 .text
6497@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6498 add %r11,%rsp
6499 mov %rdi,%rax
6500 mov %rsi,%rdx
6501+ pax_force_retaddr
6502 ret
6503diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6504index 573aa10..b73ad89 100644
6505--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6506+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6507@@ -21,6 +21,7 @@
6508 .text
6509
6510 #include <asm/asm-offsets.h>
6511+#include <asm/alternative-asm.h>
6512
6513 #define a_offset 0
6514 #define b_offset 4
6515@@ -269,6 +270,7 @@ twofish_enc_blk:
6516
6517 popq R1
6518 movq $1,%rax
6519+ pax_force_retaddr 0, 1
6520 ret
6521
6522 twofish_dec_blk:
6523@@ -321,4 +323,5 @@ twofish_dec_blk:
6524
6525 popq R1
6526 movq $1,%rax
6527+ pax_force_retaddr 0, 1
6528 ret
6529diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6530index fd84387..0b4af7d 100644
6531--- a/arch/x86/ia32/ia32_aout.c
6532+++ b/arch/x86/ia32/ia32_aout.c
6533@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6534 unsigned long dump_start, dump_size;
6535 struct user32 dump;
6536
6537+ memset(&dump, 0, sizeof(dump));
6538+
6539 fs = get_fs();
6540 set_fs(KERNEL_DS);
6541 has_dumped = 1;
6542diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6543index 6557769..ef6ae89 100644
6544--- a/arch/x86/ia32/ia32_signal.c
6545+++ b/arch/x86/ia32/ia32_signal.c
6546@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6547 }
6548 seg = get_fs();
6549 set_fs(KERNEL_DS);
6550- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6551+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6552 set_fs(seg);
6553 if (ret >= 0 && uoss_ptr) {
6554 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6555@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6556 */
6557 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6558 size_t frame_size,
6559- void **fpstate)
6560+ void __user **fpstate)
6561 {
6562 unsigned long sp;
6563
6564@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6565
6566 if (used_math()) {
6567 sp = sp - sig_xstate_ia32_size;
6568- *fpstate = (struct _fpstate_ia32 *) sp;
6569+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6570 if (save_i387_xstate_ia32(*fpstate) < 0)
6571 return (void __user *) -1L;
6572 }
6573@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6574 sp -= frame_size;
6575 /* Align the stack pointer according to the i386 ABI,
6576 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6577- sp = ((sp + 4) & -16ul) - 4;
6578+ sp = ((sp - 12) & -16ul) - 4;
6579 return (void __user *) sp;
6580 }
6581
6582@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6583 * These are actually not used anymore, but left because some
6584 * gdb versions depend on them as a marker.
6585 */
6586- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6587+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6588 } put_user_catch(err);
6589
6590 if (err)
6591@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6592 0xb8,
6593 __NR_ia32_rt_sigreturn,
6594 0x80cd,
6595- 0,
6596+ 0
6597 };
6598
6599 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6600@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6601
6602 if (ka->sa.sa_flags & SA_RESTORER)
6603 restorer = ka->sa.sa_restorer;
6604+ else if (current->mm->context.vdso)
6605+ /* Return stub is in 32bit vsyscall page */
6606+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6607 else
6608- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6609- rt_sigreturn);
6610+ restorer = &frame->retcode;
6611 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6612
6613 /*
6614 * Not actually used anymore, but left because some gdb
6615 * versions need it.
6616 */
6617- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6618+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6619 } put_user_catch(err);
6620
6621 if (err)
6622diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6623index 54edb207..db27073 100644
6624--- a/arch/x86/ia32/ia32entry.S
6625+++ b/arch/x86/ia32/ia32entry.S
6626@@ -13,7 +13,9 @@
6627 #include <asm/thread_info.h>
6628 #include <asm/segment.h>
6629 #include <asm/irqflags.h>
6630+#include <asm/pgtable.h>
6631 #include <linux/linkage.h>
6632+#include <asm/alternative-asm.h>
6633
6634 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6635 #include <linux/elf-em.h>
6636@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6637 ENDPROC(native_irq_enable_sysexit)
6638 #endif
6639
6640+ .macro pax_enter_kernel_user
6641+ pax_set_fptr_mask
6642+#ifdef CONFIG_PAX_MEMORY_UDEREF
6643+ call pax_enter_kernel_user
6644+#endif
6645+ .endm
6646+
6647+ .macro pax_exit_kernel_user
6648+#ifdef CONFIG_PAX_MEMORY_UDEREF
6649+ call pax_exit_kernel_user
6650+#endif
6651+#ifdef CONFIG_PAX_RANDKSTACK
6652+ pushq %rax
6653+ pushq %r11
6654+ call pax_randomize_kstack
6655+ popq %r11
6656+ popq %rax
6657+#endif
6658+ .endm
6659+
6660+.macro pax_erase_kstack
6661+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6662+ call pax_erase_kstack
6663+#endif
6664+.endm
6665+
6666 /*
6667 * 32bit SYSENTER instruction entry.
6668 *
6669@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6670 CFI_REGISTER rsp,rbp
6671 SWAPGS_UNSAFE_STACK
6672 movq PER_CPU_VAR(kernel_stack), %rsp
6673- addq $(KERNEL_STACK_OFFSET),%rsp
6674- /*
6675- * No need to follow this irqs on/off section: the syscall
6676- * disabled irqs, here we enable it straight after entry:
6677- */
6678- ENABLE_INTERRUPTS(CLBR_NONE)
6679 movl %ebp,%ebp /* zero extension */
6680 pushq_cfi $__USER32_DS
6681 /*CFI_REL_OFFSET ss,0*/
6682@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6683 CFI_REL_OFFSET rsp,0
6684 pushfq_cfi
6685 /*CFI_REL_OFFSET rflags,0*/
6686- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6687- CFI_REGISTER rip,r10
6688+ orl $X86_EFLAGS_IF,(%rsp)
6689+ GET_THREAD_INFO(%r11)
6690+ movl TI_sysenter_return(%r11), %r11d
6691+ CFI_REGISTER rip,r11
6692 pushq_cfi $__USER32_CS
6693 /*CFI_REL_OFFSET cs,0*/
6694 movl %eax, %eax
6695- pushq_cfi %r10
6696+ pushq_cfi %r11
6697 CFI_REL_OFFSET rip,0
6698 pushq_cfi %rax
6699 cld
6700 SAVE_ARGS 0,1,0
6701+ pax_enter_kernel_user
6702+ /*
6703+ * No need to follow this irqs on/off section: the syscall
6704+ * disabled irqs, here we enable it straight after entry:
6705+ */
6706+ ENABLE_INTERRUPTS(CLBR_NONE)
6707 /* no need to do an access_ok check here because rbp has been
6708 32bit zero extended */
6709+
6710+#ifdef CONFIG_PAX_MEMORY_UDEREF
6711+ mov $PAX_USER_SHADOW_BASE,%r11
6712+ add %r11,%rbp
6713+#endif
6714+
6715 1: movl (%rbp),%ebp
6716 .section __ex_table,"a"
6717 .quad 1b,ia32_badarg
6718 .previous
6719- GET_THREAD_INFO(%r10)
6720- orl $TS_COMPAT,TI_status(%r10)
6721- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6722+ GET_THREAD_INFO(%r11)
6723+ orl $TS_COMPAT,TI_status(%r11)
6724+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6725 CFI_REMEMBER_STATE
6726 jnz sysenter_tracesys
6727 cmpq $(IA32_NR_syscalls-1),%rax
6728@@ -162,13 +198,15 @@ sysenter_do_call:
6729 sysenter_dispatch:
6730 call *ia32_sys_call_table(,%rax,8)
6731 movq %rax,RAX-ARGOFFSET(%rsp)
6732- GET_THREAD_INFO(%r10)
6733+ GET_THREAD_INFO(%r11)
6734 DISABLE_INTERRUPTS(CLBR_NONE)
6735 TRACE_IRQS_OFF
6736- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6737+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6738 jnz sysexit_audit
6739 sysexit_from_sys_call:
6740- andl $~TS_COMPAT,TI_status(%r10)
6741+ pax_exit_kernel_user
6742+ pax_erase_kstack
6743+ andl $~TS_COMPAT,TI_status(%r11)
6744 /* clear IF, that popfq doesn't enable interrupts early */
6745 andl $~0x200,EFLAGS-R11(%rsp)
6746 movl RIP-R11(%rsp),%edx /* User %eip */
6747@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6748 movl %eax,%esi /* 2nd arg: syscall number */
6749 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6750 call audit_syscall_entry
6751+
6752+ pax_erase_kstack
6753+
6754 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6755 cmpq $(IA32_NR_syscalls-1),%rax
6756 ja ia32_badsys
6757@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6758 .endm
6759
6760 .macro auditsys_exit exit
6761- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6762+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6763 jnz ia32_ret_from_sys_call
6764 TRACE_IRQS_ON
6765 sti
6766@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6767 movzbl %al,%edi /* zero-extend that into %edi */
6768 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6769 call audit_syscall_exit
6770- GET_THREAD_INFO(%r10)
6771+ GET_THREAD_INFO(%r11)
6772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6773 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6774 cli
6775 TRACE_IRQS_OFF
6776- testl %edi,TI_flags(%r10)
6777+ testl %edi,TI_flags(%r11)
6778 jz \exit
6779 CLEAR_RREGS -ARGOFFSET
6780 jmp int_with_check
6781@@ -238,7 +279,7 @@ sysexit_audit:
6782
6783 sysenter_tracesys:
6784 #ifdef CONFIG_AUDITSYSCALL
6785- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6786+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6787 jz sysenter_auditsys
6788 #endif
6789 SAVE_REST
6790@@ -246,6 +287,9 @@ sysenter_tracesys:
6791 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6792 movq %rsp,%rdi /* &pt_regs -> arg1 */
6793 call syscall_trace_enter
6794+
6795+ pax_erase_kstack
6796+
6797 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6798 RESTORE_REST
6799 cmpq $(IA32_NR_syscalls-1),%rax
6800@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6801 ENTRY(ia32_cstar_target)
6802 CFI_STARTPROC32 simple
6803 CFI_SIGNAL_FRAME
6804- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6805+ CFI_DEF_CFA rsp,0
6806 CFI_REGISTER rip,rcx
6807 /*CFI_REGISTER rflags,r11*/
6808 SWAPGS_UNSAFE_STACK
6809 movl %esp,%r8d
6810 CFI_REGISTER rsp,r8
6811 movq PER_CPU_VAR(kernel_stack),%rsp
6812+ SAVE_ARGS 8*6,0,0
6813+ pax_enter_kernel_user
6814 /*
6815 * No need to follow this irqs on/off section: the syscall
6816 * disabled irqs and here we enable it straight after entry:
6817 */
6818 ENABLE_INTERRUPTS(CLBR_NONE)
6819- SAVE_ARGS 8,0,0
6820 movl %eax,%eax /* zero extension */
6821 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6822 movq %rcx,RIP-ARGOFFSET(%rsp)
6823@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6824 /* no need to do an access_ok check here because r8 has been
6825 32bit zero extended */
6826 /* hardware stack frame is complete now */
6827+
6828+#ifdef CONFIG_PAX_MEMORY_UDEREF
6829+ mov $PAX_USER_SHADOW_BASE,%r11
6830+ add %r11,%r8
6831+#endif
6832+
6833 1: movl (%r8),%r9d
6834 .section __ex_table,"a"
6835 .quad 1b,ia32_badarg
6836 .previous
6837- GET_THREAD_INFO(%r10)
6838- orl $TS_COMPAT,TI_status(%r10)
6839- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6840+ GET_THREAD_INFO(%r11)
6841+ orl $TS_COMPAT,TI_status(%r11)
6842+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6843 CFI_REMEMBER_STATE
6844 jnz cstar_tracesys
6845 cmpq $IA32_NR_syscalls-1,%rax
6846@@ -321,13 +372,15 @@ cstar_do_call:
6847 cstar_dispatch:
6848 call *ia32_sys_call_table(,%rax,8)
6849 movq %rax,RAX-ARGOFFSET(%rsp)
6850- GET_THREAD_INFO(%r10)
6851+ GET_THREAD_INFO(%r11)
6852 DISABLE_INTERRUPTS(CLBR_NONE)
6853 TRACE_IRQS_OFF
6854- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6855+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6856 jnz sysretl_audit
6857 sysretl_from_sys_call:
6858- andl $~TS_COMPAT,TI_status(%r10)
6859+ pax_exit_kernel_user
6860+ pax_erase_kstack
6861+ andl $~TS_COMPAT,TI_status(%r11)
6862 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6863 movl RIP-ARGOFFSET(%rsp),%ecx
6864 CFI_REGISTER rip,rcx
6865@@ -355,7 +408,7 @@ sysretl_audit:
6866
6867 cstar_tracesys:
6868 #ifdef CONFIG_AUDITSYSCALL
6869- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6870+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6871 jz cstar_auditsys
6872 #endif
6873 xchgl %r9d,%ebp
6874@@ -364,6 +417,9 @@ cstar_tracesys:
6875 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6876 movq %rsp,%rdi /* &pt_regs -> arg1 */
6877 call syscall_trace_enter
6878+
6879+ pax_erase_kstack
6880+
6881 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6882 RESTORE_REST
6883 xchgl %ebp,%r9d
6884@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6885 CFI_REL_OFFSET rip,RIP-RIP
6886 PARAVIRT_ADJUST_EXCEPTION_FRAME
6887 SWAPGS
6888- /*
6889- * No need to follow this irqs on/off section: the syscall
6890- * disabled irqs and here we enable it straight after entry:
6891- */
6892- ENABLE_INTERRUPTS(CLBR_NONE)
6893 movl %eax,%eax
6894 pushq_cfi %rax
6895 cld
6896 /* note the registers are not zero extended to the sf.
6897 this could be a problem. */
6898 SAVE_ARGS 0,1,0
6899- GET_THREAD_INFO(%r10)
6900- orl $TS_COMPAT,TI_status(%r10)
6901- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6902+ pax_enter_kernel_user
6903+ /*
6904+ * No need to follow this irqs on/off section: the syscall
6905+ * disabled irqs and here we enable it straight after entry:
6906+ */
6907+ ENABLE_INTERRUPTS(CLBR_NONE)
6908+ GET_THREAD_INFO(%r11)
6909+ orl $TS_COMPAT,TI_status(%r11)
6910+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6911 jnz ia32_tracesys
6912 cmpq $(IA32_NR_syscalls-1),%rax
6913 ja ia32_badsys
6914@@ -441,6 +498,9 @@ ia32_tracesys:
6915 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6916 movq %rsp,%rdi /* &pt_regs -> arg1 */
6917 call syscall_trace_enter
6918+
6919+ pax_erase_kstack
6920+
6921 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6922 RESTORE_REST
6923 cmpq $(IA32_NR_syscalls-1),%rax
6924@@ -455,6 +515,7 @@ ia32_badsys:
6925
6926 quiet_ni_syscall:
6927 movq $-ENOSYS,%rax
6928+ pax_force_retaddr
6929 ret
6930 CFI_ENDPROC
6931
6932diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6933index f6f5c53..b358b28 100644
6934--- a/arch/x86/ia32/sys_ia32.c
6935+++ b/arch/x86/ia32/sys_ia32.c
6936@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6937 */
6938 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6939 {
6940- typeof(ubuf->st_uid) uid = 0;
6941- typeof(ubuf->st_gid) gid = 0;
6942+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
6943+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
6944 SET_UID(uid, stat->uid);
6945 SET_GID(gid, stat->gid);
6946 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6947@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
6948 }
6949 set_fs(KERNEL_DS);
6950 ret = sys_rt_sigprocmask(how,
6951- set ? (sigset_t __user *)&s : NULL,
6952- oset ? (sigset_t __user *)&s : NULL,
6953+ set ? (sigset_t __force_user *)&s : NULL,
6954+ oset ? (sigset_t __force_user *)&s : NULL,
6955 sigsetsize);
6956 set_fs(old_fs);
6957 if (ret)
6958@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
6959 return alarm_setitimer(seconds);
6960 }
6961
6962-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6963+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6964 int options)
6965 {
6966 return compat_sys_wait4(pid, stat_addr, options, NULL);
6967@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
6968 mm_segment_t old_fs = get_fs();
6969
6970 set_fs(KERNEL_DS);
6971- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6972+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6973 set_fs(old_fs);
6974 if (put_compat_timespec(&t, interval))
6975 return -EFAULT;
6976@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
6977 mm_segment_t old_fs = get_fs();
6978
6979 set_fs(KERNEL_DS);
6980- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6981+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6982 set_fs(old_fs);
6983 if (!ret) {
6984 switch (_NSIG_WORDS) {
6985@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
6986 if (copy_siginfo_from_user32(&info, uinfo))
6987 return -EFAULT;
6988 set_fs(KERNEL_DS);
6989- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6990+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6991 set_fs(old_fs);
6992 return ret;
6993 }
6994@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
6995 return -EFAULT;
6996
6997 set_fs(KERNEL_DS);
6998- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6999+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7000 count);
7001 set_fs(old_fs);
7002
7003diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7004index 091508b..e245ff2 100644
7005--- a/arch/x86/include/asm/alternative-asm.h
7006+++ b/arch/x86/include/asm/alternative-asm.h
7007@@ -4,10 +4,10 @@
7008
7009 #ifdef CONFIG_SMP
7010 .macro LOCK_PREFIX
7011-1: lock
7012+672: lock
7013 .section .smp_locks,"a"
7014 .balign 4
7015- .long 1b - .
7016+ .long 672b - .
7017 .previous
7018 .endm
7019 #else
7020@@ -15,6 +15,45 @@
7021 .endm
7022 #endif
7023
7024+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7025+ .macro pax_force_retaddr_bts rip=0
7026+ btsq $63,\rip(%rsp)
7027+ .endm
7028+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7029+ .macro pax_force_retaddr rip=0, reload=0
7030+ btsq $63,\rip(%rsp)
7031+ .endm
7032+ .macro pax_force_fptr ptr
7033+ btsq $63,\ptr
7034+ .endm
7035+ .macro pax_set_fptr_mask
7036+ .endm
7037+#endif
7038+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7039+ .macro pax_force_retaddr rip=0, reload=0
7040+ .if \reload
7041+ pax_set_fptr_mask
7042+ .endif
7043+ orq %r10,\rip(%rsp)
7044+ .endm
7045+ .macro pax_force_fptr ptr
7046+ orq %r10,\ptr
7047+ .endm
7048+ .macro pax_set_fptr_mask
7049+ movabs $0x8000000000000000,%r10
7050+ .endm
7051+#endif
7052+#else
7053+ .macro pax_force_retaddr rip=0, reload=0
7054+ .endm
7055+ .macro pax_force_fptr ptr
7056+ .endm
7057+ .macro pax_force_retaddr_bts rip=0
7058+ .endm
7059+ .macro pax_set_fptr_mask
7060+ .endm
7061+#endif
7062+
7063 .macro altinstruction_entry orig alt feature orig_len alt_len
7064 .long \orig - .
7065 .long \alt - .
7066diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7067index 37ad100..7d47faa 100644
7068--- a/arch/x86/include/asm/alternative.h
7069+++ b/arch/x86/include/asm/alternative.h
7070@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7071 ".section .discard,\"aw\",@progbits\n" \
7072 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7073 ".previous\n" \
7074- ".section .altinstr_replacement, \"ax\"\n" \
7075+ ".section .altinstr_replacement, \"a\"\n" \
7076 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7077 ".previous"
7078
7079diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7080index 9b7273c..e9fcc24 100644
7081--- a/arch/x86/include/asm/apic.h
7082+++ b/arch/x86/include/asm/apic.h
7083@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7084
7085 #ifdef CONFIG_X86_LOCAL_APIC
7086
7087-extern unsigned int apic_verbosity;
7088+extern int apic_verbosity;
7089 extern int local_apic_timer_c2_ok;
7090
7091 extern int disable_apic;
7092diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7093index 20370c6..a2eb9b0 100644
7094--- a/arch/x86/include/asm/apm.h
7095+++ b/arch/x86/include/asm/apm.h
7096@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7097 __asm__ __volatile__(APM_DO_ZERO_SEGS
7098 "pushl %%edi\n\t"
7099 "pushl %%ebp\n\t"
7100- "lcall *%%cs:apm_bios_entry\n\t"
7101+ "lcall *%%ss:apm_bios_entry\n\t"
7102 "setc %%al\n\t"
7103 "popl %%ebp\n\t"
7104 "popl %%edi\n\t"
7105@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7106 __asm__ __volatile__(APM_DO_ZERO_SEGS
7107 "pushl %%edi\n\t"
7108 "pushl %%ebp\n\t"
7109- "lcall *%%cs:apm_bios_entry\n\t"
7110+ "lcall *%%ss:apm_bios_entry\n\t"
7111 "setc %%bl\n\t"
7112 "popl %%ebp\n\t"
7113 "popl %%edi\n\t"
7114diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7115index 10572e3..392d0bc 100644
7116--- a/arch/x86/include/asm/atomic.h
7117+++ b/arch/x86/include/asm/atomic.h
7118@@ -22,7 +22,18 @@
7119 */
7120 static inline int atomic_read(const atomic_t *v)
7121 {
7122- return (*(volatile int *)&(v)->counter);
7123+ return (*(volatile const int *)&(v)->counter);
7124+}
7125+
7126+/**
7127+ * atomic_read_unchecked - read atomic variable
7128+ * @v: pointer of type atomic_unchecked_t
7129+ *
7130+ * Atomically reads the value of @v.
7131+ */
7132+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7133+{
7134+ return (*(volatile const int *)&(v)->counter);
7135 }
7136
7137 /**
7138@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7139 }
7140
7141 /**
7142+ * atomic_set_unchecked - set atomic variable
7143+ * @v: pointer of type atomic_unchecked_t
7144+ * @i: required value
7145+ *
7146+ * Atomically sets the value of @v to @i.
7147+ */
7148+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7149+{
7150+ v->counter = i;
7151+}
7152+
7153+/**
7154 * atomic_add - add integer to atomic variable
7155 * @i: integer value to add
7156 * @v: pointer of type atomic_t
7157@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7158 */
7159 static inline void atomic_add(int i, atomic_t *v)
7160 {
7161- asm volatile(LOCK_PREFIX "addl %1,%0"
7162+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7163+
7164+#ifdef CONFIG_PAX_REFCOUNT
7165+ "jno 0f\n"
7166+ LOCK_PREFIX "subl %1,%0\n"
7167+ "int $4\n0:\n"
7168+ _ASM_EXTABLE(0b, 0b)
7169+#endif
7170+
7171+ : "+m" (v->counter)
7172+ : "ir" (i));
7173+}
7174+
7175+/**
7176+ * atomic_add_unchecked - add integer to atomic variable
7177+ * @i: integer value to add
7178+ * @v: pointer of type atomic_unchecked_t
7179+ *
7180+ * Atomically adds @i to @v.
7181+ */
7182+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7183+{
7184+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7185 : "+m" (v->counter)
7186 : "ir" (i));
7187 }
7188@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7189 */
7190 static inline void atomic_sub(int i, atomic_t *v)
7191 {
7192- asm volatile(LOCK_PREFIX "subl %1,%0"
7193+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7194+
7195+#ifdef CONFIG_PAX_REFCOUNT
7196+ "jno 0f\n"
7197+ LOCK_PREFIX "addl %1,%0\n"
7198+ "int $4\n0:\n"
7199+ _ASM_EXTABLE(0b, 0b)
7200+#endif
7201+
7202+ : "+m" (v->counter)
7203+ : "ir" (i));
7204+}
7205+
7206+/**
7207+ * atomic_sub_unchecked - subtract integer from atomic variable
7208+ * @i: integer value to subtract
7209+ * @v: pointer of type atomic_unchecked_t
7210+ *
7211+ * Atomically subtracts @i from @v.
7212+ */
7213+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7214+{
7215+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7216 : "+m" (v->counter)
7217 : "ir" (i));
7218 }
7219@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7220 {
7221 unsigned char c;
7222
7223- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7224+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7225+
7226+#ifdef CONFIG_PAX_REFCOUNT
7227+ "jno 0f\n"
7228+ LOCK_PREFIX "addl %2,%0\n"
7229+ "int $4\n0:\n"
7230+ _ASM_EXTABLE(0b, 0b)
7231+#endif
7232+
7233+ "sete %1\n"
7234 : "+m" (v->counter), "=qm" (c)
7235 : "ir" (i) : "memory");
7236 return c;
7237@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7238 */
7239 static inline void atomic_inc(atomic_t *v)
7240 {
7241- asm volatile(LOCK_PREFIX "incl %0"
7242+ asm volatile(LOCK_PREFIX "incl %0\n"
7243+
7244+#ifdef CONFIG_PAX_REFCOUNT
7245+ "jno 0f\n"
7246+ LOCK_PREFIX "decl %0\n"
7247+ "int $4\n0:\n"
7248+ _ASM_EXTABLE(0b, 0b)
7249+#endif
7250+
7251+ : "+m" (v->counter));
7252+}
7253+
7254+/**
7255+ * atomic_inc_unchecked - increment atomic variable
7256+ * @v: pointer of type atomic_unchecked_t
7257+ *
7258+ * Atomically increments @v by 1.
7259+ */
7260+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7261+{
7262+ asm volatile(LOCK_PREFIX "incl %0\n"
7263 : "+m" (v->counter));
7264 }
7265
7266@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7267 */
7268 static inline void atomic_dec(atomic_t *v)
7269 {
7270- asm volatile(LOCK_PREFIX "decl %0"
7271+ asm volatile(LOCK_PREFIX "decl %0\n"
7272+
7273+#ifdef CONFIG_PAX_REFCOUNT
7274+ "jno 0f\n"
7275+ LOCK_PREFIX "incl %0\n"
7276+ "int $4\n0:\n"
7277+ _ASM_EXTABLE(0b, 0b)
7278+#endif
7279+
7280+ : "+m" (v->counter));
7281+}
7282+
7283+/**
7284+ * atomic_dec_unchecked - decrement atomic variable
7285+ * @v: pointer of type atomic_unchecked_t
7286+ *
7287+ * Atomically decrements @v by 1.
7288+ */
7289+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7290+{
7291+ asm volatile(LOCK_PREFIX "decl %0\n"
7292 : "+m" (v->counter));
7293 }
7294
7295@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7296 {
7297 unsigned char c;
7298
7299- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7300+ asm volatile(LOCK_PREFIX "decl %0\n"
7301+
7302+#ifdef CONFIG_PAX_REFCOUNT
7303+ "jno 0f\n"
7304+ LOCK_PREFIX "incl %0\n"
7305+ "int $4\n0:\n"
7306+ _ASM_EXTABLE(0b, 0b)
7307+#endif
7308+
7309+ "sete %1\n"
7310 : "+m" (v->counter), "=qm" (c)
7311 : : "memory");
7312 return c != 0;
7313@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7314 {
7315 unsigned char c;
7316
7317- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7318+ asm volatile(LOCK_PREFIX "incl %0\n"
7319+
7320+#ifdef CONFIG_PAX_REFCOUNT
7321+ "jno 0f\n"
7322+ LOCK_PREFIX "decl %0\n"
7323+ "int $4\n0:\n"
7324+ _ASM_EXTABLE(0b, 0b)
7325+#endif
7326+
7327+ "sete %1\n"
7328+ : "+m" (v->counter), "=qm" (c)
7329+ : : "memory");
7330+ return c != 0;
7331+}
7332+
7333+/**
7334+ * atomic_inc_and_test_unchecked - increment and test
7335+ * @v: pointer of type atomic_unchecked_t
7336+ *
7337+ * Atomically increments @v by 1
7338+ * and returns true if the result is zero, or false for all
7339+ * other cases.
7340+ */
7341+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7342+{
7343+ unsigned char c;
7344+
7345+ asm volatile(LOCK_PREFIX "incl %0\n"
7346+ "sete %1\n"
7347 : "+m" (v->counter), "=qm" (c)
7348 : : "memory");
7349 return c != 0;
7350@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7351 {
7352 unsigned char c;
7353
7354- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7355+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7356+
7357+#ifdef CONFIG_PAX_REFCOUNT
7358+ "jno 0f\n"
7359+ LOCK_PREFIX "subl %2,%0\n"
7360+ "int $4\n0:\n"
7361+ _ASM_EXTABLE(0b, 0b)
7362+#endif
7363+
7364+ "sets %1\n"
7365 : "+m" (v->counter), "=qm" (c)
7366 : "ir" (i) : "memory");
7367 return c;
7368@@ -180,16 +342,56 @@ static inline int atomic_add_return(int i, atomic_t *v)
7369 #endif
7370 /* Modern 486+ processor */
7371 __i = i;
7372- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7373+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7374+
7375+#ifdef CONFIG_PAX_REFCOUNT
7376+ "jno 0f\n"
7377+ "movl %0, %1\n"
7378+ "int $4\n0:\n"
7379+ _ASM_EXTABLE(0b, 0b)
7380+#endif
7381+
7382 : "+r" (i), "+m" (v->counter)
7383 : : "memory");
7384 return i + __i;
7385
7386 #ifdef CONFIG_M386
7387 no_xadd: /* Legacy 386 processor */
7388- raw_local_irq_save(flags);
7389+ local_irq_save(flags);
7390 __i = atomic_read(v);
7391 atomic_set(v, i + __i);
7392+ local_irq_restore(flags);
7393+ return i + __i;
7394+#endif
7395+}
7396+
7397+/**
7398+ * atomic_add_return_unchecked - add integer and return
7399+ * @v: pointer of type atomic_unchecked_t
7400+ * @i: integer value to add
7401+ *
7402+ * Atomically adds @i to @v and returns @i + @v
7403+ */
7404+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7405+{
7406+ int __i;
7407+#ifdef CONFIG_M386
7408+ unsigned long flags;
7409+ if (unlikely(boot_cpu_data.x86 <= 3))
7410+ goto no_xadd;
7411+#endif
7412+ /* Modern 486+ processor */
7413+ __i = i;
7414+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
7415+ : "+r" (i), "+m" (v->counter)
7416+ : : "memory");
7417+ return i + __i;
7418+
7419+#ifdef CONFIG_M386
7420+no_xadd: /* Legacy 386 processor */
7421+ raw_local_irq_save(flags);
7422+ __i = atomic_read_unchecked(v);
7423+ atomic_set_unchecked(v, i + __i);
7424 raw_local_irq_restore(flags);
7425 return i + __i;
7426 #endif
7427@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7428 }
7429
7430 #define atomic_inc_return(v) (atomic_add_return(1, v))
7431+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7432+{
7433+ return atomic_add_return_unchecked(1, v);
7434+}
7435 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7436
7437 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7438@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7439 return cmpxchg(&v->counter, old, new);
7440 }
7441
7442+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7443+{
7444+ return cmpxchg(&v->counter, old, new);
7445+}
7446+
7447 static inline int atomic_xchg(atomic_t *v, int new)
7448 {
7449 return xchg(&v->counter, new);
7450 }
7451
7452+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7453+{
7454+ return xchg(&v->counter, new);
7455+}
7456+
7457 /**
7458 * __atomic_add_unless - add unless the number is already a given value
7459 * @v: pointer of type atomic_t
7460@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7461 */
7462 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7463 {
7464- int c, old;
7465+ int c, old, new;
7466 c = atomic_read(v);
7467 for (;;) {
7468- if (unlikely(c == (u)))
7469+ if (unlikely(c == u))
7470 break;
7471- old = atomic_cmpxchg((v), c, c + (a));
7472+
7473+ asm volatile("addl %2,%0\n"
7474+
7475+#ifdef CONFIG_PAX_REFCOUNT
7476+ "jno 0f\n"
7477+ "subl %2,%0\n"
7478+ "int $4\n0:\n"
7479+ _ASM_EXTABLE(0b, 0b)
7480+#endif
7481+
7482+ : "=r" (new)
7483+ : "0" (c), "ir" (a));
7484+
7485+ old = atomic_cmpxchg(v, c, new);
7486 if (likely(old == c))
7487 break;
7488 c = old;
7489@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7490 return c;
7491 }
7492
7493+/**
7494+ * atomic_inc_not_zero_hint - increment if not null
7495+ * @v: pointer of type atomic_t
7496+ * @hint: probable value of the atomic before the increment
7497+ *
7498+ * This version of atomic_inc_not_zero() gives a hint of probable
7499+ * value of the atomic. This helps processor to not read the memory
7500+ * before doing the atomic read/modify/write cycle, lowering
7501+ * number of bus transactions on some arches.
7502+ *
7503+ * Returns: 0 if increment was not done, 1 otherwise.
7504+ */
7505+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7506+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7507+{
7508+ int val, c = hint, new;
7509+
7510+ /* sanity test, should be removed by compiler if hint is a constant */
7511+ if (!hint)
7512+ return __atomic_add_unless(v, 1, 0);
7513+
7514+ do {
7515+ asm volatile("incl %0\n"
7516+
7517+#ifdef CONFIG_PAX_REFCOUNT
7518+ "jno 0f\n"
7519+ "decl %0\n"
7520+ "int $4\n0:\n"
7521+ _ASM_EXTABLE(0b, 0b)
7522+#endif
7523+
7524+ : "=r" (new)
7525+ : "0" (c));
7526+
7527+ val = atomic_cmpxchg(v, c, new);
7528+ if (val == c)
7529+ return 1;
7530+ c = val;
7531+ } while (c);
7532+
7533+ return 0;
7534+}
7535
7536 /*
7537 * atomic_dec_if_positive - decrement by 1 if old value positive
7538diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7539index 24098aa..1e37723 100644
7540--- a/arch/x86/include/asm/atomic64_32.h
7541+++ b/arch/x86/include/asm/atomic64_32.h
7542@@ -12,6 +12,14 @@ typedef struct {
7543 u64 __aligned(8) counter;
7544 } atomic64_t;
7545
7546+#ifdef CONFIG_PAX_REFCOUNT
7547+typedef struct {
7548+ u64 __aligned(8) counter;
7549+} atomic64_unchecked_t;
7550+#else
7551+typedef atomic64_t atomic64_unchecked_t;
7552+#endif
7553+
7554 #define ATOMIC64_INIT(val) { (val) }
7555
7556 #ifdef CONFIG_X86_CMPXCHG64
7557@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7558 }
7559
7560 /**
7561+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7562+ * @p: pointer to type atomic64_unchecked_t
7563+ * @o: expected value
7564+ * @n: new value
7565+ *
7566+ * Atomically sets @v to @n if it was equal to @o and returns
7567+ * the old value.
7568+ */
7569+
7570+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7571+{
7572+ return cmpxchg64(&v->counter, o, n);
7573+}
7574+
7575+/**
7576 * atomic64_xchg - xchg atomic64 variable
7577 * @v: pointer to type atomic64_t
7578 * @n: value to assign
7579@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7580 }
7581
7582 /**
7583+ * atomic64_set_unchecked - set atomic64 variable
7584+ * @v: pointer to type atomic64_unchecked_t
7585+ * @n: value to assign
7586+ *
7587+ * Atomically sets the value of @v to @n.
7588+ */
7589+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7590+{
7591+ unsigned high = (unsigned)(i >> 32);
7592+ unsigned low = (unsigned)i;
7593+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7594+ : "+b" (low), "+c" (high)
7595+ : "S" (v)
7596+ : "eax", "edx", "memory"
7597+ );
7598+}
7599+
7600+/**
7601 * atomic64_read - read atomic64 variable
7602 * @v: pointer to type atomic64_t
7603 *
7604@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7605 }
7606
7607 /**
7608+ * atomic64_read_unchecked - read atomic64 variable
7609+ * @v: pointer to type atomic64_unchecked_t
7610+ *
7611+ * Atomically reads the value of @v and returns it.
7612+ */
7613+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7614+{
7615+ long long r;
7616+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7617+ : "=A" (r), "+c" (v)
7618+ : : "memory"
7619+ );
7620+ return r;
7621+ }
7622+
7623+/**
7624 * atomic64_add_return - add and return
7625 * @i: integer value to add
7626 * @v: pointer to type atomic64_t
7627@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7628 return i;
7629 }
7630
7631+/**
7632+ * atomic64_add_return_unchecked - add and return
7633+ * @i: integer value to add
7634+ * @v: pointer to type atomic64_unchecked_t
7635+ *
7636+ * Atomically adds @i to @v and returns @i + *@v
7637+ */
7638+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7639+{
7640+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7641+ : "+A" (i), "+c" (v)
7642+ : : "memory"
7643+ );
7644+ return i;
7645+}
7646+
7647 /*
7648 * Other variants with different arithmetic operators:
7649 */
7650@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7651 return a;
7652 }
7653
7654+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7655+{
7656+ long long a;
7657+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7658+ : "=A" (a)
7659+ : "S" (v)
7660+ : "memory", "ecx"
7661+ );
7662+ return a;
7663+}
7664+
7665 static inline long long atomic64_dec_return(atomic64_t *v)
7666 {
7667 long long a;
7668@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7669 }
7670
7671 /**
7672+ * atomic64_add_unchecked - add integer to atomic64 variable
7673+ * @i: integer value to add
7674+ * @v: pointer to type atomic64_unchecked_t
7675+ *
7676+ * Atomically adds @i to @v.
7677+ */
7678+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7679+{
7680+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7681+ : "+A" (i), "+c" (v)
7682+ : : "memory"
7683+ );
7684+ return i;
7685+}
7686+
7687+/**
7688 * atomic64_sub - subtract the atomic64 variable
7689 * @i: integer value to subtract
7690 * @v: pointer to type atomic64_t
7691diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7692index 017594d..d3fcf72 100644
7693--- a/arch/x86/include/asm/atomic64_64.h
7694+++ b/arch/x86/include/asm/atomic64_64.h
7695@@ -18,7 +18,19 @@
7696 */
7697 static inline long atomic64_read(const atomic64_t *v)
7698 {
7699- return (*(volatile long *)&(v)->counter);
7700+ return (*(volatile const long *)&(v)->counter);
7701+}
7702+
7703+/**
7704+ * atomic64_read_unchecked - read atomic64 variable
7705+ * @v: pointer of type atomic64_unchecked_t
7706+ *
7707+ * Atomically reads the value of @v.
7708+ * Doesn't imply a read memory barrier.
7709+ */
7710+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7711+{
7712+ return (*(volatile const long *)&(v)->counter);
7713 }
7714
7715 /**
7716@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7717 }
7718
7719 /**
7720+ * atomic64_set_unchecked - set atomic64 variable
7721+ * @v: pointer to type atomic64_unchecked_t
7722+ * @i: required value
7723+ *
7724+ * Atomically sets the value of @v to @i.
7725+ */
7726+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7727+{
7728+ v->counter = i;
7729+}
7730+
7731+/**
7732 * atomic64_add - add integer to atomic64 variable
7733 * @i: integer value to add
7734 * @v: pointer to type atomic64_t
7735@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7736 */
7737 static inline void atomic64_add(long i, atomic64_t *v)
7738 {
7739+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7740+
7741+#ifdef CONFIG_PAX_REFCOUNT
7742+ "jno 0f\n"
7743+ LOCK_PREFIX "subq %1,%0\n"
7744+ "int $4\n0:\n"
7745+ _ASM_EXTABLE(0b, 0b)
7746+#endif
7747+
7748+ : "=m" (v->counter)
7749+ : "er" (i), "m" (v->counter));
7750+}
7751+
7752+/**
7753+ * atomic64_add_unchecked - add integer to atomic64 variable
7754+ * @i: integer value to add
7755+ * @v: pointer to type atomic64_unchecked_t
7756+ *
7757+ * Atomically adds @i to @v.
7758+ */
7759+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7760+{
7761 asm volatile(LOCK_PREFIX "addq %1,%0"
7762 : "=m" (v->counter)
7763 : "er" (i), "m" (v->counter));
7764@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7765 */
7766 static inline void atomic64_sub(long i, atomic64_t *v)
7767 {
7768- asm volatile(LOCK_PREFIX "subq %1,%0"
7769+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7770+
7771+#ifdef CONFIG_PAX_REFCOUNT
7772+ "jno 0f\n"
7773+ LOCK_PREFIX "addq %1,%0\n"
7774+ "int $4\n0:\n"
7775+ _ASM_EXTABLE(0b, 0b)
7776+#endif
7777+
7778+ : "=m" (v->counter)
7779+ : "er" (i), "m" (v->counter));
7780+}
7781+
7782+/**
7783+ * atomic64_sub_unchecked - subtract the atomic64 variable
7784+ * @i: integer value to subtract
7785+ * @v: pointer to type atomic64_unchecked_t
7786+ *
7787+ * Atomically subtracts @i from @v.
7788+ */
7789+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7790+{
7791+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7792 : "=m" (v->counter)
7793 : "er" (i), "m" (v->counter));
7794 }
7795@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7796 {
7797 unsigned char c;
7798
7799- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7800+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7801+
7802+#ifdef CONFIG_PAX_REFCOUNT
7803+ "jno 0f\n"
7804+ LOCK_PREFIX "addq %2,%0\n"
7805+ "int $4\n0:\n"
7806+ _ASM_EXTABLE(0b, 0b)
7807+#endif
7808+
7809+ "sete %1\n"
7810 : "=m" (v->counter), "=qm" (c)
7811 : "er" (i), "m" (v->counter) : "memory");
7812 return c;
7813@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7814 */
7815 static inline void atomic64_inc(atomic64_t *v)
7816 {
7817+ asm volatile(LOCK_PREFIX "incq %0\n"
7818+
7819+#ifdef CONFIG_PAX_REFCOUNT
7820+ "jno 0f\n"
7821+ LOCK_PREFIX "decq %0\n"
7822+ "int $4\n0:\n"
7823+ _ASM_EXTABLE(0b, 0b)
7824+#endif
7825+
7826+ : "=m" (v->counter)
7827+ : "m" (v->counter));
7828+}
7829+
7830+/**
7831+ * atomic64_inc_unchecked - increment atomic64 variable
7832+ * @v: pointer to type atomic64_unchecked_t
7833+ *
7834+ * Atomically increments @v by 1.
7835+ */
7836+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7837+{
7838 asm volatile(LOCK_PREFIX "incq %0"
7839 : "=m" (v->counter)
7840 : "m" (v->counter));
7841@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7842 */
7843 static inline void atomic64_dec(atomic64_t *v)
7844 {
7845- asm volatile(LOCK_PREFIX "decq %0"
7846+ asm volatile(LOCK_PREFIX "decq %0\n"
7847+
7848+#ifdef CONFIG_PAX_REFCOUNT
7849+ "jno 0f\n"
7850+ LOCK_PREFIX "incq %0\n"
7851+ "int $4\n0:\n"
7852+ _ASM_EXTABLE(0b, 0b)
7853+#endif
7854+
7855+ : "=m" (v->counter)
7856+ : "m" (v->counter));
7857+}
7858+
7859+/**
7860+ * atomic64_dec_unchecked - decrement atomic64 variable
7861+ * @v: pointer to type atomic64_t
7862+ *
7863+ * Atomically decrements @v by 1.
7864+ */
7865+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7866+{
7867+ asm volatile(LOCK_PREFIX "decq %0\n"
7868 : "=m" (v->counter)
7869 : "m" (v->counter));
7870 }
7871@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7872 {
7873 unsigned char c;
7874
7875- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7876+ asm volatile(LOCK_PREFIX "decq %0\n"
7877+
7878+#ifdef CONFIG_PAX_REFCOUNT
7879+ "jno 0f\n"
7880+ LOCK_PREFIX "incq %0\n"
7881+ "int $4\n0:\n"
7882+ _ASM_EXTABLE(0b, 0b)
7883+#endif
7884+
7885+ "sete %1\n"
7886 : "=m" (v->counter), "=qm" (c)
7887 : "m" (v->counter) : "memory");
7888 return c != 0;
7889@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7890 {
7891 unsigned char c;
7892
7893- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7894+ asm volatile(LOCK_PREFIX "incq %0\n"
7895+
7896+#ifdef CONFIG_PAX_REFCOUNT
7897+ "jno 0f\n"
7898+ LOCK_PREFIX "decq %0\n"
7899+ "int $4\n0:\n"
7900+ _ASM_EXTABLE(0b, 0b)
7901+#endif
7902+
7903+ "sete %1\n"
7904 : "=m" (v->counter), "=qm" (c)
7905 : "m" (v->counter) : "memory");
7906 return c != 0;
7907@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7908 {
7909 unsigned char c;
7910
7911- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7912+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7913+
7914+#ifdef CONFIG_PAX_REFCOUNT
7915+ "jno 0f\n"
7916+ LOCK_PREFIX "subq %2,%0\n"
7917+ "int $4\n0:\n"
7918+ _ASM_EXTABLE(0b, 0b)
7919+#endif
7920+
7921+ "sets %1\n"
7922 : "=m" (v->counter), "=qm" (c)
7923 : "er" (i), "m" (v->counter) : "memory");
7924 return c;
7925@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7926 static inline long atomic64_add_return(long i, atomic64_t *v)
7927 {
7928 long __i = i;
7929- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7930+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7931+
7932+#ifdef CONFIG_PAX_REFCOUNT
7933+ "jno 0f\n"
7934+ "movq %0, %1\n"
7935+ "int $4\n0:\n"
7936+ _ASM_EXTABLE(0b, 0b)
7937+#endif
7938+
7939+ : "+r" (i), "+m" (v->counter)
7940+ : : "memory");
7941+ return i + __i;
7942+}
7943+
7944+/**
7945+ * atomic64_add_return_unchecked - add and return
7946+ * @i: integer value to add
7947+ * @v: pointer to type atomic64_unchecked_t
7948+ *
7949+ * Atomically adds @i to @v and returns @i + @v
7950+ */
7951+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7952+{
7953+ long __i = i;
7954+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7955 : "+r" (i), "+m" (v->counter)
7956 : : "memory");
7957 return i + __i;
7958@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7959 }
7960
7961 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7962+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7963+{
7964+ return atomic64_add_return_unchecked(1, v);
7965+}
7966 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7967
7968 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7969@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7970 return cmpxchg(&v->counter, old, new);
7971 }
7972
7973+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7974+{
7975+ return cmpxchg(&v->counter, old, new);
7976+}
7977+
7978 static inline long atomic64_xchg(atomic64_t *v, long new)
7979 {
7980 return xchg(&v->counter, new);
7981@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
7982 */
7983 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7984 {
7985- long c, old;
7986+ long c, old, new;
7987 c = atomic64_read(v);
7988 for (;;) {
7989- if (unlikely(c == (u)))
7990+ if (unlikely(c == u))
7991 break;
7992- old = atomic64_cmpxchg((v), c, c + (a));
7993+
7994+ asm volatile("add %2,%0\n"
7995+
7996+#ifdef CONFIG_PAX_REFCOUNT
7997+ "jno 0f\n"
7998+ "sub %2,%0\n"
7999+ "int $4\n0:\n"
8000+ _ASM_EXTABLE(0b, 0b)
8001+#endif
8002+
8003+ : "=r" (new)
8004+ : "0" (c), "ir" (a));
8005+
8006+ old = atomic64_cmpxchg(v, c, new);
8007 if (likely(old == c))
8008 break;
8009 c = old;
8010 }
8011- return c != (u);
8012+ return c != u;
8013 }
8014
8015 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8016diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8017index 1775d6e..b65017f 100644
8018--- a/arch/x86/include/asm/bitops.h
8019+++ b/arch/x86/include/asm/bitops.h
8020@@ -38,7 +38,7 @@
8021 * a mask operation on a byte.
8022 */
8023 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8024-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8025+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8026 #define CONST_MASK(nr) (1 << ((nr) & 7))
8027
8028 /**
8029diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8030index 5e1a2ee..c9f9533 100644
8031--- a/arch/x86/include/asm/boot.h
8032+++ b/arch/x86/include/asm/boot.h
8033@@ -11,10 +11,15 @@
8034 #include <asm/pgtable_types.h>
8035
8036 /* Physical address where kernel should be loaded. */
8037-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8038+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8039 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8040 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8041
8042+#ifndef __ASSEMBLY__
8043+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8044+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8045+#endif
8046+
8047 /* Minimum kernel alignment, as a power of two */
8048 #ifdef CONFIG_X86_64
8049 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8050diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8051index 48f99f1..d78ebf9 100644
8052--- a/arch/x86/include/asm/cache.h
8053+++ b/arch/x86/include/asm/cache.h
8054@@ -5,12 +5,13 @@
8055
8056 /* L1 cache line size */
8057 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8058-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8059+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8060
8061 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8062+#define __read_only __attribute__((__section__(".data..read_only")))
8063
8064 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8065-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8066+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8067
8068 #ifdef CONFIG_X86_VSMP
8069 #ifdef CONFIG_SMP
8070diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8071index 4e12668..501d239 100644
8072--- a/arch/x86/include/asm/cacheflush.h
8073+++ b/arch/x86/include/asm/cacheflush.h
8074@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8075 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8076
8077 if (pg_flags == _PGMT_DEFAULT)
8078- return -1;
8079+ return ~0UL;
8080 else if (pg_flags == _PGMT_WC)
8081 return _PAGE_CACHE_WC;
8082 else if (pg_flags == _PGMT_UC_MINUS)
8083diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8084index 46fc474..b02b0f9 100644
8085--- a/arch/x86/include/asm/checksum_32.h
8086+++ b/arch/x86/include/asm/checksum_32.h
8087@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8088 int len, __wsum sum,
8089 int *src_err_ptr, int *dst_err_ptr);
8090
8091+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8092+ int len, __wsum sum,
8093+ int *src_err_ptr, int *dst_err_ptr);
8094+
8095+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8096+ int len, __wsum sum,
8097+ int *src_err_ptr, int *dst_err_ptr);
8098+
8099 /*
8100 * Note: when you get a NULL pointer exception here this means someone
8101 * passed in an incorrect kernel address to one of these functions.
8102@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8103 int *err_ptr)
8104 {
8105 might_sleep();
8106- return csum_partial_copy_generic((__force void *)src, dst,
8107+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8108 len, sum, err_ptr, NULL);
8109 }
8110
8111@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8112 {
8113 might_sleep();
8114 if (access_ok(VERIFY_WRITE, dst, len))
8115- return csum_partial_copy_generic(src, (__force void *)dst,
8116+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8117 len, sum, NULL, err_ptr);
8118
8119 if (len)
8120diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8121index 88b23a4..d2e5f9f 100644
8122--- a/arch/x86/include/asm/cpufeature.h
8123+++ b/arch/x86/include/asm/cpufeature.h
8124@@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8125 ".section .discard,\"aw\",@progbits\n"
8126 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8127 ".previous\n"
8128- ".section .altinstr_replacement,\"ax\"\n"
8129+ ".section .altinstr_replacement,\"a\"\n"
8130 "3: movb $1,%0\n"
8131 "4:\n"
8132 ".previous\n"
8133diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8134index 41935fa..3b40db8 100644
8135--- a/arch/x86/include/asm/desc.h
8136+++ b/arch/x86/include/asm/desc.h
8137@@ -4,6 +4,7 @@
8138 #include <asm/desc_defs.h>
8139 #include <asm/ldt.h>
8140 #include <asm/mmu.h>
8141+#include <asm/pgtable.h>
8142
8143 #include <linux/smp.h>
8144
8145@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8146
8147 desc->type = (info->read_exec_only ^ 1) << 1;
8148 desc->type |= info->contents << 2;
8149+ desc->type |= info->seg_not_present ^ 1;
8150
8151 desc->s = 1;
8152 desc->dpl = 0x3;
8153@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8154 }
8155
8156 extern struct desc_ptr idt_descr;
8157-extern gate_desc idt_table[];
8158-
8159-struct gdt_page {
8160- struct desc_struct gdt[GDT_ENTRIES];
8161-} __attribute__((aligned(PAGE_SIZE)));
8162-
8163-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8164+extern gate_desc idt_table[256];
8165
8166+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8167 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8168 {
8169- return per_cpu(gdt_page, cpu).gdt;
8170+ return cpu_gdt_table[cpu];
8171 }
8172
8173 #ifdef CONFIG_X86_64
8174@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8175 unsigned long base, unsigned dpl, unsigned flags,
8176 unsigned short seg)
8177 {
8178- gate->a = (seg << 16) | (base & 0xffff);
8179- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8180+ gate->gate.offset_low = base;
8181+ gate->gate.seg = seg;
8182+ gate->gate.reserved = 0;
8183+ gate->gate.type = type;
8184+ gate->gate.s = 0;
8185+ gate->gate.dpl = dpl;
8186+ gate->gate.p = 1;
8187+ gate->gate.offset_high = base >> 16;
8188 }
8189
8190 #endif
8191@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8192
8193 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8194 {
8195+ pax_open_kernel();
8196 memcpy(&idt[entry], gate, sizeof(*gate));
8197+ pax_close_kernel();
8198 }
8199
8200 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8201 {
8202+ pax_open_kernel();
8203 memcpy(&ldt[entry], desc, 8);
8204+ pax_close_kernel();
8205 }
8206
8207 static inline void
8208@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8209 default: size = sizeof(*gdt); break;
8210 }
8211
8212+ pax_open_kernel();
8213 memcpy(&gdt[entry], desc, size);
8214+ pax_close_kernel();
8215 }
8216
8217 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8218@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8219
8220 static inline void native_load_tr_desc(void)
8221 {
8222+ pax_open_kernel();
8223 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8224+ pax_close_kernel();
8225 }
8226
8227 static inline void native_load_gdt(const struct desc_ptr *dtr)
8228@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8229 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8230 unsigned int i;
8231
8232+ pax_open_kernel();
8233 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8234 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8235+ pax_close_kernel();
8236 }
8237
8238 #define _LDT_empty(info) \
8239@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8240 desc->limit = (limit >> 16) & 0xf;
8241 }
8242
8243-static inline void _set_gate(int gate, unsigned type, void *addr,
8244+static inline void _set_gate(int gate, unsigned type, const void *addr,
8245 unsigned dpl, unsigned ist, unsigned seg)
8246 {
8247 gate_desc s;
8248@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8249 * Pentium F0 0F bugfix can have resulted in the mapped
8250 * IDT being write-protected.
8251 */
8252-static inline void set_intr_gate(unsigned int n, void *addr)
8253+static inline void set_intr_gate(unsigned int n, const void *addr)
8254 {
8255 BUG_ON((unsigned)n > 0xFF);
8256 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8257@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8258 /*
8259 * This routine sets up an interrupt gate at directory privilege level 3.
8260 */
8261-static inline void set_system_intr_gate(unsigned int n, void *addr)
8262+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8263 {
8264 BUG_ON((unsigned)n > 0xFF);
8265 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8266 }
8267
8268-static inline void set_system_trap_gate(unsigned int n, void *addr)
8269+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8270 {
8271 BUG_ON((unsigned)n > 0xFF);
8272 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8273 }
8274
8275-static inline void set_trap_gate(unsigned int n, void *addr)
8276+static inline void set_trap_gate(unsigned int n, const void *addr)
8277 {
8278 BUG_ON((unsigned)n > 0xFF);
8279 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8280@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8281 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8282 {
8283 BUG_ON((unsigned)n > 0xFF);
8284- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8285+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8286 }
8287
8288-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8289+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8290 {
8291 BUG_ON((unsigned)n > 0xFF);
8292 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8293 }
8294
8295-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8296+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8297 {
8298 BUG_ON((unsigned)n > 0xFF);
8299 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8300 }
8301
8302+#ifdef CONFIG_X86_32
8303+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8304+{
8305+ struct desc_struct d;
8306+
8307+ if (likely(limit))
8308+ limit = (limit - 1UL) >> PAGE_SHIFT;
8309+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8310+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8311+}
8312+#endif
8313+
8314 #endif /* _ASM_X86_DESC_H */
8315diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8316index 278441f..b95a174 100644
8317--- a/arch/x86/include/asm/desc_defs.h
8318+++ b/arch/x86/include/asm/desc_defs.h
8319@@ -31,6 +31,12 @@ struct desc_struct {
8320 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8321 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8322 };
8323+ struct {
8324+ u16 offset_low;
8325+ u16 seg;
8326+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8327+ unsigned offset_high: 16;
8328+ } gate;
8329 };
8330 } __attribute__((packed));
8331
8332diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8333index 908b969..a1f4eb4 100644
8334--- a/arch/x86/include/asm/e820.h
8335+++ b/arch/x86/include/asm/e820.h
8336@@ -69,7 +69,7 @@ struct e820map {
8337 #define ISA_START_ADDRESS 0xa0000
8338 #define ISA_END_ADDRESS 0x100000
8339
8340-#define BIOS_BEGIN 0x000a0000
8341+#define BIOS_BEGIN 0x000c0000
8342 #define BIOS_END 0x00100000
8343
8344 #define BIOS_ROM_BASE 0xffe00000
8345diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8346index f2ad216..eb24c96 100644
8347--- a/arch/x86/include/asm/elf.h
8348+++ b/arch/x86/include/asm/elf.h
8349@@ -237,7 +237,25 @@ extern int force_personality32;
8350 the loader. We need to make sure that it is out of the way of the program
8351 that it will "exec", and that there is sufficient room for the brk. */
8352
8353+#ifdef CONFIG_PAX_SEGMEXEC
8354+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8355+#else
8356 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8357+#endif
8358+
8359+#ifdef CONFIG_PAX_ASLR
8360+#ifdef CONFIG_X86_32
8361+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8362+
8363+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8364+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8365+#else
8366+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8367+
8368+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8369+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8370+#endif
8371+#endif
8372
8373 /* This yields a mask that user programs can use to figure out what
8374 instruction set this CPU supports. This could be done in user space,
8375@@ -290,9 +308,7 @@ do { \
8376
8377 #define ARCH_DLINFO \
8378 do { \
8379- if (vdso_enabled) \
8380- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8381- (unsigned long)current->mm->context.vdso); \
8382+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8383 } while (0)
8384
8385 #define AT_SYSINFO 32
8386@@ -303,7 +319,7 @@ do { \
8387
8388 #endif /* !CONFIG_X86_32 */
8389
8390-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8391+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8392
8393 #define VDSO_ENTRY \
8394 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8395@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8396 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8397 #define compat_arch_setup_additional_pages syscall32_setup_pages
8398
8399-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8400-#define arch_randomize_brk arch_randomize_brk
8401-
8402 #endif /* _ASM_X86_ELF_H */
8403diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8404index cc70c1c..d96d011 100644
8405--- a/arch/x86/include/asm/emergency-restart.h
8406+++ b/arch/x86/include/asm/emergency-restart.h
8407@@ -15,6 +15,6 @@ enum reboot_type {
8408
8409 extern enum reboot_type reboot_type;
8410
8411-extern void machine_emergency_restart(void);
8412+extern void machine_emergency_restart(void) __noreturn;
8413
8414 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8415diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8416index d09bb03..4ea4194 100644
8417--- a/arch/x86/include/asm/futex.h
8418+++ b/arch/x86/include/asm/futex.h
8419@@ -12,16 +12,18 @@
8420 #include <asm/system.h>
8421
8422 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8423+ typecheck(u32 __user *, uaddr); \
8424 asm volatile("1:\t" insn "\n" \
8425 "2:\t.section .fixup,\"ax\"\n" \
8426 "3:\tmov\t%3, %1\n" \
8427 "\tjmp\t2b\n" \
8428 "\t.previous\n" \
8429 _ASM_EXTABLE(1b, 3b) \
8430- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8431+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8432 : "i" (-EFAULT), "0" (oparg), "1" (0))
8433
8434 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8435+ typecheck(u32 __user *, uaddr); \
8436 asm volatile("1:\tmovl %2, %0\n" \
8437 "\tmovl\t%0, %3\n" \
8438 "\t" insn "\n" \
8439@@ -34,7 +36,7 @@
8440 _ASM_EXTABLE(1b, 4b) \
8441 _ASM_EXTABLE(2b, 4b) \
8442 : "=&a" (oldval), "=&r" (ret), \
8443- "+m" (*uaddr), "=&r" (tem) \
8444+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8445 : "r" (oparg), "i" (-EFAULT), "1" (0))
8446
8447 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8448@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8449
8450 switch (op) {
8451 case FUTEX_OP_SET:
8452- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8453+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8454 break;
8455 case FUTEX_OP_ADD:
8456- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8457+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8458 uaddr, oparg);
8459 break;
8460 case FUTEX_OP_OR:
8461@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8462 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8463 return -EFAULT;
8464
8465- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8466+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8467 "2:\t.section .fixup, \"ax\"\n"
8468 "3:\tmov %3, %0\n"
8469 "\tjmp 2b\n"
8470 "\t.previous\n"
8471 _ASM_EXTABLE(1b, 3b)
8472- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8473+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8474 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8475 : "memory"
8476 );
8477diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8478index 0919905..2cf38d6 100644
8479--- a/arch/x86/include/asm/hw_irq.h
8480+++ b/arch/x86/include/asm/hw_irq.h
8481@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8482 extern void enable_IO_APIC(void);
8483
8484 /* Statistics */
8485-extern atomic_t irq_err_count;
8486-extern atomic_t irq_mis_count;
8487+extern atomic_unchecked_t irq_err_count;
8488+extern atomic_unchecked_t irq_mis_count;
8489
8490 /* EISA */
8491 extern void eisa_set_level_irq(unsigned int irq);
8492diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8493index c9e09ea..73888df 100644
8494--- a/arch/x86/include/asm/i387.h
8495+++ b/arch/x86/include/asm/i387.h
8496@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8497 {
8498 int err;
8499
8500+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8501+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8502+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8503+#endif
8504+
8505 /* See comment in fxsave() below. */
8506 #ifdef CONFIG_AS_FXSAVEQ
8507 asm volatile("1: fxrstorq %[fx]\n\t"
8508@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8509 {
8510 int err;
8511
8512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8513+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8514+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8515+#endif
8516+
8517 /*
8518 * Clear the bytes not touched by the fxsave and reserved
8519 * for the SW usage.
8520@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8521 #endif /* CONFIG_X86_64 */
8522
8523 /* We need a safe address that is cheap to find and that is already
8524- in L1 during context switch. The best choices are unfortunately
8525- different for UP and SMP */
8526-#ifdef CONFIG_SMP
8527-#define safe_address (__per_cpu_offset[0])
8528-#else
8529-#define safe_address (kstat_cpu(0).cpustat.user)
8530-#endif
8531+ in L1 during context switch. */
8532+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8533
8534 /*
8535 * These must be called with preempt disabled
8536@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8537 struct thread_info *me = current_thread_info();
8538 preempt_disable();
8539 if (me->status & TS_USEDFPU)
8540- __save_init_fpu(me->task);
8541+ __save_init_fpu(current);
8542 else
8543 clts();
8544 }
8545diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8546index d8e8eef..99f81ae 100644
8547--- a/arch/x86/include/asm/io.h
8548+++ b/arch/x86/include/asm/io.h
8549@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8550
8551 #include <linux/vmalloc.h>
8552
8553+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8554+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8555+{
8556+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8557+}
8558+
8559+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8560+{
8561+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8562+}
8563+
8564 /*
8565 * Convert a virtual cached pointer to an uncached pointer
8566 */
8567diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8568index bba3cf8..06bc8da 100644
8569--- a/arch/x86/include/asm/irqflags.h
8570+++ b/arch/x86/include/asm/irqflags.h
8571@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8572 sti; \
8573 sysexit
8574
8575+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8576+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8577+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8578+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8579+
8580 #else
8581 #define INTERRUPT_RETURN iret
8582 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8583diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8584index 5478825..839e88c 100644
8585--- a/arch/x86/include/asm/kprobes.h
8586+++ b/arch/x86/include/asm/kprobes.h
8587@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8588 #define RELATIVEJUMP_SIZE 5
8589 #define RELATIVECALL_OPCODE 0xe8
8590 #define RELATIVE_ADDR_SIZE 4
8591-#define MAX_STACK_SIZE 64
8592-#define MIN_STACK_SIZE(ADDR) \
8593- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8594- THREAD_SIZE - (unsigned long)(ADDR))) \
8595- ? (MAX_STACK_SIZE) \
8596- : (((unsigned long)current_thread_info()) + \
8597- THREAD_SIZE - (unsigned long)(ADDR)))
8598+#define MAX_STACK_SIZE 64UL
8599+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8600
8601 #define flush_insn_slot(p) do { } while (0)
8602
8603diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8604index dd51c83..66cbfac 100644
8605--- a/arch/x86/include/asm/kvm_host.h
8606+++ b/arch/x86/include/asm/kvm_host.h
8607@@ -456,7 +456,7 @@ struct kvm_arch {
8608 unsigned int n_requested_mmu_pages;
8609 unsigned int n_max_mmu_pages;
8610 unsigned int indirect_shadow_pages;
8611- atomic_t invlpg_counter;
8612+ atomic_unchecked_t invlpg_counter;
8613 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8614 /*
8615 * Hash table of struct kvm_mmu_page.
8616@@ -636,7 +636,7 @@ struct kvm_x86_ops {
8617 enum x86_intercept_stage stage);
8618
8619 const struct trace_print_flags *exit_reasons_str;
8620-};
8621+} __do_const;
8622
8623 struct kvm_arch_async_pf {
8624 u32 token;
8625diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8626index 9cdae5d..300d20f 100644
8627--- a/arch/x86/include/asm/local.h
8628+++ b/arch/x86/include/asm/local.h
8629@@ -18,26 +18,58 @@ typedef struct {
8630
8631 static inline void local_inc(local_t *l)
8632 {
8633- asm volatile(_ASM_INC "%0"
8634+ asm volatile(_ASM_INC "%0\n"
8635+
8636+#ifdef CONFIG_PAX_REFCOUNT
8637+ "jno 0f\n"
8638+ _ASM_DEC "%0\n"
8639+ "int $4\n0:\n"
8640+ _ASM_EXTABLE(0b, 0b)
8641+#endif
8642+
8643 : "+m" (l->a.counter));
8644 }
8645
8646 static inline void local_dec(local_t *l)
8647 {
8648- asm volatile(_ASM_DEC "%0"
8649+ asm volatile(_ASM_DEC "%0\n"
8650+
8651+#ifdef CONFIG_PAX_REFCOUNT
8652+ "jno 0f\n"
8653+ _ASM_INC "%0\n"
8654+ "int $4\n0:\n"
8655+ _ASM_EXTABLE(0b, 0b)
8656+#endif
8657+
8658 : "+m" (l->a.counter));
8659 }
8660
8661 static inline void local_add(long i, local_t *l)
8662 {
8663- asm volatile(_ASM_ADD "%1,%0"
8664+ asm volatile(_ASM_ADD "%1,%0\n"
8665+
8666+#ifdef CONFIG_PAX_REFCOUNT
8667+ "jno 0f\n"
8668+ _ASM_SUB "%1,%0\n"
8669+ "int $4\n0:\n"
8670+ _ASM_EXTABLE(0b, 0b)
8671+#endif
8672+
8673 : "+m" (l->a.counter)
8674 : "ir" (i));
8675 }
8676
8677 static inline void local_sub(long i, local_t *l)
8678 {
8679- asm volatile(_ASM_SUB "%1,%0"
8680+ asm volatile(_ASM_SUB "%1,%0\n"
8681+
8682+#ifdef CONFIG_PAX_REFCOUNT
8683+ "jno 0f\n"
8684+ _ASM_ADD "%1,%0\n"
8685+ "int $4\n0:\n"
8686+ _ASM_EXTABLE(0b, 0b)
8687+#endif
8688+
8689 : "+m" (l->a.counter)
8690 : "ir" (i));
8691 }
8692@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8693 {
8694 unsigned char c;
8695
8696- asm volatile(_ASM_SUB "%2,%0; sete %1"
8697+ asm volatile(_ASM_SUB "%2,%0\n"
8698+
8699+#ifdef CONFIG_PAX_REFCOUNT
8700+ "jno 0f\n"
8701+ _ASM_ADD "%2,%0\n"
8702+ "int $4\n0:\n"
8703+ _ASM_EXTABLE(0b, 0b)
8704+#endif
8705+
8706+ "sete %1\n"
8707 : "+m" (l->a.counter), "=qm" (c)
8708 : "ir" (i) : "memory");
8709 return c;
8710@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8711 {
8712 unsigned char c;
8713
8714- asm volatile(_ASM_DEC "%0; sete %1"
8715+ asm volatile(_ASM_DEC "%0\n"
8716+
8717+#ifdef CONFIG_PAX_REFCOUNT
8718+ "jno 0f\n"
8719+ _ASM_INC "%0\n"
8720+ "int $4\n0:\n"
8721+ _ASM_EXTABLE(0b, 0b)
8722+#endif
8723+
8724+ "sete %1\n"
8725 : "+m" (l->a.counter), "=qm" (c)
8726 : : "memory");
8727 return c != 0;
8728@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8729 {
8730 unsigned char c;
8731
8732- asm volatile(_ASM_INC "%0; sete %1"
8733+ asm volatile(_ASM_INC "%0\n"
8734+
8735+#ifdef CONFIG_PAX_REFCOUNT
8736+ "jno 0f\n"
8737+ _ASM_DEC "%0\n"
8738+ "int $4\n0:\n"
8739+ _ASM_EXTABLE(0b, 0b)
8740+#endif
8741+
8742+ "sete %1\n"
8743 : "+m" (l->a.counter), "=qm" (c)
8744 : : "memory");
8745 return c != 0;
8746@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8747 {
8748 unsigned char c;
8749
8750- asm volatile(_ASM_ADD "%2,%0; sets %1"
8751+ asm volatile(_ASM_ADD "%2,%0\n"
8752+
8753+#ifdef CONFIG_PAX_REFCOUNT
8754+ "jno 0f\n"
8755+ _ASM_SUB "%2,%0\n"
8756+ "int $4\n0:\n"
8757+ _ASM_EXTABLE(0b, 0b)
8758+#endif
8759+
8760+ "sets %1\n"
8761 : "+m" (l->a.counter), "=qm" (c)
8762 : "ir" (i) : "memory");
8763 return c;
8764@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8765 #endif
8766 /* Modern 486+ processor */
8767 __i = i;
8768- asm volatile(_ASM_XADD "%0, %1;"
8769+ asm volatile(_ASM_XADD "%0, %1\n"
8770+
8771+#ifdef CONFIG_PAX_REFCOUNT
8772+ "jno 0f\n"
8773+ _ASM_MOV "%0,%1\n"
8774+ "int $4\n0:\n"
8775+ _ASM_EXTABLE(0b, 0b)
8776+#endif
8777+
8778 : "+r" (i), "+m" (l->a.counter)
8779 : : "memory");
8780 return i + __i;
8781diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8782index 593e51d..fa69c9a 100644
8783--- a/arch/x86/include/asm/mman.h
8784+++ b/arch/x86/include/asm/mman.h
8785@@ -5,4 +5,14 @@
8786
8787 #include <asm-generic/mman.h>
8788
8789+#ifdef __KERNEL__
8790+#ifndef __ASSEMBLY__
8791+#ifdef CONFIG_X86_32
8792+#define arch_mmap_check i386_mmap_check
8793+int i386_mmap_check(unsigned long addr, unsigned long len,
8794+ unsigned long flags);
8795+#endif
8796+#endif
8797+#endif
8798+
8799 #endif /* _ASM_X86_MMAN_H */
8800diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8801index 5f55e69..e20bfb1 100644
8802--- a/arch/x86/include/asm/mmu.h
8803+++ b/arch/x86/include/asm/mmu.h
8804@@ -9,7 +9,7 @@
8805 * we put the segment information here.
8806 */
8807 typedef struct {
8808- void *ldt;
8809+ struct desc_struct *ldt;
8810 int size;
8811
8812 #ifdef CONFIG_X86_64
8813@@ -18,7 +18,19 @@ typedef struct {
8814 #endif
8815
8816 struct mutex lock;
8817- void *vdso;
8818+ unsigned long vdso;
8819+
8820+#ifdef CONFIG_X86_32
8821+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8822+ unsigned long user_cs_base;
8823+ unsigned long user_cs_limit;
8824+
8825+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8826+ cpumask_t cpu_user_cs_mask;
8827+#endif
8828+
8829+#endif
8830+#endif
8831 } mm_context_t;
8832
8833 #ifdef CONFIG_SMP
8834diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8835index 6902152..399f3a2 100644
8836--- a/arch/x86/include/asm/mmu_context.h
8837+++ b/arch/x86/include/asm/mmu_context.h
8838@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8839
8840 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8841 {
8842+
8843+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8844+ unsigned int i;
8845+ pgd_t *pgd;
8846+
8847+ pax_open_kernel();
8848+ pgd = get_cpu_pgd(smp_processor_id());
8849+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8850+ set_pgd_batched(pgd+i, native_make_pgd(0));
8851+ pax_close_kernel();
8852+#endif
8853+
8854 #ifdef CONFIG_SMP
8855 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8856 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8857@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8858 struct task_struct *tsk)
8859 {
8860 unsigned cpu = smp_processor_id();
8861+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8862+ int tlbstate = TLBSTATE_OK;
8863+#endif
8864
8865 if (likely(prev != next)) {
8866 #ifdef CONFIG_SMP
8867+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8868+ tlbstate = percpu_read(cpu_tlbstate.state);
8869+#endif
8870 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8871 percpu_write(cpu_tlbstate.active_mm, next);
8872 #endif
8873 cpumask_set_cpu(cpu, mm_cpumask(next));
8874
8875 /* Re-load page tables */
8876+#ifdef CONFIG_PAX_PER_CPU_PGD
8877+ pax_open_kernel();
8878+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8879+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8880+ pax_close_kernel();
8881+ load_cr3(get_cpu_pgd(cpu));
8882+#else
8883 load_cr3(next->pgd);
8884+#endif
8885
8886 /* stop flush ipis for the previous mm */
8887 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8888@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8889 */
8890 if (unlikely(prev->context.ldt != next->context.ldt))
8891 load_LDT_nolock(&next->context);
8892- }
8893+
8894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8895+ if (!(__supported_pte_mask & _PAGE_NX)) {
8896+ smp_mb__before_clear_bit();
8897+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8898+ smp_mb__after_clear_bit();
8899+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8900+ }
8901+#endif
8902+
8903+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8904+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8905+ prev->context.user_cs_limit != next->context.user_cs_limit))
8906+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8907 #ifdef CONFIG_SMP
8908+ else if (unlikely(tlbstate != TLBSTATE_OK))
8909+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8910+#endif
8911+#endif
8912+
8913+ }
8914 else {
8915+
8916+#ifdef CONFIG_PAX_PER_CPU_PGD
8917+ pax_open_kernel();
8918+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8919+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8920+ pax_close_kernel();
8921+ load_cr3(get_cpu_pgd(cpu));
8922+#endif
8923+
8924+#ifdef CONFIG_SMP
8925 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8926 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8927
8928@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8929 * tlb flush IPI delivery. We must reload CR3
8930 * to make sure to use no freed page tables.
8931 */
8932+
8933+#ifndef CONFIG_PAX_PER_CPU_PGD
8934 load_cr3(next->pgd);
8935+#endif
8936+
8937 load_LDT_nolock(&next->context);
8938+
8939+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8940+ if (!(__supported_pte_mask & _PAGE_NX))
8941+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8942+#endif
8943+
8944+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8945+#ifdef CONFIG_PAX_PAGEEXEC
8946+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8947+#endif
8948+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8949+#endif
8950+
8951 }
8952+#endif
8953 }
8954-#endif
8955 }
8956
8957 #define activate_mm(prev, next) \
8958diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
8959index 9eae775..c914fea 100644
8960--- a/arch/x86/include/asm/module.h
8961+++ b/arch/x86/include/asm/module.h
8962@@ -5,6 +5,7 @@
8963
8964 #ifdef CONFIG_X86_64
8965 /* X86_64 does not define MODULE_PROC_FAMILY */
8966+#define MODULE_PROC_FAMILY ""
8967 #elif defined CONFIG_M386
8968 #define MODULE_PROC_FAMILY "386 "
8969 #elif defined CONFIG_M486
8970@@ -59,8 +60,20 @@
8971 #error unknown processor family
8972 #endif
8973
8974-#ifdef CONFIG_X86_32
8975-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8976+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8977+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8978+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8979+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
8980+#else
8981+#define MODULE_PAX_KERNEXEC ""
8982 #endif
8983
8984+#ifdef CONFIG_PAX_MEMORY_UDEREF
8985+#define MODULE_PAX_UDEREF "UDEREF "
8986+#else
8987+#define MODULE_PAX_UDEREF ""
8988+#endif
8989+
8990+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8991+
8992 #endif /* _ASM_X86_MODULE_H */
8993diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
8994index 7639dbf..e08a58c 100644
8995--- a/arch/x86/include/asm/page_64_types.h
8996+++ b/arch/x86/include/asm/page_64_types.h
8997@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8998
8999 /* duplicated to the one in bootmem.h */
9000 extern unsigned long max_pfn;
9001-extern unsigned long phys_base;
9002+extern const unsigned long phys_base;
9003
9004 extern unsigned long __phys_addr(unsigned long);
9005 #define __phys_reloc_hide(x) (x)
9006diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9007index a7d2db9..edb023e 100644
9008--- a/arch/x86/include/asm/paravirt.h
9009+++ b/arch/x86/include/asm/paravirt.h
9010@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9011 val);
9012 }
9013
9014+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9015+{
9016+ pgdval_t val = native_pgd_val(pgd);
9017+
9018+ if (sizeof(pgdval_t) > sizeof(long))
9019+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9020+ val, (u64)val >> 32);
9021+ else
9022+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9023+ val);
9024+}
9025+
9026 static inline void pgd_clear(pgd_t *pgdp)
9027 {
9028 set_pgd(pgdp, __pgd(0));
9029@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9030 pv_mmu_ops.set_fixmap(idx, phys, flags);
9031 }
9032
9033+#ifdef CONFIG_PAX_KERNEXEC
9034+static inline unsigned long pax_open_kernel(void)
9035+{
9036+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9037+}
9038+
9039+static inline unsigned long pax_close_kernel(void)
9040+{
9041+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9042+}
9043+#else
9044+static inline unsigned long pax_open_kernel(void) { return 0; }
9045+static inline unsigned long pax_close_kernel(void) { return 0; }
9046+#endif
9047+
9048 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9049
9050 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9051@@ -964,7 +991,7 @@ extern void default_banner(void);
9052
9053 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9054 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9055-#define PARA_INDIRECT(addr) *%cs:addr
9056+#define PARA_INDIRECT(addr) *%ss:addr
9057 #endif
9058
9059 #define INTERRUPT_RETURN \
9060@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9061 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9062 CLBR_NONE, \
9063 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9064+
9065+#define GET_CR0_INTO_RDI \
9066+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9067+ mov %rax,%rdi
9068+
9069+#define SET_RDI_INTO_CR0 \
9070+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9071+
9072+#define GET_CR3_INTO_RDI \
9073+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9074+ mov %rax,%rdi
9075+
9076+#define SET_RDI_INTO_CR3 \
9077+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9078+
9079 #endif /* CONFIG_X86_32 */
9080
9081 #endif /* __ASSEMBLY__ */
9082diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9083index 8e8b9a4..f07d725 100644
9084--- a/arch/x86/include/asm/paravirt_types.h
9085+++ b/arch/x86/include/asm/paravirt_types.h
9086@@ -84,20 +84,20 @@ struct pv_init_ops {
9087 */
9088 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9089 unsigned long addr, unsigned len);
9090-};
9091+} __no_const;
9092
9093
9094 struct pv_lazy_ops {
9095 /* Set deferred update mode, used for batching operations. */
9096 void (*enter)(void);
9097 void (*leave)(void);
9098-};
9099+} __no_const;
9100
9101 struct pv_time_ops {
9102 unsigned long long (*sched_clock)(void);
9103 unsigned long long (*steal_clock)(int cpu);
9104 unsigned long (*get_tsc_khz)(void);
9105-};
9106+} __no_const;
9107
9108 struct pv_cpu_ops {
9109 /* hooks for various privileged instructions */
9110@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9111
9112 void (*start_context_switch)(struct task_struct *prev);
9113 void (*end_context_switch)(struct task_struct *next);
9114-};
9115+} __no_const;
9116
9117 struct pv_irq_ops {
9118 /*
9119@@ -224,7 +224,7 @@ struct pv_apic_ops {
9120 unsigned long start_eip,
9121 unsigned long start_esp);
9122 #endif
9123-};
9124+} __no_const;
9125
9126 struct pv_mmu_ops {
9127 unsigned long (*read_cr2)(void);
9128@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9129 struct paravirt_callee_save make_pud;
9130
9131 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9132+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9133 #endif /* PAGETABLE_LEVELS == 4 */
9134 #endif /* PAGETABLE_LEVELS >= 3 */
9135
9136@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9137 an mfn. We can tell which is which from the index. */
9138 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9139 phys_addr_t phys, pgprot_t flags);
9140+
9141+#ifdef CONFIG_PAX_KERNEXEC
9142+ unsigned long (*pax_open_kernel)(void);
9143+ unsigned long (*pax_close_kernel)(void);
9144+#endif
9145+
9146 };
9147
9148 struct arch_spinlock;
9149@@ -334,7 +341,7 @@ struct pv_lock_ops {
9150 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9151 int (*spin_trylock)(struct arch_spinlock *lock);
9152 void (*spin_unlock)(struct arch_spinlock *lock);
9153-};
9154+} __no_const;
9155
9156 /* This contains all the paravirt structures: we get a convenient
9157 * number for each function using the offset which we use to indicate
9158diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9159index b4389a4..b7ff22c 100644
9160--- a/arch/x86/include/asm/pgalloc.h
9161+++ b/arch/x86/include/asm/pgalloc.h
9162@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9163 pmd_t *pmd, pte_t *pte)
9164 {
9165 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9166+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9167+}
9168+
9169+static inline void pmd_populate_user(struct mm_struct *mm,
9170+ pmd_t *pmd, pte_t *pte)
9171+{
9172+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9173 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9174 }
9175
9176diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9177index 98391db..8f6984e 100644
9178--- a/arch/x86/include/asm/pgtable-2level.h
9179+++ b/arch/x86/include/asm/pgtable-2level.h
9180@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9181
9182 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9183 {
9184+ pax_open_kernel();
9185 *pmdp = pmd;
9186+ pax_close_kernel();
9187 }
9188
9189 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9190diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9191index effff47..f9e4035 100644
9192--- a/arch/x86/include/asm/pgtable-3level.h
9193+++ b/arch/x86/include/asm/pgtable-3level.h
9194@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9195
9196 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9197 {
9198+ pax_open_kernel();
9199 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9200+ pax_close_kernel();
9201 }
9202
9203 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9204 {
9205+ pax_open_kernel();
9206 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9207+ pax_close_kernel();
9208 }
9209
9210 /*
9211diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9212index 18601c8..3d716d1 100644
9213--- a/arch/x86/include/asm/pgtable.h
9214+++ b/arch/x86/include/asm/pgtable.h
9215@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9216
9217 #ifndef __PAGETABLE_PUD_FOLDED
9218 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9219+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9220 #define pgd_clear(pgd) native_pgd_clear(pgd)
9221 #endif
9222
9223@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9224
9225 #define arch_end_context_switch(prev) do {} while(0)
9226
9227+#define pax_open_kernel() native_pax_open_kernel()
9228+#define pax_close_kernel() native_pax_close_kernel()
9229 #endif /* CONFIG_PARAVIRT */
9230
9231+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9232+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9233+
9234+#ifdef CONFIG_PAX_KERNEXEC
9235+static inline unsigned long native_pax_open_kernel(void)
9236+{
9237+ unsigned long cr0;
9238+
9239+ preempt_disable();
9240+ barrier();
9241+ cr0 = read_cr0() ^ X86_CR0_WP;
9242+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9243+ write_cr0(cr0);
9244+ return cr0 ^ X86_CR0_WP;
9245+}
9246+
9247+static inline unsigned long native_pax_close_kernel(void)
9248+{
9249+ unsigned long cr0;
9250+
9251+ cr0 = read_cr0() ^ X86_CR0_WP;
9252+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9253+ write_cr0(cr0);
9254+ barrier();
9255+ preempt_enable_no_resched();
9256+ return cr0 ^ X86_CR0_WP;
9257+}
9258+#else
9259+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9260+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9261+#endif
9262+
9263 /*
9264 * The following only work if pte_present() is true.
9265 * Undefined behaviour if not..
9266 */
9267+static inline int pte_user(pte_t pte)
9268+{
9269+ return pte_val(pte) & _PAGE_USER;
9270+}
9271+
9272 static inline int pte_dirty(pte_t pte)
9273 {
9274 return pte_flags(pte) & _PAGE_DIRTY;
9275@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9276 return pte_clear_flags(pte, _PAGE_RW);
9277 }
9278
9279+static inline pte_t pte_mkread(pte_t pte)
9280+{
9281+ return __pte(pte_val(pte) | _PAGE_USER);
9282+}
9283+
9284 static inline pte_t pte_mkexec(pte_t pte)
9285 {
9286- return pte_clear_flags(pte, _PAGE_NX);
9287+#ifdef CONFIG_X86_PAE
9288+ if (__supported_pte_mask & _PAGE_NX)
9289+ return pte_clear_flags(pte, _PAGE_NX);
9290+ else
9291+#endif
9292+ return pte_set_flags(pte, _PAGE_USER);
9293+}
9294+
9295+static inline pte_t pte_exprotect(pte_t pte)
9296+{
9297+#ifdef CONFIG_X86_PAE
9298+ if (__supported_pte_mask & _PAGE_NX)
9299+ return pte_set_flags(pte, _PAGE_NX);
9300+ else
9301+#endif
9302+ return pte_clear_flags(pte, _PAGE_USER);
9303 }
9304
9305 static inline pte_t pte_mkdirty(pte_t pte)
9306@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9307 #endif
9308
9309 #ifndef __ASSEMBLY__
9310+
9311+#ifdef CONFIG_PAX_PER_CPU_PGD
9312+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9313+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9314+{
9315+ return cpu_pgd[cpu];
9316+}
9317+#endif
9318+
9319 #include <linux/mm_types.h>
9320
9321 static inline int pte_none(pte_t pte)
9322@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9323
9324 static inline int pgd_bad(pgd_t pgd)
9325 {
9326- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9327+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9328 }
9329
9330 static inline int pgd_none(pgd_t pgd)
9331@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9332 * pgd_offset() returns a (pgd_t *)
9333 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9334 */
9335-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9336+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9337+
9338+#ifdef CONFIG_PAX_PER_CPU_PGD
9339+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9340+#endif
9341+
9342 /*
9343 * a shortcut which implies the use of the kernel's pgd, instead
9344 * of a process's
9345@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9346 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9347 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9348
9349+#ifdef CONFIG_X86_32
9350+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9351+#else
9352+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9353+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9354+
9355+#ifdef CONFIG_PAX_MEMORY_UDEREF
9356+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9357+#else
9358+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9359+#endif
9360+
9361+#endif
9362+
9363 #ifndef __ASSEMBLY__
9364
9365 extern int direct_gbpages;
9366@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9367 * dst and src can be on the same page, but the range must not overlap,
9368 * and must not cross a page boundary.
9369 */
9370-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9371+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9372 {
9373- memcpy(dst, src, count * sizeof(pgd_t));
9374+ pax_open_kernel();
9375+ while (count--)
9376+ *dst++ = *src++;
9377+ pax_close_kernel();
9378 }
9379
9380+#ifdef CONFIG_PAX_PER_CPU_PGD
9381+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9382+#endif
9383+
9384+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9385+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9386+#else
9387+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9388+#endif
9389
9390 #include <asm-generic/pgtable.h>
9391 #endif /* __ASSEMBLY__ */
9392diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9393index 0c92113..34a77c6 100644
9394--- a/arch/x86/include/asm/pgtable_32.h
9395+++ b/arch/x86/include/asm/pgtable_32.h
9396@@ -25,9 +25,6 @@
9397 struct mm_struct;
9398 struct vm_area_struct;
9399
9400-extern pgd_t swapper_pg_dir[1024];
9401-extern pgd_t initial_page_table[1024];
9402-
9403 static inline void pgtable_cache_init(void) { }
9404 static inline void check_pgt_cache(void) { }
9405 void paging_init(void);
9406@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9407 # include <asm/pgtable-2level.h>
9408 #endif
9409
9410+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9411+extern pgd_t initial_page_table[PTRS_PER_PGD];
9412+#ifdef CONFIG_X86_PAE
9413+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9414+#endif
9415+
9416 #if defined(CONFIG_HIGHPTE)
9417 #define pte_offset_map(dir, address) \
9418 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9419@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9420 /* Clear a kernel PTE and flush it from the TLB */
9421 #define kpte_clear_flush(ptep, vaddr) \
9422 do { \
9423+ pax_open_kernel(); \
9424 pte_clear(&init_mm, (vaddr), (ptep)); \
9425+ pax_close_kernel(); \
9426 __flush_tlb_one((vaddr)); \
9427 } while (0)
9428
9429@@ -74,6 +79,9 @@ do { \
9430
9431 #endif /* !__ASSEMBLY__ */
9432
9433+#define HAVE_ARCH_UNMAPPED_AREA
9434+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9435+
9436 /*
9437 * kern_addr_valid() is (1) for FLATMEM and (0) for
9438 * SPARSEMEM and DISCONTIGMEM
9439diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9440index ed5903b..c7fe163 100644
9441--- a/arch/x86/include/asm/pgtable_32_types.h
9442+++ b/arch/x86/include/asm/pgtable_32_types.h
9443@@ -8,7 +8,7 @@
9444 */
9445 #ifdef CONFIG_X86_PAE
9446 # include <asm/pgtable-3level_types.h>
9447-# define PMD_SIZE (1UL << PMD_SHIFT)
9448+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9449 # define PMD_MASK (~(PMD_SIZE - 1))
9450 #else
9451 # include <asm/pgtable-2level_types.h>
9452@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9453 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9454 #endif
9455
9456+#ifdef CONFIG_PAX_KERNEXEC
9457+#ifndef __ASSEMBLY__
9458+extern unsigned char MODULES_EXEC_VADDR[];
9459+extern unsigned char MODULES_EXEC_END[];
9460+#endif
9461+#include <asm/boot.h>
9462+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9463+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9464+#else
9465+#define ktla_ktva(addr) (addr)
9466+#define ktva_ktla(addr) (addr)
9467+#endif
9468+
9469 #define MODULES_VADDR VMALLOC_START
9470 #define MODULES_END VMALLOC_END
9471 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9472diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9473index 975f709..107976d 100644
9474--- a/arch/x86/include/asm/pgtable_64.h
9475+++ b/arch/x86/include/asm/pgtable_64.h
9476@@ -16,10 +16,14 @@
9477
9478 extern pud_t level3_kernel_pgt[512];
9479 extern pud_t level3_ident_pgt[512];
9480+extern pud_t level3_vmalloc_start_pgt[512];
9481+extern pud_t level3_vmalloc_end_pgt[512];
9482+extern pud_t level3_vmemmap_pgt[512];
9483+extern pud_t level2_vmemmap_pgt[512];
9484 extern pmd_t level2_kernel_pgt[512];
9485 extern pmd_t level2_fixmap_pgt[512];
9486-extern pmd_t level2_ident_pgt[512];
9487-extern pgd_t init_level4_pgt[];
9488+extern pmd_t level2_ident_pgt[512*2];
9489+extern pgd_t init_level4_pgt[512];
9490
9491 #define swapper_pg_dir init_level4_pgt
9492
9493@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9494
9495 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9496 {
9497+ pax_open_kernel();
9498 *pmdp = pmd;
9499+ pax_close_kernel();
9500 }
9501
9502 static inline void native_pmd_clear(pmd_t *pmd)
9503@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9504
9505 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9506 {
9507+ pax_open_kernel();
9508+ *pgdp = pgd;
9509+ pax_close_kernel();
9510+}
9511+
9512+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9513+{
9514 *pgdp = pgd;
9515 }
9516
9517diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9518index 766ea16..5b96cb3 100644
9519--- a/arch/x86/include/asm/pgtable_64_types.h
9520+++ b/arch/x86/include/asm/pgtable_64_types.h
9521@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9522 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9523 #define MODULES_END _AC(0xffffffffff000000, UL)
9524 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9525+#define MODULES_EXEC_VADDR MODULES_VADDR
9526+#define MODULES_EXEC_END MODULES_END
9527+
9528+#define ktla_ktva(addr) (addr)
9529+#define ktva_ktla(addr) (addr)
9530
9531 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9532diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9533index 013286a..8b42f4f 100644
9534--- a/arch/x86/include/asm/pgtable_types.h
9535+++ b/arch/x86/include/asm/pgtable_types.h
9536@@ -16,13 +16,12 @@
9537 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9538 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9539 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9540-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9541+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9542 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9543 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9544 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9545-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9546-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9547-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9548+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9549+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9550 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9551
9552 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9553@@ -40,7 +39,6 @@
9554 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9555 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9556 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9557-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9558 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9559 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9560 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9561@@ -57,8 +55,10 @@
9562
9563 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9564 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9565-#else
9566+#elif defined(CONFIG_KMEMCHECK)
9567 #define _PAGE_NX (_AT(pteval_t, 0))
9568+#else
9569+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9570 #endif
9571
9572 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9573@@ -96,6 +96,9 @@
9574 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9575 _PAGE_ACCESSED)
9576
9577+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9578+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9579+
9580 #define __PAGE_KERNEL_EXEC \
9581 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9582 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9583@@ -106,7 +109,7 @@
9584 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9585 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9586 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9587-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9588+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9589 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9590 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9591 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9592@@ -168,8 +171,8 @@
9593 * bits are combined, this will alow user to access the high address mapped
9594 * VDSO in the presence of CONFIG_COMPAT_VDSO
9595 */
9596-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9597-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9598+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9599+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9600 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9601 #endif
9602
9603@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9604 {
9605 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9606 }
9607+#endif
9608
9609+#if PAGETABLE_LEVELS == 3
9610+#include <asm-generic/pgtable-nopud.h>
9611+#endif
9612+
9613+#if PAGETABLE_LEVELS == 2
9614+#include <asm-generic/pgtable-nopmd.h>
9615+#endif
9616+
9617+#ifndef __ASSEMBLY__
9618 #if PAGETABLE_LEVELS > 3
9619 typedef struct { pudval_t pud; } pud_t;
9620
9621@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9622 return pud.pud;
9623 }
9624 #else
9625-#include <asm-generic/pgtable-nopud.h>
9626-
9627 static inline pudval_t native_pud_val(pud_t pud)
9628 {
9629 return native_pgd_val(pud.pgd);
9630@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9631 return pmd.pmd;
9632 }
9633 #else
9634-#include <asm-generic/pgtable-nopmd.h>
9635-
9636 static inline pmdval_t native_pmd_val(pmd_t pmd)
9637 {
9638 return native_pgd_val(pmd.pud.pgd);
9639@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9640
9641 extern pteval_t __supported_pte_mask;
9642 extern void set_nx(void);
9643-extern int nx_enabled;
9644
9645 #define pgprot_writecombine pgprot_writecombine
9646 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9647diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9648index 0d1171c..36571a9 100644
9649--- a/arch/x86/include/asm/processor.h
9650+++ b/arch/x86/include/asm/processor.h
9651@@ -266,7 +266,7 @@ struct tss_struct {
9652
9653 } ____cacheline_aligned;
9654
9655-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9656+extern struct tss_struct init_tss[NR_CPUS];
9657
9658 /*
9659 * Save the original ist values for checking stack pointers during debugging
9660@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x)
9661 */
9662 #define TASK_SIZE PAGE_OFFSET
9663 #define TASK_SIZE_MAX TASK_SIZE
9664+
9665+#ifdef CONFIG_PAX_SEGMEXEC
9666+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9667+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9668+#else
9669 #define STACK_TOP TASK_SIZE
9670-#define STACK_TOP_MAX STACK_TOP
9671+#endif
9672+
9673+#define STACK_TOP_MAX TASK_SIZE
9674
9675 #define INIT_THREAD { \
9676- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9677+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9678 .vm86_info = NULL, \
9679 .sysenter_cs = __KERNEL_CS, \
9680 .io_bitmap_ptr = NULL, \
9681@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x)
9682 */
9683 #define INIT_TSS { \
9684 .x86_tss = { \
9685- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9686+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9687 .ss0 = __KERNEL_DS, \
9688 .ss1 = __KERNEL_CS, \
9689 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9690@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x)
9691 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9692
9693 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9694-#define KSTK_TOP(info) \
9695-({ \
9696- unsigned long *__ptr = (unsigned long *)(info); \
9697- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9698-})
9699+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9700
9701 /*
9702 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9703@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9704 #define task_pt_regs(task) \
9705 ({ \
9706 struct pt_regs *__regs__; \
9707- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9708+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9709 __regs__ - 1; \
9710 })
9711
9712@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9713 /*
9714 * User space process size. 47bits minus one guard page.
9715 */
9716-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9717+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9718
9719 /* This decides where the kernel will search for a free chunk of vm
9720 * space during mmap's.
9721 */
9722 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9723- 0xc0000000 : 0xFFFFe000)
9724+ 0xc0000000 : 0xFFFFf000)
9725
9726 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9727 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9728@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9729 #define STACK_TOP_MAX TASK_SIZE_MAX
9730
9731 #define INIT_THREAD { \
9732- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9733+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9734 }
9735
9736 #define INIT_TSS { \
9737- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9738+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9739 }
9740
9741 /*
9742@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9743 */
9744 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9745
9746+#ifdef CONFIG_PAX_SEGMEXEC
9747+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9748+#endif
9749+
9750 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9751
9752 /* Get/set a process' ability to use the timestamp counter instruction */
9753diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9754index 3566454..4bdfb8c 100644
9755--- a/arch/x86/include/asm/ptrace.h
9756+++ b/arch/x86/include/asm/ptrace.h
9757@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9758 }
9759
9760 /*
9761- * user_mode_vm(regs) determines whether a register set came from user mode.
9762+ * user_mode(regs) determines whether a register set came from user mode.
9763 * This is true if V8086 mode was enabled OR if the register set was from
9764 * protected mode with RPL-3 CS value. This tricky test checks that with
9765 * one comparison. Many places in the kernel can bypass this full check
9766- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9767+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9768+ * be used.
9769 */
9770-static inline int user_mode(struct pt_regs *regs)
9771+static inline int user_mode_novm(struct pt_regs *regs)
9772 {
9773 #ifdef CONFIG_X86_32
9774 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9775 #else
9776- return !!(regs->cs & 3);
9777+ return !!(regs->cs & SEGMENT_RPL_MASK);
9778 #endif
9779 }
9780
9781-static inline int user_mode_vm(struct pt_regs *regs)
9782+static inline int user_mode(struct pt_regs *regs)
9783 {
9784 #ifdef CONFIG_X86_32
9785 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9786 USER_RPL;
9787 #else
9788- return user_mode(regs);
9789+ return user_mode_novm(regs);
9790 #endif
9791 }
9792
9793@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9794 #ifdef CONFIG_X86_64
9795 static inline bool user_64bit_mode(struct pt_regs *regs)
9796 {
9797+ unsigned long cs = regs->cs & 0xffff;
9798 #ifndef CONFIG_PARAVIRT
9799 /*
9800 * On non-paravirt systems, this is the only long mode CPL 3
9801 * selector. We do not allow long mode selectors in the LDT.
9802 */
9803- return regs->cs == __USER_CS;
9804+ return cs == __USER_CS;
9805 #else
9806 /* Headers are too twisted for this to go in paravirt.h. */
9807- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9808+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9809 #endif
9810 }
9811 #endif
9812diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9813index 3250e3d..20db631 100644
9814--- a/arch/x86/include/asm/reboot.h
9815+++ b/arch/x86/include/asm/reboot.h
9816@@ -6,19 +6,19 @@
9817 struct pt_regs;
9818
9819 struct machine_ops {
9820- void (*restart)(char *cmd);
9821- void (*halt)(void);
9822- void (*power_off)(void);
9823+ void (* __noreturn restart)(char *cmd);
9824+ void (* __noreturn halt)(void);
9825+ void (* __noreturn power_off)(void);
9826 void (*shutdown)(void);
9827 void (*crash_shutdown)(struct pt_regs *);
9828- void (*emergency_restart)(void);
9829-};
9830+ void (* __noreturn emergency_restart)(void);
9831+} __no_const;
9832
9833 extern struct machine_ops machine_ops;
9834
9835 void native_machine_crash_shutdown(struct pt_regs *regs);
9836 void native_machine_shutdown(void);
9837-void machine_real_restart(unsigned int type);
9838+void machine_real_restart(unsigned int type) __noreturn;
9839 /* These must match dispatch_table in reboot_32.S */
9840 #define MRR_BIOS 0
9841 #define MRR_APM 1
9842diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9843index df4cd32..27ae072 100644
9844--- a/arch/x86/include/asm/rwsem.h
9845+++ b/arch/x86/include/asm/rwsem.h
9846@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9847 {
9848 asm volatile("# beginning down_read\n\t"
9849 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9850+
9851+#ifdef CONFIG_PAX_REFCOUNT
9852+ "jno 0f\n"
9853+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9854+ "int $4\n0:\n"
9855+ _ASM_EXTABLE(0b, 0b)
9856+#endif
9857+
9858 /* adds 0x00000001 */
9859 " jns 1f\n"
9860 " call call_rwsem_down_read_failed\n"
9861@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9862 "1:\n\t"
9863 " mov %1,%2\n\t"
9864 " add %3,%2\n\t"
9865+
9866+#ifdef CONFIG_PAX_REFCOUNT
9867+ "jno 0f\n"
9868+ "sub %3,%2\n"
9869+ "int $4\n0:\n"
9870+ _ASM_EXTABLE(0b, 0b)
9871+#endif
9872+
9873 " jle 2f\n\t"
9874 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9875 " jnz 1b\n\t"
9876@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9877 long tmp;
9878 asm volatile("# beginning down_write\n\t"
9879 LOCK_PREFIX " xadd %1,(%2)\n\t"
9880+
9881+#ifdef CONFIG_PAX_REFCOUNT
9882+ "jno 0f\n"
9883+ "mov %1,(%2)\n"
9884+ "int $4\n0:\n"
9885+ _ASM_EXTABLE(0b, 0b)
9886+#endif
9887+
9888 /* adds 0xffff0001, returns the old value */
9889 " test %1,%1\n\t"
9890 /* was the count 0 before? */
9891@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9892 long tmp;
9893 asm volatile("# beginning __up_read\n\t"
9894 LOCK_PREFIX " xadd %1,(%2)\n\t"
9895+
9896+#ifdef CONFIG_PAX_REFCOUNT
9897+ "jno 0f\n"
9898+ "mov %1,(%2)\n"
9899+ "int $4\n0:\n"
9900+ _ASM_EXTABLE(0b, 0b)
9901+#endif
9902+
9903 /* subtracts 1, returns the old value */
9904 " jns 1f\n\t"
9905 " call call_rwsem_wake\n" /* expects old value in %edx */
9906@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9907 long tmp;
9908 asm volatile("# beginning __up_write\n\t"
9909 LOCK_PREFIX " xadd %1,(%2)\n\t"
9910+
9911+#ifdef CONFIG_PAX_REFCOUNT
9912+ "jno 0f\n"
9913+ "mov %1,(%2)\n"
9914+ "int $4\n0:\n"
9915+ _ASM_EXTABLE(0b, 0b)
9916+#endif
9917+
9918 /* subtracts 0xffff0001, returns the old value */
9919 " jns 1f\n\t"
9920 " call call_rwsem_wake\n" /* expects old value in %edx */
9921@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9922 {
9923 asm volatile("# beginning __downgrade_write\n\t"
9924 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9925+
9926+#ifdef CONFIG_PAX_REFCOUNT
9927+ "jno 0f\n"
9928+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9929+ "int $4\n0:\n"
9930+ _ASM_EXTABLE(0b, 0b)
9931+#endif
9932+
9933 /*
9934 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9935 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9936@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9937 */
9938 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9939 {
9940- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9941+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9942+
9943+#ifdef CONFIG_PAX_REFCOUNT
9944+ "jno 0f\n"
9945+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
9946+ "int $4\n0:\n"
9947+ _ASM_EXTABLE(0b, 0b)
9948+#endif
9949+
9950 : "+m" (sem->count)
9951 : "er" (delta));
9952 }
9953@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
9954 {
9955 long tmp = delta;
9956
9957- asm volatile(LOCK_PREFIX "xadd %0,%1"
9958+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9959+
9960+#ifdef CONFIG_PAX_REFCOUNT
9961+ "jno 0f\n"
9962+ "mov %0,%1\n"
9963+ "int $4\n0:\n"
9964+ _ASM_EXTABLE(0b, 0b)
9965+#endif
9966+
9967 : "+r" (tmp), "+m" (sem->count)
9968 : : "memory");
9969
9970diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
9971index 5e64171..f58957e 100644
9972--- a/arch/x86/include/asm/segment.h
9973+++ b/arch/x86/include/asm/segment.h
9974@@ -64,10 +64,15 @@
9975 * 26 - ESPFIX small SS
9976 * 27 - per-cpu [ offset to per-cpu data area ]
9977 * 28 - stack_canary-20 [ for stack protector ]
9978- * 29 - unused
9979- * 30 - unused
9980+ * 29 - PCI BIOS CS
9981+ * 30 - PCI BIOS DS
9982 * 31 - TSS for double fault handler
9983 */
9984+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9985+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9986+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9987+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9988+
9989 #define GDT_ENTRY_TLS_MIN 6
9990 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9991
9992@@ -79,6 +84,8 @@
9993
9994 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9995
9996+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9997+
9998 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9999
10000 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10001@@ -104,6 +111,12 @@
10002 #define __KERNEL_STACK_CANARY 0
10003 #endif
10004
10005+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10006+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10007+
10008+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10009+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10010+
10011 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10012
10013 /*
10014@@ -141,7 +154,7 @@
10015 */
10016
10017 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10018-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10019+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10020
10021
10022 #else
10023@@ -165,6 +178,8 @@
10024 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10025 #define __USER32_DS __USER_DS
10026
10027+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10028+
10029 #define GDT_ENTRY_TSS 8 /* needs two entries */
10030 #define GDT_ENTRY_LDT 10 /* needs two entries */
10031 #define GDT_ENTRY_TLS_MIN 12
10032@@ -185,6 +200,7 @@
10033 #endif
10034
10035 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10036+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10037 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10038 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10039 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10040diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10041index 73b11bc..d4a3b63 100644
10042--- a/arch/x86/include/asm/smp.h
10043+++ b/arch/x86/include/asm/smp.h
10044@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10045 /* cpus sharing the last level cache: */
10046 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10047 DECLARE_PER_CPU(u16, cpu_llc_id);
10048-DECLARE_PER_CPU(int, cpu_number);
10049+DECLARE_PER_CPU(unsigned int, cpu_number);
10050
10051 static inline struct cpumask *cpu_sibling_mask(int cpu)
10052 {
10053@@ -77,7 +77,7 @@ struct smp_ops {
10054
10055 void (*send_call_func_ipi)(const struct cpumask *mask);
10056 void (*send_call_func_single_ipi)(int cpu);
10057-};
10058+} __no_const;
10059
10060 /* Globals due to paravirt */
10061 extern void set_cpu_sibling_map(int cpu);
10062@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10063 extern int safe_smp_processor_id(void);
10064
10065 #elif defined(CONFIG_X86_64_SMP)
10066-#define raw_smp_processor_id() (percpu_read(cpu_number))
10067-
10068-#define stack_smp_processor_id() \
10069-({ \
10070- struct thread_info *ti; \
10071- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10072- ti->cpu; \
10073-})
10074+#define raw_smp_processor_id() (percpu_read(cpu_number))
10075+#define stack_smp_processor_id() raw_smp_processor_id()
10076 #define safe_smp_processor_id() smp_processor_id()
10077
10078 #endif
10079diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10080index ee67edf..49c796b 100644
10081--- a/arch/x86/include/asm/spinlock.h
10082+++ b/arch/x86/include/asm/spinlock.h
10083@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10084 static inline void arch_read_lock(arch_rwlock_t *rw)
10085 {
10086 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10087+
10088+#ifdef CONFIG_PAX_REFCOUNT
10089+ "jno 0f\n"
10090+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10091+ "int $4\n0:\n"
10092+ _ASM_EXTABLE(0b, 0b)
10093+#endif
10094+
10095 "jns 1f\n"
10096 "call __read_lock_failed\n\t"
10097 "1:\n"
10098@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10099 static inline void arch_write_lock(arch_rwlock_t *rw)
10100 {
10101 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10102+
10103+#ifdef CONFIG_PAX_REFCOUNT
10104+ "jno 0f\n"
10105+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10106+ "int $4\n0:\n"
10107+ _ASM_EXTABLE(0b, 0b)
10108+#endif
10109+
10110 "jz 1f\n"
10111 "call __write_lock_failed\n\t"
10112 "1:\n"
10113@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10114
10115 static inline void arch_read_unlock(arch_rwlock_t *rw)
10116 {
10117- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10118+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10119+
10120+#ifdef CONFIG_PAX_REFCOUNT
10121+ "jno 0f\n"
10122+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10123+ "int $4\n0:\n"
10124+ _ASM_EXTABLE(0b, 0b)
10125+#endif
10126+
10127 :"+m" (rw->lock) : : "memory");
10128 }
10129
10130 static inline void arch_write_unlock(arch_rwlock_t *rw)
10131 {
10132- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10133+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10134+
10135+#ifdef CONFIG_PAX_REFCOUNT
10136+ "jno 0f\n"
10137+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10138+ "int $4\n0:\n"
10139+ _ASM_EXTABLE(0b, 0b)
10140+#endif
10141+
10142 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10143 }
10144
10145diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10146index 1575177..cb23f52 100644
10147--- a/arch/x86/include/asm/stackprotector.h
10148+++ b/arch/x86/include/asm/stackprotector.h
10149@@ -48,7 +48,7 @@
10150 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10151 */
10152 #define GDT_STACK_CANARY_INIT \
10153- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10154+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10155
10156 /*
10157 * Initialize the stackprotector canary value.
10158@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10159
10160 static inline void load_stack_canary_segment(void)
10161 {
10162-#ifdef CONFIG_X86_32
10163+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10164 asm volatile ("mov %0, %%gs" : : "r" (0));
10165 #endif
10166 }
10167diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10168index 70bbe39..4ae2bd4 100644
10169--- a/arch/x86/include/asm/stacktrace.h
10170+++ b/arch/x86/include/asm/stacktrace.h
10171@@ -11,28 +11,20 @@
10172
10173 extern int kstack_depth_to_print;
10174
10175-struct thread_info;
10176+struct task_struct;
10177 struct stacktrace_ops;
10178
10179-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10180- unsigned long *stack,
10181- unsigned long bp,
10182- const struct stacktrace_ops *ops,
10183- void *data,
10184- unsigned long *end,
10185- int *graph);
10186+typedef unsigned long walk_stack_t(struct task_struct *task,
10187+ void *stack_start,
10188+ unsigned long *stack,
10189+ unsigned long bp,
10190+ const struct stacktrace_ops *ops,
10191+ void *data,
10192+ unsigned long *end,
10193+ int *graph);
10194
10195-extern unsigned long
10196-print_context_stack(struct thread_info *tinfo,
10197- unsigned long *stack, unsigned long bp,
10198- const struct stacktrace_ops *ops, void *data,
10199- unsigned long *end, int *graph);
10200-
10201-extern unsigned long
10202-print_context_stack_bp(struct thread_info *tinfo,
10203- unsigned long *stack, unsigned long bp,
10204- const struct stacktrace_ops *ops, void *data,
10205- unsigned long *end, int *graph);
10206+extern walk_stack_t print_context_stack;
10207+extern walk_stack_t print_context_stack_bp;
10208
10209 /* Generic stack tracer with callbacks */
10210
10211@@ -40,7 +32,7 @@ struct stacktrace_ops {
10212 void (*address)(void *data, unsigned long address, int reliable);
10213 /* On negative return stop dumping */
10214 int (*stack)(void *data, char *name);
10215- walk_stack_t walk_stack;
10216+ walk_stack_t *walk_stack;
10217 };
10218
10219 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10220diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10221index cb23852..2dde194 100644
10222--- a/arch/x86/include/asm/sys_ia32.h
10223+++ b/arch/x86/include/asm/sys_ia32.h
10224@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10225 compat_sigset_t __user *, unsigned int);
10226 asmlinkage long sys32_alarm(unsigned int);
10227
10228-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10229+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10230 asmlinkage long sys32_sysfs(int, u32, u32);
10231
10232 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10233diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10234index 2d2f01c..f985723 100644
10235--- a/arch/x86/include/asm/system.h
10236+++ b/arch/x86/include/asm/system.h
10237@@ -129,7 +129,7 @@ do { \
10238 "call __switch_to\n\t" \
10239 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10240 __switch_canary \
10241- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10242+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10243 "movq %%rax,%%rdi\n\t" \
10244 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10245 "jnz ret_from_fork\n\t" \
10246@@ -140,7 +140,7 @@ do { \
10247 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10248 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10249 [_tif_fork] "i" (_TIF_FORK), \
10250- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10251+ [thread_info] "m" (current_tinfo), \
10252 [current_task] "m" (current_task) \
10253 __switch_canary_iparam \
10254 : "memory", "cc" __EXTRA_CLOBBER)
10255@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10256 {
10257 unsigned long __limit;
10258 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10259- return __limit + 1;
10260+ return __limit;
10261 }
10262
10263 static inline void native_clts(void)
10264@@ -397,13 +397,13 @@ void enable_hlt(void);
10265
10266 void cpu_idle_wait(void);
10267
10268-extern unsigned long arch_align_stack(unsigned long sp);
10269+#define arch_align_stack(x) ((x) & ~0xfUL)
10270 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10271
10272 void default_idle(void);
10273 bool set_pm_idle_to_default(void);
10274
10275-void stop_this_cpu(void *dummy);
10276+void stop_this_cpu(void *dummy) __noreturn;
10277
10278 /*
10279 * Force strict CPU ordering.
10280diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10281index a1fe5c1..ee326d8 100644
10282--- a/arch/x86/include/asm/thread_info.h
10283+++ b/arch/x86/include/asm/thread_info.h
10284@@ -10,6 +10,7 @@
10285 #include <linux/compiler.h>
10286 #include <asm/page.h>
10287 #include <asm/types.h>
10288+#include <asm/percpu.h>
10289
10290 /*
10291 * low level task data that entry.S needs immediate access to
10292@@ -24,7 +25,6 @@ struct exec_domain;
10293 #include <linux/atomic.h>
10294
10295 struct thread_info {
10296- struct task_struct *task; /* main task structure */
10297 struct exec_domain *exec_domain; /* execution domain */
10298 __u32 flags; /* low level flags */
10299 __u32 status; /* thread synchronous flags */
10300@@ -34,18 +34,12 @@ struct thread_info {
10301 mm_segment_t addr_limit;
10302 struct restart_block restart_block;
10303 void __user *sysenter_return;
10304-#ifdef CONFIG_X86_32
10305- unsigned long previous_esp; /* ESP of the previous stack in
10306- case of nested (IRQ) stacks
10307- */
10308- __u8 supervisor_stack[0];
10309-#endif
10310+ unsigned long lowest_stack;
10311 int uaccess_err;
10312 };
10313
10314-#define INIT_THREAD_INFO(tsk) \
10315+#define INIT_THREAD_INFO \
10316 { \
10317- .task = &tsk, \
10318 .exec_domain = &default_exec_domain, \
10319 .flags = 0, \
10320 .cpu = 0, \
10321@@ -56,7 +50,7 @@ struct thread_info {
10322 }, \
10323 }
10324
10325-#define init_thread_info (init_thread_union.thread_info)
10326+#define init_thread_info (init_thread_union.stack)
10327 #define init_stack (init_thread_union.stack)
10328
10329 #else /* !__ASSEMBLY__ */
10330@@ -170,45 +164,40 @@ struct thread_info {
10331 ret; \
10332 })
10333
10334-#ifdef CONFIG_X86_32
10335-
10336-#define STACK_WARN (THREAD_SIZE/8)
10337-/*
10338- * macros/functions for gaining access to the thread information structure
10339- *
10340- * preempt_count needs to be 1 initially, until the scheduler is functional.
10341- */
10342-#ifndef __ASSEMBLY__
10343-
10344-
10345-/* how to get the current stack pointer from C */
10346-register unsigned long current_stack_pointer asm("esp") __used;
10347-
10348-/* how to get the thread information struct from C */
10349-static inline struct thread_info *current_thread_info(void)
10350-{
10351- return (struct thread_info *)
10352- (current_stack_pointer & ~(THREAD_SIZE - 1));
10353-}
10354-
10355-#else /* !__ASSEMBLY__ */
10356-
10357+#ifdef __ASSEMBLY__
10358 /* how to get the thread information struct from ASM */
10359 #define GET_THREAD_INFO(reg) \
10360- movl $-THREAD_SIZE, reg; \
10361- andl %esp, reg
10362+ mov PER_CPU_VAR(current_tinfo), reg
10363
10364 /* use this one if reg already contains %esp */
10365-#define GET_THREAD_INFO_WITH_ESP(reg) \
10366- andl $-THREAD_SIZE, reg
10367+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10368+#else
10369+/* how to get the thread information struct from C */
10370+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10371+
10372+static __always_inline struct thread_info *current_thread_info(void)
10373+{
10374+ return percpu_read_stable(current_tinfo);
10375+}
10376+#endif
10377+
10378+#ifdef CONFIG_X86_32
10379+
10380+#define STACK_WARN (THREAD_SIZE/8)
10381+/*
10382+ * macros/functions for gaining access to the thread information structure
10383+ *
10384+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10385+ */
10386+#ifndef __ASSEMBLY__
10387+
10388+/* how to get the current stack pointer from C */
10389+register unsigned long current_stack_pointer asm("esp") __used;
10390
10391 #endif
10392
10393 #else /* X86_32 */
10394
10395-#include <asm/percpu.h>
10396-#define KERNEL_STACK_OFFSET (5*8)
10397-
10398 /*
10399 * macros/functions for gaining access to the thread information structure
10400 * preempt_count needs to be 1 initially, until the scheduler is functional.
10401@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10402 #ifndef __ASSEMBLY__
10403 DECLARE_PER_CPU(unsigned long, kernel_stack);
10404
10405-static inline struct thread_info *current_thread_info(void)
10406-{
10407- struct thread_info *ti;
10408- ti = (void *)(percpu_read_stable(kernel_stack) +
10409- KERNEL_STACK_OFFSET - THREAD_SIZE);
10410- return ti;
10411-}
10412-
10413-#else /* !__ASSEMBLY__ */
10414-
10415-/* how to get the thread information struct from ASM */
10416-#define GET_THREAD_INFO(reg) \
10417- movq PER_CPU_VAR(kernel_stack),reg ; \
10418- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10419-
10420+/* how to get the current stack pointer from C */
10421+register unsigned long current_stack_pointer asm("rsp") __used;
10422 #endif
10423
10424 #endif /* !X86_32 */
10425@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10426 extern void free_thread_info(struct thread_info *ti);
10427 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10428 #define arch_task_cache_init arch_task_cache_init
10429+
10430+#define __HAVE_THREAD_FUNCTIONS
10431+#define task_thread_info(task) (&(task)->tinfo)
10432+#define task_stack_page(task) ((task)->stack)
10433+#define setup_thread_stack(p, org) do {} while (0)
10434+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10435+
10436+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10437+extern struct task_struct *alloc_task_struct_node(int node);
10438+extern void free_task_struct(struct task_struct *);
10439+
10440 #endif
10441 #endif /* _ASM_X86_THREAD_INFO_H */
10442diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10443index 36361bf..324f262 100644
10444--- a/arch/x86/include/asm/uaccess.h
10445+++ b/arch/x86/include/asm/uaccess.h
10446@@ -7,12 +7,15 @@
10447 #include <linux/compiler.h>
10448 #include <linux/thread_info.h>
10449 #include <linux/string.h>
10450+#include <linux/sched.h>
10451 #include <asm/asm.h>
10452 #include <asm/page.h>
10453
10454 #define VERIFY_READ 0
10455 #define VERIFY_WRITE 1
10456
10457+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10458+
10459 /*
10460 * The fs value determines whether argument validity checking should be
10461 * performed or not. If get_fs() == USER_DS, checking is performed, with
10462@@ -28,7 +31,12 @@
10463
10464 #define get_ds() (KERNEL_DS)
10465 #define get_fs() (current_thread_info()->addr_limit)
10466+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10467+void __set_fs(mm_segment_t x);
10468+void set_fs(mm_segment_t x);
10469+#else
10470 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10471+#endif
10472
10473 #define segment_eq(a, b) ((a).seg == (b).seg)
10474
10475@@ -76,7 +84,33 @@
10476 * checks that the pointer is in the user space range - after calling
10477 * this function, memory access functions may still return -EFAULT.
10478 */
10479-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10480+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10481+#define access_ok(type, addr, size) \
10482+({ \
10483+ long __size = size; \
10484+ unsigned long __addr = (unsigned long)addr; \
10485+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10486+ unsigned long __end_ao = __addr + __size - 1; \
10487+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10488+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10489+ while(__addr_ao <= __end_ao) { \
10490+ char __c_ao; \
10491+ __addr_ao += PAGE_SIZE; \
10492+ if (__size > PAGE_SIZE) \
10493+ cond_resched(); \
10494+ if (__get_user(__c_ao, (char __user *)__addr)) \
10495+ break; \
10496+ if (type != VERIFY_WRITE) { \
10497+ __addr = __addr_ao; \
10498+ continue; \
10499+ } \
10500+ if (__put_user(__c_ao, (char __user *)__addr)) \
10501+ break; \
10502+ __addr = __addr_ao; \
10503+ } \
10504+ } \
10505+ __ret_ao; \
10506+})
10507
10508 /*
10509 * The exception table consists of pairs of addresses: the first is the
10510@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10511 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10512 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10513
10514-
10515+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10516+#define __copyuser_seg "gs;"
10517+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10518+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10519+#else
10520+#define __copyuser_seg
10521+#define __COPYUSER_SET_ES
10522+#define __COPYUSER_RESTORE_ES
10523+#endif
10524
10525 #ifdef CONFIG_X86_32
10526 #define __put_user_asm_u64(x, addr, err, errret) \
10527- asm volatile("1: movl %%eax,0(%2)\n" \
10528- "2: movl %%edx,4(%2)\n" \
10529+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10530+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10531 "3:\n" \
10532 ".section .fixup,\"ax\"\n" \
10533 "4: movl %3,%0\n" \
10534@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10535 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10536
10537 #define __put_user_asm_ex_u64(x, addr) \
10538- asm volatile("1: movl %%eax,0(%1)\n" \
10539- "2: movl %%edx,4(%1)\n" \
10540+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10541+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10542 "3:\n" \
10543 _ASM_EXTABLE(1b, 2b - 1b) \
10544 _ASM_EXTABLE(2b, 3b - 2b) \
10545@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10546 __typeof__(*(ptr)) __pu_val; \
10547 __chk_user_ptr(ptr); \
10548 might_fault(); \
10549- __pu_val = x; \
10550+ __pu_val = (x); \
10551 switch (sizeof(*(ptr))) { \
10552 case 1: \
10553 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10554@@ -373,7 +415,7 @@ do { \
10555 } while (0)
10556
10557 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10558- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10559+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10560 "2:\n" \
10561 ".section .fixup,\"ax\"\n" \
10562 "3: mov %3,%0\n" \
10563@@ -381,7 +423,7 @@ do { \
10564 " jmp 2b\n" \
10565 ".previous\n" \
10566 _ASM_EXTABLE(1b, 3b) \
10567- : "=r" (err), ltype(x) \
10568+ : "=r" (err), ltype (x) \
10569 : "m" (__m(addr)), "i" (errret), "0" (err))
10570
10571 #define __get_user_size_ex(x, ptr, size) \
10572@@ -406,7 +448,7 @@ do { \
10573 } while (0)
10574
10575 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10576- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10577+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10578 "2:\n" \
10579 _ASM_EXTABLE(1b, 2b - 1b) \
10580 : ltype(x) : "m" (__m(addr)))
10581@@ -423,13 +465,24 @@ do { \
10582 int __gu_err; \
10583 unsigned long __gu_val; \
10584 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10585- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10586+ (x) = (__typeof__(*(ptr)))__gu_val; \
10587 __gu_err; \
10588 })
10589
10590 /* FIXME: this hack is definitely wrong -AK */
10591 struct __large_struct { unsigned long buf[100]; };
10592-#define __m(x) (*(struct __large_struct __user *)(x))
10593+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10594+#define ____m(x) \
10595+({ \
10596+ unsigned long ____x = (unsigned long)(x); \
10597+ if (____x < PAX_USER_SHADOW_BASE) \
10598+ ____x += PAX_USER_SHADOW_BASE; \
10599+ (void __user *)____x; \
10600+})
10601+#else
10602+#define ____m(x) (x)
10603+#endif
10604+#define __m(x) (*(struct __large_struct __user *)____m(x))
10605
10606 /*
10607 * Tell gcc we read from memory instead of writing: this is because
10608@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10609 * aliasing issues.
10610 */
10611 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10612- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10613+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10614 "2:\n" \
10615 ".section .fixup,\"ax\"\n" \
10616 "3: mov %3,%0\n" \
10617@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10618 ".previous\n" \
10619 _ASM_EXTABLE(1b, 3b) \
10620 : "=r"(err) \
10621- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10622+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10623
10624 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10625- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10626+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10627 "2:\n" \
10628 _ASM_EXTABLE(1b, 2b - 1b) \
10629 : : ltype(x), "m" (__m(addr)))
10630@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10631 * On error, the variable @x is set to zero.
10632 */
10633
10634+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10635+#define __get_user(x, ptr) get_user((x), (ptr))
10636+#else
10637 #define __get_user(x, ptr) \
10638 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10639+#endif
10640
10641 /**
10642 * __put_user: - Write a simple value into user space, with less checking.
10643@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10644 * Returns zero on success, or -EFAULT on error.
10645 */
10646
10647+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10648+#define __put_user(x, ptr) put_user((x), (ptr))
10649+#else
10650 #define __put_user(x, ptr) \
10651 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10652+#endif
10653
10654 #define __get_user_unaligned __get_user
10655 #define __put_user_unaligned __put_user
10656@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10657 #define get_user_ex(x, ptr) do { \
10658 unsigned long __gue_val; \
10659 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10660- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10661+ (x) = (__typeof__(*(ptr)))__gue_val; \
10662 } while (0)
10663
10664 #ifdef CONFIG_X86_WP_WORKS_OK
10665diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10666index 566e803..89f1e60 100644
10667--- a/arch/x86/include/asm/uaccess_32.h
10668+++ b/arch/x86/include/asm/uaccess_32.h
10669@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10670 static __always_inline unsigned long __must_check
10671 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10672 {
10673+ pax_track_stack();
10674+
10675+ if ((long)n < 0)
10676+ return n;
10677+
10678 if (__builtin_constant_p(n)) {
10679 unsigned long ret;
10680
10681@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10682 return ret;
10683 }
10684 }
10685+ if (!__builtin_constant_p(n))
10686+ check_object_size(from, n, true);
10687 return __copy_to_user_ll(to, from, n);
10688 }
10689
10690@@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check
10691 __copy_to_user(void __user *to, const void *from, unsigned long n)
10692 {
10693 might_fault();
10694+
10695 return __copy_to_user_inatomic(to, from, n);
10696 }
10697
10698 static __always_inline unsigned long
10699 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10700 {
10701+ if ((long)n < 0)
10702+ return n;
10703+
10704 /* Avoid zeroing the tail if the copy fails..
10705 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10706 * but as the zeroing behaviour is only significant when n is not
10707@@ -137,6 +148,12 @@ static __always_inline unsigned long
10708 __copy_from_user(void *to, const void __user *from, unsigned long n)
10709 {
10710 might_fault();
10711+
10712+ pax_track_stack();
10713+
10714+ if ((long)n < 0)
10715+ return n;
10716+
10717 if (__builtin_constant_p(n)) {
10718 unsigned long ret;
10719
10720@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10721 return ret;
10722 }
10723 }
10724+ if (!__builtin_constant_p(n))
10725+ check_object_size(to, n, false);
10726 return __copy_from_user_ll(to, from, n);
10727 }
10728
10729@@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10730 const void __user *from, unsigned long n)
10731 {
10732 might_fault();
10733+
10734+ if ((long)n < 0)
10735+ return n;
10736+
10737 if (__builtin_constant_p(n)) {
10738 unsigned long ret;
10739
10740@@ -181,15 +204,19 @@ static __always_inline unsigned long
10741 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10742 unsigned long n)
10743 {
10744- return __copy_from_user_ll_nocache_nozero(to, from, n);
10745+ if ((long)n < 0)
10746+ return n;
10747+
10748+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10749 }
10750
10751-unsigned long __must_check copy_to_user(void __user *to,
10752- const void *from, unsigned long n);
10753-unsigned long __must_check _copy_from_user(void *to,
10754- const void __user *from,
10755- unsigned long n);
10756-
10757+extern void copy_to_user_overflow(void)
10758+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10759+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10760+#else
10761+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10762+#endif
10763+;
10764
10765 extern void copy_from_user_overflow(void)
10766 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10767@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void)
10768 #endif
10769 ;
10770
10771-static inline unsigned long __must_check copy_from_user(void *to,
10772- const void __user *from,
10773- unsigned long n)
10774+/**
10775+ * copy_to_user: - Copy a block of data into user space.
10776+ * @to: Destination address, in user space.
10777+ * @from: Source address, in kernel space.
10778+ * @n: Number of bytes to copy.
10779+ *
10780+ * Context: User context only. This function may sleep.
10781+ *
10782+ * Copy data from kernel space to user space.
10783+ *
10784+ * Returns number of bytes that could not be copied.
10785+ * On success, this will be zero.
10786+ */
10787+static inline unsigned long __must_check
10788+copy_to_user(void __user *to, const void *from, unsigned long n)
10789+{
10790+ int sz = __compiletime_object_size(from);
10791+
10792+ if (unlikely(sz != -1 && sz < n))
10793+ copy_to_user_overflow();
10794+ else if (access_ok(VERIFY_WRITE, to, n))
10795+ n = __copy_to_user(to, from, n);
10796+ return n;
10797+}
10798+
10799+/**
10800+ * copy_from_user: - Copy a block of data from user space.
10801+ * @to: Destination address, in kernel space.
10802+ * @from: Source address, in user space.
10803+ * @n: Number of bytes to copy.
10804+ *
10805+ * Context: User context only. This function may sleep.
10806+ *
10807+ * Copy data from user space to kernel space.
10808+ *
10809+ * Returns number of bytes that could not be copied.
10810+ * On success, this will be zero.
10811+ *
10812+ * If some data could not be copied, this function will pad the copied
10813+ * data to the requested size using zero bytes.
10814+ */
10815+static inline unsigned long __must_check
10816+copy_from_user(void *to, const void __user *from, unsigned long n)
10817 {
10818 int sz = __compiletime_object_size(to);
10819
10820- if (likely(sz == -1 || sz >= n))
10821- n = _copy_from_user(to, from, n);
10822- else
10823+ if (unlikely(sz != -1 && sz < n))
10824 copy_from_user_overflow();
10825-
10826+ else if (access_ok(VERIFY_READ, from, n))
10827+ n = __copy_from_user(to, from, n);
10828+ else if ((long)n > 0) {
10829+ if (!__builtin_constant_p(n))
10830+ check_object_size(to, n, false);
10831+ memset(to, 0, n);
10832+ }
10833 return n;
10834 }
10835
10836diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10837index 1c66d30..59bd7d4 100644
10838--- a/arch/x86/include/asm/uaccess_64.h
10839+++ b/arch/x86/include/asm/uaccess_64.h
10840@@ -10,6 +10,9 @@
10841 #include <asm/alternative.h>
10842 #include <asm/cpufeature.h>
10843 #include <asm/page.h>
10844+#include <asm/pgtable.h>
10845+
10846+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10847
10848 /*
10849 * Copy To/From Userspace
10850@@ -17,12 +20,12 @@
10851
10852 /* Handles exceptions in both to and from, but doesn't do access_ok */
10853 __must_check unsigned long
10854-copy_user_generic_string(void *to, const void *from, unsigned len);
10855+copy_user_generic_string(void *to, const void *from, unsigned long len);
10856 __must_check unsigned long
10857-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10858+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10859
10860 static __always_inline __must_check unsigned long
10861-copy_user_generic(void *to, const void *from, unsigned len)
10862+copy_user_generic(void *to, const void *from, unsigned long len)
10863 {
10864 unsigned ret;
10865
10866@@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10867 return ret;
10868 }
10869
10870+static __always_inline __must_check unsigned long
10871+__copy_to_user(void __user *to, const void *from, unsigned long len);
10872+static __always_inline __must_check unsigned long
10873+__copy_from_user(void *to, const void __user *from, unsigned long len);
10874 __must_check unsigned long
10875-_copy_to_user(void __user *to, const void *from, unsigned len);
10876-__must_check unsigned long
10877-_copy_from_user(void *to, const void __user *from, unsigned len);
10878-__must_check unsigned long
10879-copy_in_user(void __user *to, const void __user *from, unsigned len);
10880+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10881
10882 static inline unsigned long __must_check copy_from_user(void *to,
10883 const void __user *from,
10884- unsigned long n)
10885+ unsigned n)
10886 {
10887- int sz = __compiletime_object_size(to);
10888-
10889 might_fault();
10890- if (likely(sz == -1 || sz >= n))
10891- n = _copy_from_user(to, from, n);
10892-#ifdef CONFIG_DEBUG_VM
10893- else
10894- WARN(1, "Buffer overflow detected!\n");
10895-#endif
10896+
10897+ if (access_ok(VERIFY_READ, from, n))
10898+ n = __copy_from_user(to, from, n);
10899+ else if (n < INT_MAX) {
10900+ if (!__builtin_constant_p(n))
10901+ check_object_size(to, n, false);
10902+ memset(to, 0, n);
10903+ }
10904 return n;
10905 }
10906
10907 static __always_inline __must_check
10908-int copy_to_user(void __user *dst, const void *src, unsigned size)
10909+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10910 {
10911 might_fault();
10912
10913- return _copy_to_user(dst, src, size);
10914+ if (access_ok(VERIFY_WRITE, dst, size))
10915+ size = __copy_to_user(dst, src, size);
10916+ return size;
10917 }
10918
10919 static __always_inline __must_check
10920-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10921+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10922 {
10923- int ret = 0;
10924+ int sz = __compiletime_object_size(dst);
10925+ unsigned ret = 0;
10926
10927 might_fault();
10928- if (!__builtin_constant_p(size))
10929- return copy_user_generic(dst, (__force void *)src, size);
10930+
10931+ pax_track_stack();
10932+
10933+ if (size > INT_MAX)
10934+ return size;
10935+
10936+#ifdef CONFIG_PAX_MEMORY_UDEREF
10937+ if (!__access_ok(VERIFY_READ, src, size))
10938+ return size;
10939+#endif
10940+
10941+ if (unlikely(sz != -1 && sz < size)) {
10942+#ifdef CONFIG_DEBUG_VM
10943+ WARN(1, "Buffer overflow detected!\n");
10944+#endif
10945+ return size;
10946+ }
10947+
10948+ if (!__builtin_constant_p(size)) {
10949+ check_object_size(dst, size, false);
10950+
10951+#ifdef CONFIG_PAX_MEMORY_UDEREF
10952+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10953+ src += PAX_USER_SHADOW_BASE;
10954+#endif
10955+
10956+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10957+ }
10958 switch (size) {
10959- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10960+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10961 ret, "b", "b", "=q", 1);
10962 return ret;
10963- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10964+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10965 ret, "w", "w", "=r", 2);
10966 return ret;
10967- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10968+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10969 ret, "l", "k", "=r", 4);
10970 return ret;
10971- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10972+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10973 ret, "q", "", "=r", 8);
10974 return ret;
10975 case 10:
10976- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10977+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10978 ret, "q", "", "=r", 10);
10979 if (unlikely(ret))
10980 return ret;
10981 __get_user_asm(*(u16 *)(8 + (char *)dst),
10982- (u16 __user *)(8 + (char __user *)src),
10983+ (const u16 __user *)(8 + (const char __user *)src),
10984 ret, "w", "w", "=r", 2);
10985 return ret;
10986 case 16:
10987- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10988+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10989 ret, "q", "", "=r", 16);
10990 if (unlikely(ret))
10991 return ret;
10992 __get_user_asm(*(u64 *)(8 + (char *)dst),
10993- (u64 __user *)(8 + (char __user *)src),
10994+ (const u64 __user *)(8 + (const char __user *)src),
10995 ret, "q", "", "=r", 8);
10996 return ret;
10997 default:
10998- return copy_user_generic(dst, (__force void *)src, size);
10999+
11000+#ifdef CONFIG_PAX_MEMORY_UDEREF
11001+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11002+ src += PAX_USER_SHADOW_BASE;
11003+#endif
11004+
11005+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11006 }
11007 }
11008
11009 static __always_inline __must_check
11010-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11011+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11012 {
11013- int ret = 0;
11014+ int sz = __compiletime_object_size(src);
11015+ unsigned ret = 0;
11016
11017 might_fault();
11018- if (!__builtin_constant_p(size))
11019- return copy_user_generic((__force void *)dst, src, size);
11020+
11021+ pax_track_stack();
11022+
11023+ if (size > INT_MAX)
11024+ return size;
11025+
11026+#ifdef CONFIG_PAX_MEMORY_UDEREF
11027+ if (!__access_ok(VERIFY_WRITE, dst, size))
11028+ return size;
11029+#endif
11030+
11031+ if (unlikely(sz != -1 && sz < size)) {
11032+#ifdef CONFIG_DEBUG_VM
11033+ WARN(1, "Buffer overflow detected!\n");
11034+#endif
11035+ return size;
11036+ }
11037+
11038+ if (!__builtin_constant_p(size)) {
11039+ check_object_size(src, size, true);
11040+
11041+#ifdef CONFIG_PAX_MEMORY_UDEREF
11042+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11043+ dst += PAX_USER_SHADOW_BASE;
11044+#endif
11045+
11046+ return copy_user_generic((__force_kernel void *)dst, src, size);
11047+ }
11048 switch (size) {
11049- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11050+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11051 ret, "b", "b", "iq", 1);
11052 return ret;
11053- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11054+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11055 ret, "w", "w", "ir", 2);
11056 return ret;
11057- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11058+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11059 ret, "l", "k", "ir", 4);
11060 return ret;
11061- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11062+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11063 ret, "q", "", "er", 8);
11064 return ret;
11065 case 10:
11066- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11067+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11068 ret, "q", "", "er", 10);
11069 if (unlikely(ret))
11070 return ret;
11071 asm("":::"memory");
11072- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11073+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11074 ret, "w", "w", "ir", 2);
11075 return ret;
11076 case 16:
11077- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11078+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11079 ret, "q", "", "er", 16);
11080 if (unlikely(ret))
11081 return ret;
11082 asm("":::"memory");
11083- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11084+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11085 ret, "q", "", "er", 8);
11086 return ret;
11087 default:
11088- return copy_user_generic((__force void *)dst, src, size);
11089+
11090+#ifdef CONFIG_PAX_MEMORY_UDEREF
11091+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11092+ dst += PAX_USER_SHADOW_BASE;
11093+#endif
11094+
11095+ return copy_user_generic((__force_kernel void *)dst, src, size);
11096 }
11097 }
11098
11099 static __always_inline __must_check
11100-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11101+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11102 {
11103- int ret = 0;
11104+ unsigned ret = 0;
11105
11106 might_fault();
11107- if (!__builtin_constant_p(size))
11108- return copy_user_generic((__force void *)dst,
11109- (__force void *)src, size);
11110+
11111+ if (size > INT_MAX)
11112+ return size;
11113+
11114+#ifdef CONFIG_PAX_MEMORY_UDEREF
11115+ if (!__access_ok(VERIFY_READ, src, size))
11116+ return size;
11117+ if (!__access_ok(VERIFY_WRITE, dst, size))
11118+ return size;
11119+#endif
11120+
11121+ if (!__builtin_constant_p(size)) {
11122+
11123+#ifdef CONFIG_PAX_MEMORY_UDEREF
11124+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11125+ src += PAX_USER_SHADOW_BASE;
11126+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11127+ dst += PAX_USER_SHADOW_BASE;
11128+#endif
11129+
11130+ return copy_user_generic((__force_kernel void *)dst,
11131+ (__force_kernel const void *)src, size);
11132+ }
11133 switch (size) {
11134 case 1: {
11135 u8 tmp;
11136- __get_user_asm(tmp, (u8 __user *)src,
11137+ __get_user_asm(tmp, (const u8 __user *)src,
11138 ret, "b", "b", "=q", 1);
11139 if (likely(!ret))
11140 __put_user_asm(tmp, (u8 __user *)dst,
11141@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11142 }
11143 case 2: {
11144 u16 tmp;
11145- __get_user_asm(tmp, (u16 __user *)src,
11146+ __get_user_asm(tmp, (const u16 __user *)src,
11147 ret, "w", "w", "=r", 2);
11148 if (likely(!ret))
11149 __put_user_asm(tmp, (u16 __user *)dst,
11150@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11151
11152 case 4: {
11153 u32 tmp;
11154- __get_user_asm(tmp, (u32 __user *)src,
11155+ __get_user_asm(tmp, (const u32 __user *)src,
11156 ret, "l", "k", "=r", 4);
11157 if (likely(!ret))
11158 __put_user_asm(tmp, (u32 __user *)dst,
11159@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11160 }
11161 case 8: {
11162 u64 tmp;
11163- __get_user_asm(tmp, (u64 __user *)src,
11164+ __get_user_asm(tmp, (const u64 __user *)src,
11165 ret, "q", "", "=r", 8);
11166 if (likely(!ret))
11167 __put_user_asm(tmp, (u64 __user *)dst,
11168@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11169 return ret;
11170 }
11171 default:
11172- return copy_user_generic((__force void *)dst,
11173- (__force void *)src, size);
11174+
11175+#ifdef CONFIG_PAX_MEMORY_UDEREF
11176+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11177+ src += PAX_USER_SHADOW_BASE;
11178+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11179+ dst += PAX_USER_SHADOW_BASE;
11180+#endif
11181+
11182+ return copy_user_generic((__force_kernel void *)dst,
11183+ (__force_kernel const void *)src, size);
11184 }
11185 }
11186
11187@@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11188 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11189
11190 static __must_check __always_inline int
11191-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11192+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11193 {
11194- return copy_user_generic(dst, (__force const void *)src, size);
11195+ pax_track_stack();
11196+
11197+ if (size > INT_MAX)
11198+ return size;
11199+
11200+#ifdef CONFIG_PAX_MEMORY_UDEREF
11201+ if (!__access_ok(VERIFY_READ, src, size))
11202+ return size;
11203+
11204+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11205+ src += PAX_USER_SHADOW_BASE;
11206+#endif
11207+
11208+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11209 }
11210
11211-static __must_check __always_inline int
11212-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11213+static __must_check __always_inline unsigned long
11214+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11215 {
11216- return copy_user_generic((__force void *)dst, src, size);
11217+ if (size > INT_MAX)
11218+ return size;
11219+
11220+#ifdef CONFIG_PAX_MEMORY_UDEREF
11221+ if (!__access_ok(VERIFY_WRITE, dst, size))
11222+ return size;
11223+
11224+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11225+ dst += PAX_USER_SHADOW_BASE;
11226+#endif
11227+
11228+ return copy_user_generic((__force_kernel void *)dst, src, size);
11229 }
11230
11231-extern long __copy_user_nocache(void *dst, const void __user *src,
11232- unsigned size, int zerorest);
11233+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11234+ unsigned long size, int zerorest);
11235
11236-static inline int
11237-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11238+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11239 {
11240 might_sleep();
11241+
11242+ if (size > INT_MAX)
11243+ return size;
11244+
11245+#ifdef CONFIG_PAX_MEMORY_UDEREF
11246+ if (!__access_ok(VERIFY_READ, src, size))
11247+ return size;
11248+#endif
11249+
11250 return __copy_user_nocache(dst, src, size, 1);
11251 }
11252
11253-static inline int
11254-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11255- unsigned size)
11256+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11257+ unsigned long size)
11258 {
11259+ if (size > INT_MAX)
11260+ return size;
11261+
11262+#ifdef CONFIG_PAX_MEMORY_UDEREF
11263+ if (!__access_ok(VERIFY_READ, src, size))
11264+ return size;
11265+#endif
11266+
11267 return __copy_user_nocache(dst, src, size, 0);
11268 }
11269
11270-unsigned long
11271-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11272+extern unsigned long
11273+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11274
11275 #endif /* _ASM_X86_UACCESS_64_H */
11276diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11277index bb05228..d763d5b 100644
11278--- a/arch/x86/include/asm/vdso.h
11279+++ b/arch/x86/include/asm/vdso.h
11280@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11281 #define VDSO32_SYMBOL(base, name) \
11282 ({ \
11283 extern const char VDSO32_##name[]; \
11284- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11285+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11286 })
11287 #endif
11288
11289diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11290index d3d8590..d296b5f 100644
11291--- a/arch/x86/include/asm/x86_init.h
11292+++ b/arch/x86/include/asm/x86_init.h
11293@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11294 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11295 void (*find_smp_config)(void);
11296 void (*get_smp_config)(unsigned int early);
11297-};
11298+} __no_const;
11299
11300 /**
11301 * struct x86_init_resources - platform specific resource related ops
11302@@ -42,7 +42,7 @@ struct x86_init_resources {
11303 void (*probe_roms)(void);
11304 void (*reserve_resources)(void);
11305 char *(*memory_setup)(void);
11306-};
11307+} __no_const;
11308
11309 /**
11310 * struct x86_init_irqs - platform specific interrupt setup
11311@@ -55,7 +55,7 @@ struct x86_init_irqs {
11312 void (*pre_vector_init)(void);
11313 void (*intr_init)(void);
11314 void (*trap_init)(void);
11315-};
11316+} __no_const;
11317
11318 /**
11319 * struct x86_init_oem - oem platform specific customizing functions
11320@@ -65,7 +65,7 @@ struct x86_init_irqs {
11321 struct x86_init_oem {
11322 void (*arch_setup)(void);
11323 void (*banner)(void);
11324-};
11325+} __no_const;
11326
11327 /**
11328 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11329@@ -76,7 +76,7 @@ struct x86_init_oem {
11330 */
11331 struct x86_init_mapping {
11332 void (*pagetable_reserve)(u64 start, u64 end);
11333-};
11334+} __no_const;
11335
11336 /**
11337 * struct x86_init_paging - platform specific paging functions
11338@@ -86,7 +86,7 @@ struct x86_init_mapping {
11339 struct x86_init_paging {
11340 void (*pagetable_setup_start)(pgd_t *base);
11341 void (*pagetable_setup_done)(pgd_t *base);
11342-};
11343+} __no_const;
11344
11345 /**
11346 * struct x86_init_timers - platform specific timer setup
11347@@ -101,7 +101,7 @@ struct x86_init_timers {
11348 void (*tsc_pre_init)(void);
11349 void (*timer_init)(void);
11350 void (*wallclock_init)(void);
11351-};
11352+} __no_const;
11353
11354 /**
11355 * struct x86_init_iommu - platform specific iommu setup
11356@@ -109,7 +109,7 @@ struct x86_init_timers {
11357 */
11358 struct x86_init_iommu {
11359 int (*iommu_init)(void);
11360-};
11361+} __no_const;
11362
11363 /**
11364 * struct x86_init_pci - platform specific pci init functions
11365@@ -123,7 +123,7 @@ struct x86_init_pci {
11366 int (*init)(void);
11367 void (*init_irq)(void);
11368 void (*fixup_irqs)(void);
11369-};
11370+} __no_const;
11371
11372 /**
11373 * struct x86_init_ops - functions for platform specific setup
11374@@ -139,7 +139,7 @@ struct x86_init_ops {
11375 struct x86_init_timers timers;
11376 struct x86_init_iommu iommu;
11377 struct x86_init_pci pci;
11378-};
11379+} __no_const;
11380
11381 /**
11382 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11383@@ -147,7 +147,7 @@ struct x86_init_ops {
11384 */
11385 struct x86_cpuinit_ops {
11386 void (*setup_percpu_clockev)(void);
11387-};
11388+} __no_const;
11389
11390 /**
11391 * struct x86_platform_ops - platform specific runtime functions
11392@@ -166,7 +166,7 @@ struct x86_platform_ops {
11393 bool (*is_untracked_pat_range)(u64 start, u64 end);
11394 void (*nmi_init)(void);
11395 int (*i8042_detect)(void);
11396-};
11397+} __no_const;
11398
11399 struct pci_dev;
11400
11401@@ -174,7 +174,7 @@ struct x86_msi_ops {
11402 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11403 void (*teardown_msi_irq)(unsigned int irq);
11404 void (*teardown_msi_irqs)(struct pci_dev *dev);
11405-};
11406+} __no_const;
11407
11408 extern struct x86_init_ops x86_init;
11409 extern struct x86_cpuinit_ops x86_cpuinit;
11410diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11411index c6ce245..ffbdab7 100644
11412--- a/arch/x86/include/asm/xsave.h
11413+++ b/arch/x86/include/asm/xsave.h
11414@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11415 {
11416 int err;
11417
11418+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11419+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11420+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11421+#endif
11422+
11423 /*
11424 * Clear the xsave header first, so that reserved fields are
11425 * initialized to zero.
11426@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11427 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11428 {
11429 int err;
11430- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11431+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11432 u32 lmask = mask;
11433 u32 hmask = mask >> 32;
11434
11435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11436+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11437+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11438+#endif
11439+
11440 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11441 "2:\n"
11442 ".section .fixup,\"ax\"\n"
11443diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11444index 6a564ac..9b1340c 100644
11445--- a/arch/x86/kernel/acpi/realmode/Makefile
11446+++ b/arch/x86/kernel/acpi/realmode/Makefile
11447@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11448 $(call cc-option, -fno-stack-protector) \
11449 $(call cc-option, -mpreferred-stack-boundary=2)
11450 KBUILD_CFLAGS += $(call cc-option, -m32)
11451+ifdef CONSTIFY_PLUGIN
11452+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11453+endif
11454 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11455 GCOV_PROFILE := n
11456
11457diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11458index b4fd836..4358fe3 100644
11459--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11460+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11461@@ -108,6 +108,9 @@ wakeup_code:
11462 /* Do any other stuff... */
11463
11464 #ifndef CONFIG_64BIT
11465+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11466+ call verify_cpu
11467+
11468 /* This could also be done in C code... */
11469 movl pmode_cr3, %eax
11470 movl %eax, %cr3
11471@@ -131,6 +134,7 @@ wakeup_code:
11472 movl pmode_cr0, %eax
11473 movl %eax, %cr0
11474 jmp pmode_return
11475+# include "../../verify_cpu.S"
11476 #else
11477 pushw $0
11478 pushw trampoline_segment
11479diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11480index 103b6ab..2004d0a 100644
11481--- a/arch/x86/kernel/acpi/sleep.c
11482+++ b/arch/x86/kernel/acpi/sleep.c
11483@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11484 header->trampoline_segment = trampoline_address() >> 4;
11485 #ifdef CONFIG_SMP
11486 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11487+
11488+ pax_open_kernel();
11489 early_gdt_descr.address =
11490 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11491+ pax_close_kernel();
11492+
11493 initial_gs = per_cpu_offset(smp_processor_id());
11494 #endif
11495 initial_code = (unsigned long)wakeup_long64;
11496diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11497index 13ab720..95d5442 100644
11498--- a/arch/x86/kernel/acpi/wakeup_32.S
11499+++ b/arch/x86/kernel/acpi/wakeup_32.S
11500@@ -30,13 +30,11 @@ wakeup_pmode_return:
11501 # and restore the stack ... but you need gdt for this to work
11502 movl saved_context_esp, %esp
11503
11504- movl %cs:saved_magic, %eax
11505- cmpl $0x12345678, %eax
11506+ cmpl $0x12345678, saved_magic
11507 jne bogus_magic
11508
11509 # jump to place where we left off
11510- movl saved_eip, %eax
11511- jmp *%eax
11512+ jmp *(saved_eip)
11513
11514 bogus_magic:
11515 jmp bogus_magic
11516diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11517index c638228..16dfa8d 100644
11518--- a/arch/x86/kernel/alternative.c
11519+++ b/arch/x86/kernel/alternative.c
11520@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11521 */
11522 for (a = start; a < end; a++) {
11523 instr = (u8 *)&a->instr_offset + a->instr_offset;
11524+
11525+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11526+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11527+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11528+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11529+#endif
11530+
11531 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11532 BUG_ON(a->replacementlen > a->instrlen);
11533 BUG_ON(a->instrlen > sizeof(insnbuf));
11534@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11535 for (poff = start; poff < end; poff++) {
11536 u8 *ptr = (u8 *)poff + *poff;
11537
11538+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11539+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11540+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11541+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11542+#endif
11543+
11544 if (!*poff || ptr < text || ptr >= text_end)
11545 continue;
11546 /* turn DS segment override prefix into lock prefix */
11547- if (*ptr == 0x3e)
11548+ if (*ktla_ktva(ptr) == 0x3e)
11549 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11550 };
11551 mutex_unlock(&text_mutex);
11552@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11553 for (poff = start; poff < end; poff++) {
11554 u8 *ptr = (u8 *)poff + *poff;
11555
11556+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11557+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11558+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11559+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11560+#endif
11561+
11562 if (!*poff || ptr < text || ptr >= text_end)
11563 continue;
11564 /* turn lock prefix into DS segment override prefix */
11565- if (*ptr == 0xf0)
11566+ if (*ktla_ktva(ptr) == 0xf0)
11567 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11568 };
11569 mutex_unlock(&text_mutex);
11570@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11571
11572 BUG_ON(p->len > MAX_PATCH_LEN);
11573 /* prep the buffer with the original instructions */
11574- memcpy(insnbuf, p->instr, p->len);
11575+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11576 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11577 (unsigned long)p->instr, p->len);
11578
11579@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11580 if (smp_alt_once)
11581 free_init_pages("SMP alternatives",
11582 (unsigned long)__smp_locks,
11583- (unsigned long)__smp_locks_end);
11584+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11585
11586 restart_nmi();
11587 }
11588@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11589 * instructions. And on the local CPU you need to be protected again NMI or MCE
11590 * handlers seeing an inconsistent instruction while you patch.
11591 */
11592-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11593+void *__kprobes text_poke_early(void *addr, const void *opcode,
11594 size_t len)
11595 {
11596 unsigned long flags;
11597 local_irq_save(flags);
11598- memcpy(addr, opcode, len);
11599+
11600+ pax_open_kernel();
11601+ memcpy(ktla_ktva(addr), opcode, len);
11602 sync_core();
11603+ pax_close_kernel();
11604+
11605 local_irq_restore(flags);
11606 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11607 that causes hangs on some VIA CPUs. */
11608@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11609 */
11610 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11611 {
11612- unsigned long flags;
11613- char *vaddr;
11614+ unsigned char *vaddr = ktla_ktva(addr);
11615 struct page *pages[2];
11616- int i;
11617+ size_t i;
11618
11619 if (!core_kernel_text((unsigned long)addr)) {
11620- pages[0] = vmalloc_to_page(addr);
11621- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11622+ pages[0] = vmalloc_to_page(vaddr);
11623+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11624 } else {
11625- pages[0] = virt_to_page(addr);
11626+ pages[0] = virt_to_page(vaddr);
11627 WARN_ON(!PageReserved(pages[0]));
11628- pages[1] = virt_to_page(addr + PAGE_SIZE);
11629+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11630 }
11631 BUG_ON(!pages[0]);
11632- local_irq_save(flags);
11633- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11634- if (pages[1])
11635- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11636- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11637- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11638- clear_fixmap(FIX_TEXT_POKE0);
11639- if (pages[1])
11640- clear_fixmap(FIX_TEXT_POKE1);
11641- local_flush_tlb();
11642- sync_core();
11643- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11644- that causes hangs on some VIA CPUs. */
11645+ text_poke_early(addr, opcode, len);
11646 for (i = 0; i < len; i++)
11647- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11648- local_irq_restore(flags);
11649+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11650 return addr;
11651 }
11652
11653diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11654index 52fa563..5de9d9c 100644
11655--- a/arch/x86/kernel/apic/apic.c
11656+++ b/arch/x86/kernel/apic/apic.c
11657@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11658 /*
11659 * Debug level, exported for io_apic.c
11660 */
11661-unsigned int apic_verbosity;
11662+int apic_verbosity;
11663
11664 int pic_mode;
11665
11666@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11667 apic_write(APIC_ESR, 0);
11668 v1 = apic_read(APIC_ESR);
11669 ack_APIC_irq();
11670- atomic_inc(&irq_err_count);
11671+ atomic_inc_unchecked(&irq_err_count);
11672
11673 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11674 smp_processor_id(), v0 , v1);
11675@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void)
11676 u16 *bios_cpu_apicid;
11677 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11678
11679+ pax_track_stack();
11680+
11681 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11682 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11683
11684diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11685index 8eb863e..32e6934 100644
11686--- a/arch/x86/kernel/apic/io_apic.c
11687+++ b/arch/x86/kernel/apic/io_apic.c
11688@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11689 }
11690 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11691
11692-void lock_vector_lock(void)
11693+void lock_vector_lock(void) __acquires(vector_lock)
11694 {
11695 /* Used to the online set of cpus does not change
11696 * during assign_irq_vector.
11697@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
11698 raw_spin_lock(&vector_lock);
11699 }
11700
11701-void unlock_vector_lock(void)
11702+void unlock_vector_lock(void) __releases(vector_lock)
11703 {
11704 raw_spin_unlock(&vector_lock);
11705 }
11706@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data)
11707 ack_APIC_irq();
11708 }
11709
11710-atomic_t irq_mis_count;
11711+atomic_unchecked_t irq_mis_count;
11712
11713 /*
11714 * IO-APIC versions below 0x20 don't support EOI register.
11715@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data)
11716 * at the cpu.
11717 */
11718 if (!(v & (1 << (i & 0x1f)))) {
11719- atomic_inc(&irq_mis_count);
11720+ atomic_inc_unchecked(&irq_mis_count);
11721
11722 eoi_ioapic_irq(irq, cfg);
11723 }
11724diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11725index 0371c48..54cdf63 100644
11726--- a/arch/x86/kernel/apm_32.c
11727+++ b/arch/x86/kernel/apm_32.c
11728@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11729 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11730 * even though they are called in protected mode.
11731 */
11732-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11733+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11734 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11735
11736 static const char driver_version[] = "1.16ac"; /* no spaces */
11737@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11738 BUG_ON(cpu != 0);
11739 gdt = get_cpu_gdt_table(cpu);
11740 save_desc_40 = gdt[0x40 / 8];
11741+
11742+ pax_open_kernel();
11743 gdt[0x40 / 8] = bad_bios_desc;
11744+ pax_close_kernel();
11745
11746 apm_irq_save(flags);
11747 APM_DO_SAVE_SEGS;
11748@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11749 &call->esi);
11750 APM_DO_RESTORE_SEGS;
11751 apm_irq_restore(flags);
11752+
11753+ pax_open_kernel();
11754 gdt[0x40 / 8] = save_desc_40;
11755+ pax_close_kernel();
11756+
11757 put_cpu();
11758
11759 return call->eax & 0xff;
11760@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call)
11761 BUG_ON(cpu != 0);
11762 gdt = get_cpu_gdt_table(cpu);
11763 save_desc_40 = gdt[0x40 / 8];
11764+
11765+ pax_open_kernel();
11766 gdt[0x40 / 8] = bad_bios_desc;
11767+ pax_close_kernel();
11768
11769 apm_irq_save(flags);
11770 APM_DO_SAVE_SEGS;
11771@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call)
11772 &call->eax);
11773 APM_DO_RESTORE_SEGS;
11774 apm_irq_restore(flags);
11775+
11776+ pax_open_kernel();
11777 gdt[0x40 / 8] = save_desc_40;
11778+ pax_close_kernel();
11779+
11780 put_cpu();
11781 return error;
11782 }
11783@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11784 * code to that CPU.
11785 */
11786 gdt = get_cpu_gdt_table(0);
11787+
11788+ pax_open_kernel();
11789 set_desc_base(&gdt[APM_CS >> 3],
11790 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11791 set_desc_base(&gdt[APM_CS_16 >> 3],
11792 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11793 set_desc_base(&gdt[APM_DS >> 3],
11794 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11795+ pax_close_kernel();
11796
11797 proc_create("apm", 0, NULL, &apm_file_ops);
11798
11799diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11800index 4f13faf..87db5d2 100644
11801--- a/arch/x86/kernel/asm-offsets.c
11802+++ b/arch/x86/kernel/asm-offsets.c
11803@@ -33,6 +33,8 @@ void common(void) {
11804 OFFSET(TI_status, thread_info, status);
11805 OFFSET(TI_addr_limit, thread_info, addr_limit);
11806 OFFSET(TI_preempt_count, thread_info, preempt_count);
11807+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11808+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11809
11810 BLANK();
11811 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11812@@ -53,8 +55,26 @@ void common(void) {
11813 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11814 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11815 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11816+
11817+#ifdef CONFIG_PAX_KERNEXEC
11818+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11819 #endif
11820
11821+#ifdef CONFIG_PAX_MEMORY_UDEREF
11822+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11823+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11824+#ifdef CONFIG_X86_64
11825+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11826+#endif
11827+#endif
11828+
11829+#endif
11830+
11831+ BLANK();
11832+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11833+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11834+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11835+
11836 #ifdef CONFIG_XEN
11837 BLANK();
11838 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11839diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11840index e72a119..6e2955d 100644
11841--- a/arch/x86/kernel/asm-offsets_64.c
11842+++ b/arch/x86/kernel/asm-offsets_64.c
11843@@ -69,6 +69,7 @@ int main(void)
11844 BLANK();
11845 #undef ENTRY
11846
11847+ DEFINE(TSS_size, sizeof(struct tss_struct));
11848 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11849 BLANK();
11850
11851diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11852index 6042981..e638266 100644
11853--- a/arch/x86/kernel/cpu/Makefile
11854+++ b/arch/x86/kernel/cpu/Makefile
11855@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11856 CFLAGS_REMOVE_perf_event.o = -pg
11857 endif
11858
11859-# Make sure load_percpu_segment has no stackprotector
11860-nostackp := $(call cc-option, -fno-stack-protector)
11861-CFLAGS_common.o := $(nostackp)
11862-
11863 obj-y := intel_cacheinfo.o scattered.o topology.o
11864 obj-y += proc.o capflags.o powerflags.o common.o
11865 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11866diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11867index b13ed39..603286c 100644
11868--- a/arch/x86/kernel/cpu/amd.c
11869+++ b/arch/x86/kernel/cpu/amd.c
11870@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11871 unsigned int size)
11872 {
11873 /* AMD errata T13 (order #21922) */
11874- if ((c->x86 == 6)) {
11875+ if (c->x86 == 6) {
11876 /* Duron Rev A0 */
11877 if (c->x86_model == 3 && c->x86_mask == 0)
11878 size = 64;
11879diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11880index 6218439..ab2e4ab 100644
11881--- a/arch/x86/kernel/cpu/common.c
11882+++ b/arch/x86/kernel/cpu/common.c
11883@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11884
11885 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11886
11887-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11888-#ifdef CONFIG_X86_64
11889- /*
11890- * We need valid kernel segments for data and code in long mode too
11891- * IRET will check the segment types kkeil 2000/10/28
11892- * Also sysret mandates a special GDT layout
11893- *
11894- * TLS descriptors are currently at a different place compared to i386.
11895- * Hopefully nobody expects them at a fixed place (Wine?)
11896- */
11897- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11898- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11899- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11900- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11901- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11902- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11903-#else
11904- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11905- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11906- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11907- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11908- /*
11909- * Segments used for calling PnP BIOS have byte granularity.
11910- * They code segments and data segments have fixed 64k limits,
11911- * the transfer segment sizes are set at run time.
11912- */
11913- /* 32-bit code */
11914- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11915- /* 16-bit code */
11916- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11917- /* 16-bit data */
11918- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11919- /* 16-bit data */
11920- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11921- /* 16-bit data */
11922- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11923- /*
11924- * The APM segments have byte granularity and their bases
11925- * are set at run time. All have 64k limits.
11926- */
11927- /* 32-bit code */
11928- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11929- /* 16-bit code */
11930- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11931- /* data */
11932- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11933-
11934- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11935- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11936- GDT_STACK_CANARY_INIT
11937-#endif
11938-} };
11939-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11940-
11941 static int __init x86_xsave_setup(char *s)
11942 {
11943 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11944@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11945 {
11946 struct desc_ptr gdt_descr;
11947
11948- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11949+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11950 gdt_descr.size = GDT_SIZE - 1;
11951 load_gdt(&gdt_descr);
11952 /* Reload the per-cpu base */
11953@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
11954 /* Filter out anything that depends on CPUID levels we don't have */
11955 filter_cpuid_features(c, true);
11956
11957+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
11958+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11959+#endif
11960+
11961 /* If the model name is still unset, do table lookup. */
11962 if (!c->x86_model_id[0]) {
11963 const char *p;
11964@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
11965 }
11966 __setup("clearcpuid=", setup_disablecpuid);
11967
11968+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11969+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11970+
11971 #ifdef CONFIG_X86_64
11972 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11973
11974@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
11975 EXPORT_PER_CPU_SYMBOL(current_task);
11976
11977 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11978- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11979+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11980 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11981
11982 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11983@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
11984 {
11985 memset(regs, 0, sizeof(struct pt_regs));
11986 regs->fs = __KERNEL_PERCPU;
11987- regs->gs = __KERNEL_STACK_CANARY;
11988+ savesegment(gs, regs->gs);
11989
11990 return regs;
11991 }
11992@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11993 int i;
11994
11995 cpu = stack_smp_processor_id();
11996- t = &per_cpu(init_tss, cpu);
11997+ t = init_tss + cpu;
11998 oist = &per_cpu(orig_ist, cpu);
11999
12000 #ifdef CONFIG_NUMA
12001@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
12002 switch_to_new_gdt(cpu);
12003 loadsegment(fs, 0);
12004
12005- load_idt((const struct desc_ptr *)&idt_descr);
12006+ load_idt(&idt_descr);
12007
12008 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12009 syscall_init();
12010@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
12011 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12012 barrier();
12013
12014- x86_configure_nx();
12015 if (cpu != 0)
12016 enable_x2apic();
12017
12018@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
12019 {
12020 int cpu = smp_processor_id();
12021 struct task_struct *curr = current;
12022- struct tss_struct *t = &per_cpu(init_tss, cpu);
12023+ struct tss_struct *t = init_tss + cpu;
12024 struct thread_struct *thread = &curr->thread;
12025
12026 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12027diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12028index ed6086e..a1dcf29 100644
12029--- a/arch/x86/kernel/cpu/intel.c
12030+++ b/arch/x86/kernel/cpu/intel.c
12031@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12032 * Update the IDT descriptor and reload the IDT so that
12033 * it uses the read-only mapped virtual address.
12034 */
12035- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12036+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12037 load_idt(&idt_descr);
12038 }
12039 #endif
12040diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12041index 0ed633c..82cef2a 100644
12042--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
12043+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12044@@ -215,7 +215,9 @@ static int inject_init(void)
12045 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
12046 return -ENOMEM;
12047 printk(KERN_INFO "Machine check injector initialized\n");
12048- mce_chrdev_ops.write = mce_write;
12049+ pax_open_kernel();
12050+ *(void **)&mce_chrdev_ops.write = mce_write;
12051+ pax_close_kernel();
12052 register_die_notifier(&mce_raise_nb);
12053 return 0;
12054 }
12055diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12056index 08363b0..ee26113 100644
12057--- a/arch/x86/kernel/cpu/mcheck/mce.c
12058+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12059@@ -42,6 +42,7 @@
12060 #include <asm/processor.h>
12061 #include <asm/mce.h>
12062 #include <asm/msr.h>
12063+#include <asm/local.h>
12064
12065 #include "mce-internal.h"
12066
12067@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
12068 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12069 m->cs, m->ip);
12070
12071- if (m->cs == __KERNEL_CS)
12072+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12073 print_symbol("{%s}", m->ip);
12074 pr_cont("\n");
12075 }
12076@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
12077
12078 #define PANIC_TIMEOUT 5 /* 5 seconds */
12079
12080-static atomic_t mce_paniced;
12081+static atomic_unchecked_t mce_paniced;
12082
12083 static int fake_panic;
12084-static atomic_t mce_fake_paniced;
12085+static atomic_unchecked_t mce_fake_paniced;
12086
12087 /* Panic in progress. Enable interrupts and wait for final IPI */
12088 static void wait_for_panic(void)
12089@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12090 /*
12091 * Make sure only one CPU runs in machine check panic
12092 */
12093- if (atomic_inc_return(&mce_paniced) > 1)
12094+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12095 wait_for_panic();
12096 barrier();
12097
12098@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12099 console_verbose();
12100 } else {
12101 /* Don't log too much for fake panic */
12102- if (atomic_inc_return(&mce_fake_paniced) > 1)
12103+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12104 return;
12105 }
12106 /* First print corrected ones that are still unlogged */
12107@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12108 * might have been modified by someone else.
12109 */
12110 rmb();
12111- if (atomic_read(&mce_paniced))
12112+ if (atomic_read_unchecked(&mce_paniced))
12113 wait_for_panic();
12114 if (!monarch_timeout)
12115 goto out;
12116@@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12117 }
12118
12119 /* Call the installed machine check handler for this CPU setup. */
12120-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12121+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12122 unexpected_machine_check;
12123
12124 /*
12125@@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12126 return;
12127 }
12128
12129+ pax_open_kernel();
12130 machine_check_vector = do_machine_check;
12131+ pax_close_kernel();
12132
12133 __mcheck_cpu_init_generic();
12134 __mcheck_cpu_init_vendor(c);
12135@@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12136 */
12137
12138 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12139-static int mce_chrdev_open_count; /* #times opened */
12140+static local_t mce_chrdev_open_count; /* #times opened */
12141 static int mce_chrdev_open_exclu; /* already open exclusive? */
12142
12143 static int mce_chrdev_open(struct inode *inode, struct file *file)
12144@@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12145 spin_lock(&mce_chrdev_state_lock);
12146
12147 if (mce_chrdev_open_exclu ||
12148- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12149+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12150 spin_unlock(&mce_chrdev_state_lock);
12151
12152 return -EBUSY;
12153@@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12154
12155 if (file->f_flags & O_EXCL)
12156 mce_chrdev_open_exclu = 1;
12157- mce_chrdev_open_count++;
12158+ local_inc(&mce_chrdev_open_count);
12159
12160 spin_unlock(&mce_chrdev_state_lock);
12161
12162@@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12163 {
12164 spin_lock(&mce_chrdev_state_lock);
12165
12166- mce_chrdev_open_count--;
12167+ local_dec(&mce_chrdev_open_count);
12168 mce_chrdev_open_exclu = 0;
12169
12170 spin_unlock(&mce_chrdev_state_lock);
12171@@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
12172 static void mce_reset(void)
12173 {
12174 cpu_missing = 0;
12175- atomic_set(&mce_fake_paniced, 0);
12176+ atomic_set_unchecked(&mce_fake_paniced, 0);
12177 atomic_set(&mce_executing, 0);
12178 atomic_set(&mce_callin, 0);
12179 atomic_set(&global_nwo, 0);
12180diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12181index 5c0e653..1e82c7c 100644
12182--- a/arch/x86/kernel/cpu/mcheck/p5.c
12183+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12184@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12185 if (!cpu_has(c, X86_FEATURE_MCE))
12186 return;
12187
12188+ pax_open_kernel();
12189 machine_check_vector = pentium_machine_check;
12190+ pax_close_kernel();
12191 /* Make sure the vector pointer is visible before we enable MCEs: */
12192 wmb();
12193
12194diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12195index 54060f5..e6ba93d 100644
12196--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12197+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12198@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12199 {
12200 u32 lo, hi;
12201
12202+ pax_open_kernel();
12203 machine_check_vector = winchip_machine_check;
12204+ pax_close_kernel();
12205 /* Make sure the vector pointer is visible before we enable MCEs: */
12206 wmb();
12207
12208diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12209index 6b96110..0da73eb 100644
12210--- a/arch/x86/kernel/cpu/mtrr/main.c
12211+++ b/arch/x86/kernel/cpu/mtrr/main.c
12212@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12213 u64 size_or_mask, size_and_mask;
12214 static bool mtrr_aps_delayed_init;
12215
12216-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12217+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12218
12219 const struct mtrr_ops *mtrr_if;
12220
12221diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12222index df5e41f..816c719 100644
12223--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12224+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12225@@ -25,7 +25,7 @@ struct mtrr_ops {
12226 int (*validate_add_page)(unsigned long base, unsigned long size,
12227 unsigned int type);
12228 int (*have_wrcomb)(void);
12229-};
12230+} __do_const;
12231
12232 extern int generic_get_free_region(unsigned long base, unsigned long size,
12233 int replace_reg);
12234diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12235index cfa62ec..9250dd7 100644
12236--- a/arch/x86/kernel/cpu/perf_event.c
12237+++ b/arch/x86/kernel/cpu/perf_event.c
12238@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
12239 int i, j, w, wmax, num = 0;
12240 struct hw_perf_event *hwc;
12241
12242+ pax_track_stack();
12243+
12244 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
12245
12246 for (i = 0; i < n; i++) {
12247@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12248 break;
12249
12250 perf_callchain_store(entry, frame.return_address);
12251- fp = frame.next_frame;
12252+ fp = (const void __force_user *)frame.next_frame;
12253 }
12254 }
12255
12256diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12257index 764c7c2..c5d9c7b 100644
12258--- a/arch/x86/kernel/crash.c
12259+++ b/arch/x86/kernel/crash.c
12260@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
12261 regs = args->regs;
12262
12263 #ifdef CONFIG_X86_32
12264- if (!user_mode_vm(regs)) {
12265+ if (!user_mode(regs)) {
12266 crash_fixup_ss_esp(&fixed_regs, regs);
12267 regs = &fixed_regs;
12268 }
12269diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12270index 37250fe..bf2ec74 100644
12271--- a/arch/x86/kernel/doublefault_32.c
12272+++ b/arch/x86/kernel/doublefault_32.c
12273@@ -11,7 +11,7 @@
12274
12275 #define DOUBLEFAULT_STACKSIZE (1024)
12276 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12277-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12278+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12279
12280 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12281
12282@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12283 unsigned long gdt, tss;
12284
12285 store_gdt(&gdt_desc);
12286- gdt = gdt_desc.address;
12287+ gdt = (unsigned long)gdt_desc.address;
12288
12289 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12290
12291@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12292 /* 0x2 bit is always set */
12293 .flags = X86_EFLAGS_SF | 0x2,
12294 .sp = STACK_START,
12295- .es = __USER_DS,
12296+ .es = __KERNEL_DS,
12297 .cs = __KERNEL_CS,
12298 .ss = __KERNEL_DS,
12299- .ds = __USER_DS,
12300+ .ds = __KERNEL_DS,
12301 .fs = __KERNEL_PERCPU,
12302
12303 .__cr3 = __pa_nodebug(swapper_pg_dir),
12304diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12305index 1aae78f..aab3a3d 100644
12306--- a/arch/x86/kernel/dumpstack.c
12307+++ b/arch/x86/kernel/dumpstack.c
12308@@ -2,6 +2,9 @@
12309 * Copyright (C) 1991, 1992 Linus Torvalds
12310 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12311 */
12312+#ifdef CONFIG_GRKERNSEC_HIDESYM
12313+#define __INCLUDED_BY_HIDESYM 1
12314+#endif
12315 #include <linux/kallsyms.h>
12316 #include <linux/kprobes.h>
12317 #include <linux/uaccess.h>
12318@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12319 static void
12320 print_ftrace_graph_addr(unsigned long addr, void *data,
12321 const struct stacktrace_ops *ops,
12322- struct thread_info *tinfo, int *graph)
12323+ struct task_struct *task, int *graph)
12324 {
12325- struct task_struct *task = tinfo->task;
12326 unsigned long ret_addr;
12327 int index = task->curr_ret_stack;
12328
12329@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12330 static inline void
12331 print_ftrace_graph_addr(unsigned long addr, void *data,
12332 const struct stacktrace_ops *ops,
12333- struct thread_info *tinfo, int *graph)
12334+ struct task_struct *task, int *graph)
12335 { }
12336 #endif
12337
12338@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12339 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12340 */
12341
12342-static inline int valid_stack_ptr(struct thread_info *tinfo,
12343- void *p, unsigned int size, void *end)
12344+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12345 {
12346- void *t = tinfo;
12347 if (end) {
12348 if (p < end && p >= (end-THREAD_SIZE))
12349 return 1;
12350@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12351 }
12352
12353 unsigned long
12354-print_context_stack(struct thread_info *tinfo,
12355+print_context_stack(struct task_struct *task, void *stack_start,
12356 unsigned long *stack, unsigned long bp,
12357 const struct stacktrace_ops *ops, void *data,
12358 unsigned long *end, int *graph)
12359 {
12360 struct stack_frame *frame = (struct stack_frame *)bp;
12361
12362- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12363+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12364 unsigned long addr;
12365
12366 addr = *stack;
12367@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12368 } else {
12369 ops->address(data, addr, 0);
12370 }
12371- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12372+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12373 }
12374 stack++;
12375 }
12376@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12377 EXPORT_SYMBOL_GPL(print_context_stack);
12378
12379 unsigned long
12380-print_context_stack_bp(struct thread_info *tinfo,
12381+print_context_stack_bp(struct task_struct *task, void *stack_start,
12382 unsigned long *stack, unsigned long bp,
12383 const struct stacktrace_ops *ops, void *data,
12384 unsigned long *end, int *graph)
12385@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12386 struct stack_frame *frame = (struct stack_frame *)bp;
12387 unsigned long *ret_addr = &frame->return_address;
12388
12389- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12390+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12391 unsigned long addr = *ret_addr;
12392
12393 if (!__kernel_text_address(addr))
12394@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12395 ops->address(data, addr, 1);
12396 frame = frame->next_frame;
12397 ret_addr = &frame->return_address;
12398- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12399+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12400 }
12401
12402 return (unsigned long)frame;
12403@@ -186,7 +186,7 @@ void dump_stack(void)
12404
12405 bp = stack_frame(current, NULL);
12406 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12407- current->pid, current->comm, print_tainted(),
12408+ task_pid_nr(current), current->comm, print_tainted(),
12409 init_utsname()->release,
12410 (int)strcspn(init_utsname()->version, " "),
12411 init_utsname()->version);
12412@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12413 }
12414 EXPORT_SYMBOL_GPL(oops_begin);
12415
12416+extern void gr_handle_kernel_exploit(void);
12417+
12418 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12419 {
12420 if (regs && kexec_should_crash(current))
12421@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12422 panic("Fatal exception in interrupt");
12423 if (panic_on_oops)
12424 panic("Fatal exception");
12425- do_exit(signr);
12426+
12427+ gr_handle_kernel_exploit();
12428+
12429+ do_group_exit(signr);
12430 }
12431
12432 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12433@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12434
12435 show_registers(regs);
12436 #ifdef CONFIG_X86_32
12437- if (user_mode_vm(regs)) {
12438+ if (user_mode(regs)) {
12439 sp = regs->sp;
12440 ss = regs->ss & 0xffff;
12441 } else {
12442@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12443 unsigned long flags = oops_begin();
12444 int sig = SIGSEGV;
12445
12446- if (!user_mode_vm(regs))
12447+ if (!user_mode(regs))
12448 report_bug(regs->ip, regs);
12449
12450 if (__die(str, regs, err))
12451diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12452index 3b97a80..667ce7a 100644
12453--- a/arch/x86/kernel/dumpstack_32.c
12454+++ b/arch/x86/kernel/dumpstack_32.c
12455@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12456 bp = stack_frame(task, regs);
12457
12458 for (;;) {
12459- struct thread_info *context;
12460+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12461
12462- context = (struct thread_info *)
12463- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12464- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12465+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12466
12467- stack = (unsigned long *)context->previous_esp;
12468- if (!stack)
12469+ if (stack_start == task_stack_page(task))
12470 break;
12471+ stack = *(unsigned long **)stack_start;
12472 if (ops->stack(data, "IRQ") < 0)
12473 break;
12474 touch_nmi_watchdog();
12475@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12476 * When in-kernel, we also print out the stack and code at the
12477 * time of the fault..
12478 */
12479- if (!user_mode_vm(regs)) {
12480+ if (!user_mode(regs)) {
12481 unsigned int code_prologue = code_bytes * 43 / 64;
12482 unsigned int code_len = code_bytes;
12483 unsigned char c;
12484 u8 *ip;
12485+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12486
12487 printk(KERN_EMERG "Stack:\n");
12488 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12489
12490 printk(KERN_EMERG "Code: ");
12491
12492- ip = (u8 *)regs->ip - code_prologue;
12493+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12494 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12495 /* try starting at IP */
12496- ip = (u8 *)regs->ip;
12497+ ip = (u8 *)regs->ip + cs_base;
12498 code_len = code_len - code_prologue + 1;
12499 }
12500 for (i = 0; i < code_len; i++, ip++) {
12501@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12502 printk(" Bad EIP value.");
12503 break;
12504 }
12505- if (ip == (u8 *)regs->ip)
12506+ if (ip == (u8 *)regs->ip + cs_base)
12507 printk("<%02x> ", c);
12508 else
12509 printk("%02x ", c);
12510@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12511 {
12512 unsigned short ud2;
12513
12514+ ip = ktla_ktva(ip);
12515 if (ip < PAGE_OFFSET)
12516 return 0;
12517 if (probe_kernel_address((unsigned short *)ip, ud2))
12518@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12519
12520 return ud2 == 0x0b0f;
12521 }
12522+
12523+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12524+void pax_check_alloca(unsigned long size)
12525+{
12526+ unsigned long sp = (unsigned long)&sp, stack_left;
12527+
12528+ /* all kernel stacks are of the same size */
12529+ stack_left = sp & (THREAD_SIZE - 1);
12530+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12531+}
12532+EXPORT_SYMBOL(pax_check_alloca);
12533+#endif
12534diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12535index 19853ad..508ca79 100644
12536--- a/arch/x86/kernel/dumpstack_64.c
12537+++ b/arch/x86/kernel/dumpstack_64.c
12538@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12539 unsigned long *irq_stack_end =
12540 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12541 unsigned used = 0;
12542- struct thread_info *tinfo;
12543 int graph = 0;
12544 unsigned long dummy;
12545+ void *stack_start;
12546
12547 if (!task)
12548 task = current;
12549@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12550 * current stack address. If the stacks consist of nested
12551 * exceptions
12552 */
12553- tinfo = task_thread_info(task);
12554 for (;;) {
12555 char *id;
12556 unsigned long *estack_end;
12557+
12558 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12559 &used, &id);
12560
12561@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12562 if (ops->stack(data, id) < 0)
12563 break;
12564
12565- bp = ops->walk_stack(tinfo, stack, bp, ops,
12566+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12567 data, estack_end, &graph);
12568 ops->stack(data, "<EOE>");
12569 /*
12570@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12571 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12572 if (ops->stack(data, "IRQ") < 0)
12573 break;
12574- bp = ops->walk_stack(tinfo, stack, bp,
12575+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12576 ops, data, irq_stack_end, &graph);
12577 /*
12578 * We link to the next stack (which would be
12579@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12580 /*
12581 * This handles the process stack:
12582 */
12583- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12584+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12585+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12586 put_cpu();
12587 }
12588 EXPORT_SYMBOL(dump_trace);
12589@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12590
12591 return ud2 == 0x0b0f;
12592 }
12593+
12594+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12595+void pax_check_alloca(unsigned long size)
12596+{
12597+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12598+ unsigned cpu, used;
12599+ char *id;
12600+
12601+ /* check the process stack first */
12602+ stack_start = (unsigned long)task_stack_page(current);
12603+ stack_end = stack_start + THREAD_SIZE;
12604+ if (likely(stack_start <= sp && sp < stack_end)) {
12605+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12606+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12607+ return;
12608+ }
12609+
12610+ cpu = get_cpu();
12611+
12612+ /* check the irq stacks */
12613+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12614+ stack_start = stack_end - IRQ_STACK_SIZE;
12615+ if (stack_start <= sp && sp < stack_end) {
12616+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12617+ put_cpu();
12618+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12619+ return;
12620+ }
12621+
12622+ /* check the exception stacks */
12623+ used = 0;
12624+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12625+ stack_start = stack_end - EXCEPTION_STKSZ;
12626+ if (stack_end && stack_start <= sp && sp < stack_end) {
12627+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12628+ put_cpu();
12629+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12630+ return;
12631+ }
12632+
12633+ put_cpu();
12634+
12635+ /* unknown stack */
12636+ BUG();
12637+}
12638+EXPORT_SYMBOL(pax_check_alloca);
12639+#endif
12640diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12641index cd28a35..2601699 100644
12642--- a/arch/x86/kernel/early_printk.c
12643+++ b/arch/x86/kernel/early_printk.c
12644@@ -7,6 +7,7 @@
12645 #include <linux/pci_regs.h>
12646 #include <linux/pci_ids.h>
12647 #include <linux/errno.h>
12648+#include <linux/sched.h>
12649 #include <asm/io.h>
12650 #include <asm/processor.h>
12651 #include <asm/fcntl.h>
12652@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...)
12653 int n;
12654 va_list ap;
12655
12656+ pax_track_stack();
12657+
12658 va_start(ap, fmt);
12659 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12660 early_console->write(early_console, buf, n);
12661diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12662index f3f6f53..0841b66 100644
12663--- a/arch/x86/kernel/entry_32.S
12664+++ b/arch/x86/kernel/entry_32.S
12665@@ -186,13 +186,146 @@
12666 /*CFI_REL_OFFSET gs, PT_GS*/
12667 .endm
12668 .macro SET_KERNEL_GS reg
12669+
12670+#ifdef CONFIG_CC_STACKPROTECTOR
12671 movl $(__KERNEL_STACK_CANARY), \reg
12672+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12673+ movl $(__USER_DS), \reg
12674+#else
12675+ xorl \reg, \reg
12676+#endif
12677+
12678 movl \reg, %gs
12679 .endm
12680
12681 #endif /* CONFIG_X86_32_LAZY_GS */
12682
12683-.macro SAVE_ALL
12684+.macro pax_enter_kernel
12685+#ifdef CONFIG_PAX_KERNEXEC
12686+ call pax_enter_kernel
12687+#endif
12688+.endm
12689+
12690+.macro pax_exit_kernel
12691+#ifdef CONFIG_PAX_KERNEXEC
12692+ call pax_exit_kernel
12693+#endif
12694+.endm
12695+
12696+#ifdef CONFIG_PAX_KERNEXEC
12697+ENTRY(pax_enter_kernel)
12698+#ifdef CONFIG_PARAVIRT
12699+ pushl %eax
12700+ pushl %ecx
12701+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12702+ mov %eax, %esi
12703+#else
12704+ mov %cr0, %esi
12705+#endif
12706+ bts $16, %esi
12707+ jnc 1f
12708+ mov %cs, %esi
12709+ cmp $__KERNEL_CS, %esi
12710+ jz 3f
12711+ ljmp $__KERNEL_CS, $3f
12712+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12713+2:
12714+#ifdef CONFIG_PARAVIRT
12715+ mov %esi, %eax
12716+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12717+#else
12718+ mov %esi, %cr0
12719+#endif
12720+3:
12721+#ifdef CONFIG_PARAVIRT
12722+ popl %ecx
12723+ popl %eax
12724+#endif
12725+ ret
12726+ENDPROC(pax_enter_kernel)
12727+
12728+ENTRY(pax_exit_kernel)
12729+#ifdef CONFIG_PARAVIRT
12730+ pushl %eax
12731+ pushl %ecx
12732+#endif
12733+ mov %cs, %esi
12734+ cmp $__KERNEXEC_KERNEL_CS, %esi
12735+ jnz 2f
12736+#ifdef CONFIG_PARAVIRT
12737+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12738+ mov %eax, %esi
12739+#else
12740+ mov %cr0, %esi
12741+#endif
12742+ btr $16, %esi
12743+ ljmp $__KERNEL_CS, $1f
12744+1:
12745+#ifdef CONFIG_PARAVIRT
12746+ mov %esi, %eax
12747+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12748+#else
12749+ mov %esi, %cr0
12750+#endif
12751+2:
12752+#ifdef CONFIG_PARAVIRT
12753+ popl %ecx
12754+ popl %eax
12755+#endif
12756+ ret
12757+ENDPROC(pax_exit_kernel)
12758+#endif
12759+
12760+.macro pax_erase_kstack
12761+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12762+ call pax_erase_kstack
12763+#endif
12764+.endm
12765+
12766+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12767+/*
12768+ * ebp: thread_info
12769+ * ecx, edx: can be clobbered
12770+ */
12771+ENTRY(pax_erase_kstack)
12772+ pushl %edi
12773+ pushl %eax
12774+
12775+ mov TI_lowest_stack(%ebp), %edi
12776+ mov $-0xBEEF, %eax
12777+ std
12778+
12779+1: mov %edi, %ecx
12780+ and $THREAD_SIZE_asm - 1, %ecx
12781+ shr $2, %ecx
12782+ repne scasl
12783+ jecxz 2f
12784+
12785+ cmp $2*16, %ecx
12786+ jc 2f
12787+
12788+ mov $2*16, %ecx
12789+ repe scasl
12790+ jecxz 2f
12791+ jne 1b
12792+
12793+2: cld
12794+ mov %esp, %ecx
12795+ sub %edi, %ecx
12796+ shr $2, %ecx
12797+ rep stosl
12798+
12799+ mov TI_task_thread_sp0(%ebp), %edi
12800+ sub $128, %edi
12801+ mov %edi, TI_lowest_stack(%ebp)
12802+
12803+ popl %eax
12804+ popl %edi
12805+ ret
12806+ENDPROC(pax_erase_kstack)
12807+#endif
12808+
12809+.macro __SAVE_ALL _DS
12810 cld
12811 PUSH_GS
12812 pushl_cfi %fs
12813@@ -215,7 +348,7 @@
12814 CFI_REL_OFFSET ecx, 0
12815 pushl_cfi %ebx
12816 CFI_REL_OFFSET ebx, 0
12817- movl $(__USER_DS), %edx
12818+ movl $\_DS, %edx
12819 movl %edx, %ds
12820 movl %edx, %es
12821 movl $(__KERNEL_PERCPU), %edx
12822@@ -223,6 +356,15 @@
12823 SET_KERNEL_GS %edx
12824 .endm
12825
12826+.macro SAVE_ALL
12827+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12828+ __SAVE_ALL __KERNEL_DS
12829+ pax_enter_kernel
12830+#else
12831+ __SAVE_ALL __USER_DS
12832+#endif
12833+.endm
12834+
12835 .macro RESTORE_INT_REGS
12836 popl_cfi %ebx
12837 CFI_RESTORE ebx
12838@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12839 popfl_cfi
12840 jmp syscall_exit
12841 CFI_ENDPROC
12842-END(ret_from_fork)
12843+ENDPROC(ret_from_fork)
12844
12845 /*
12846 * Interrupt exit functions should be protected against kprobes
12847@@ -333,7 +475,15 @@ check_userspace:
12848 movb PT_CS(%esp), %al
12849 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12850 cmpl $USER_RPL, %eax
12851+
12852+#ifdef CONFIG_PAX_KERNEXEC
12853+ jae resume_userspace
12854+
12855+ PAX_EXIT_KERNEL
12856+ jmp resume_kernel
12857+#else
12858 jb resume_kernel # not returning to v8086 or userspace
12859+#endif
12860
12861 ENTRY(resume_userspace)
12862 LOCKDEP_SYS_EXIT
12863@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12864 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12865 # int/exception return?
12866 jne work_pending
12867- jmp restore_all
12868-END(ret_from_exception)
12869+ jmp restore_all_pax
12870+ENDPROC(ret_from_exception)
12871
12872 #ifdef CONFIG_PREEMPT
12873 ENTRY(resume_kernel)
12874@@ -361,7 +511,7 @@ need_resched:
12875 jz restore_all
12876 call preempt_schedule_irq
12877 jmp need_resched
12878-END(resume_kernel)
12879+ENDPROC(resume_kernel)
12880 #endif
12881 CFI_ENDPROC
12882 /*
12883@@ -395,23 +545,34 @@ sysenter_past_esp:
12884 /*CFI_REL_OFFSET cs, 0*/
12885 /*
12886 * Push current_thread_info()->sysenter_return to the stack.
12887- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12888- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12889 */
12890- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12891+ pushl_cfi $0
12892 CFI_REL_OFFSET eip, 0
12893
12894 pushl_cfi %eax
12895 SAVE_ALL
12896+ GET_THREAD_INFO(%ebp)
12897+ movl TI_sysenter_return(%ebp),%ebp
12898+ movl %ebp,PT_EIP(%esp)
12899 ENABLE_INTERRUPTS(CLBR_NONE)
12900
12901 /*
12902 * Load the potential sixth argument from user stack.
12903 * Careful about security.
12904 */
12905+ movl PT_OLDESP(%esp),%ebp
12906+
12907+#ifdef CONFIG_PAX_MEMORY_UDEREF
12908+ mov PT_OLDSS(%esp),%ds
12909+1: movl %ds:(%ebp),%ebp
12910+ push %ss
12911+ pop %ds
12912+#else
12913 cmpl $__PAGE_OFFSET-3,%ebp
12914 jae syscall_fault
12915 1: movl (%ebp),%ebp
12916+#endif
12917+
12918 movl %ebp,PT_EBP(%esp)
12919 .section __ex_table,"a"
12920 .align 4
12921@@ -434,12 +595,24 @@ sysenter_do_call:
12922 testl $_TIF_ALLWORK_MASK, %ecx
12923 jne sysexit_audit
12924 sysenter_exit:
12925+
12926+#ifdef CONFIG_PAX_RANDKSTACK
12927+ pushl_cfi %eax
12928+ movl %esp, %eax
12929+ call pax_randomize_kstack
12930+ popl_cfi %eax
12931+#endif
12932+
12933+ pax_erase_kstack
12934+
12935 /* if something modifies registers it must also disable sysexit */
12936 movl PT_EIP(%esp), %edx
12937 movl PT_OLDESP(%esp), %ecx
12938 xorl %ebp,%ebp
12939 TRACE_IRQS_ON
12940 1: mov PT_FS(%esp), %fs
12941+2: mov PT_DS(%esp), %ds
12942+3: mov PT_ES(%esp), %es
12943 PTGS_TO_GS
12944 ENABLE_INTERRUPTS_SYSEXIT
12945
12946@@ -456,6 +629,9 @@ sysenter_audit:
12947 movl %eax,%edx /* 2nd arg: syscall number */
12948 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12949 call audit_syscall_entry
12950+
12951+ pax_erase_kstack
12952+
12953 pushl_cfi %ebx
12954 movl PT_EAX(%esp),%eax /* reload syscall number */
12955 jmp sysenter_do_call
12956@@ -482,11 +658,17 @@ sysexit_audit:
12957
12958 CFI_ENDPROC
12959 .pushsection .fixup,"ax"
12960-2: movl $0,PT_FS(%esp)
12961+4: movl $0,PT_FS(%esp)
12962+ jmp 1b
12963+5: movl $0,PT_DS(%esp)
12964+ jmp 1b
12965+6: movl $0,PT_ES(%esp)
12966 jmp 1b
12967 .section __ex_table,"a"
12968 .align 4
12969- .long 1b,2b
12970+ .long 1b,4b
12971+ .long 2b,5b
12972+ .long 3b,6b
12973 .popsection
12974 PTGS_TO_GS_EX
12975 ENDPROC(ia32_sysenter_target)
12976@@ -519,6 +701,15 @@ syscall_exit:
12977 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12978 jne syscall_exit_work
12979
12980+restore_all_pax:
12981+
12982+#ifdef CONFIG_PAX_RANDKSTACK
12983+ movl %esp, %eax
12984+ call pax_randomize_kstack
12985+#endif
12986+
12987+ pax_erase_kstack
12988+
12989 restore_all:
12990 TRACE_IRQS_IRET
12991 restore_all_notrace:
12992@@ -578,14 +769,34 @@ ldt_ss:
12993 * compensating for the offset by changing to the ESPFIX segment with
12994 * a base address that matches for the difference.
12995 */
12996-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12997+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12998 mov %esp, %edx /* load kernel esp */
12999 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13000 mov %dx, %ax /* eax: new kernel esp */
13001 sub %eax, %edx /* offset (low word is 0) */
13002+#ifdef CONFIG_SMP
13003+ movl PER_CPU_VAR(cpu_number), %ebx
13004+ shll $PAGE_SHIFT_asm, %ebx
13005+ addl $cpu_gdt_table, %ebx
13006+#else
13007+ movl $cpu_gdt_table, %ebx
13008+#endif
13009 shr $16, %edx
13010- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13011- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13012+
13013+#ifdef CONFIG_PAX_KERNEXEC
13014+ mov %cr0, %esi
13015+ btr $16, %esi
13016+ mov %esi, %cr0
13017+#endif
13018+
13019+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13020+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13021+
13022+#ifdef CONFIG_PAX_KERNEXEC
13023+ bts $16, %esi
13024+ mov %esi, %cr0
13025+#endif
13026+
13027 pushl_cfi $__ESPFIX_SS
13028 pushl_cfi %eax /* new kernel esp */
13029 /* Disable interrupts, but do not irqtrace this section: we
13030@@ -614,34 +825,28 @@ work_resched:
13031 movl TI_flags(%ebp), %ecx
13032 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13033 # than syscall tracing?
13034- jz restore_all
13035+ jz restore_all_pax
13036 testb $_TIF_NEED_RESCHED, %cl
13037 jnz work_resched
13038
13039 work_notifysig: # deal with pending signals and
13040 # notify-resume requests
13041+ movl %esp, %eax
13042 #ifdef CONFIG_VM86
13043 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13044- movl %esp, %eax
13045- jne work_notifysig_v86 # returning to kernel-space or
13046+ jz 1f # returning to kernel-space or
13047 # vm86-space
13048- xorl %edx, %edx
13049- call do_notify_resume
13050- jmp resume_userspace_sig
13051
13052- ALIGN
13053-work_notifysig_v86:
13054 pushl_cfi %ecx # save ti_flags for do_notify_resume
13055 call save_v86_state # %eax contains pt_regs pointer
13056 popl_cfi %ecx
13057 movl %eax, %esp
13058-#else
13059- movl %esp, %eax
13060+1:
13061 #endif
13062 xorl %edx, %edx
13063 call do_notify_resume
13064 jmp resume_userspace_sig
13065-END(work_pending)
13066+ENDPROC(work_pending)
13067
13068 # perform syscall exit tracing
13069 ALIGN
13070@@ -649,11 +854,14 @@ syscall_trace_entry:
13071 movl $-ENOSYS,PT_EAX(%esp)
13072 movl %esp, %eax
13073 call syscall_trace_enter
13074+
13075+ pax_erase_kstack
13076+
13077 /* What it returned is what we'll actually use. */
13078 cmpl $(nr_syscalls), %eax
13079 jnae syscall_call
13080 jmp syscall_exit
13081-END(syscall_trace_entry)
13082+ENDPROC(syscall_trace_entry)
13083
13084 # perform syscall exit tracing
13085 ALIGN
13086@@ -666,20 +874,24 @@ syscall_exit_work:
13087 movl %esp, %eax
13088 call syscall_trace_leave
13089 jmp resume_userspace
13090-END(syscall_exit_work)
13091+ENDPROC(syscall_exit_work)
13092 CFI_ENDPROC
13093
13094 RING0_INT_FRAME # can't unwind into user space anyway
13095 syscall_fault:
13096+#ifdef CONFIG_PAX_MEMORY_UDEREF
13097+ push %ss
13098+ pop %ds
13099+#endif
13100 GET_THREAD_INFO(%ebp)
13101 movl $-EFAULT,PT_EAX(%esp)
13102 jmp resume_userspace
13103-END(syscall_fault)
13104+ENDPROC(syscall_fault)
13105
13106 syscall_badsys:
13107 movl $-ENOSYS,PT_EAX(%esp)
13108 jmp resume_userspace
13109-END(syscall_badsys)
13110+ENDPROC(syscall_badsys)
13111 CFI_ENDPROC
13112 /*
13113 * End of kprobes section
13114@@ -753,6 +965,36 @@ ptregs_clone:
13115 CFI_ENDPROC
13116 ENDPROC(ptregs_clone)
13117
13118+ ALIGN;
13119+ENTRY(kernel_execve)
13120+ CFI_STARTPROC
13121+ pushl_cfi %ebp
13122+ sub $PT_OLDSS+4,%esp
13123+ pushl_cfi %edi
13124+ pushl_cfi %ecx
13125+ pushl_cfi %eax
13126+ lea 3*4(%esp),%edi
13127+ mov $PT_OLDSS/4+1,%ecx
13128+ xorl %eax,%eax
13129+ rep stosl
13130+ popl_cfi %eax
13131+ popl_cfi %ecx
13132+ popl_cfi %edi
13133+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13134+ pushl_cfi %esp
13135+ call sys_execve
13136+ add $4,%esp
13137+ CFI_ADJUST_CFA_OFFSET -4
13138+ GET_THREAD_INFO(%ebp)
13139+ test %eax,%eax
13140+ jz syscall_exit
13141+ add $PT_OLDSS+4,%esp
13142+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13143+ popl_cfi %ebp
13144+ ret
13145+ CFI_ENDPROC
13146+ENDPROC(kernel_execve)
13147+
13148 .macro FIXUP_ESPFIX_STACK
13149 /*
13150 * Switch back for ESPFIX stack to the normal zerobased stack
13151@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13152 * normal stack and adjusts ESP with the matching offset.
13153 */
13154 /* fixup the stack */
13155- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13156- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13157+#ifdef CONFIG_SMP
13158+ movl PER_CPU_VAR(cpu_number), %ebx
13159+ shll $PAGE_SHIFT_asm, %ebx
13160+ addl $cpu_gdt_table, %ebx
13161+#else
13162+ movl $cpu_gdt_table, %ebx
13163+#endif
13164+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13165+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13166 shl $16, %eax
13167 addl %esp, %eax /* the adjusted stack pointer */
13168 pushl_cfi $__KERNEL_DS
13169@@ -816,7 +1065,7 @@ vector=vector+1
13170 .endr
13171 2: jmp common_interrupt
13172 .endr
13173-END(irq_entries_start)
13174+ENDPROC(irq_entries_start)
13175
13176 .previous
13177 END(interrupt)
13178@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13179 pushl_cfi $do_coprocessor_error
13180 jmp error_code
13181 CFI_ENDPROC
13182-END(coprocessor_error)
13183+ENDPROC(coprocessor_error)
13184
13185 ENTRY(simd_coprocessor_error)
13186 RING0_INT_FRAME
13187@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13188 #endif
13189 jmp error_code
13190 CFI_ENDPROC
13191-END(simd_coprocessor_error)
13192+ENDPROC(simd_coprocessor_error)
13193
13194 ENTRY(device_not_available)
13195 RING0_INT_FRAME
13196@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13197 pushl_cfi $do_device_not_available
13198 jmp error_code
13199 CFI_ENDPROC
13200-END(device_not_available)
13201+ENDPROC(device_not_available)
13202
13203 #ifdef CONFIG_PARAVIRT
13204 ENTRY(native_iret)
13205@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13206 .align 4
13207 .long native_iret, iret_exc
13208 .previous
13209-END(native_iret)
13210+ENDPROC(native_iret)
13211
13212 ENTRY(native_irq_enable_sysexit)
13213 sti
13214 sysexit
13215-END(native_irq_enable_sysexit)
13216+ENDPROC(native_irq_enable_sysexit)
13217 #endif
13218
13219 ENTRY(overflow)
13220@@ -916,7 +1165,7 @@ ENTRY(overflow)
13221 pushl_cfi $do_overflow
13222 jmp error_code
13223 CFI_ENDPROC
13224-END(overflow)
13225+ENDPROC(overflow)
13226
13227 ENTRY(bounds)
13228 RING0_INT_FRAME
13229@@ -924,7 +1173,7 @@ ENTRY(bounds)
13230 pushl_cfi $do_bounds
13231 jmp error_code
13232 CFI_ENDPROC
13233-END(bounds)
13234+ENDPROC(bounds)
13235
13236 ENTRY(invalid_op)
13237 RING0_INT_FRAME
13238@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13239 pushl_cfi $do_invalid_op
13240 jmp error_code
13241 CFI_ENDPROC
13242-END(invalid_op)
13243+ENDPROC(invalid_op)
13244
13245 ENTRY(coprocessor_segment_overrun)
13246 RING0_INT_FRAME
13247@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13248 pushl_cfi $do_coprocessor_segment_overrun
13249 jmp error_code
13250 CFI_ENDPROC
13251-END(coprocessor_segment_overrun)
13252+ENDPROC(coprocessor_segment_overrun)
13253
13254 ENTRY(invalid_TSS)
13255 RING0_EC_FRAME
13256 pushl_cfi $do_invalid_TSS
13257 jmp error_code
13258 CFI_ENDPROC
13259-END(invalid_TSS)
13260+ENDPROC(invalid_TSS)
13261
13262 ENTRY(segment_not_present)
13263 RING0_EC_FRAME
13264 pushl_cfi $do_segment_not_present
13265 jmp error_code
13266 CFI_ENDPROC
13267-END(segment_not_present)
13268+ENDPROC(segment_not_present)
13269
13270 ENTRY(stack_segment)
13271 RING0_EC_FRAME
13272 pushl_cfi $do_stack_segment
13273 jmp error_code
13274 CFI_ENDPROC
13275-END(stack_segment)
13276+ENDPROC(stack_segment)
13277
13278 ENTRY(alignment_check)
13279 RING0_EC_FRAME
13280 pushl_cfi $do_alignment_check
13281 jmp error_code
13282 CFI_ENDPROC
13283-END(alignment_check)
13284+ENDPROC(alignment_check)
13285
13286 ENTRY(divide_error)
13287 RING0_INT_FRAME
13288@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13289 pushl_cfi $do_divide_error
13290 jmp error_code
13291 CFI_ENDPROC
13292-END(divide_error)
13293+ENDPROC(divide_error)
13294
13295 #ifdef CONFIG_X86_MCE
13296 ENTRY(machine_check)
13297@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13298 pushl_cfi machine_check_vector
13299 jmp error_code
13300 CFI_ENDPROC
13301-END(machine_check)
13302+ENDPROC(machine_check)
13303 #endif
13304
13305 ENTRY(spurious_interrupt_bug)
13306@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13307 pushl_cfi $do_spurious_interrupt_bug
13308 jmp error_code
13309 CFI_ENDPROC
13310-END(spurious_interrupt_bug)
13311+ENDPROC(spurious_interrupt_bug)
13312 /*
13313 * End of kprobes section
13314 */
13315@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13316
13317 ENTRY(mcount)
13318 ret
13319-END(mcount)
13320+ENDPROC(mcount)
13321
13322 ENTRY(ftrace_caller)
13323 cmpl $0, function_trace_stop
13324@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13325 .globl ftrace_stub
13326 ftrace_stub:
13327 ret
13328-END(ftrace_caller)
13329+ENDPROC(ftrace_caller)
13330
13331 #else /* ! CONFIG_DYNAMIC_FTRACE */
13332
13333@@ -1174,7 +1423,7 @@ trace:
13334 popl %ecx
13335 popl %eax
13336 jmp ftrace_stub
13337-END(mcount)
13338+ENDPROC(mcount)
13339 #endif /* CONFIG_DYNAMIC_FTRACE */
13340 #endif /* CONFIG_FUNCTION_TRACER */
13341
13342@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13343 popl %ecx
13344 popl %eax
13345 ret
13346-END(ftrace_graph_caller)
13347+ENDPROC(ftrace_graph_caller)
13348
13349 .globl return_to_handler
13350 return_to_handler:
13351@@ -1209,7 +1458,6 @@ return_to_handler:
13352 jmp *%ecx
13353 #endif
13354
13355-.section .rodata,"a"
13356 #include "syscall_table_32.S"
13357
13358 syscall_table_size=(.-sys_call_table)
13359@@ -1255,15 +1503,18 @@ error_code:
13360 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13361 REG_TO_PTGS %ecx
13362 SET_KERNEL_GS %ecx
13363- movl $(__USER_DS), %ecx
13364+ movl $(__KERNEL_DS), %ecx
13365 movl %ecx, %ds
13366 movl %ecx, %es
13367+
13368+ pax_enter_kernel
13369+
13370 TRACE_IRQS_OFF
13371 movl %esp,%eax # pt_regs pointer
13372 call *%edi
13373 jmp ret_from_exception
13374 CFI_ENDPROC
13375-END(page_fault)
13376+ENDPROC(page_fault)
13377
13378 /*
13379 * Debug traps and NMI can happen at the one SYSENTER instruction
13380@@ -1305,7 +1556,7 @@ debug_stack_correct:
13381 call do_debug
13382 jmp ret_from_exception
13383 CFI_ENDPROC
13384-END(debug)
13385+ENDPROC(debug)
13386
13387 /*
13388 * NMI is doubly nasty. It can happen _while_ we're handling
13389@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13390 xorl %edx,%edx # zero error code
13391 movl %esp,%eax # pt_regs pointer
13392 call do_nmi
13393+
13394+ pax_exit_kernel
13395+
13396 jmp restore_all_notrace
13397 CFI_ENDPROC
13398
13399@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13400 FIXUP_ESPFIX_STACK # %eax == %esp
13401 xorl %edx,%edx # zero error code
13402 call do_nmi
13403+
13404+ pax_exit_kernel
13405+
13406 RESTORE_REGS
13407 lss 12+4(%esp), %esp # back to espfix stack
13408 CFI_ADJUST_CFA_OFFSET -24
13409 jmp irq_return
13410 CFI_ENDPROC
13411-END(nmi)
13412+ENDPROC(nmi)
13413
13414 ENTRY(int3)
13415 RING0_INT_FRAME
13416@@ -1395,14 +1652,14 @@ ENTRY(int3)
13417 call do_int3
13418 jmp ret_from_exception
13419 CFI_ENDPROC
13420-END(int3)
13421+ENDPROC(int3)
13422
13423 ENTRY(general_protection)
13424 RING0_EC_FRAME
13425 pushl_cfi $do_general_protection
13426 jmp error_code
13427 CFI_ENDPROC
13428-END(general_protection)
13429+ENDPROC(general_protection)
13430
13431 #ifdef CONFIG_KVM_GUEST
13432 ENTRY(async_page_fault)
13433@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13434 pushl_cfi $do_async_page_fault
13435 jmp error_code
13436 CFI_ENDPROC
13437-END(async_page_fault)
13438+ENDPROC(async_page_fault)
13439 #endif
13440
13441 /*
13442diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13443index 6419bb0..bb59ca4 100644
13444--- a/arch/x86/kernel/entry_64.S
13445+++ b/arch/x86/kernel/entry_64.S
13446@@ -55,6 +55,8 @@
13447 #include <asm/paravirt.h>
13448 #include <asm/ftrace.h>
13449 #include <asm/percpu.h>
13450+#include <asm/pgtable.h>
13451+#include <asm/alternative-asm.h>
13452
13453 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13454 #include <linux/elf-em.h>
13455@@ -68,8 +70,9 @@
13456 #ifdef CONFIG_FUNCTION_TRACER
13457 #ifdef CONFIG_DYNAMIC_FTRACE
13458 ENTRY(mcount)
13459+ pax_force_retaddr
13460 retq
13461-END(mcount)
13462+ENDPROC(mcount)
13463
13464 ENTRY(ftrace_caller)
13465 cmpl $0, function_trace_stop
13466@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13467 #endif
13468
13469 GLOBAL(ftrace_stub)
13470+ pax_force_retaddr
13471 retq
13472-END(ftrace_caller)
13473+ENDPROC(ftrace_caller)
13474
13475 #else /* ! CONFIG_DYNAMIC_FTRACE */
13476 ENTRY(mcount)
13477@@ -112,6 +116,7 @@ ENTRY(mcount)
13478 #endif
13479
13480 GLOBAL(ftrace_stub)
13481+ pax_force_retaddr
13482 retq
13483
13484 trace:
13485@@ -121,12 +126,13 @@ trace:
13486 movq 8(%rbp), %rsi
13487 subq $MCOUNT_INSN_SIZE, %rdi
13488
13489+ pax_force_fptr ftrace_trace_function
13490 call *ftrace_trace_function
13491
13492 MCOUNT_RESTORE_FRAME
13493
13494 jmp ftrace_stub
13495-END(mcount)
13496+ENDPROC(mcount)
13497 #endif /* CONFIG_DYNAMIC_FTRACE */
13498 #endif /* CONFIG_FUNCTION_TRACER */
13499
13500@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13501
13502 MCOUNT_RESTORE_FRAME
13503
13504+ pax_force_retaddr
13505 retq
13506-END(ftrace_graph_caller)
13507+ENDPROC(ftrace_graph_caller)
13508
13509 GLOBAL(return_to_handler)
13510 subq $24, %rsp
13511@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13512 movq 8(%rsp), %rdx
13513 movq (%rsp), %rax
13514 addq $24, %rsp
13515+ pax_force_fptr %rdi
13516 jmp *%rdi
13517 #endif
13518
13519@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13520 ENDPROC(native_usergs_sysret64)
13521 #endif /* CONFIG_PARAVIRT */
13522
13523+ .macro ljmpq sel, off
13524+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13525+ .byte 0x48; ljmp *1234f(%rip)
13526+ .pushsection .rodata
13527+ .align 16
13528+ 1234: .quad \off; .word \sel
13529+ .popsection
13530+#else
13531+ pushq $\sel
13532+ pushq $\off
13533+ lretq
13534+#endif
13535+ .endm
13536+
13537+ .macro pax_enter_kernel
13538+ pax_set_fptr_mask
13539+#ifdef CONFIG_PAX_KERNEXEC
13540+ call pax_enter_kernel
13541+#endif
13542+ .endm
13543+
13544+ .macro pax_exit_kernel
13545+#ifdef CONFIG_PAX_KERNEXEC
13546+ call pax_exit_kernel
13547+#endif
13548+ .endm
13549+
13550+#ifdef CONFIG_PAX_KERNEXEC
13551+ENTRY(pax_enter_kernel)
13552+ pushq %rdi
13553+
13554+#ifdef CONFIG_PARAVIRT
13555+ PV_SAVE_REGS(CLBR_RDI)
13556+#endif
13557+
13558+ GET_CR0_INTO_RDI
13559+ bts $16,%rdi
13560+ jnc 3f
13561+ mov %cs,%edi
13562+ cmp $__KERNEL_CS,%edi
13563+ jnz 2f
13564+1:
13565+
13566+#ifdef CONFIG_PARAVIRT
13567+ PV_RESTORE_REGS(CLBR_RDI)
13568+#endif
13569+
13570+ popq %rdi
13571+ pax_force_retaddr
13572+ retq
13573+
13574+2: ljmpq __KERNEL_CS,1f
13575+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13576+4: SET_RDI_INTO_CR0
13577+ jmp 1b
13578+ENDPROC(pax_enter_kernel)
13579+
13580+ENTRY(pax_exit_kernel)
13581+ pushq %rdi
13582+
13583+#ifdef CONFIG_PARAVIRT
13584+ PV_SAVE_REGS(CLBR_RDI)
13585+#endif
13586+
13587+ mov %cs,%rdi
13588+ cmp $__KERNEXEC_KERNEL_CS,%edi
13589+ jz 2f
13590+1:
13591+
13592+#ifdef CONFIG_PARAVIRT
13593+ PV_RESTORE_REGS(CLBR_RDI);
13594+#endif
13595+
13596+ popq %rdi
13597+ pax_force_retaddr
13598+ retq
13599+
13600+2: GET_CR0_INTO_RDI
13601+ btr $16,%rdi
13602+ ljmpq __KERNEL_CS,3f
13603+3: SET_RDI_INTO_CR0
13604+ jmp 1b
13605+#ifdef CONFIG_PARAVIRT
13606+ PV_RESTORE_REGS(CLBR_RDI);
13607+#endif
13608+
13609+ popq %rdi
13610+ pax_force_retaddr
13611+ retq
13612+ENDPROC(pax_exit_kernel)
13613+#endif
13614+
13615+ .macro pax_enter_kernel_user
13616+ pax_set_fptr_mask
13617+#ifdef CONFIG_PAX_MEMORY_UDEREF
13618+ call pax_enter_kernel_user
13619+#endif
13620+ .endm
13621+
13622+ .macro pax_exit_kernel_user
13623+#ifdef CONFIG_PAX_MEMORY_UDEREF
13624+ call pax_exit_kernel_user
13625+#endif
13626+#ifdef CONFIG_PAX_RANDKSTACK
13627+ pushq %rax
13628+ call pax_randomize_kstack
13629+ popq %rax
13630+#endif
13631+ .endm
13632+
13633+#ifdef CONFIG_PAX_MEMORY_UDEREF
13634+ENTRY(pax_enter_kernel_user)
13635+ pushq %rdi
13636+ pushq %rbx
13637+
13638+#ifdef CONFIG_PARAVIRT
13639+ PV_SAVE_REGS(CLBR_RDI)
13640+#endif
13641+
13642+ GET_CR3_INTO_RDI
13643+ mov %rdi,%rbx
13644+ add $__START_KERNEL_map,%rbx
13645+ sub phys_base(%rip),%rbx
13646+
13647+#ifdef CONFIG_PARAVIRT
13648+ pushq %rdi
13649+ cmpl $0, pv_info+PARAVIRT_enabled
13650+ jz 1f
13651+ i = 0
13652+ .rept USER_PGD_PTRS
13653+ mov i*8(%rbx),%rsi
13654+ mov $0,%sil
13655+ lea i*8(%rbx),%rdi
13656+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13657+ i = i + 1
13658+ .endr
13659+ jmp 2f
13660+1:
13661+#endif
13662+
13663+ i = 0
13664+ .rept USER_PGD_PTRS
13665+ movb $0,i*8(%rbx)
13666+ i = i + 1
13667+ .endr
13668+
13669+#ifdef CONFIG_PARAVIRT
13670+2: popq %rdi
13671+#endif
13672+ SET_RDI_INTO_CR3
13673+
13674+#ifdef CONFIG_PAX_KERNEXEC
13675+ GET_CR0_INTO_RDI
13676+ bts $16,%rdi
13677+ SET_RDI_INTO_CR0
13678+#endif
13679+
13680+#ifdef CONFIG_PARAVIRT
13681+ PV_RESTORE_REGS(CLBR_RDI)
13682+#endif
13683+
13684+ popq %rbx
13685+ popq %rdi
13686+ pax_force_retaddr
13687+ retq
13688+ENDPROC(pax_enter_kernel_user)
13689+
13690+ENTRY(pax_exit_kernel_user)
13691+ push %rdi
13692+
13693+#ifdef CONFIG_PARAVIRT
13694+ pushq %rbx
13695+ PV_SAVE_REGS(CLBR_RDI)
13696+#endif
13697+
13698+#ifdef CONFIG_PAX_KERNEXEC
13699+ GET_CR0_INTO_RDI
13700+ btr $16,%rdi
13701+ SET_RDI_INTO_CR0
13702+#endif
13703+
13704+ GET_CR3_INTO_RDI
13705+ add $__START_KERNEL_map,%rdi
13706+ sub phys_base(%rip),%rdi
13707+
13708+#ifdef CONFIG_PARAVIRT
13709+ cmpl $0, pv_info+PARAVIRT_enabled
13710+ jz 1f
13711+ mov %rdi,%rbx
13712+ i = 0
13713+ .rept USER_PGD_PTRS
13714+ mov i*8(%rbx),%rsi
13715+ mov $0x67,%sil
13716+ lea i*8(%rbx),%rdi
13717+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13718+ i = i + 1
13719+ .endr
13720+ jmp 2f
13721+1:
13722+#endif
13723+
13724+ i = 0
13725+ .rept USER_PGD_PTRS
13726+ movb $0x67,i*8(%rdi)
13727+ i = i + 1
13728+ .endr
13729+
13730+#ifdef CONFIG_PARAVIRT
13731+2: PV_RESTORE_REGS(CLBR_RDI)
13732+ popq %rbx
13733+#endif
13734+
13735+ popq %rdi
13736+ pax_force_retaddr
13737+ retq
13738+ENDPROC(pax_exit_kernel_user)
13739+#endif
13740+
13741+.macro pax_erase_kstack
13742+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13743+ call pax_erase_kstack
13744+#endif
13745+.endm
13746+
13747+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13748+/*
13749+ * r11: thread_info
13750+ * rcx, rdx: can be clobbered
13751+ */
13752+ENTRY(pax_erase_kstack)
13753+ pushq %rdi
13754+ pushq %rax
13755+ pushq %r11
13756+
13757+ GET_THREAD_INFO(%r11)
13758+ mov TI_lowest_stack(%r11), %rdi
13759+ mov $-0xBEEF, %rax
13760+ std
13761+
13762+1: mov %edi, %ecx
13763+ and $THREAD_SIZE_asm - 1, %ecx
13764+ shr $3, %ecx
13765+ repne scasq
13766+ jecxz 2f
13767+
13768+ cmp $2*8, %ecx
13769+ jc 2f
13770+
13771+ mov $2*8, %ecx
13772+ repe scasq
13773+ jecxz 2f
13774+ jne 1b
13775+
13776+2: cld
13777+ mov %esp, %ecx
13778+ sub %edi, %ecx
13779+
13780+ cmp $THREAD_SIZE_asm, %rcx
13781+ jb 3f
13782+ ud2
13783+3:
13784+
13785+ shr $3, %ecx
13786+ rep stosq
13787+
13788+ mov TI_task_thread_sp0(%r11), %rdi
13789+ sub $256, %rdi
13790+ mov %rdi, TI_lowest_stack(%r11)
13791+
13792+ popq %r11
13793+ popq %rax
13794+ popq %rdi
13795+ pax_force_retaddr
13796+ ret
13797+ENDPROC(pax_erase_kstack)
13798+#endif
13799
13800 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13801 #ifdef CONFIG_TRACE_IRQFLAGS
13802@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13803 .endm
13804
13805 .macro UNFAKE_STACK_FRAME
13806- addq $8*6, %rsp
13807- CFI_ADJUST_CFA_OFFSET -(6*8)
13808+ addq $8*6 + ARG_SKIP, %rsp
13809+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13810 .endm
13811
13812 /*
13813@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13814 movq %rsp, %rsi
13815
13816 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13817- testl $3, CS(%rdi)
13818+ testb $3, CS(%rdi)
13819 je 1f
13820 SWAPGS
13821 /*
13822@@ -350,9 +634,10 @@ ENTRY(save_rest)
13823 movq_cfi r15, R15+16
13824 movq %r11, 8(%rsp) /* return address */
13825 FIXUP_TOP_OF_STACK %r11, 16
13826+ pax_force_retaddr
13827 ret
13828 CFI_ENDPROC
13829-END(save_rest)
13830+ENDPROC(save_rest)
13831
13832 /* save complete stack frame */
13833 .pushsection .kprobes.text, "ax"
13834@@ -381,9 +666,10 @@ ENTRY(save_paranoid)
13835 js 1f /* negative -> in kernel */
13836 SWAPGS
13837 xorl %ebx,%ebx
13838-1: ret
13839+1: pax_force_retaddr_bts
13840+ ret
13841 CFI_ENDPROC
13842-END(save_paranoid)
13843+ENDPROC(save_paranoid)
13844 .popsection
13845
13846 /*
13847@@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
13848
13849 RESTORE_REST
13850
13851- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13852+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13853 je int_ret_from_sys_call
13854
13855 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13856@@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
13857 jmp ret_from_sys_call # go to the SYSRET fastpath
13858
13859 CFI_ENDPROC
13860-END(ret_from_fork)
13861+ENDPROC(ret_from_fork)
13862
13863 /*
13864 * System call entry. Up to 6 arguments in registers are supported.
13865@@ -451,7 +737,7 @@ END(ret_from_fork)
13866 ENTRY(system_call)
13867 CFI_STARTPROC simple
13868 CFI_SIGNAL_FRAME
13869- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13870+ CFI_DEF_CFA rsp,0
13871 CFI_REGISTER rip,rcx
13872 /*CFI_REGISTER rflags,r11*/
13873 SWAPGS_UNSAFE_STACK
13874@@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
13875
13876 movq %rsp,PER_CPU_VAR(old_rsp)
13877 movq PER_CPU_VAR(kernel_stack),%rsp
13878+ SAVE_ARGS 8*6,0
13879+ pax_enter_kernel_user
13880 /*
13881 * No need to follow this irqs off/on section - it's straight
13882 * and short:
13883 */
13884 ENABLE_INTERRUPTS(CLBR_NONE)
13885- SAVE_ARGS 8,0
13886 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13887 movq %rcx,RIP-ARGOFFSET(%rsp)
13888 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13889@@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13890 system_call_fastpath:
13891 cmpq $__NR_syscall_max,%rax
13892 ja badsys
13893- movq %r10,%rcx
13894+ movq R10-ARGOFFSET(%rsp),%rcx
13895 call *sys_call_table(,%rax,8) # XXX: rip relative
13896 movq %rax,RAX-ARGOFFSET(%rsp)
13897 /*
13898@@ -498,6 +785,8 @@ sysret_check:
13899 andl %edi,%edx
13900 jnz sysret_careful
13901 CFI_REMEMBER_STATE
13902+ pax_exit_kernel_user
13903+ pax_erase_kstack
13904 /*
13905 * sysretq will re-enable interrupts:
13906 */
13907@@ -549,14 +838,18 @@ badsys:
13908 * jump back to the normal fast path.
13909 */
13910 auditsys:
13911- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13912+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13913 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13914 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13915 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13916 movq %rax,%rsi /* 2nd arg: syscall number */
13917 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13918 call audit_syscall_entry
13919+
13920+ pax_erase_kstack
13921+
13922 LOAD_ARGS 0 /* reload call-clobbered registers */
13923+ pax_set_fptr_mask
13924 jmp system_call_fastpath
13925
13926 /*
13927@@ -586,16 +879,20 @@ tracesys:
13928 FIXUP_TOP_OF_STACK %rdi
13929 movq %rsp,%rdi
13930 call syscall_trace_enter
13931+
13932+ pax_erase_kstack
13933+
13934 /*
13935 * Reload arg registers from stack in case ptrace changed them.
13936 * We don't reload %rax because syscall_trace_enter() returned
13937 * the value it wants us to use in the table lookup.
13938 */
13939 LOAD_ARGS ARGOFFSET, 1
13940+ pax_set_fptr_mask
13941 RESTORE_REST
13942 cmpq $__NR_syscall_max,%rax
13943 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13944- movq %r10,%rcx /* fixup for C */
13945+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13946 call *sys_call_table(,%rax,8)
13947 movq %rax,RAX-ARGOFFSET(%rsp)
13948 /* Use IRET because user could have changed frame */
13949@@ -607,7 +904,7 @@ tracesys:
13950 GLOBAL(int_ret_from_sys_call)
13951 DISABLE_INTERRUPTS(CLBR_NONE)
13952 TRACE_IRQS_OFF
13953- testl $3,CS-ARGOFFSET(%rsp)
13954+ testb $3,CS-ARGOFFSET(%rsp)
13955 je retint_restore_args
13956 movl $_TIF_ALLWORK_MASK,%edi
13957 /* edi: mask to check */
13958@@ -664,7 +961,7 @@ int_restore_rest:
13959 TRACE_IRQS_OFF
13960 jmp int_with_check
13961 CFI_ENDPROC
13962-END(system_call)
13963+ENDPROC(system_call)
13964
13965 /*
13966 * Certain special system calls that need to save a complete full stack frame.
13967@@ -680,7 +977,7 @@ ENTRY(\label)
13968 call \func
13969 jmp ptregscall_common
13970 CFI_ENDPROC
13971-END(\label)
13972+ENDPROC(\label)
13973 .endm
13974
13975 PTREGSCALL stub_clone, sys_clone, %r8
13976@@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
13977 movq_cfi_restore R12+8, r12
13978 movq_cfi_restore RBP+8, rbp
13979 movq_cfi_restore RBX+8, rbx
13980+ pax_force_retaddr
13981 ret $REST_SKIP /* pop extended registers */
13982 CFI_ENDPROC
13983-END(ptregscall_common)
13984+ENDPROC(ptregscall_common)
13985
13986 ENTRY(stub_execve)
13987 CFI_STARTPROC
13988@@ -715,7 +1013,7 @@ ENTRY(stub_execve)
13989 RESTORE_REST
13990 jmp int_ret_from_sys_call
13991 CFI_ENDPROC
13992-END(stub_execve)
13993+ENDPROC(stub_execve)
13994
13995 /*
13996 * sigreturn is special because it needs to restore all registers on return.
13997@@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
13998 RESTORE_REST
13999 jmp int_ret_from_sys_call
14000 CFI_ENDPROC
14001-END(stub_rt_sigreturn)
14002+ENDPROC(stub_rt_sigreturn)
14003
14004 /*
14005 * Build the entry stubs and pointer table with some assembler magic.
14006@@ -768,7 +1066,7 @@ vector=vector+1
14007 2: jmp common_interrupt
14008 .endr
14009 CFI_ENDPROC
14010-END(irq_entries_start)
14011+ENDPROC(irq_entries_start)
14012
14013 .previous
14014 END(interrupt)
14015@@ -789,6 +1087,16 @@ END(interrupt)
14016 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14017 SAVE_ARGS_IRQ
14018 PARTIAL_FRAME 0
14019+#ifdef CONFIG_PAX_MEMORY_UDEREF
14020+ testb $3, CS(%rdi)
14021+ jnz 1f
14022+ pax_enter_kernel
14023+ jmp 2f
14024+1: pax_enter_kernel_user
14025+2:
14026+#else
14027+ pax_enter_kernel
14028+#endif
14029 call \func
14030 .endm
14031
14032@@ -820,7 +1128,7 @@ ret_from_intr:
14033
14034 exit_intr:
14035 GET_THREAD_INFO(%rcx)
14036- testl $3,CS-ARGOFFSET(%rsp)
14037+ testb $3,CS-ARGOFFSET(%rsp)
14038 je retint_kernel
14039
14040 /* Interrupt came from user space */
14041@@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */
14042 * The iretq could re-enable interrupts:
14043 */
14044 DISABLE_INTERRUPTS(CLBR_ANY)
14045+ pax_exit_kernel_user
14046+ pax_erase_kstack
14047 TRACE_IRQS_IRETQ
14048 SWAPGS
14049 jmp restore_args
14050
14051 retint_restore_args: /* return to kernel space */
14052 DISABLE_INTERRUPTS(CLBR_ANY)
14053+ pax_exit_kernel
14054+ pax_force_retaddr RIP-ARGOFFSET
14055 /*
14056 * The iretq could re-enable interrupts:
14057 */
14058@@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
14059 #endif
14060
14061 CFI_ENDPROC
14062-END(common_interrupt)
14063+ENDPROC(common_interrupt)
14064 /*
14065 * End of kprobes section
14066 */
14067@@ -952,7 +1264,7 @@ ENTRY(\sym)
14068 interrupt \do_sym
14069 jmp ret_from_intr
14070 CFI_ENDPROC
14071-END(\sym)
14072+ENDPROC(\sym)
14073 .endm
14074
14075 #ifdef CONFIG_SMP
14076@@ -1017,12 +1329,22 @@ ENTRY(\sym)
14077 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14078 call error_entry
14079 DEFAULT_FRAME 0
14080+#ifdef CONFIG_PAX_MEMORY_UDEREF
14081+ testb $3, CS(%rsp)
14082+ jnz 1f
14083+ pax_enter_kernel
14084+ jmp 2f
14085+1: pax_enter_kernel_user
14086+2:
14087+#else
14088+ pax_enter_kernel
14089+#endif
14090 movq %rsp,%rdi /* pt_regs pointer */
14091 xorl %esi,%esi /* no error code */
14092 call \do_sym
14093 jmp error_exit /* %ebx: no swapgs flag */
14094 CFI_ENDPROC
14095-END(\sym)
14096+ENDPROC(\sym)
14097 .endm
14098
14099 .macro paranoidzeroentry sym do_sym
14100@@ -1034,15 +1356,25 @@ ENTRY(\sym)
14101 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14102 call save_paranoid
14103 TRACE_IRQS_OFF
14104+#ifdef CONFIG_PAX_MEMORY_UDEREF
14105+ testb $3, CS(%rsp)
14106+ jnz 1f
14107+ pax_enter_kernel
14108+ jmp 2f
14109+1: pax_enter_kernel_user
14110+2:
14111+#else
14112+ pax_enter_kernel
14113+#endif
14114 movq %rsp,%rdi /* pt_regs pointer */
14115 xorl %esi,%esi /* no error code */
14116 call \do_sym
14117 jmp paranoid_exit /* %ebx: no swapgs flag */
14118 CFI_ENDPROC
14119-END(\sym)
14120+ENDPROC(\sym)
14121 .endm
14122
14123-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14124+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14125 .macro paranoidzeroentry_ist sym do_sym ist
14126 ENTRY(\sym)
14127 INTR_FRAME
14128@@ -1052,14 +1384,30 @@ ENTRY(\sym)
14129 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14130 call save_paranoid
14131 TRACE_IRQS_OFF
14132+#ifdef CONFIG_PAX_MEMORY_UDEREF
14133+ testb $3, CS(%rsp)
14134+ jnz 1f
14135+ pax_enter_kernel
14136+ jmp 2f
14137+1: pax_enter_kernel_user
14138+2:
14139+#else
14140+ pax_enter_kernel
14141+#endif
14142 movq %rsp,%rdi /* pt_regs pointer */
14143 xorl %esi,%esi /* no error code */
14144+#ifdef CONFIG_SMP
14145+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14146+ lea init_tss(%r12), %r12
14147+#else
14148+ lea init_tss(%rip), %r12
14149+#endif
14150 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14151 call \do_sym
14152 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14153 jmp paranoid_exit /* %ebx: no swapgs flag */
14154 CFI_ENDPROC
14155-END(\sym)
14156+ENDPROC(\sym)
14157 .endm
14158
14159 .macro errorentry sym do_sym
14160@@ -1070,13 +1418,23 @@ ENTRY(\sym)
14161 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14162 call error_entry
14163 DEFAULT_FRAME 0
14164+#ifdef CONFIG_PAX_MEMORY_UDEREF
14165+ testb $3, CS(%rsp)
14166+ jnz 1f
14167+ pax_enter_kernel
14168+ jmp 2f
14169+1: pax_enter_kernel_user
14170+2:
14171+#else
14172+ pax_enter_kernel
14173+#endif
14174 movq %rsp,%rdi /* pt_regs pointer */
14175 movq ORIG_RAX(%rsp),%rsi /* get error code */
14176 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14177 call \do_sym
14178 jmp error_exit /* %ebx: no swapgs flag */
14179 CFI_ENDPROC
14180-END(\sym)
14181+ENDPROC(\sym)
14182 .endm
14183
14184 /* error code is on the stack already */
14185@@ -1089,13 +1447,23 @@ ENTRY(\sym)
14186 call save_paranoid
14187 DEFAULT_FRAME 0
14188 TRACE_IRQS_OFF
14189+#ifdef CONFIG_PAX_MEMORY_UDEREF
14190+ testb $3, CS(%rsp)
14191+ jnz 1f
14192+ pax_enter_kernel
14193+ jmp 2f
14194+1: pax_enter_kernel_user
14195+2:
14196+#else
14197+ pax_enter_kernel
14198+#endif
14199 movq %rsp,%rdi /* pt_regs pointer */
14200 movq ORIG_RAX(%rsp),%rsi /* get error code */
14201 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14202 call \do_sym
14203 jmp paranoid_exit /* %ebx: no swapgs flag */
14204 CFI_ENDPROC
14205-END(\sym)
14206+ENDPROC(\sym)
14207 .endm
14208
14209 zeroentry divide_error do_divide_error
14210@@ -1125,9 +1493,10 @@ gs_change:
14211 2: mfence /* workaround */
14212 SWAPGS
14213 popfq_cfi
14214+ pax_force_retaddr
14215 ret
14216 CFI_ENDPROC
14217-END(native_load_gs_index)
14218+ENDPROC(native_load_gs_index)
14219
14220 .section __ex_table,"a"
14221 .align 8
14222@@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
14223 * Here we are in the child and the registers are set as they were
14224 * at kernel_thread() invocation in the parent.
14225 */
14226+ pax_force_fptr %rsi
14227 call *%rsi
14228 # exit
14229 mov %eax, %edi
14230 call do_exit
14231 ud2 # padding for call trace
14232 CFI_ENDPROC
14233-END(kernel_thread_helper)
14234+ENDPROC(kernel_thread_helper)
14235
14236 /*
14237 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14238@@ -1182,11 +1552,11 @@ ENTRY(kernel_execve)
14239 RESTORE_REST
14240 testq %rax,%rax
14241 je int_ret_from_sys_call
14242- RESTORE_ARGS
14243 UNFAKE_STACK_FRAME
14244+ pax_force_retaddr
14245 ret
14246 CFI_ENDPROC
14247-END(kernel_execve)
14248+ENDPROC(kernel_execve)
14249
14250 /* Call softirq on interrupt stack. Interrupts are off. */
14251 ENTRY(call_softirq)
14252@@ -1204,9 +1574,10 @@ ENTRY(call_softirq)
14253 CFI_DEF_CFA_REGISTER rsp
14254 CFI_ADJUST_CFA_OFFSET -8
14255 decl PER_CPU_VAR(irq_count)
14256+ pax_force_retaddr
14257 ret
14258 CFI_ENDPROC
14259-END(call_softirq)
14260+ENDPROC(call_softirq)
14261
14262 #ifdef CONFIG_XEN
14263 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14264@@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14265 decl PER_CPU_VAR(irq_count)
14266 jmp error_exit
14267 CFI_ENDPROC
14268-END(xen_do_hypervisor_callback)
14269+ENDPROC(xen_do_hypervisor_callback)
14270
14271 /*
14272 * Hypervisor uses this for application faults while it executes.
14273@@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback)
14274 SAVE_ALL
14275 jmp error_exit
14276 CFI_ENDPROC
14277-END(xen_failsafe_callback)
14278+ENDPROC(xen_failsafe_callback)
14279
14280 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14281 xen_hvm_callback_vector xen_evtchn_do_upcall
14282@@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit)
14283 TRACE_IRQS_OFF
14284 testl %ebx,%ebx /* swapgs needed? */
14285 jnz paranoid_restore
14286- testl $3,CS(%rsp)
14287+ testb $3,CS(%rsp)
14288 jnz paranoid_userspace
14289+#ifdef CONFIG_PAX_MEMORY_UDEREF
14290+ pax_exit_kernel
14291+ TRACE_IRQS_IRETQ 0
14292+ SWAPGS_UNSAFE_STACK
14293+ RESTORE_ALL 8
14294+ pax_force_retaddr_bts
14295+ jmp irq_return
14296+#endif
14297 paranoid_swapgs:
14298+#ifdef CONFIG_PAX_MEMORY_UDEREF
14299+ pax_exit_kernel_user
14300+#else
14301+ pax_exit_kernel
14302+#endif
14303 TRACE_IRQS_IRETQ 0
14304 SWAPGS_UNSAFE_STACK
14305 RESTORE_ALL 8
14306 jmp irq_return
14307 paranoid_restore:
14308+ pax_exit_kernel
14309 TRACE_IRQS_IRETQ 0
14310 RESTORE_ALL 8
14311+ pax_force_retaddr_bts
14312 jmp irq_return
14313 paranoid_userspace:
14314 GET_THREAD_INFO(%rcx)
14315@@ -1390,7 +1776,7 @@ paranoid_schedule:
14316 TRACE_IRQS_OFF
14317 jmp paranoid_userspace
14318 CFI_ENDPROC
14319-END(paranoid_exit)
14320+ENDPROC(paranoid_exit)
14321
14322 /*
14323 * Exception entry point. This expects an error code/orig_rax on the stack.
14324@@ -1417,12 +1803,13 @@ ENTRY(error_entry)
14325 movq_cfi r14, R14+8
14326 movq_cfi r15, R15+8
14327 xorl %ebx,%ebx
14328- testl $3,CS+8(%rsp)
14329+ testb $3,CS+8(%rsp)
14330 je error_kernelspace
14331 error_swapgs:
14332 SWAPGS
14333 error_sti:
14334 TRACE_IRQS_OFF
14335+ pax_force_retaddr_bts
14336 ret
14337
14338 /*
14339@@ -1449,7 +1836,7 @@ bstep_iret:
14340 movq %rcx,RIP+8(%rsp)
14341 jmp error_swapgs
14342 CFI_ENDPROC
14343-END(error_entry)
14344+ENDPROC(error_entry)
14345
14346
14347 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14348@@ -1469,7 +1856,7 @@ ENTRY(error_exit)
14349 jnz retint_careful
14350 jmp retint_swapgs
14351 CFI_ENDPROC
14352-END(error_exit)
14353+ENDPROC(error_exit)
14354
14355
14356 /* runs on exception stack */
14357@@ -1481,6 +1868,16 @@ ENTRY(nmi)
14358 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14359 call save_paranoid
14360 DEFAULT_FRAME 0
14361+#ifdef CONFIG_PAX_MEMORY_UDEREF
14362+ testb $3, CS(%rsp)
14363+ jnz 1f
14364+ pax_enter_kernel
14365+ jmp 2f
14366+1: pax_enter_kernel_user
14367+2:
14368+#else
14369+ pax_enter_kernel
14370+#endif
14371 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14372 movq %rsp,%rdi
14373 movq $-1,%rsi
14374@@ -1491,12 +1888,28 @@ ENTRY(nmi)
14375 DISABLE_INTERRUPTS(CLBR_NONE)
14376 testl %ebx,%ebx /* swapgs needed? */
14377 jnz nmi_restore
14378- testl $3,CS(%rsp)
14379+ testb $3,CS(%rsp)
14380 jnz nmi_userspace
14381+#ifdef CONFIG_PAX_MEMORY_UDEREF
14382+ pax_exit_kernel
14383+ SWAPGS_UNSAFE_STACK
14384+ RESTORE_ALL 8
14385+ pax_force_retaddr_bts
14386+ jmp irq_return
14387+#endif
14388 nmi_swapgs:
14389+#ifdef CONFIG_PAX_MEMORY_UDEREF
14390+ pax_exit_kernel_user
14391+#else
14392+ pax_exit_kernel
14393+#endif
14394 SWAPGS_UNSAFE_STACK
14395+ RESTORE_ALL 8
14396+ jmp irq_return
14397 nmi_restore:
14398+ pax_exit_kernel
14399 RESTORE_ALL 8
14400+ pax_force_retaddr_bts
14401 jmp irq_return
14402 nmi_userspace:
14403 GET_THREAD_INFO(%rcx)
14404@@ -1525,14 +1938,14 @@ nmi_schedule:
14405 jmp paranoid_exit
14406 CFI_ENDPROC
14407 #endif
14408-END(nmi)
14409+ENDPROC(nmi)
14410
14411 ENTRY(ignore_sysret)
14412 CFI_STARTPROC
14413 mov $-ENOSYS,%eax
14414 sysret
14415 CFI_ENDPROC
14416-END(ignore_sysret)
14417+ENDPROC(ignore_sysret)
14418
14419 /*
14420 * End of kprobes section
14421diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14422index c9a281f..ce2f317 100644
14423--- a/arch/x86/kernel/ftrace.c
14424+++ b/arch/x86/kernel/ftrace.c
14425@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14426 static const void *mod_code_newcode; /* holds the text to write to the IP */
14427
14428 static unsigned nmi_wait_count;
14429-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14430+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14431
14432 int ftrace_arch_read_dyn_info(char *buf, int size)
14433 {
14434@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14435
14436 r = snprintf(buf, size, "%u %u",
14437 nmi_wait_count,
14438- atomic_read(&nmi_update_count));
14439+ atomic_read_unchecked(&nmi_update_count));
14440 return r;
14441 }
14442
14443@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14444
14445 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14446 smp_rmb();
14447+ pax_open_kernel();
14448 ftrace_mod_code();
14449- atomic_inc(&nmi_update_count);
14450+ pax_close_kernel();
14451+ atomic_inc_unchecked(&nmi_update_count);
14452 }
14453 /* Must have previous changes seen before executions */
14454 smp_mb();
14455@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14456 {
14457 unsigned char replaced[MCOUNT_INSN_SIZE];
14458
14459+ ip = ktla_ktva(ip);
14460+
14461 /*
14462 * Note: Due to modules and __init, code can
14463 * disappear and change, we need to protect against faulting
14464@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14465 unsigned char old[MCOUNT_INSN_SIZE], *new;
14466 int ret;
14467
14468- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14469+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14470 new = ftrace_call_replace(ip, (unsigned long)func);
14471 ret = ftrace_modify_code(ip, old, new);
14472
14473@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14474 {
14475 unsigned char code[MCOUNT_INSN_SIZE];
14476
14477+ ip = ktla_ktva(ip);
14478+
14479 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14480 return -EFAULT;
14481
14482diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14483index 3bb0850..55a56f4 100644
14484--- a/arch/x86/kernel/head32.c
14485+++ b/arch/x86/kernel/head32.c
14486@@ -19,6 +19,7 @@
14487 #include <asm/io_apic.h>
14488 #include <asm/bios_ebda.h>
14489 #include <asm/tlbflush.h>
14490+#include <asm/boot.h>
14491
14492 static void __init i386_default_early_setup(void)
14493 {
14494@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14495 {
14496 memblock_init();
14497
14498- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14499+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14500
14501 #ifdef CONFIG_BLK_DEV_INITRD
14502 /* Reserve INITRD */
14503diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14504index ce0be7c..c41476e 100644
14505--- a/arch/x86/kernel/head_32.S
14506+++ b/arch/x86/kernel/head_32.S
14507@@ -25,6 +25,12 @@
14508 /* Physical address */
14509 #define pa(X) ((X) - __PAGE_OFFSET)
14510
14511+#ifdef CONFIG_PAX_KERNEXEC
14512+#define ta(X) (X)
14513+#else
14514+#define ta(X) ((X) - __PAGE_OFFSET)
14515+#endif
14516+
14517 /*
14518 * References to members of the new_cpu_data structure.
14519 */
14520@@ -54,11 +60,7 @@
14521 * and small than max_low_pfn, otherwise will waste some page table entries
14522 */
14523
14524-#if PTRS_PER_PMD > 1
14525-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14526-#else
14527-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14528-#endif
14529+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14530
14531 /* Number of possible pages in the lowmem region */
14532 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14533@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14534 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14535
14536 /*
14537+ * Real beginning of normal "text" segment
14538+ */
14539+ENTRY(stext)
14540+ENTRY(_stext)
14541+
14542+/*
14543 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14544 * %esi points to the real-mode code as a 32-bit pointer.
14545 * CS and DS must be 4 GB flat segments, but we don't depend on
14546@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14547 * can.
14548 */
14549 __HEAD
14550+
14551+#ifdef CONFIG_PAX_KERNEXEC
14552+ jmp startup_32
14553+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14554+.fill PAGE_SIZE-5,1,0xcc
14555+#endif
14556+
14557 ENTRY(startup_32)
14558 movl pa(stack_start),%ecx
14559
14560@@ -105,6 +120,57 @@ ENTRY(startup_32)
14561 2:
14562 leal -__PAGE_OFFSET(%ecx),%esp
14563
14564+#ifdef CONFIG_SMP
14565+ movl $pa(cpu_gdt_table),%edi
14566+ movl $__per_cpu_load,%eax
14567+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14568+ rorl $16,%eax
14569+ movb %al,__KERNEL_PERCPU + 4(%edi)
14570+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14571+ movl $__per_cpu_end - 1,%eax
14572+ subl $__per_cpu_start,%eax
14573+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14574+#endif
14575+
14576+#ifdef CONFIG_PAX_MEMORY_UDEREF
14577+ movl $NR_CPUS,%ecx
14578+ movl $pa(cpu_gdt_table),%edi
14579+1:
14580+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14581+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14582+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14583+ addl $PAGE_SIZE_asm,%edi
14584+ loop 1b
14585+#endif
14586+
14587+#ifdef CONFIG_PAX_KERNEXEC
14588+ movl $pa(boot_gdt),%edi
14589+ movl $__LOAD_PHYSICAL_ADDR,%eax
14590+ movw %ax,__BOOT_CS + 2(%edi)
14591+ rorl $16,%eax
14592+ movb %al,__BOOT_CS + 4(%edi)
14593+ movb %ah,__BOOT_CS + 7(%edi)
14594+ rorl $16,%eax
14595+
14596+ ljmp $(__BOOT_CS),$1f
14597+1:
14598+
14599+ movl $NR_CPUS,%ecx
14600+ movl $pa(cpu_gdt_table),%edi
14601+ addl $__PAGE_OFFSET,%eax
14602+1:
14603+ movw %ax,__KERNEL_CS + 2(%edi)
14604+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14605+ rorl $16,%eax
14606+ movb %al,__KERNEL_CS + 4(%edi)
14607+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14608+ movb %ah,__KERNEL_CS + 7(%edi)
14609+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14610+ rorl $16,%eax
14611+ addl $PAGE_SIZE_asm,%edi
14612+ loop 1b
14613+#endif
14614+
14615 /*
14616 * Clear BSS first so that there are no surprises...
14617 */
14618@@ -195,8 +261,11 @@ ENTRY(startup_32)
14619 movl %eax, pa(max_pfn_mapped)
14620
14621 /* Do early initialization of the fixmap area */
14622- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14623- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14624+#ifdef CONFIG_COMPAT_VDSO
14625+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14626+#else
14627+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14628+#endif
14629 #else /* Not PAE */
14630
14631 page_pde_offset = (__PAGE_OFFSET >> 20);
14632@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14633 movl %eax, pa(max_pfn_mapped)
14634
14635 /* Do early initialization of the fixmap area */
14636- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14637- movl %eax,pa(initial_page_table+0xffc)
14638+#ifdef CONFIG_COMPAT_VDSO
14639+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14640+#else
14641+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14642+#endif
14643 #endif
14644
14645 #ifdef CONFIG_PARAVIRT
14646@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14647 cmpl $num_subarch_entries, %eax
14648 jae bad_subarch
14649
14650- movl pa(subarch_entries)(,%eax,4), %eax
14651- subl $__PAGE_OFFSET, %eax
14652- jmp *%eax
14653+ jmp *pa(subarch_entries)(,%eax,4)
14654
14655 bad_subarch:
14656 WEAK(lguest_entry)
14657@@ -255,10 +325,10 @@ WEAK(xen_entry)
14658 __INITDATA
14659
14660 subarch_entries:
14661- .long default_entry /* normal x86/PC */
14662- .long lguest_entry /* lguest hypervisor */
14663- .long xen_entry /* Xen hypervisor */
14664- .long default_entry /* Moorestown MID */
14665+ .long ta(default_entry) /* normal x86/PC */
14666+ .long ta(lguest_entry) /* lguest hypervisor */
14667+ .long ta(xen_entry) /* Xen hypervisor */
14668+ .long ta(default_entry) /* Moorestown MID */
14669 num_subarch_entries = (. - subarch_entries) / 4
14670 .previous
14671 #else
14672@@ -312,6 +382,7 @@ default_entry:
14673 orl %edx,%eax
14674 movl %eax,%cr4
14675
14676+#ifdef CONFIG_X86_PAE
14677 testb $X86_CR4_PAE, %al # check if PAE is enabled
14678 jz 6f
14679
14680@@ -340,6 +411,9 @@ default_entry:
14681 /* Make changes effective */
14682 wrmsr
14683
14684+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14685+#endif
14686+
14687 6:
14688
14689 /*
14690@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14691 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14692 movl %eax,%ss # after changing gdt.
14693
14694- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14695+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14696 movl %eax,%ds
14697 movl %eax,%es
14698
14699@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14700 */
14701 cmpb $0,ready
14702 jne 1f
14703- movl $gdt_page,%eax
14704+ movl $cpu_gdt_table,%eax
14705 movl $stack_canary,%ecx
14706+#ifdef CONFIG_SMP
14707+ addl $__per_cpu_load,%ecx
14708+#endif
14709 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14710 shrl $16, %ecx
14711 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14712 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14713 1:
14714-#endif
14715 movl $(__KERNEL_STACK_CANARY),%eax
14716+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14717+ movl $(__USER_DS),%eax
14718+#else
14719+ xorl %eax,%eax
14720+#endif
14721 movl %eax,%gs
14722
14723 xorl %eax,%eax # Clear LDT
14724@@ -558,22 +639,22 @@ early_page_fault:
14725 jmp early_fault
14726
14727 early_fault:
14728- cld
14729 #ifdef CONFIG_PRINTK
14730+ cmpl $1,%ss:early_recursion_flag
14731+ je hlt_loop
14732+ incl %ss:early_recursion_flag
14733+ cld
14734 pusha
14735 movl $(__KERNEL_DS),%eax
14736 movl %eax,%ds
14737 movl %eax,%es
14738- cmpl $2,early_recursion_flag
14739- je hlt_loop
14740- incl early_recursion_flag
14741 movl %cr2,%eax
14742 pushl %eax
14743 pushl %edx /* trapno */
14744 pushl $fault_msg
14745 call printk
14746+; call dump_stack
14747 #endif
14748- call dump_stack
14749 hlt_loop:
14750 hlt
14751 jmp hlt_loop
14752@@ -581,8 +662,11 @@ hlt_loop:
14753 /* This is the default interrupt "handler" :-) */
14754 ALIGN
14755 ignore_int:
14756- cld
14757 #ifdef CONFIG_PRINTK
14758+ cmpl $2,%ss:early_recursion_flag
14759+ je hlt_loop
14760+ incl %ss:early_recursion_flag
14761+ cld
14762 pushl %eax
14763 pushl %ecx
14764 pushl %edx
14765@@ -591,9 +675,6 @@ ignore_int:
14766 movl $(__KERNEL_DS),%eax
14767 movl %eax,%ds
14768 movl %eax,%es
14769- cmpl $2,early_recursion_flag
14770- je hlt_loop
14771- incl early_recursion_flag
14772 pushl 16(%esp)
14773 pushl 24(%esp)
14774 pushl 32(%esp)
14775@@ -622,29 +703,43 @@ ENTRY(initial_code)
14776 /*
14777 * BSS section
14778 */
14779-__PAGE_ALIGNED_BSS
14780- .align PAGE_SIZE
14781 #ifdef CONFIG_X86_PAE
14782+.section .initial_pg_pmd,"a",@progbits
14783 initial_pg_pmd:
14784 .fill 1024*KPMDS,4,0
14785 #else
14786+.section .initial_page_table,"a",@progbits
14787 ENTRY(initial_page_table)
14788 .fill 1024,4,0
14789 #endif
14790+.section .initial_pg_fixmap,"a",@progbits
14791 initial_pg_fixmap:
14792 .fill 1024,4,0
14793+.section .empty_zero_page,"a",@progbits
14794 ENTRY(empty_zero_page)
14795 .fill 4096,1,0
14796+.section .swapper_pg_dir,"a",@progbits
14797 ENTRY(swapper_pg_dir)
14798+#ifdef CONFIG_X86_PAE
14799+ .fill 4,8,0
14800+#else
14801 .fill 1024,4,0
14802+#endif
14803+
14804+/*
14805+ * The IDT has to be page-aligned to simplify the Pentium
14806+ * F0 0F bug workaround.. We have a special link segment
14807+ * for this.
14808+ */
14809+.section .idt,"a",@progbits
14810+ENTRY(idt_table)
14811+ .fill 256,8,0
14812
14813 /*
14814 * This starts the data section.
14815 */
14816 #ifdef CONFIG_X86_PAE
14817-__PAGE_ALIGNED_DATA
14818- /* Page-aligned for the benefit of paravirt? */
14819- .align PAGE_SIZE
14820+.section .initial_page_table,"a",@progbits
14821 ENTRY(initial_page_table)
14822 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14823 # if KPMDS == 3
14824@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14825 # error "Kernel PMDs should be 1, 2 or 3"
14826 # endif
14827 .align PAGE_SIZE /* needs to be page-sized too */
14828+
14829+#ifdef CONFIG_PAX_PER_CPU_PGD
14830+ENTRY(cpu_pgd)
14831+ .rept NR_CPUS
14832+ .fill 4,8,0
14833+ .endr
14834+#endif
14835+
14836 #endif
14837
14838 .data
14839 .balign 4
14840 ENTRY(stack_start)
14841- .long init_thread_union+THREAD_SIZE
14842+ .long init_thread_union+THREAD_SIZE-8
14843
14844+ready: .byte 0
14845+
14846+.section .rodata,"a",@progbits
14847 early_recursion_flag:
14848 .long 0
14849
14850-ready: .byte 0
14851-
14852 int_msg:
14853 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14854
14855@@ -707,7 +811,7 @@ fault_msg:
14856 .word 0 # 32 bit align gdt_desc.address
14857 boot_gdt_descr:
14858 .word __BOOT_DS+7
14859- .long boot_gdt - __PAGE_OFFSET
14860+ .long pa(boot_gdt)
14861
14862 .word 0 # 32-bit align idt_desc.address
14863 idt_descr:
14864@@ -718,7 +822,7 @@ idt_descr:
14865 .word 0 # 32 bit align gdt_desc.address
14866 ENTRY(early_gdt_descr)
14867 .word GDT_ENTRIES*8-1
14868- .long gdt_page /* Overwritten for secondary CPUs */
14869+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14870
14871 /*
14872 * The boot_gdt must mirror the equivalent in setup.S and is
14873@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14874 .align L1_CACHE_BYTES
14875 ENTRY(boot_gdt)
14876 .fill GDT_ENTRY_BOOT_CS,8,0
14877- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14878- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14879+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14880+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14881+
14882+ .align PAGE_SIZE_asm
14883+ENTRY(cpu_gdt_table)
14884+ .rept NR_CPUS
14885+ .quad 0x0000000000000000 /* NULL descriptor */
14886+ .quad 0x0000000000000000 /* 0x0b reserved */
14887+ .quad 0x0000000000000000 /* 0x13 reserved */
14888+ .quad 0x0000000000000000 /* 0x1b reserved */
14889+
14890+#ifdef CONFIG_PAX_KERNEXEC
14891+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14892+#else
14893+ .quad 0x0000000000000000 /* 0x20 unused */
14894+#endif
14895+
14896+ .quad 0x0000000000000000 /* 0x28 unused */
14897+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14898+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14899+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14900+ .quad 0x0000000000000000 /* 0x4b reserved */
14901+ .quad 0x0000000000000000 /* 0x53 reserved */
14902+ .quad 0x0000000000000000 /* 0x5b reserved */
14903+
14904+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14905+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14906+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14907+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14908+
14909+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14910+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14911+
14912+ /*
14913+ * Segments used for calling PnP BIOS have byte granularity.
14914+ * The code segments and data segments have fixed 64k limits,
14915+ * the transfer segment sizes are set at run time.
14916+ */
14917+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14918+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14919+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14920+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14921+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14922+
14923+ /*
14924+ * The APM segments have byte granularity and their bases
14925+ * are set at run time. All have 64k limits.
14926+ */
14927+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14928+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14929+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14930+
14931+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14932+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14933+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14934+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14935+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14936+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14937+
14938+ /* Be sure this is zeroed to avoid false validations in Xen */
14939+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14940+ .endr
14941diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14942index e11e394..9aebc5d 100644
14943--- a/arch/x86/kernel/head_64.S
14944+++ b/arch/x86/kernel/head_64.S
14945@@ -19,6 +19,8 @@
14946 #include <asm/cache.h>
14947 #include <asm/processor-flags.h>
14948 #include <asm/percpu.h>
14949+#include <asm/cpufeature.h>
14950+#include <asm/alternative-asm.h>
14951
14952 #ifdef CONFIG_PARAVIRT
14953 #include <asm/asm-offsets.h>
14954@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
14955 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14956 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14957 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14958+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14959+L3_VMALLOC_START = pud_index(VMALLOC_START)
14960+L4_VMALLOC_END = pgd_index(VMALLOC_END)
14961+L3_VMALLOC_END = pud_index(VMALLOC_END)
14962+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14963+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14964
14965 .text
14966 __HEAD
14967@@ -85,35 +93,23 @@ startup_64:
14968 */
14969 addq %rbp, init_level4_pgt + 0(%rip)
14970 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14971+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14972+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
14973+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14974 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14975
14976 addq %rbp, level3_ident_pgt + 0(%rip)
14977+#ifndef CONFIG_XEN
14978+ addq %rbp, level3_ident_pgt + 8(%rip)
14979+#endif
14980
14981- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14982- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14983+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14984+
14985+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14986+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14987
14988 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14989-
14990- /* Add an Identity mapping if I am above 1G */
14991- leaq _text(%rip), %rdi
14992- andq $PMD_PAGE_MASK, %rdi
14993-
14994- movq %rdi, %rax
14995- shrq $PUD_SHIFT, %rax
14996- andq $(PTRS_PER_PUD - 1), %rax
14997- jz ident_complete
14998-
14999- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15000- leaq level3_ident_pgt(%rip), %rbx
15001- movq %rdx, 0(%rbx, %rax, 8)
15002-
15003- movq %rdi, %rax
15004- shrq $PMD_SHIFT, %rax
15005- andq $(PTRS_PER_PMD - 1), %rax
15006- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15007- leaq level2_spare_pgt(%rip), %rbx
15008- movq %rdx, 0(%rbx, %rax, 8)
15009-ident_complete:
15010+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15011
15012 /*
15013 * Fixup the kernel text+data virtual addresses. Note that
15014@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15015 * after the boot processor executes this code.
15016 */
15017
15018- /* Enable PAE mode and PGE */
15019- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15020+ /* Enable PAE mode and PSE/PGE */
15021+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15022 movq %rax, %cr4
15023
15024 /* Setup early boot stage 4 level pagetables. */
15025@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15026 movl $MSR_EFER, %ecx
15027 rdmsr
15028 btsl $_EFER_SCE, %eax /* Enable System Call */
15029- btl $20,%edi /* No Execute supported? */
15030+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15031 jnc 1f
15032 btsl $_EFER_NX, %eax
15033+ leaq init_level4_pgt(%rip), %rdi
15034+#ifndef CONFIG_EFI
15035+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15036+#endif
15037+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15038+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15039+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15040+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15041 1: wrmsr /* Make changes effective */
15042
15043 /* Setup cr0 */
15044@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15045 * jump. In addition we need to ensure %cs is set so we make this
15046 * a far return.
15047 */
15048+ pax_set_fptr_mask
15049 movq initial_code(%rip),%rax
15050 pushq $0 # fake return address to stop unwinder
15051 pushq $__KERNEL_CS # set correct cs
15052@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15053 bad_address:
15054 jmp bad_address
15055
15056- .section ".init.text","ax"
15057+ __INIT
15058 #ifdef CONFIG_EARLY_PRINTK
15059 .globl early_idt_handlers
15060 early_idt_handlers:
15061@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15062 #endif /* EARLY_PRINTK */
15063 1: hlt
15064 jmp 1b
15065+ .previous
15066
15067 #ifdef CONFIG_EARLY_PRINTK
15068+ __INITDATA
15069 early_recursion_flag:
15070 .long 0
15071+ .previous
15072
15073+ .section .rodata,"a",@progbits
15074 early_idt_msg:
15075 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15076 early_idt_ripmsg:
15077 .asciz "RIP %s\n"
15078+ .previous
15079 #endif /* CONFIG_EARLY_PRINTK */
15080- .previous
15081
15082+ .section .rodata,"a",@progbits
15083 #define NEXT_PAGE(name) \
15084 .balign PAGE_SIZE; \
15085 ENTRY(name)
15086@@ -338,7 +348,6 @@ ENTRY(name)
15087 i = i + 1 ; \
15088 .endr
15089
15090- .data
15091 /*
15092 * This default setting generates an ident mapping at address 0x100000
15093 * and a mapping for the kernel that precisely maps virtual address
15094@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15096 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15097 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15098+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15099+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15100+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15101+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15102+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15103+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15104 .org init_level4_pgt + L4_START_KERNEL*8, 0
15105 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15106 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15107
15108+#ifdef CONFIG_PAX_PER_CPU_PGD
15109+NEXT_PAGE(cpu_pgd)
15110+ .rept NR_CPUS
15111+ .fill 512,8,0
15112+ .endr
15113+#endif
15114+
15115 NEXT_PAGE(level3_ident_pgt)
15116 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15117+#ifdef CONFIG_XEN
15118 .fill 511,8,0
15119+#else
15120+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15121+ .fill 510,8,0
15122+#endif
15123+
15124+NEXT_PAGE(level3_vmalloc_start_pgt)
15125+ .fill 512,8,0
15126+
15127+NEXT_PAGE(level3_vmalloc_end_pgt)
15128+ .fill 512,8,0
15129+
15130+NEXT_PAGE(level3_vmemmap_pgt)
15131+ .fill L3_VMEMMAP_START,8,0
15132+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15133
15134 NEXT_PAGE(level3_kernel_pgt)
15135 .fill L3_START_KERNEL,8,0
15136@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15137 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15138 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15139
15140+NEXT_PAGE(level2_vmemmap_pgt)
15141+ .fill 512,8,0
15142+
15143 NEXT_PAGE(level2_fixmap_pgt)
15144- .fill 506,8,0
15145- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15146- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15147- .fill 5,8,0
15148+ .fill 507,8,0
15149+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15150+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15151+ .fill 4,8,0
15152
15153-NEXT_PAGE(level1_fixmap_pgt)
15154+NEXT_PAGE(level1_vsyscall_pgt)
15155 .fill 512,8,0
15156
15157-NEXT_PAGE(level2_ident_pgt)
15158- /* Since I easily can, map the first 1G.
15159+ /* Since I easily can, map the first 2G.
15160 * Don't set NX because code runs from these pages.
15161 */
15162- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15163+NEXT_PAGE(level2_ident_pgt)
15164+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15165
15166 NEXT_PAGE(level2_kernel_pgt)
15167 /*
15168@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15169 * If you want to increase this then increase MODULES_VADDR
15170 * too.)
15171 */
15172- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15173- KERNEL_IMAGE_SIZE/PMD_SIZE)
15174-
15175-NEXT_PAGE(level2_spare_pgt)
15176- .fill 512, 8, 0
15177+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15178
15179 #undef PMDS
15180 #undef NEXT_PAGE
15181
15182- .data
15183+ .align PAGE_SIZE
15184+ENTRY(cpu_gdt_table)
15185+ .rept NR_CPUS
15186+ .quad 0x0000000000000000 /* NULL descriptor */
15187+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15188+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15189+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15190+ .quad 0x00cffb000000ffff /* __USER32_CS */
15191+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15192+ .quad 0x00affb000000ffff /* __USER_CS */
15193+
15194+#ifdef CONFIG_PAX_KERNEXEC
15195+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15196+#else
15197+ .quad 0x0 /* unused */
15198+#endif
15199+
15200+ .quad 0,0 /* TSS */
15201+ .quad 0,0 /* LDT */
15202+ .quad 0,0,0 /* three TLS descriptors */
15203+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15204+ /* asm/segment.h:GDT_ENTRIES must match this */
15205+
15206+ /* zero the remaining page */
15207+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15208+ .endr
15209+
15210 .align 16
15211 .globl early_gdt_descr
15212 early_gdt_descr:
15213 .word GDT_ENTRIES*8-1
15214 early_gdt_descr_base:
15215- .quad INIT_PER_CPU_VAR(gdt_page)
15216+ .quad cpu_gdt_table
15217
15218 ENTRY(phys_base)
15219 /* This must match the first entry in level2_kernel_pgt */
15220 .quad 0x0000000000000000
15221
15222 #include "../../x86/xen/xen-head.S"
15223-
15224- .section .bss, "aw", @nobits
15225+
15226+ .section .rodata,"a",@progbits
15227 .align L1_CACHE_BYTES
15228 ENTRY(idt_table)
15229- .skip IDT_ENTRIES * 16
15230+ .fill 512,8,0
15231
15232 __PAGE_ALIGNED_BSS
15233 .align PAGE_SIZE
15234diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15235index 9c3bd4a..e1d9b35 100644
15236--- a/arch/x86/kernel/i386_ksyms_32.c
15237+++ b/arch/x86/kernel/i386_ksyms_32.c
15238@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15239 EXPORT_SYMBOL(cmpxchg8b_emu);
15240 #endif
15241
15242+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15243+
15244 /* Networking helper routines. */
15245 EXPORT_SYMBOL(csum_partial_copy_generic);
15246+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15247+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15248
15249 EXPORT_SYMBOL(__get_user_1);
15250 EXPORT_SYMBOL(__get_user_2);
15251@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15252
15253 EXPORT_SYMBOL(csum_partial);
15254 EXPORT_SYMBOL(empty_zero_page);
15255+
15256+#ifdef CONFIG_PAX_KERNEXEC
15257+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15258+#endif
15259diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15260index 6104852..6114160 100644
15261--- a/arch/x86/kernel/i8259.c
15262+++ b/arch/x86/kernel/i8259.c
15263@@ -210,7 +210,7 @@ spurious_8259A_irq:
15264 "spurious 8259A interrupt: IRQ%d.\n", irq);
15265 spurious_irq_mask |= irqmask;
15266 }
15267- atomic_inc(&irq_err_count);
15268+ atomic_inc_unchecked(&irq_err_count);
15269 /*
15270 * Theoretically we do not have to handle this IRQ,
15271 * but in Linux this does not cause problems and is
15272diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15273index 43e9ccf..44ccf6f 100644
15274--- a/arch/x86/kernel/init_task.c
15275+++ b/arch/x86/kernel/init_task.c
15276@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15277 * way process stacks are handled. This is done by having a special
15278 * "init_task" linker map entry..
15279 */
15280-union thread_union init_thread_union __init_task_data =
15281- { INIT_THREAD_INFO(init_task) };
15282+union thread_union init_thread_union __init_task_data;
15283
15284 /*
15285 * Initial task structure.
15286@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15287 * section. Since TSS's are completely CPU-local, we want them
15288 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15289 */
15290-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15291-
15292+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15293+EXPORT_SYMBOL(init_tss);
15294diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15295index 8c96897..be66bfa 100644
15296--- a/arch/x86/kernel/ioport.c
15297+++ b/arch/x86/kernel/ioport.c
15298@@ -6,6 +6,7 @@
15299 #include <linux/sched.h>
15300 #include <linux/kernel.h>
15301 #include <linux/capability.h>
15302+#include <linux/security.h>
15303 #include <linux/errno.h>
15304 #include <linux/types.h>
15305 #include <linux/ioport.h>
15306@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15307
15308 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15309 return -EINVAL;
15310+#ifdef CONFIG_GRKERNSEC_IO
15311+ if (turn_on && grsec_disable_privio) {
15312+ gr_handle_ioperm();
15313+ return -EPERM;
15314+ }
15315+#endif
15316 if (turn_on && !capable(CAP_SYS_RAWIO))
15317 return -EPERM;
15318
15319@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15320 * because the ->io_bitmap_max value must match the bitmap
15321 * contents:
15322 */
15323- tss = &per_cpu(init_tss, get_cpu());
15324+ tss = init_tss + get_cpu();
15325
15326 if (turn_on)
15327 bitmap_clear(t->io_bitmap_ptr, from, num);
15328@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15329 return -EINVAL;
15330 /* Trying to gain more privileges? */
15331 if (level > old) {
15332+#ifdef CONFIG_GRKERNSEC_IO
15333+ if (grsec_disable_privio) {
15334+ gr_handle_iopl();
15335+ return -EPERM;
15336+ }
15337+#endif
15338 if (!capable(CAP_SYS_RAWIO))
15339 return -EPERM;
15340 }
15341diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15342index 6c0802e..bea25ae 100644
15343--- a/arch/x86/kernel/irq.c
15344+++ b/arch/x86/kernel/irq.c
15345@@ -17,7 +17,7 @@
15346 #include <asm/mce.h>
15347 #include <asm/hw_irq.h>
15348
15349-atomic_t irq_err_count;
15350+atomic_unchecked_t irq_err_count;
15351
15352 /* Function pointer for generic interrupt vector handling */
15353 void (*x86_platform_ipi_callback)(void) = NULL;
15354@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15355 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15356 seq_printf(p, " Machine check polls\n");
15357 #endif
15358- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15359+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15360 #if defined(CONFIG_X86_IO_APIC)
15361- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15362+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15363 #endif
15364 return 0;
15365 }
15366@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15367
15368 u64 arch_irq_stat(void)
15369 {
15370- u64 sum = atomic_read(&irq_err_count);
15371+ u64 sum = atomic_read_unchecked(&irq_err_count);
15372
15373 #ifdef CONFIG_X86_IO_APIC
15374- sum += atomic_read(&irq_mis_count);
15375+ sum += atomic_read_unchecked(&irq_mis_count);
15376 #endif
15377 return sum;
15378 }
15379diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15380index 7209070..cbcd71a 100644
15381--- a/arch/x86/kernel/irq_32.c
15382+++ b/arch/x86/kernel/irq_32.c
15383@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15384 __asm__ __volatile__("andl %%esp,%0" :
15385 "=r" (sp) : "0" (THREAD_SIZE - 1));
15386
15387- return sp < (sizeof(struct thread_info) + STACK_WARN);
15388+ return sp < STACK_WARN;
15389 }
15390
15391 static void print_stack_overflow(void)
15392@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15393 * per-CPU IRQ handling contexts (thread information and stack)
15394 */
15395 union irq_ctx {
15396- struct thread_info tinfo;
15397- u32 stack[THREAD_SIZE/sizeof(u32)];
15398+ unsigned long previous_esp;
15399+ u32 stack[THREAD_SIZE/sizeof(u32)];
15400 } __attribute__((aligned(THREAD_SIZE)));
15401
15402 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15403@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15404 static inline int
15405 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15406 {
15407- union irq_ctx *curctx, *irqctx;
15408+ union irq_ctx *irqctx;
15409 u32 *isp, arg1, arg2;
15410
15411- curctx = (union irq_ctx *) current_thread_info();
15412 irqctx = __this_cpu_read(hardirq_ctx);
15413
15414 /*
15415@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15416 * handler) we can't do that and just have to keep using the
15417 * current stack (which is the irq stack already after all)
15418 */
15419- if (unlikely(curctx == irqctx))
15420+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15421 return 0;
15422
15423 /* build the stack frame on the IRQ stack */
15424- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15425- irqctx->tinfo.task = curctx->tinfo.task;
15426- irqctx->tinfo.previous_esp = current_stack_pointer;
15427+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15428+ irqctx->previous_esp = current_stack_pointer;
15429
15430- /*
15431- * Copy the softirq bits in preempt_count so that the
15432- * softirq checks work in the hardirq context.
15433- */
15434- irqctx->tinfo.preempt_count =
15435- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15436- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15437+#ifdef CONFIG_PAX_MEMORY_UDEREF
15438+ __set_fs(MAKE_MM_SEG(0));
15439+#endif
15440
15441 if (unlikely(overflow))
15442 call_on_stack(print_stack_overflow, isp);
15443@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15444 : "0" (irq), "1" (desc), "2" (isp),
15445 "D" (desc->handle_irq)
15446 : "memory", "cc", "ecx");
15447+
15448+#ifdef CONFIG_PAX_MEMORY_UDEREF
15449+ __set_fs(current_thread_info()->addr_limit);
15450+#endif
15451+
15452 return 1;
15453 }
15454
15455@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15456 */
15457 void __cpuinit irq_ctx_init(int cpu)
15458 {
15459- union irq_ctx *irqctx;
15460-
15461 if (per_cpu(hardirq_ctx, cpu))
15462 return;
15463
15464- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15465- THREAD_FLAGS,
15466- THREAD_ORDER));
15467- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15468- irqctx->tinfo.cpu = cpu;
15469- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15470- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15471-
15472- per_cpu(hardirq_ctx, cpu) = irqctx;
15473-
15474- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15475- THREAD_FLAGS,
15476- THREAD_ORDER));
15477- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15478- irqctx->tinfo.cpu = cpu;
15479- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15480-
15481- per_cpu(softirq_ctx, cpu) = irqctx;
15482+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15483+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15484
15485 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15486 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15487@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15488 asmlinkage void do_softirq(void)
15489 {
15490 unsigned long flags;
15491- struct thread_info *curctx;
15492 union irq_ctx *irqctx;
15493 u32 *isp;
15494
15495@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15496 local_irq_save(flags);
15497
15498 if (local_softirq_pending()) {
15499- curctx = current_thread_info();
15500 irqctx = __this_cpu_read(softirq_ctx);
15501- irqctx->tinfo.task = curctx->task;
15502- irqctx->tinfo.previous_esp = current_stack_pointer;
15503+ irqctx->previous_esp = current_stack_pointer;
15504
15505 /* build the stack frame on the softirq stack */
15506- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15507+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15508+
15509+#ifdef CONFIG_PAX_MEMORY_UDEREF
15510+ __set_fs(MAKE_MM_SEG(0));
15511+#endif
15512
15513 call_on_stack(__do_softirq, isp);
15514+
15515+#ifdef CONFIG_PAX_MEMORY_UDEREF
15516+ __set_fs(current_thread_info()->addr_limit);
15517+#endif
15518+
15519 /*
15520 * Shouldn't happen, we returned above if in_interrupt():
15521 */
15522diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15523index 00354d4..187ae44 100644
15524--- a/arch/x86/kernel/kgdb.c
15525+++ b/arch/x86/kernel/kgdb.c
15526@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15527 #ifdef CONFIG_X86_32
15528 switch (regno) {
15529 case GDB_SS:
15530- if (!user_mode_vm(regs))
15531+ if (!user_mode(regs))
15532 *(unsigned long *)mem = __KERNEL_DS;
15533 break;
15534 case GDB_SP:
15535- if (!user_mode_vm(regs))
15536+ if (!user_mode(regs))
15537 *(unsigned long *)mem = kernel_stack_pointer(regs);
15538 break;
15539 case GDB_GS:
15540@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15541 case 'k':
15542 /* clear the trace bit */
15543 linux_regs->flags &= ~X86_EFLAGS_TF;
15544- atomic_set(&kgdb_cpu_doing_single_step, -1);
15545+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15546
15547 /* set the trace bit if we're stepping */
15548 if (remcomInBuffer[0] == 's') {
15549 linux_regs->flags |= X86_EFLAGS_TF;
15550- atomic_set(&kgdb_cpu_doing_single_step,
15551+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15552 raw_smp_processor_id());
15553 }
15554
15555@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15556 return NOTIFY_DONE;
15557
15558 case DIE_DEBUG:
15559- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15560+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15561 if (user_mode(regs))
15562 return single_step_cont(regs, args);
15563 break;
15564diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15565index 794bc95..c6e29e9 100644
15566--- a/arch/x86/kernel/kprobes.c
15567+++ b/arch/x86/kernel/kprobes.c
15568@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15569 } __attribute__((packed)) *insn;
15570
15571 insn = (struct __arch_relative_insn *)from;
15572+
15573+ pax_open_kernel();
15574 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15575 insn->op = op;
15576+ pax_close_kernel();
15577 }
15578
15579 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15580@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15581 kprobe_opcode_t opcode;
15582 kprobe_opcode_t *orig_opcodes = opcodes;
15583
15584- if (search_exception_tables((unsigned long)opcodes))
15585+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15586 return 0; /* Page fault may occur on this address. */
15587
15588 retry:
15589@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15590 }
15591 }
15592 insn_get_length(&insn);
15593+ pax_open_kernel();
15594 memcpy(dest, insn.kaddr, insn.length);
15595+ pax_close_kernel();
15596
15597 #ifdef CONFIG_X86_64
15598 if (insn_rip_relative(&insn)) {
15599@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15600 (u8 *) dest;
15601 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15602 disp = (u8 *) dest + insn_offset_displacement(&insn);
15603+ pax_open_kernel();
15604 *(s32 *) disp = (s32) newdisp;
15605+ pax_close_kernel();
15606 }
15607 #endif
15608 return insn.length;
15609@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15610 */
15611 __copy_instruction(p->ainsn.insn, p->addr, 0);
15612
15613- if (can_boost(p->addr))
15614+ if (can_boost(ktla_ktva(p->addr)))
15615 p->ainsn.boostable = 0;
15616 else
15617 p->ainsn.boostable = -1;
15618
15619- p->opcode = *p->addr;
15620+ p->opcode = *(ktla_ktva(p->addr));
15621 }
15622
15623 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15624@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15625 * nor set current_kprobe, because it doesn't use single
15626 * stepping.
15627 */
15628- regs->ip = (unsigned long)p->ainsn.insn;
15629+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15630 preempt_enable_no_resched();
15631 return;
15632 }
15633@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15634 if (p->opcode == BREAKPOINT_INSTRUCTION)
15635 regs->ip = (unsigned long)p->addr;
15636 else
15637- regs->ip = (unsigned long)p->ainsn.insn;
15638+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15639 }
15640
15641 /*
15642@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15643 setup_singlestep(p, regs, kcb, 0);
15644 return 1;
15645 }
15646- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15647+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15648 /*
15649 * The breakpoint instruction was removed right
15650 * after we hit it. Another cpu has removed
15651@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15652 " movq %rax, 152(%rsp)\n"
15653 RESTORE_REGS_STRING
15654 " popfq\n"
15655+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15656+ " btsq $63,(%rsp)\n"
15657+#endif
15658 #else
15659 " pushf\n"
15660 SAVE_REGS_STRING
15661@@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15662 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15663 {
15664 unsigned long *tos = stack_addr(regs);
15665- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15666+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15667 unsigned long orig_ip = (unsigned long)p->addr;
15668 kprobe_opcode_t *insn = p->ainsn.insn;
15669
15670@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15671 struct die_args *args = data;
15672 int ret = NOTIFY_DONE;
15673
15674- if (args->regs && user_mode_vm(args->regs))
15675+ if (args->regs && user_mode(args->regs))
15676 return ret;
15677
15678 switch (val) {
15679@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15680 * Verify if the address gap is in 2GB range, because this uses
15681 * a relative jump.
15682 */
15683- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15684+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15685 if (abs(rel) > 0x7fffffff)
15686 return -ERANGE;
15687
15688@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15689 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15690
15691 /* Set probe function call */
15692- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15693+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15694
15695 /* Set returning jmp instruction at the tail of out-of-line buffer */
15696 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15697- (u8 *)op->kp.addr + op->optinsn.size);
15698+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15699
15700 flush_icache_range((unsigned long) buf,
15701 (unsigned long) buf + TMPL_END_IDX +
15702@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15703 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15704
15705 /* Backup instructions which will be replaced by jump address */
15706- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15707+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15708 RELATIVE_ADDR_SIZE);
15709
15710 insn_buf[0] = RELATIVEJUMP_OPCODE;
15711diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15712index a9c2116..a52d4fc 100644
15713--- a/arch/x86/kernel/kvm.c
15714+++ b/arch/x86/kernel/kvm.c
15715@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15716 pv_mmu_ops.set_pud = kvm_set_pud;
15717 #if PAGETABLE_LEVELS == 4
15718 pv_mmu_ops.set_pgd = kvm_set_pgd;
15719+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15720 #endif
15721 #endif
15722 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15723diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15724index ea69726..604d066 100644
15725--- a/arch/x86/kernel/ldt.c
15726+++ b/arch/x86/kernel/ldt.c
15727@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15728 if (reload) {
15729 #ifdef CONFIG_SMP
15730 preempt_disable();
15731- load_LDT(pc);
15732+ load_LDT_nolock(pc);
15733 if (!cpumask_equal(mm_cpumask(current->mm),
15734 cpumask_of(smp_processor_id())))
15735 smp_call_function(flush_ldt, current->mm, 1);
15736 preempt_enable();
15737 #else
15738- load_LDT(pc);
15739+ load_LDT_nolock(pc);
15740 #endif
15741 }
15742 if (oldsize) {
15743@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15744 return err;
15745
15746 for (i = 0; i < old->size; i++)
15747- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15748+ write_ldt_entry(new->ldt, i, old->ldt + i);
15749 return 0;
15750 }
15751
15752@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15753 retval = copy_ldt(&mm->context, &old_mm->context);
15754 mutex_unlock(&old_mm->context.lock);
15755 }
15756+
15757+ if (tsk == current) {
15758+ mm->context.vdso = 0;
15759+
15760+#ifdef CONFIG_X86_32
15761+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15762+ mm->context.user_cs_base = 0UL;
15763+ mm->context.user_cs_limit = ~0UL;
15764+
15765+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15766+ cpus_clear(mm->context.cpu_user_cs_mask);
15767+#endif
15768+
15769+#endif
15770+#endif
15771+
15772+ }
15773+
15774 return retval;
15775 }
15776
15777@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15778 }
15779 }
15780
15781+#ifdef CONFIG_PAX_SEGMEXEC
15782+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15783+ error = -EINVAL;
15784+ goto out_unlock;
15785+ }
15786+#endif
15787+
15788 fill_ldt(&ldt, &ldt_info);
15789 if (oldmode)
15790 ldt.avl = 0;
15791diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15792index a3fa43b..8966f4c 100644
15793--- a/arch/x86/kernel/machine_kexec_32.c
15794+++ b/arch/x86/kernel/machine_kexec_32.c
15795@@ -27,7 +27,7 @@
15796 #include <asm/cacheflush.h>
15797 #include <asm/debugreg.h>
15798
15799-static void set_idt(void *newidt, __u16 limit)
15800+static void set_idt(struct desc_struct *newidt, __u16 limit)
15801 {
15802 struct desc_ptr curidt;
15803
15804@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15805 }
15806
15807
15808-static void set_gdt(void *newgdt, __u16 limit)
15809+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15810 {
15811 struct desc_ptr curgdt;
15812
15813@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15814 }
15815
15816 control_page = page_address(image->control_code_page);
15817- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15818+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15819
15820 relocate_kernel_ptr = control_page;
15821 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15822diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15823index 1a1b606..5c89b55 100644
15824--- a/arch/x86/kernel/microcode_intel.c
15825+++ b/arch/x86/kernel/microcode_intel.c
15826@@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15827
15828 static int get_ucode_user(void *to, const void *from, size_t n)
15829 {
15830- return copy_from_user(to, from, n);
15831+ return copy_from_user(to, (const void __force_user *)from, n);
15832 }
15833
15834 static enum ucode_state
15835 request_microcode_user(int cpu, const void __user *buf, size_t size)
15836 {
15837- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15838+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15839 }
15840
15841 static void microcode_fini_cpu(int cpu)
15842diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15843index 925179f..85bec6c 100644
15844--- a/arch/x86/kernel/module.c
15845+++ b/arch/x86/kernel/module.c
15846@@ -36,15 +36,60 @@
15847 #define DEBUGP(fmt...)
15848 #endif
15849
15850-void *module_alloc(unsigned long size)
15851+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15852 {
15853 if (PAGE_ALIGN(size) > MODULES_LEN)
15854 return NULL;
15855 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15856- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15857+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15858 -1, __builtin_return_address(0));
15859 }
15860
15861+void *module_alloc(unsigned long size)
15862+{
15863+
15864+#ifdef CONFIG_PAX_KERNEXEC
15865+ return __module_alloc(size, PAGE_KERNEL);
15866+#else
15867+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15868+#endif
15869+
15870+}
15871+
15872+#ifdef CONFIG_PAX_KERNEXEC
15873+#ifdef CONFIG_X86_32
15874+void *module_alloc_exec(unsigned long size)
15875+{
15876+ struct vm_struct *area;
15877+
15878+ if (size == 0)
15879+ return NULL;
15880+
15881+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15882+ return area ? area->addr : NULL;
15883+}
15884+EXPORT_SYMBOL(module_alloc_exec);
15885+
15886+void module_free_exec(struct module *mod, void *module_region)
15887+{
15888+ vunmap(module_region);
15889+}
15890+EXPORT_SYMBOL(module_free_exec);
15891+#else
15892+void module_free_exec(struct module *mod, void *module_region)
15893+{
15894+ module_free(mod, module_region);
15895+}
15896+EXPORT_SYMBOL(module_free_exec);
15897+
15898+void *module_alloc_exec(unsigned long size)
15899+{
15900+ return __module_alloc(size, PAGE_KERNEL_RX);
15901+}
15902+EXPORT_SYMBOL(module_alloc_exec);
15903+#endif
15904+#endif
15905+
15906 #ifdef CONFIG_X86_32
15907 int apply_relocate(Elf32_Shdr *sechdrs,
15908 const char *strtab,
15909@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15910 unsigned int i;
15911 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15912 Elf32_Sym *sym;
15913- uint32_t *location;
15914+ uint32_t *plocation, location;
15915
15916 DEBUGP("Applying relocate section %u to %u\n", relsec,
15917 sechdrs[relsec].sh_info);
15918 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15919 /* This is where to make the change */
15920- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15921- + rel[i].r_offset;
15922+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15923+ location = (uint32_t)plocation;
15924+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15925+ plocation = ktla_ktva((void *)plocation);
15926 /* This is the symbol it is referring to. Note that all
15927 undefined symbols have been resolved. */
15928 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15929@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15930 switch (ELF32_R_TYPE(rel[i].r_info)) {
15931 case R_386_32:
15932 /* We add the value into the location given */
15933- *location += sym->st_value;
15934+ pax_open_kernel();
15935+ *plocation += sym->st_value;
15936+ pax_close_kernel();
15937 break;
15938 case R_386_PC32:
15939 /* Add the value, subtract its postition */
15940- *location += sym->st_value - (uint32_t)location;
15941+ pax_open_kernel();
15942+ *plocation += sym->st_value - location;
15943+ pax_close_kernel();
15944 break;
15945 default:
15946 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15947@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
15948 case R_X86_64_NONE:
15949 break;
15950 case R_X86_64_64:
15951+ pax_open_kernel();
15952 *(u64 *)loc = val;
15953+ pax_close_kernel();
15954 break;
15955 case R_X86_64_32:
15956+ pax_open_kernel();
15957 *(u32 *)loc = val;
15958+ pax_close_kernel();
15959 if (val != *(u32 *)loc)
15960 goto overflow;
15961 break;
15962 case R_X86_64_32S:
15963+ pax_open_kernel();
15964 *(s32 *)loc = val;
15965+ pax_close_kernel();
15966 if ((s64)val != *(s32 *)loc)
15967 goto overflow;
15968 break;
15969 case R_X86_64_PC32:
15970 val -= (u64)loc;
15971+ pax_open_kernel();
15972 *(u32 *)loc = val;
15973+ pax_close_kernel();
15974+
15975 #if 0
15976 if ((s64)val != *(s32 *)loc)
15977 goto overflow;
15978diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
15979index 676b8c7..870ba04 100644
15980--- a/arch/x86/kernel/paravirt-spinlocks.c
15981+++ b/arch/x86/kernel/paravirt-spinlocks.c
15982@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
15983 arch_spin_lock(lock);
15984 }
15985
15986-struct pv_lock_ops pv_lock_ops = {
15987+struct pv_lock_ops pv_lock_ops __read_only = {
15988 #ifdef CONFIG_SMP
15989 .spin_is_locked = __ticket_spin_is_locked,
15990 .spin_is_contended = __ticket_spin_is_contended,
15991diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
15992index d90272e..2d54e8e 100644
15993--- a/arch/x86/kernel/paravirt.c
15994+++ b/arch/x86/kernel/paravirt.c
15995@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15996 {
15997 return x;
15998 }
15999+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16000+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16001+#endif
16002
16003 void __init default_banner(void)
16004 {
16005@@ -133,6 +136,9 @@ static void *get_call_destination(u8 type)
16006 .pv_lock_ops = pv_lock_ops,
16007 #endif
16008 };
16009+
16010+ pax_track_stack();
16011+
16012 return *((void **)&tmpl + type);
16013 }
16014
16015@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16016 if (opfunc == NULL)
16017 /* If there's no function, patch it with a ud2a (BUG) */
16018 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16019- else if (opfunc == _paravirt_nop)
16020+ else if (opfunc == (void *)_paravirt_nop)
16021 /* If the operation is a nop, then nop the callsite */
16022 ret = paravirt_patch_nop();
16023
16024 /* identity functions just return their single argument */
16025- else if (opfunc == _paravirt_ident_32)
16026+ else if (opfunc == (void *)_paravirt_ident_32)
16027 ret = paravirt_patch_ident_32(insnbuf, len);
16028- else if (opfunc == _paravirt_ident_64)
16029+ else if (opfunc == (void *)_paravirt_ident_64)
16030 ret = paravirt_patch_ident_64(insnbuf, len);
16031+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16032+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16033+ ret = paravirt_patch_ident_64(insnbuf, len);
16034+#endif
16035
16036 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16037 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16038@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16039 if (insn_len > len || start == NULL)
16040 insn_len = len;
16041 else
16042- memcpy(insnbuf, start, insn_len);
16043+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16044
16045 return insn_len;
16046 }
16047@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
16048 preempt_enable();
16049 }
16050
16051-struct pv_info pv_info = {
16052+struct pv_info pv_info __read_only = {
16053 .name = "bare hardware",
16054 .paravirt_enabled = 0,
16055 .kernel_rpl = 0,
16056@@ -313,16 +323,16 @@ struct pv_info pv_info = {
16057 #endif
16058 };
16059
16060-struct pv_init_ops pv_init_ops = {
16061+struct pv_init_ops pv_init_ops __read_only = {
16062 .patch = native_patch,
16063 };
16064
16065-struct pv_time_ops pv_time_ops = {
16066+struct pv_time_ops pv_time_ops __read_only = {
16067 .sched_clock = native_sched_clock,
16068 .steal_clock = native_steal_clock,
16069 };
16070
16071-struct pv_irq_ops pv_irq_ops = {
16072+struct pv_irq_ops pv_irq_ops __read_only = {
16073 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16074 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16075 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16076@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
16077 #endif
16078 };
16079
16080-struct pv_cpu_ops pv_cpu_ops = {
16081+struct pv_cpu_ops pv_cpu_ops __read_only = {
16082 .cpuid = native_cpuid,
16083 .get_debugreg = native_get_debugreg,
16084 .set_debugreg = native_set_debugreg,
16085@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16086 .end_context_switch = paravirt_nop,
16087 };
16088
16089-struct pv_apic_ops pv_apic_ops = {
16090+struct pv_apic_ops pv_apic_ops __read_only = {
16091 #ifdef CONFIG_X86_LOCAL_APIC
16092 .startup_ipi_hook = paravirt_nop,
16093 #endif
16094 };
16095
16096-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16097+#ifdef CONFIG_X86_32
16098+#ifdef CONFIG_X86_PAE
16099+/* 64-bit pagetable entries */
16100+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16101+#else
16102 /* 32-bit pagetable entries */
16103 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16104+#endif
16105 #else
16106 /* 64-bit pagetable entries */
16107 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16108 #endif
16109
16110-struct pv_mmu_ops pv_mmu_ops = {
16111+struct pv_mmu_ops pv_mmu_ops __read_only = {
16112
16113 .read_cr2 = native_read_cr2,
16114 .write_cr2 = native_write_cr2,
16115@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16116 .make_pud = PTE_IDENT,
16117
16118 .set_pgd = native_set_pgd,
16119+ .set_pgd_batched = native_set_pgd_batched,
16120 #endif
16121 #endif /* PAGETABLE_LEVELS >= 3 */
16122
16123@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16124 },
16125
16126 .set_fixmap = native_set_fixmap,
16127+
16128+#ifdef CONFIG_PAX_KERNEXEC
16129+ .pax_open_kernel = native_pax_open_kernel,
16130+ .pax_close_kernel = native_pax_close_kernel,
16131+#endif
16132+
16133 };
16134
16135 EXPORT_SYMBOL_GPL(pv_time_ops);
16136diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16137index 35ccf75..67e7d4d 100644
16138--- a/arch/x86/kernel/pci-iommu_table.c
16139+++ b/arch/x86/kernel/pci-iommu_table.c
16140@@ -2,7 +2,7 @@
16141 #include <asm/iommu_table.h>
16142 #include <linux/string.h>
16143 #include <linux/kallsyms.h>
16144-
16145+#include <linux/sched.h>
16146
16147 #define DEBUG 1
16148
16149@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
16150 {
16151 struct iommu_table_entry *p, *q, *x;
16152
16153+ pax_track_stack();
16154+
16155 /* Simple cyclic dependency checker. */
16156 for (p = start; p < finish; p++) {
16157 q = find_dependents_of(start, finish, p);
16158diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16159index 30eb651..37fa2d7 100644
16160--- a/arch/x86/kernel/process.c
16161+++ b/arch/x86/kernel/process.c
16162@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16163
16164 void free_thread_info(struct thread_info *ti)
16165 {
16166- free_thread_xstate(ti->task);
16167 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16168 }
16169
16170+static struct kmem_cache *task_struct_cachep;
16171+
16172 void arch_task_cache_init(void)
16173 {
16174- task_xstate_cachep =
16175- kmem_cache_create("task_xstate", xstate_size,
16176+ /* create a slab on which task_structs can be allocated */
16177+ task_struct_cachep =
16178+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16179+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16180+
16181+ task_xstate_cachep =
16182+ kmem_cache_create("task_xstate", xstate_size,
16183 __alignof__(union thread_xstate),
16184- SLAB_PANIC | SLAB_NOTRACK, NULL);
16185+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16186+}
16187+
16188+struct task_struct *alloc_task_struct_node(int node)
16189+{
16190+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16191+}
16192+
16193+void free_task_struct(struct task_struct *task)
16194+{
16195+ free_thread_xstate(task);
16196+ kmem_cache_free(task_struct_cachep, task);
16197 }
16198
16199 /*
16200@@ -70,7 +87,7 @@ void exit_thread(void)
16201 unsigned long *bp = t->io_bitmap_ptr;
16202
16203 if (bp) {
16204- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16205+ struct tss_struct *tss = init_tss + get_cpu();
16206
16207 t->io_bitmap_ptr = NULL;
16208 clear_thread_flag(TIF_IO_BITMAP);
16209@@ -106,7 +123,7 @@ void show_regs_common(void)
16210
16211 printk(KERN_CONT "\n");
16212 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16213- current->pid, current->comm, print_tainted(),
16214+ task_pid_nr(current), current->comm, print_tainted(),
16215 init_utsname()->release,
16216 (int)strcspn(init_utsname()->version, " "),
16217 init_utsname()->version);
16218@@ -120,6 +137,9 @@ void flush_thread(void)
16219 {
16220 struct task_struct *tsk = current;
16221
16222+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16223+ loadsegment(gs, 0);
16224+#endif
16225 flush_ptrace_hw_breakpoint(tsk);
16226 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16227 /*
16228@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16229 regs.di = (unsigned long) arg;
16230
16231 #ifdef CONFIG_X86_32
16232- regs.ds = __USER_DS;
16233- regs.es = __USER_DS;
16234+ regs.ds = __KERNEL_DS;
16235+ regs.es = __KERNEL_DS;
16236 regs.fs = __KERNEL_PERCPU;
16237- regs.gs = __KERNEL_STACK_CANARY;
16238+ savesegment(gs, regs.gs);
16239 #else
16240 regs.ss = __KERNEL_DS;
16241 #endif
16242@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16243
16244 return ret;
16245 }
16246-void stop_this_cpu(void *dummy)
16247+__noreturn void stop_this_cpu(void *dummy)
16248 {
16249 local_irq_disable();
16250 /*
16251@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16252 }
16253 early_param("idle", idle_setup);
16254
16255-unsigned long arch_align_stack(unsigned long sp)
16256+#ifdef CONFIG_PAX_RANDKSTACK
16257+void pax_randomize_kstack(struct pt_regs *regs)
16258 {
16259- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16260- sp -= get_random_int() % 8192;
16261- return sp & ~0xf;
16262-}
16263+ struct thread_struct *thread = &current->thread;
16264+ unsigned long time;
16265
16266-unsigned long arch_randomize_brk(struct mm_struct *mm)
16267-{
16268- unsigned long range_end = mm->brk + 0x02000000;
16269- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16270-}
16271+ if (!randomize_va_space)
16272+ return;
16273+
16274+ if (v8086_mode(regs))
16275+ return;
16276
16277+ rdtscl(time);
16278+
16279+ /* P4 seems to return a 0 LSB, ignore it */
16280+#ifdef CONFIG_MPENTIUM4
16281+ time &= 0x3EUL;
16282+ time <<= 2;
16283+#elif defined(CONFIG_X86_64)
16284+ time &= 0xFUL;
16285+ time <<= 4;
16286+#else
16287+ time &= 0x1FUL;
16288+ time <<= 3;
16289+#endif
16290+
16291+ thread->sp0 ^= time;
16292+ load_sp0(init_tss + smp_processor_id(), thread);
16293+
16294+#ifdef CONFIG_X86_64
16295+ percpu_write(kernel_stack, thread->sp0);
16296+#endif
16297+}
16298+#endif
16299diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16300index 7a3b651..5a946f6 100644
16301--- a/arch/x86/kernel/process_32.c
16302+++ b/arch/x86/kernel/process_32.c
16303@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16304 unsigned long thread_saved_pc(struct task_struct *tsk)
16305 {
16306 return ((unsigned long *)tsk->thread.sp)[3];
16307+//XXX return tsk->thread.eip;
16308 }
16309
16310 #ifndef CONFIG_SMP
16311@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all)
16312 unsigned long sp;
16313 unsigned short ss, gs;
16314
16315- if (user_mode_vm(regs)) {
16316+ if (user_mode(regs)) {
16317 sp = regs->sp;
16318 ss = regs->ss & 0xffff;
16319- gs = get_user_gs(regs);
16320 } else {
16321 sp = kernel_stack_pointer(regs);
16322 savesegment(ss, ss);
16323- savesegment(gs, gs);
16324 }
16325+ gs = get_user_gs(regs);
16326
16327 show_regs_common();
16328
16329@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16330 struct task_struct *tsk;
16331 int err;
16332
16333- childregs = task_pt_regs(p);
16334+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16335 *childregs = *regs;
16336 childregs->ax = 0;
16337 childregs->sp = sp;
16338
16339 p->thread.sp = (unsigned long) childregs;
16340 p->thread.sp0 = (unsigned long) (childregs+1);
16341+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16342
16343 p->thread.ip = (unsigned long) ret_from_fork;
16344
16345@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16346 struct thread_struct *prev = &prev_p->thread,
16347 *next = &next_p->thread;
16348 int cpu = smp_processor_id();
16349- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16350+ struct tss_struct *tss = init_tss + cpu;
16351 bool preload_fpu;
16352
16353 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16354@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16355 */
16356 lazy_save_gs(prev->gs);
16357
16358+#ifdef CONFIG_PAX_MEMORY_UDEREF
16359+ __set_fs(task_thread_info(next_p)->addr_limit);
16360+#endif
16361+
16362 /*
16363 * Load the per-thread Thread-Local Storage descriptor.
16364 */
16365@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16366 */
16367 arch_end_context_switch(next_p);
16368
16369+ percpu_write(current_task, next_p);
16370+ percpu_write(current_tinfo, &next_p->tinfo);
16371+
16372 if (preload_fpu)
16373 __math_state_restore();
16374
16375@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16376 if (prev->gs | next->gs)
16377 lazy_load_gs(next->gs);
16378
16379- percpu_write(current_task, next_p);
16380-
16381 return prev_p;
16382 }
16383
16384@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p)
16385 } while (count++ < 16);
16386 return 0;
16387 }
16388-
16389diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16390index f693e44..3c979b2 100644
16391--- a/arch/x86/kernel/process_64.c
16392+++ b/arch/x86/kernel/process_64.c
16393@@ -88,7 +88,7 @@ static void __exit_idle(void)
16394 void exit_idle(void)
16395 {
16396 /* idle loop has pid 0 */
16397- if (current->pid)
16398+ if (task_pid_nr(current))
16399 return;
16400 __exit_idle();
16401 }
16402@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16403 struct pt_regs *childregs;
16404 struct task_struct *me = current;
16405
16406- childregs = ((struct pt_regs *)
16407- (THREAD_SIZE + task_stack_page(p))) - 1;
16408+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16409 *childregs = *regs;
16410
16411 childregs->ax = 0;
16412@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16413 p->thread.sp = (unsigned long) childregs;
16414 p->thread.sp0 = (unsigned long) (childregs+1);
16415 p->thread.usersp = me->thread.usersp;
16416+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16417
16418 set_tsk_thread_flag(p, TIF_FORK);
16419
16420@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16421 struct thread_struct *prev = &prev_p->thread;
16422 struct thread_struct *next = &next_p->thread;
16423 int cpu = smp_processor_id();
16424- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16425+ struct tss_struct *tss = init_tss + cpu;
16426 unsigned fsindex, gsindex;
16427 bool preload_fpu;
16428
16429@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 prev->usersp = percpu_read(old_rsp);
16431 percpu_write(old_rsp, next->usersp);
16432 percpu_write(current_task, next_p);
16433+ percpu_write(current_tinfo, &next_p->tinfo);
16434
16435- percpu_write(kernel_stack,
16436- (unsigned long)task_stack_page(next_p) +
16437- THREAD_SIZE - KERNEL_STACK_OFFSET);
16438+ percpu_write(kernel_stack, next->sp0);
16439
16440 /*
16441 * Now maybe reload the debug registers and handle I/O bitmaps
16442@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p)
16443 if (!p || p == current || p->state == TASK_RUNNING)
16444 return 0;
16445 stack = (unsigned long)task_stack_page(p);
16446- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16447+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16448 return 0;
16449 fp = *(u64 *)(p->thread.sp);
16450 do {
16451- if (fp < (unsigned long)stack ||
16452- fp >= (unsigned long)stack+THREAD_SIZE)
16453+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16454 return 0;
16455 ip = *(u64 *)(fp+8);
16456 if (!in_sched_functions(ip))
16457diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16458index 8252879..d3219e0 100644
16459--- a/arch/x86/kernel/ptrace.c
16460+++ b/arch/x86/kernel/ptrace.c
16461@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16462 unsigned long addr, unsigned long data)
16463 {
16464 int ret;
16465- unsigned long __user *datap = (unsigned long __user *)data;
16466+ unsigned long __user *datap = (__force unsigned long __user *)data;
16467
16468 switch (request) {
16469 /* read the word at location addr in the USER area. */
16470@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16471 if ((int) addr < 0)
16472 return -EIO;
16473 ret = do_get_thread_area(child, addr,
16474- (struct user_desc __user *)data);
16475+ (__force struct user_desc __user *) data);
16476 break;
16477
16478 case PTRACE_SET_THREAD_AREA:
16479 if ((int) addr < 0)
16480 return -EIO;
16481 ret = do_set_thread_area(child, addr,
16482- (struct user_desc __user *)data, 0);
16483+ (__force struct user_desc __user *) data, 0);
16484 break;
16485 #endif
16486
16487@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16488 memset(info, 0, sizeof(*info));
16489 info->si_signo = SIGTRAP;
16490 info->si_code = si_code;
16491- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16492+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16493 }
16494
16495 void user_single_step_siginfo(struct task_struct *tsk,
16496diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16497index 42eb330..139955c 100644
16498--- a/arch/x86/kernel/pvclock.c
16499+++ b/arch/x86/kernel/pvclock.c
16500@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16501 return pv_tsc_khz;
16502 }
16503
16504-static atomic64_t last_value = ATOMIC64_INIT(0);
16505+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16506
16507 void pvclock_resume(void)
16508 {
16509- atomic64_set(&last_value, 0);
16510+ atomic64_set_unchecked(&last_value, 0);
16511 }
16512
16513 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16514@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16515 * updating at the same time, and one of them could be slightly behind,
16516 * making the assumption that last_value always go forward fail to hold.
16517 */
16518- last = atomic64_read(&last_value);
16519+ last = atomic64_read_unchecked(&last_value);
16520 do {
16521 if (ret < last)
16522 return last;
16523- last = atomic64_cmpxchg(&last_value, last, ret);
16524+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16525 } while (unlikely(last != ret));
16526
16527 return ret;
16528diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16529index d4a705f..ef8f1a9 100644
16530--- a/arch/x86/kernel/reboot.c
16531+++ b/arch/x86/kernel/reboot.c
16532@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16533 EXPORT_SYMBOL(pm_power_off);
16534
16535 static const struct desc_ptr no_idt = {};
16536-static int reboot_mode;
16537+static unsigned short reboot_mode;
16538 enum reboot_type reboot_type = BOOT_ACPI;
16539 int reboot_force;
16540
16541@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16542 extern const unsigned char machine_real_restart_asm[];
16543 extern const u64 machine_real_restart_gdt[3];
16544
16545-void machine_real_restart(unsigned int type)
16546+__noreturn void machine_real_restart(unsigned int type)
16547 {
16548 void *restart_va;
16549 unsigned long restart_pa;
16550- void (*restart_lowmem)(unsigned int);
16551+ void (* __noreturn restart_lowmem)(unsigned int);
16552 u64 *lowmem_gdt;
16553
16554+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16555+ struct desc_struct *gdt;
16556+#endif
16557+
16558 local_irq_disable();
16559
16560 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16561@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16562 boot)". This seems like a fairly standard thing that gets set by
16563 REBOOT.COM programs, and the previous reset routine did this
16564 too. */
16565- *((unsigned short *)0x472) = reboot_mode;
16566+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16567
16568 /* Patch the GDT in the low memory trampoline */
16569 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16570
16571 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16572 restart_pa = virt_to_phys(restart_va);
16573- restart_lowmem = (void (*)(unsigned int))restart_pa;
16574+ restart_lowmem = (void *)restart_pa;
16575
16576 /* GDT[0]: GDT self-pointer */
16577 lowmem_gdt[0] =
16578@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16579 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16580
16581 /* Jump to the identity-mapped low memory code */
16582+
16583+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16584+ gdt = get_cpu_gdt_table(smp_processor_id());
16585+ pax_open_kernel();
16586+#ifdef CONFIG_PAX_MEMORY_UDEREF
16587+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16588+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16589+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16590+#endif
16591+#ifdef CONFIG_PAX_KERNEXEC
16592+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16593+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16594+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16595+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16596+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16597+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16598+#endif
16599+ pax_close_kernel();
16600+#endif
16601+
16602+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16603+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16604+ unreachable();
16605+#else
16606 restart_lowmem(type);
16607+#endif
16608+
16609 }
16610 #ifdef CONFIG_APM_MODULE
16611 EXPORT_SYMBOL(machine_real_restart);
16612@@ -532,7 +562,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16613 * try to force a triple fault and then cycle between hitting the keyboard
16614 * controller and doing that
16615 */
16616-static void native_machine_emergency_restart(void)
16617+__noreturn static void native_machine_emergency_restart(void)
16618 {
16619 int i;
16620 int attempt = 0;
16621@@ -656,13 +686,13 @@ void native_machine_shutdown(void)
16622 #endif
16623 }
16624
16625-static void __machine_emergency_restart(int emergency)
16626+static __noreturn void __machine_emergency_restart(int emergency)
16627 {
16628 reboot_emergency = emergency;
16629 machine_ops.emergency_restart();
16630 }
16631
16632-static void native_machine_restart(char *__unused)
16633+static __noreturn void native_machine_restart(char *__unused)
16634 {
16635 printk("machine restart\n");
16636
16637@@ -671,7 +701,7 @@ static void native_machine_restart(char *__unused)
16638 __machine_emergency_restart(0);
16639 }
16640
16641-static void native_machine_halt(void)
16642+static __noreturn void native_machine_halt(void)
16643 {
16644 /* stop other cpus and apics */
16645 machine_shutdown();
16646@@ -682,7 +712,7 @@ static void native_machine_halt(void)
16647 stop_this_cpu(NULL);
16648 }
16649
16650-static void native_machine_power_off(void)
16651+__noreturn static void native_machine_power_off(void)
16652 {
16653 if (pm_power_off) {
16654 if (!reboot_force)
16655@@ -691,6 +721,7 @@ static void native_machine_power_off(void)
16656 }
16657 /* a fallback in case there is no PM info available */
16658 tboot_shutdown(TB_SHUTDOWN_HALT);
16659+ unreachable();
16660 }
16661
16662 struct machine_ops machine_ops = {
16663diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16664index 7a6f3b3..bed145d7 100644
16665--- a/arch/x86/kernel/relocate_kernel_64.S
16666+++ b/arch/x86/kernel/relocate_kernel_64.S
16667@@ -11,6 +11,7 @@
16668 #include <asm/kexec.h>
16669 #include <asm/processor-flags.h>
16670 #include <asm/pgtable_types.h>
16671+#include <asm/alternative-asm.h>
16672
16673 /*
16674 * Must be relocatable PIC code callable as a C function
16675@@ -160,13 +161,14 @@ identity_mapped:
16676 xorq %rbp, %rbp
16677 xorq %r8, %r8
16678 xorq %r9, %r9
16679- xorq %r10, %r9
16680+ xorq %r10, %r10
16681 xorq %r11, %r11
16682 xorq %r12, %r12
16683 xorq %r13, %r13
16684 xorq %r14, %r14
16685 xorq %r15, %r15
16686
16687+ pax_force_retaddr 0, 1
16688 ret
16689
16690 1:
16691diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16692index afaf384..1a101fe 100644
16693--- a/arch/x86/kernel/setup.c
16694+++ b/arch/x86/kernel/setup.c
16695@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16696
16697 switch (data->type) {
16698 case SETUP_E820_EXT:
16699- parse_e820_ext(data);
16700+ parse_e820_ext((struct setup_data __force_kernel *)data);
16701 break;
16702 case SETUP_DTB:
16703 add_dtb(pa_data);
16704@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16705 * area (640->1Mb) as ram even though it is not.
16706 * take them out.
16707 */
16708- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16709+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16710 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16711 }
16712
16713@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16714
16715 if (!boot_params.hdr.root_flags)
16716 root_mountflags &= ~MS_RDONLY;
16717- init_mm.start_code = (unsigned long) _text;
16718- init_mm.end_code = (unsigned long) _etext;
16719+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16720+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16721 init_mm.end_data = (unsigned long) _edata;
16722 init_mm.brk = _brk_end;
16723
16724- code_resource.start = virt_to_phys(_text);
16725- code_resource.end = virt_to_phys(_etext)-1;
16726- data_resource.start = virt_to_phys(_etext);
16727+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16728+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16729+ data_resource.start = virt_to_phys(_sdata);
16730 data_resource.end = virt_to_phys(_edata)-1;
16731 bss_resource.start = virt_to_phys(&__bss_start);
16732 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16733diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16734index 71f4727..16dc9f7 100644
16735--- a/arch/x86/kernel/setup_percpu.c
16736+++ b/arch/x86/kernel/setup_percpu.c
16737@@ -21,19 +21,17 @@
16738 #include <asm/cpu.h>
16739 #include <asm/stackprotector.h>
16740
16741-DEFINE_PER_CPU(int, cpu_number);
16742+#ifdef CONFIG_SMP
16743+DEFINE_PER_CPU(unsigned int, cpu_number);
16744 EXPORT_PER_CPU_SYMBOL(cpu_number);
16745+#endif
16746
16747-#ifdef CONFIG_X86_64
16748 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16749-#else
16750-#define BOOT_PERCPU_OFFSET 0
16751-#endif
16752
16753 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16754 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16755
16756-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16757+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16758 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16759 };
16760 EXPORT_SYMBOL(__per_cpu_offset);
16761@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16762 {
16763 #ifdef CONFIG_X86_32
16764 struct desc_struct gdt;
16765+ unsigned long base = per_cpu_offset(cpu);
16766
16767- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16768- 0x2 | DESCTYPE_S, 0x8);
16769- gdt.s = 1;
16770+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16771+ 0x83 | DESCTYPE_S, 0xC);
16772 write_gdt_entry(get_cpu_gdt_table(cpu),
16773 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16774 #endif
16775@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16776 /* alrighty, percpu areas up and running */
16777 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16778 for_each_possible_cpu(cpu) {
16779+#ifdef CONFIG_CC_STACKPROTECTOR
16780+#ifdef CONFIG_X86_32
16781+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16782+#endif
16783+#endif
16784 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16785 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16786 per_cpu(cpu_number, cpu) = cpu;
16787@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16788 */
16789 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16790 #endif
16791+#ifdef CONFIG_CC_STACKPROTECTOR
16792+#ifdef CONFIG_X86_32
16793+ if (!cpu)
16794+ per_cpu(stack_canary.canary, cpu) = canary;
16795+#endif
16796+#endif
16797 /*
16798 * Up to this point, the boot CPU has been using .init.data
16799 * area. Reload any changed state for the boot CPU.
16800diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16801index 54ddaeb2..a6aa4d2 100644
16802--- a/arch/x86/kernel/signal.c
16803+++ b/arch/x86/kernel/signal.c
16804@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16805 * Align the stack pointer according to the i386 ABI,
16806 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16807 */
16808- sp = ((sp + 4) & -16ul) - 4;
16809+ sp = ((sp - 12) & -16ul) - 4;
16810 #else /* !CONFIG_X86_32 */
16811 sp = round_down(sp, 16) - 8;
16812 #endif
16813@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16814 * Return an always-bogus address instead so we will die with SIGSEGV.
16815 */
16816 if (onsigstack && !likely(on_sig_stack(sp)))
16817- return (void __user *)-1L;
16818+ return (__force void __user *)-1L;
16819
16820 /* save i387 state */
16821 if (used_math() && save_i387_xstate(*fpstate) < 0)
16822- return (void __user *)-1L;
16823+ return (__force void __user *)-1L;
16824
16825 return (void __user *)sp;
16826 }
16827@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16828 }
16829
16830 if (current->mm->context.vdso)
16831- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16832+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16833 else
16834- restorer = &frame->retcode;
16835+ restorer = (void __user *)&frame->retcode;
16836 if (ka->sa.sa_flags & SA_RESTORER)
16837 restorer = ka->sa.sa_restorer;
16838
16839@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16840 * reasons and because gdb uses it as a signature to notice
16841 * signal handler stack frames.
16842 */
16843- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16844+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16845
16846 if (err)
16847 return -EFAULT;
16848@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16849 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16850
16851 /* Set up to return from userspace. */
16852- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16853+ if (current->mm->context.vdso)
16854+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16855+ else
16856+ restorer = (void __user *)&frame->retcode;
16857 if (ka->sa.sa_flags & SA_RESTORER)
16858 restorer = ka->sa.sa_restorer;
16859 put_user_ex(restorer, &frame->pretcode);
16860@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16861 * reasons and because gdb uses it as a signature to notice
16862 * signal handler stack frames.
16863 */
16864- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16865+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16866 } put_user_catch(err);
16867
16868 if (err)
16869@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs)
16870 siginfo_t info;
16871 int signr;
16872
16873+ pax_track_stack();
16874+
16875 /*
16876 * We want the common case to go fast, which is why we may in certain
16877 * cases get here from kernel mode. Just return without doing anything
16878@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs)
16879 * X86_32: vm86 regs switched out by assembly code before reaching
16880 * here, so testing against kernel CS suffices.
16881 */
16882- if (!user_mode(regs))
16883+ if (!user_mode_novm(regs))
16884 return;
16885
16886 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16887diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16888index 9f548cb..caf76f7 100644
16889--- a/arch/x86/kernel/smpboot.c
16890+++ b/arch/x86/kernel/smpboot.c
16891@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16892 set_idle_for_cpu(cpu, c_idle.idle);
16893 do_rest:
16894 per_cpu(current_task, cpu) = c_idle.idle;
16895+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16896 #ifdef CONFIG_X86_32
16897 /* Stack for startup_32 can be just as for start_secondary onwards */
16898 irq_ctx_init(cpu);
16899 #else
16900 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16901 initial_gs = per_cpu_offset(cpu);
16902- per_cpu(kernel_stack, cpu) =
16903- (unsigned long)task_stack_page(c_idle.idle) -
16904- KERNEL_STACK_OFFSET + THREAD_SIZE;
16905+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16906 #endif
16907+
16908+ pax_open_kernel();
16909 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16910+ pax_close_kernel();
16911+
16912 initial_code = (unsigned long)start_secondary;
16913 stack_start = c_idle.idle->thread.sp;
16914
16915@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16916
16917 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16918
16919+#ifdef CONFIG_PAX_PER_CPU_PGD
16920+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16921+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16922+ KERNEL_PGD_PTRS);
16923+#endif
16924+
16925 err = do_boot_cpu(apicid, cpu);
16926 if (err) {
16927 pr_debug("do_boot_cpu failed %d\n", err);
16928diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16929index c346d11..d43b163 100644
16930--- a/arch/x86/kernel/step.c
16931+++ b/arch/x86/kernel/step.c
16932@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16933 struct desc_struct *desc;
16934 unsigned long base;
16935
16936- seg &= ~7UL;
16937+ seg >>= 3;
16938
16939 mutex_lock(&child->mm->context.lock);
16940- if (unlikely((seg >> 3) >= child->mm->context.size))
16941+ if (unlikely(seg >= child->mm->context.size))
16942 addr = -1L; /* bogus selector, access would fault */
16943 else {
16944 desc = child->mm->context.ldt + seg;
16945@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16946 addr += base;
16947 }
16948 mutex_unlock(&child->mm->context.lock);
16949- }
16950+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16951+ addr = ktla_ktva(addr);
16952
16953 return addr;
16954 }
16955@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16956 unsigned char opcode[15];
16957 unsigned long addr = convert_ip_to_linear(child, regs);
16958
16959+ if (addr == -EINVAL)
16960+ return 0;
16961+
16962 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16963 for (i = 0; i < copied; i++) {
16964 switch (opcode[i]) {
16965diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
16966index 0b0cb5f..db6b9ed 100644
16967--- a/arch/x86/kernel/sys_i386_32.c
16968+++ b/arch/x86/kernel/sys_i386_32.c
16969@@ -24,17 +24,224 @@
16970
16971 #include <asm/syscalls.h>
16972
16973-/*
16974- * Do a system call from kernel instead of calling sys_execve so we
16975- * end up with proper pt_regs.
16976- */
16977-int kernel_execve(const char *filename,
16978- const char *const argv[],
16979- const char *const envp[])
16980+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16981 {
16982- long __res;
16983- asm volatile ("int $0x80"
16984- : "=a" (__res)
16985- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
16986- return __res;
16987+ unsigned long pax_task_size = TASK_SIZE;
16988+
16989+#ifdef CONFIG_PAX_SEGMEXEC
16990+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16991+ pax_task_size = SEGMEXEC_TASK_SIZE;
16992+#endif
16993+
16994+ if (len > pax_task_size || addr > pax_task_size - len)
16995+ return -EINVAL;
16996+
16997+ return 0;
16998+}
16999+
17000+unsigned long
17001+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17002+ unsigned long len, unsigned long pgoff, unsigned long flags)
17003+{
17004+ struct mm_struct *mm = current->mm;
17005+ struct vm_area_struct *vma;
17006+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17007+
17008+#ifdef CONFIG_PAX_SEGMEXEC
17009+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17010+ pax_task_size = SEGMEXEC_TASK_SIZE;
17011+#endif
17012+
17013+ pax_task_size -= PAGE_SIZE;
17014+
17015+ if (len > pax_task_size)
17016+ return -ENOMEM;
17017+
17018+ if (flags & MAP_FIXED)
17019+ return addr;
17020+
17021+#ifdef CONFIG_PAX_RANDMMAP
17022+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17023+#endif
17024+
17025+ if (addr) {
17026+ addr = PAGE_ALIGN(addr);
17027+ if (pax_task_size - len >= addr) {
17028+ vma = find_vma(mm, addr);
17029+ if (check_heap_stack_gap(vma, addr, len))
17030+ return addr;
17031+ }
17032+ }
17033+ if (len > mm->cached_hole_size) {
17034+ start_addr = addr = mm->free_area_cache;
17035+ } else {
17036+ start_addr = addr = mm->mmap_base;
17037+ mm->cached_hole_size = 0;
17038+ }
17039+
17040+#ifdef CONFIG_PAX_PAGEEXEC
17041+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17042+ start_addr = 0x00110000UL;
17043+
17044+#ifdef CONFIG_PAX_RANDMMAP
17045+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17046+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17047+#endif
17048+
17049+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17050+ start_addr = addr = mm->mmap_base;
17051+ else
17052+ addr = start_addr;
17053+ }
17054+#endif
17055+
17056+full_search:
17057+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17058+ /* At this point: (!vma || addr < vma->vm_end). */
17059+ if (pax_task_size - len < addr) {
17060+ /*
17061+ * Start a new search - just in case we missed
17062+ * some holes.
17063+ */
17064+ if (start_addr != mm->mmap_base) {
17065+ start_addr = addr = mm->mmap_base;
17066+ mm->cached_hole_size = 0;
17067+ goto full_search;
17068+ }
17069+ return -ENOMEM;
17070+ }
17071+ if (check_heap_stack_gap(vma, addr, len))
17072+ break;
17073+ if (addr + mm->cached_hole_size < vma->vm_start)
17074+ mm->cached_hole_size = vma->vm_start - addr;
17075+ addr = vma->vm_end;
17076+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17077+ start_addr = addr = mm->mmap_base;
17078+ mm->cached_hole_size = 0;
17079+ goto full_search;
17080+ }
17081+ }
17082+
17083+ /*
17084+ * Remember the place where we stopped the search:
17085+ */
17086+ mm->free_area_cache = addr + len;
17087+ return addr;
17088+}
17089+
17090+unsigned long
17091+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17092+ const unsigned long len, const unsigned long pgoff,
17093+ const unsigned long flags)
17094+{
17095+ struct vm_area_struct *vma;
17096+ struct mm_struct *mm = current->mm;
17097+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17098+
17099+#ifdef CONFIG_PAX_SEGMEXEC
17100+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17101+ pax_task_size = SEGMEXEC_TASK_SIZE;
17102+#endif
17103+
17104+ pax_task_size -= PAGE_SIZE;
17105+
17106+ /* requested length too big for entire address space */
17107+ if (len > pax_task_size)
17108+ return -ENOMEM;
17109+
17110+ if (flags & MAP_FIXED)
17111+ return addr;
17112+
17113+#ifdef CONFIG_PAX_PAGEEXEC
17114+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17115+ goto bottomup;
17116+#endif
17117+
17118+#ifdef CONFIG_PAX_RANDMMAP
17119+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17120+#endif
17121+
17122+ /* requesting a specific address */
17123+ if (addr) {
17124+ addr = PAGE_ALIGN(addr);
17125+ if (pax_task_size - len >= addr) {
17126+ vma = find_vma(mm, addr);
17127+ if (check_heap_stack_gap(vma, addr, len))
17128+ return addr;
17129+ }
17130+ }
17131+
17132+ /* check if free_area_cache is useful for us */
17133+ if (len <= mm->cached_hole_size) {
17134+ mm->cached_hole_size = 0;
17135+ mm->free_area_cache = mm->mmap_base;
17136+ }
17137+
17138+ /* either no address requested or can't fit in requested address hole */
17139+ addr = mm->free_area_cache;
17140+
17141+ /* make sure it can fit in the remaining address space */
17142+ if (addr > len) {
17143+ vma = find_vma(mm, addr-len);
17144+ if (check_heap_stack_gap(vma, addr - len, len))
17145+ /* remember the address as a hint for next time */
17146+ return (mm->free_area_cache = addr-len);
17147+ }
17148+
17149+ if (mm->mmap_base < len)
17150+ goto bottomup;
17151+
17152+ addr = mm->mmap_base-len;
17153+
17154+ do {
17155+ /*
17156+ * Lookup failure means no vma is above this address,
17157+ * else if new region fits below vma->vm_start,
17158+ * return with success:
17159+ */
17160+ vma = find_vma(mm, addr);
17161+ if (check_heap_stack_gap(vma, addr, len))
17162+ /* remember the address as a hint for next time */
17163+ return (mm->free_area_cache = addr);
17164+
17165+ /* remember the largest hole we saw so far */
17166+ if (addr + mm->cached_hole_size < vma->vm_start)
17167+ mm->cached_hole_size = vma->vm_start - addr;
17168+
17169+ /* try just below the current vma->vm_start */
17170+ addr = skip_heap_stack_gap(vma, len);
17171+ } while (!IS_ERR_VALUE(addr));
17172+
17173+bottomup:
17174+ /*
17175+ * A failed mmap() very likely causes application failure,
17176+ * so fall back to the bottom-up function here. This scenario
17177+ * can happen with large stack limits and large mmap()
17178+ * allocations.
17179+ */
17180+
17181+#ifdef CONFIG_PAX_SEGMEXEC
17182+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17183+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17184+ else
17185+#endif
17186+
17187+ mm->mmap_base = TASK_UNMAPPED_BASE;
17188+
17189+#ifdef CONFIG_PAX_RANDMMAP
17190+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17191+ mm->mmap_base += mm->delta_mmap;
17192+#endif
17193+
17194+ mm->free_area_cache = mm->mmap_base;
17195+ mm->cached_hole_size = ~0UL;
17196+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17197+ /*
17198+ * Restore the topdown base:
17199+ */
17200+ mm->mmap_base = base;
17201+ mm->free_area_cache = base;
17202+ mm->cached_hole_size = ~0UL;
17203+
17204+ return addr;
17205 }
17206diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17207index ff14a50..35626c3 100644
17208--- a/arch/x86/kernel/sys_x86_64.c
17209+++ b/arch/x86/kernel/sys_x86_64.c
17210@@ -32,8 +32,8 @@ out:
17211 return error;
17212 }
17213
17214-static void find_start_end(unsigned long flags, unsigned long *begin,
17215- unsigned long *end)
17216+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17217+ unsigned long *begin, unsigned long *end)
17218 {
17219 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17220 unsigned long new_begin;
17221@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17222 *begin = new_begin;
17223 }
17224 } else {
17225- *begin = TASK_UNMAPPED_BASE;
17226+ *begin = mm->mmap_base;
17227 *end = TASK_SIZE;
17228 }
17229 }
17230@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17231 if (flags & MAP_FIXED)
17232 return addr;
17233
17234- find_start_end(flags, &begin, &end);
17235+ find_start_end(mm, flags, &begin, &end);
17236
17237 if (len > end)
17238 return -ENOMEM;
17239
17240+#ifdef CONFIG_PAX_RANDMMAP
17241+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17242+#endif
17243+
17244 if (addr) {
17245 addr = PAGE_ALIGN(addr);
17246 vma = find_vma(mm, addr);
17247- if (end - len >= addr &&
17248- (!vma || addr + len <= vma->vm_start))
17249+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17250 return addr;
17251 }
17252 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17253@@ -106,7 +109,7 @@ full_search:
17254 }
17255 return -ENOMEM;
17256 }
17257- if (!vma || addr + len <= vma->vm_start) {
17258+ if (check_heap_stack_gap(vma, addr, len)) {
17259 /*
17260 * Remember the place where we stopped the search:
17261 */
17262@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17263 {
17264 struct vm_area_struct *vma;
17265 struct mm_struct *mm = current->mm;
17266- unsigned long addr = addr0;
17267+ unsigned long base = mm->mmap_base, addr = addr0;
17268
17269 /* requested length too big for entire address space */
17270 if (len > TASK_SIZE)
17271@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17272 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17273 goto bottomup;
17274
17275+#ifdef CONFIG_PAX_RANDMMAP
17276+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17277+#endif
17278+
17279 /* requesting a specific address */
17280 if (addr) {
17281 addr = PAGE_ALIGN(addr);
17282- vma = find_vma(mm, addr);
17283- if (TASK_SIZE - len >= addr &&
17284- (!vma || addr + len <= vma->vm_start))
17285- return addr;
17286+ if (TASK_SIZE - len >= addr) {
17287+ vma = find_vma(mm, addr);
17288+ if (check_heap_stack_gap(vma, addr, len))
17289+ return addr;
17290+ }
17291 }
17292
17293 /* check if free_area_cache is useful for us */
17294@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17295 /* make sure it can fit in the remaining address space */
17296 if (addr > len) {
17297 vma = find_vma(mm, addr-len);
17298- if (!vma || addr <= vma->vm_start)
17299+ if (check_heap_stack_gap(vma, addr - len, len))
17300 /* remember the address as a hint for next time */
17301 return mm->free_area_cache = addr-len;
17302 }
17303@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17304 * return with success:
17305 */
17306 vma = find_vma(mm, addr);
17307- if (!vma || addr+len <= vma->vm_start)
17308+ if (check_heap_stack_gap(vma, addr, len))
17309 /* remember the address as a hint for next time */
17310 return mm->free_area_cache = addr;
17311
17312@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17313 mm->cached_hole_size = vma->vm_start - addr;
17314
17315 /* try just below the current vma->vm_start */
17316- addr = vma->vm_start-len;
17317- } while (len < vma->vm_start);
17318+ addr = skip_heap_stack_gap(vma, len);
17319+ } while (!IS_ERR_VALUE(addr));
17320
17321 bottomup:
17322 /*
17323@@ -198,13 +206,21 @@ bottomup:
17324 * can happen with large stack limits and large mmap()
17325 * allocations.
17326 */
17327+ mm->mmap_base = TASK_UNMAPPED_BASE;
17328+
17329+#ifdef CONFIG_PAX_RANDMMAP
17330+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17331+ mm->mmap_base += mm->delta_mmap;
17332+#endif
17333+
17334+ mm->free_area_cache = mm->mmap_base;
17335 mm->cached_hole_size = ~0UL;
17336- mm->free_area_cache = TASK_UNMAPPED_BASE;
17337 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17338 /*
17339 * Restore the topdown base:
17340 */
17341- mm->free_area_cache = mm->mmap_base;
17342+ mm->mmap_base = base;
17343+ mm->free_area_cache = base;
17344 mm->cached_hole_size = ~0UL;
17345
17346 return addr;
17347diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17348index bc19be3..0f5fbf7 100644
17349--- a/arch/x86/kernel/syscall_table_32.S
17350+++ b/arch/x86/kernel/syscall_table_32.S
17351@@ -1,3 +1,4 @@
17352+.section .rodata,"a",@progbits
17353 ENTRY(sys_call_table)
17354 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17355 .long sys_exit
17356diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17357index e07a2fc..db0369d 100644
17358--- a/arch/x86/kernel/tboot.c
17359+++ b/arch/x86/kernel/tboot.c
17360@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
17361
17362 void tboot_shutdown(u32 shutdown_type)
17363 {
17364- void (*shutdown)(void);
17365+ void (* __noreturn shutdown)(void);
17366
17367 if (!tboot_enabled())
17368 return;
17369@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
17370
17371 switch_to_tboot_pt();
17372
17373- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17374+ shutdown = (void *)tboot->shutdown_entry;
17375 shutdown();
17376
17377 /* should not reach here */
17378@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17379 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17380 }
17381
17382-static atomic_t ap_wfs_count;
17383+static atomic_unchecked_t ap_wfs_count;
17384
17385 static int tboot_wait_for_aps(int num_aps)
17386 {
17387@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17388 {
17389 switch (action) {
17390 case CPU_DYING:
17391- atomic_inc(&ap_wfs_count);
17392+ atomic_inc_unchecked(&ap_wfs_count);
17393 if (num_online_cpus() == 1)
17394- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17395+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17396 return NOTIFY_BAD;
17397 break;
17398 }
17399@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
17400
17401 tboot_create_trampoline();
17402
17403- atomic_set(&ap_wfs_count, 0);
17404+ atomic_set_unchecked(&ap_wfs_count, 0);
17405 register_hotcpu_notifier(&tboot_cpu_notifier);
17406 return 0;
17407 }
17408diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17409index 5a64d05..804587b 100644
17410--- a/arch/x86/kernel/time.c
17411+++ b/arch/x86/kernel/time.c
17412@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17413 {
17414 unsigned long pc = instruction_pointer(regs);
17415
17416- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17417+ if (!user_mode(regs) && in_lock_functions(pc)) {
17418 #ifdef CONFIG_FRAME_POINTER
17419- return *(unsigned long *)(regs->bp + sizeof(long));
17420+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17421 #else
17422 unsigned long *sp =
17423 (unsigned long *)kernel_stack_pointer(regs);
17424@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17425 * or above a saved flags. Eflags has bits 22-31 zero,
17426 * kernel addresses don't.
17427 */
17428+
17429+#ifdef CONFIG_PAX_KERNEXEC
17430+ return ktla_ktva(sp[0]);
17431+#else
17432 if (sp[0] >> 22)
17433 return sp[0];
17434 if (sp[1] >> 22)
17435 return sp[1];
17436 #endif
17437+
17438+#endif
17439 }
17440 return pc;
17441 }
17442diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17443index 6bb7b85..dd853e1 100644
17444--- a/arch/x86/kernel/tls.c
17445+++ b/arch/x86/kernel/tls.c
17446@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17447 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17448 return -EINVAL;
17449
17450+#ifdef CONFIG_PAX_SEGMEXEC
17451+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17452+ return -EINVAL;
17453+#endif
17454+
17455 set_tls_desc(p, idx, &info, 1);
17456
17457 return 0;
17458diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17459index 451c0a7..e57f551 100644
17460--- a/arch/x86/kernel/trampoline_32.S
17461+++ b/arch/x86/kernel/trampoline_32.S
17462@@ -32,6 +32,12 @@
17463 #include <asm/segment.h>
17464 #include <asm/page_types.h>
17465
17466+#ifdef CONFIG_PAX_KERNEXEC
17467+#define ta(X) (X)
17468+#else
17469+#define ta(X) ((X) - __PAGE_OFFSET)
17470+#endif
17471+
17472 #ifdef CONFIG_SMP
17473
17474 .section ".x86_trampoline","a"
17475@@ -62,7 +68,7 @@ r_base = .
17476 inc %ax # protected mode (PE) bit
17477 lmsw %ax # into protected mode
17478 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17479- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17480+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17481
17482 # These need to be in the same 64K segment as the above;
17483 # hence we don't use the boot_gdt_descr defined in head.S
17484diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17485index 09ff517..df19fbff 100644
17486--- a/arch/x86/kernel/trampoline_64.S
17487+++ b/arch/x86/kernel/trampoline_64.S
17488@@ -90,7 +90,7 @@ startup_32:
17489 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17490 movl %eax, %ds
17491
17492- movl $X86_CR4_PAE, %eax
17493+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17494 movl %eax, %cr4 # Enable PAE mode
17495
17496 # Setup trampoline 4 level pagetables
17497@@ -138,7 +138,7 @@ tidt:
17498 # so the kernel can live anywhere
17499 .balign 4
17500 tgdt:
17501- .short tgdt_end - tgdt # gdt limit
17502+ .short tgdt_end - tgdt - 1 # gdt limit
17503 .long tgdt - r_base
17504 .short 0
17505 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17506diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17507index 6913369..7e7dff6 100644
17508--- a/arch/x86/kernel/traps.c
17509+++ b/arch/x86/kernel/traps.c
17510@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17511
17512 /* Do we ignore FPU interrupts ? */
17513 char ignore_fpu_irq;
17514-
17515-/*
17516- * The IDT has to be page-aligned to simplify the Pentium
17517- * F0 0F bug workaround.
17518- */
17519-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17520 #endif
17521
17522 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17523@@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17524 }
17525
17526 static void __kprobes
17527-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17528+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17529 long error_code, siginfo_t *info)
17530 {
17531 struct task_struct *tsk = current;
17532
17533 #ifdef CONFIG_X86_32
17534- if (regs->flags & X86_VM_MASK) {
17535+ if (v8086_mode(regs)) {
17536 /*
17537 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17538 * On nmi (interrupt 2), do_trap should not be called.
17539@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17540 }
17541 #endif
17542
17543- if (!user_mode(regs))
17544+ if (!user_mode_novm(regs))
17545 goto kernel_trap;
17546
17547 #ifdef CONFIG_X86_32
17548@@ -157,7 +151,7 @@ trap_signal:
17549 printk_ratelimit()) {
17550 printk(KERN_INFO
17551 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17552- tsk->comm, tsk->pid, str,
17553+ tsk->comm, task_pid_nr(tsk), str,
17554 regs->ip, regs->sp, error_code);
17555 print_vma_addr(" in ", regs->ip);
17556 printk("\n");
17557@@ -174,8 +168,20 @@ kernel_trap:
17558 if (!fixup_exception(regs)) {
17559 tsk->thread.error_code = error_code;
17560 tsk->thread.trap_no = trapnr;
17561+
17562+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17563+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17564+ str = "PAX: suspicious stack segment fault";
17565+#endif
17566+
17567 die(str, regs, error_code);
17568 }
17569+
17570+#ifdef CONFIG_PAX_REFCOUNT
17571+ if (trapnr == 4)
17572+ pax_report_refcount_overflow(regs);
17573+#endif
17574+
17575 return;
17576
17577 #ifdef CONFIG_X86_32
17578@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17579 conditional_sti(regs);
17580
17581 #ifdef CONFIG_X86_32
17582- if (regs->flags & X86_VM_MASK)
17583+ if (v8086_mode(regs))
17584 goto gp_in_vm86;
17585 #endif
17586
17587 tsk = current;
17588- if (!user_mode(regs))
17589+ if (!user_mode_novm(regs))
17590 goto gp_in_kernel;
17591
17592+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17593+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17594+ struct mm_struct *mm = tsk->mm;
17595+ unsigned long limit;
17596+
17597+ down_write(&mm->mmap_sem);
17598+ limit = mm->context.user_cs_limit;
17599+ if (limit < TASK_SIZE) {
17600+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17601+ up_write(&mm->mmap_sem);
17602+ return;
17603+ }
17604+ up_write(&mm->mmap_sem);
17605+ }
17606+#endif
17607+
17608 tsk->thread.error_code = error_code;
17609 tsk->thread.trap_no = 13;
17610
17611@@ -304,6 +326,13 @@ gp_in_kernel:
17612 if (notify_die(DIE_GPF, "general protection fault", regs,
17613 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17614 return;
17615+
17616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17617+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17618+ die("PAX: suspicious general protection fault", regs, error_code);
17619+ else
17620+#endif
17621+
17622 die("general protection fault", regs, error_code);
17623 }
17624
17625@@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17626 dotraplinkage notrace __kprobes void
17627 do_nmi(struct pt_regs *regs, long error_code)
17628 {
17629+
17630+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17631+ if (!user_mode(regs)) {
17632+ unsigned long cs = regs->cs & 0xFFFF;
17633+ unsigned long ip = ktva_ktla(regs->ip);
17634+
17635+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17636+ regs->ip = ip;
17637+ }
17638+#endif
17639+
17640 nmi_enter();
17641
17642 inc_irq_stat(__nmi_count);
17643@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17644 /* It's safe to allow irq's after DR6 has been saved */
17645 preempt_conditional_sti(regs);
17646
17647- if (regs->flags & X86_VM_MASK) {
17648+ if (v8086_mode(regs)) {
17649 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17650 error_code, 1);
17651 preempt_conditional_cli(regs);
17652@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17653 * We already checked v86 mode above, so we can check for kernel mode
17654 * by just checking the CPL of CS.
17655 */
17656- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17657+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17658 tsk->thread.debugreg6 &= ~DR_STEP;
17659 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17660 regs->flags &= ~X86_EFLAGS_TF;
17661@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17662 return;
17663 conditional_sti(regs);
17664
17665- if (!user_mode_vm(regs))
17666+ if (!user_mode(regs))
17667 {
17668 if (!fixup_exception(regs)) {
17669 task->thread.error_code = error_code;
17670@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17671 void __math_state_restore(void)
17672 {
17673 struct thread_info *thread = current_thread_info();
17674- struct task_struct *tsk = thread->task;
17675+ struct task_struct *tsk = current;
17676
17677 /*
17678 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17679@@ -750,8 +790,7 @@ void __math_state_restore(void)
17680 */
17681 asmlinkage void math_state_restore(void)
17682 {
17683- struct thread_info *thread = current_thread_info();
17684- struct task_struct *tsk = thread->task;
17685+ struct task_struct *tsk = current;
17686
17687 if (!tsk_used_math(tsk)) {
17688 local_irq_enable();
17689diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17690index b9242ba..50c5edd 100644
17691--- a/arch/x86/kernel/verify_cpu.S
17692+++ b/arch/x86/kernel/verify_cpu.S
17693@@ -20,6 +20,7 @@
17694 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17695 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17696 * arch/x86/kernel/head_32.S: processor startup
17697+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17698 *
17699 * verify_cpu, returns the status of longmode and SSE in register %eax.
17700 * 0: Success 1: Failure
17701diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17702index 863f875..4307295 100644
17703--- a/arch/x86/kernel/vm86_32.c
17704+++ b/arch/x86/kernel/vm86_32.c
17705@@ -41,6 +41,7 @@
17706 #include <linux/ptrace.h>
17707 #include <linux/audit.h>
17708 #include <linux/stddef.h>
17709+#include <linux/grsecurity.h>
17710
17711 #include <asm/uaccess.h>
17712 #include <asm/io.h>
17713@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17714 do_exit(SIGSEGV);
17715 }
17716
17717- tss = &per_cpu(init_tss, get_cpu());
17718+ tss = init_tss + get_cpu();
17719 current->thread.sp0 = current->thread.saved_sp0;
17720 current->thread.sysenter_cs = __KERNEL_CS;
17721 load_sp0(tss, &current->thread);
17722@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17723 struct task_struct *tsk;
17724 int tmp, ret = -EPERM;
17725
17726+#ifdef CONFIG_GRKERNSEC_VM86
17727+ if (!capable(CAP_SYS_RAWIO)) {
17728+ gr_handle_vm86();
17729+ goto out;
17730+ }
17731+#endif
17732+
17733 tsk = current;
17734 if (tsk->thread.saved_sp0)
17735 goto out;
17736@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17737 int tmp, ret;
17738 struct vm86plus_struct __user *v86;
17739
17740+#ifdef CONFIG_GRKERNSEC_VM86
17741+ if (!capable(CAP_SYS_RAWIO)) {
17742+ gr_handle_vm86();
17743+ ret = -EPERM;
17744+ goto out;
17745+ }
17746+#endif
17747+
17748 tsk = current;
17749 switch (cmd) {
17750 case VM86_REQUEST_IRQ:
17751@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17752 tsk->thread.saved_fs = info->regs32->fs;
17753 tsk->thread.saved_gs = get_user_gs(info->regs32);
17754
17755- tss = &per_cpu(init_tss, get_cpu());
17756+ tss = init_tss + get_cpu();
17757 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17758 if (cpu_has_sep)
17759 tsk->thread.sysenter_cs = 0;
17760@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17761 goto cannot_handle;
17762 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17763 goto cannot_handle;
17764- intr_ptr = (unsigned long __user *) (i << 2);
17765+ intr_ptr = (__force unsigned long __user *) (i << 2);
17766 if (get_user(segoffs, intr_ptr))
17767 goto cannot_handle;
17768 if ((segoffs >> 16) == BIOSSEG)
17769diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17770index 0f703f1..9e15f64 100644
17771--- a/arch/x86/kernel/vmlinux.lds.S
17772+++ b/arch/x86/kernel/vmlinux.lds.S
17773@@ -26,6 +26,13 @@
17774 #include <asm/page_types.h>
17775 #include <asm/cache.h>
17776 #include <asm/boot.h>
17777+#include <asm/segment.h>
17778+
17779+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17780+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17781+#else
17782+#define __KERNEL_TEXT_OFFSET 0
17783+#endif
17784
17785 #undef i386 /* in case the preprocessor is a 32bit one */
17786
17787@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17788
17789 PHDRS {
17790 text PT_LOAD FLAGS(5); /* R_E */
17791+#ifdef CONFIG_X86_32
17792+ module PT_LOAD FLAGS(5); /* R_E */
17793+#endif
17794+#ifdef CONFIG_XEN
17795+ rodata PT_LOAD FLAGS(5); /* R_E */
17796+#else
17797+ rodata PT_LOAD FLAGS(4); /* R__ */
17798+#endif
17799 data PT_LOAD FLAGS(6); /* RW_ */
17800-#ifdef CONFIG_X86_64
17801+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17802 #ifdef CONFIG_SMP
17803 percpu PT_LOAD FLAGS(6); /* RW_ */
17804 #endif
17805+ text.init PT_LOAD FLAGS(5); /* R_E */
17806+ text.exit PT_LOAD FLAGS(5); /* R_E */
17807 init PT_LOAD FLAGS(7); /* RWE */
17808-#endif
17809 note PT_NOTE FLAGS(0); /* ___ */
17810 }
17811
17812 SECTIONS
17813 {
17814 #ifdef CONFIG_X86_32
17815- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17816- phys_startup_32 = startup_32 - LOAD_OFFSET;
17817+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17818 #else
17819- . = __START_KERNEL;
17820- phys_startup_64 = startup_64 - LOAD_OFFSET;
17821+ . = __START_KERNEL;
17822 #endif
17823
17824 /* Text and read-only data */
17825- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17826- _text = .;
17827+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17828 /* bootstrapping code */
17829+#ifdef CONFIG_X86_32
17830+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17831+#else
17832+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17833+#endif
17834+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17835+ _text = .;
17836 HEAD_TEXT
17837 #ifdef CONFIG_X86_32
17838 . = ALIGN(PAGE_SIZE);
17839@@ -108,13 +128,47 @@ SECTIONS
17840 IRQENTRY_TEXT
17841 *(.fixup)
17842 *(.gnu.warning)
17843- /* End of text section */
17844- _etext = .;
17845 } :text = 0x9090
17846
17847- NOTES :text :note
17848+ . += __KERNEL_TEXT_OFFSET;
17849
17850- EXCEPTION_TABLE(16) :text = 0x9090
17851+#ifdef CONFIG_X86_32
17852+ . = ALIGN(PAGE_SIZE);
17853+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17854+
17855+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17856+ MODULES_EXEC_VADDR = .;
17857+ BYTE(0)
17858+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17859+ . = ALIGN(HPAGE_SIZE);
17860+ MODULES_EXEC_END = . - 1;
17861+#endif
17862+
17863+ } :module
17864+#endif
17865+
17866+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17867+ /* End of text section */
17868+ _etext = . - __KERNEL_TEXT_OFFSET;
17869+ }
17870+
17871+#ifdef CONFIG_X86_32
17872+ . = ALIGN(PAGE_SIZE);
17873+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17874+ *(.idt)
17875+ . = ALIGN(PAGE_SIZE);
17876+ *(.empty_zero_page)
17877+ *(.initial_pg_fixmap)
17878+ *(.initial_pg_pmd)
17879+ *(.initial_page_table)
17880+ *(.swapper_pg_dir)
17881+ } :rodata
17882+#endif
17883+
17884+ . = ALIGN(PAGE_SIZE);
17885+ NOTES :rodata :note
17886+
17887+ EXCEPTION_TABLE(16) :rodata
17888
17889 #if defined(CONFIG_DEBUG_RODATA)
17890 /* .text should occupy whole number of pages */
17891@@ -126,16 +180,20 @@ SECTIONS
17892
17893 /* Data */
17894 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17895+
17896+#ifdef CONFIG_PAX_KERNEXEC
17897+ . = ALIGN(HPAGE_SIZE);
17898+#else
17899+ . = ALIGN(PAGE_SIZE);
17900+#endif
17901+
17902 /* Start of data section */
17903 _sdata = .;
17904
17905 /* init_task */
17906 INIT_TASK_DATA(THREAD_SIZE)
17907
17908-#ifdef CONFIG_X86_32
17909- /* 32 bit has nosave before _edata */
17910 NOSAVE_DATA
17911-#endif
17912
17913 PAGE_ALIGNED_DATA(PAGE_SIZE)
17914
17915@@ -176,12 +234,19 @@ SECTIONS
17916 #endif /* CONFIG_X86_64 */
17917
17918 /* Init code and data - will be freed after init */
17919- . = ALIGN(PAGE_SIZE);
17920 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17921+ BYTE(0)
17922+
17923+#ifdef CONFIG_PAX_KERNEXEC
17924+ . = ALIGN(HPAGE_SIZE);
17925+#else
17926+ . = ALIGN(PAGE_SIZE);
17927+#endif
17928+
17929 __init_begin = .; /* paired with __init_end */
17930- }
17931+ } :init.begin
17932
17933-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17934+#ifdef CONFIG_SMP
17935 /*
17936 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17937 * output PHDR, so the next output section - .init.text - should
17938@@ -190,12 +255,27 @@ SECTIONS
17939 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17940 #endif
17941
17942- INIT_TEXT_SECTION(PAGE_SIZE)
17943-#ifdef CONFIG_X86_64
17944- :init
17945-#endif
17946+ . = ALIGN(PAGE_SIZE);
17947+ init_begin = .;
17948+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17949+ VMLINUX_SYMBOL(_sinittext) = .;
17950+ INIT_TEXT
17951+ VMLINUX_SYMBOL(_einittext) = .;
17952+ . = ALIGN(PAGE_SIZE);
17953+ } :text.init
17954
17955- INIT_DATA_SECTION(16)
17956+ /*
17957+ * .exit.text is discard at runtime, not link time, to deal with
17958+ * references from .altinstructions and .eh_frame
17959+ */
17960+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17961+ EXIT_TEXT
17962+ . = ALIGN(16);
17963+ } :text.exit
17964+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17965+
17966+ . = ALIGN(PAGE_SIZE);
17967+ INIT_DATA_SECTION(16) :init
17968
17969 /*
17970 * Code and data for a variety of lowlevel trampolines, to be
17971@@ -269,19 +349,12 @@ SECTIONS
17972 }
17973
17974 . = ALIGN(8);
17975- /*
17976- * .exit.text is discard at runtime, not link time, to deal with
17977- * references from .altinstructions and .eh_frame
17978- */
17979- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17980- EXIT_TEXT
17981- }
17982
17983 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17984 EXIT_DATA
17985 }
17986
17987-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17988+#ifndef CONFIG_SMP
17989 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17990 #endif
17991
17992@@ -300,16 +373,10 @@ SECTIONS
17993 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17994 __smp_locks = .;
17995 *(.smp_locks)
17996- . = ALIGN(PAGE_SIZE);
17997 __smp_locks_end = .;
17998+ . = ALIGN(PAGE_SIZE);
17999 }
18000
18001-#ifdef CONFIG_X86_64
18002- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18003- NOSAVE_DATA
18004- }
18005-#endif
18006-
18007 /* BSS */
18008 . = ALIGN(PAGE_SIZE);
18009 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18010@@ -325,6 +392,7 @@ SECTIONS
18011 __brk_base = .;
18012 . += 64 * 1024; /* 64k alignment slop space */
18013 *(.brk_reservation) /* areas brk users have reserved */
18014+ . = ALIGN(HPAGE_SIZE);
18015 __brk_limit = .;
18016 }
18017
18018@@ -351,13 +419,12 @@ SECTIONS
18019 * for the boot processor.
18020 */
18021 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18022-INIT_PER_CPU(gdt_page);
18023 INIT_PER_CPU(irq_stack_union);
18024
18025 /*
18026 * Build-time check on the image size:
18027 */
18028-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18029+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18030 "kernel image bigger than KERNEL_IMAGE_SIZE");
18031
18032 #ifdef CONFIG_SMP
18033diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18034index b56c65de..561a55b 100644
18035--- a/arch/x86/kernel/vsyscall_64.c
18036+++ b/arch/x86/kernel/vsyscall_64.c
18037@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18038 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18039 };
18040
18041-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18042+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18043
18044 static int __init vsyscall_setup(char *str)
18045 {
18046 if (str) {
18047 if (!strcmp("emulate", str))
18048 vsyscall_mode = EMULATE;
18049- else if (!strcmp("native", str))
18050- vsyscall_mode = NATIVE;
18051 else if (!strcmp("none", str))
18052 vsyscall_mode = NONE;
18053 else
18054@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18055
18056 tsk = current;
18057 if (seccomp_mode(&tsk->seccomp))
18058- do_exit(SIGKILL);
18059+ do_group_exit(SIGKILL);
18060
18061 switch (vsyscall_nr) {
18062 case 0:
18063@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18064 return true;
18065
18066 sigsegv:
18067- force_sig(SIGSEGV, current);
18068- return true;
18069+ do_group_exit(SIGKILL);
18070 }
18071
18072 /*
18073@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
18074 extern char __vvar_page;
18075 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18076
18077- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18078- vsyscall_mode == NATIVE
18079- ? PAGE_KERNEL_VSYSCALL
18080- : PAGE_KERNEL_VVAR);
18081+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18082 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18083 (unsigned long)VSYSCALL_START);
18084
18085diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18086index 9796c2f..f686fbf 100644
18087--- a/arch/x86/kernel/x8664_ksyms_64.c
18088+++ b/arch/x86/kernel/x8664_ksyms_64.c
18089@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18090 EXPORT_SYMBOL(copy_user_generic_string);
18091 EXPORT_SYMBOL(copy_user_generic_unrolled);
18092 EXPORT_SYMBOL(__copy_user_nocache);
18093-EXPORT_SYMBOL(_copy_from_user);
18094-EXPORT_SYMBOL(_copy_to_user);
18095
18096 EXPORT_SYMBOL(copy_page);
18097 EXPORT_SYMBOL(clear_page);
18098diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18099index a391134..d0b63b6e 100644
18100--- a/arch/x86/kernel/xsave.c
18101+++ b/arch/x86/kernel/xsave.c
18102@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18103 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18104 return -EINVAL;
18105
18106- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18107+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18108 fx_sw_user->extended_size -
18109 FP_XSTATE_MAGIC2_SIZE));
18110 if (err)
18111@@ -267,7 +267,7 @@ fx_only:
18112 * the other extended state.
18113 */
18114 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18115- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18116+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18117 }
18118
18119 /*
18120@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18121 if (use_xsave())
18122 err = restore_user_xstate(buf);
18123 else
18124- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18125+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18126 buf);
18127 if (unlikely(err)) {
18128 /*
18129diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18130index 8b4cc5f..f086b5b 100644
18131--- a/arch/x86/kvm/emulate.c
18132+++ b/arch/x86/kvm/emulate.c
18133@@ -96,7 +96,7 @@
18134 #define Src2ImmByte (2<<29)
18135 #define Src2One (3<<29)
18136 #define Src2Imm (4<<29)
18137-#define Src2Mask (7<<29)
18138+#define Src2Mask (7U<<29)
18139
18140 #define X2(x...) x, x
18141 #define X3(x...) X2(x), x
18142@@ -207,6 +207,7 @@ struct gprefix {
18143
18144 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
18145 do { \
18146+ unsigned long _tmp; \
18147 __asm__ __volatile__ ( \
18148 _PRE_EFLAGS("0", "4", "2") \
18149 _op _suffix " %"_x"3,%1; " \
18150@@ -220,8 +221,6 @@ struct gprefix {
18151 /* Raw emulation: instruction has two explicit operands. */
18152 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18153 do { \
18154- unsigned long _tmp; \
18155- \
18156 switch ((_dst).bytes) { \
18157 case 2: \
18158 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
18159@@ -237,7 +236,6 @@ struct gprefix {
18160
18161 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18162 do { \
18163- unsigned long _tmp; \
18164 switch ((_dst).bytes) { \
18165 case 1: \
18166 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
18167diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18168index 57dcbd4..79aba9b 100644
18169--- a/arch/x86/kvm/lapic.c
18170+++ b/arch/x86/kvm/lapic.c
18171@@ -53,7 +53,7 @@
18172 #define APIC_BUS_CYCLE_NS 1
18173
18174 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18175-#define apic_debug(fmt, arg...)
18176+#define apic_debug(fmt, arg...) do {} while (0)
18177
18178 #define APIC_LVT_NUM 6
18179 /* 14 is the version for Xeon and Pentium 8.4.8*/
18180diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18181index 8e8da79..13bc641 100644
18182--- a/arch/x86/kvm/mmu.c
18183+++ b/arch/x86/kvm/mmu.c
18184@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18185
18186 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18187
18188- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18189+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18190
18191 /*
18192 * Assume that the pte write on a page table of the same type
18193@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18194 }
18195
18196 spin_lock(&vcpu->kvm->mmu_lock);
18197- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18198+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18199 gentry = 0;
18200 kvm_mmu_free_some_pages(vcpu);
18201 ++vcpu->kvm->stat.mmu_pte_write;
18202diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18203index 507e2b8..fc55f89 100644
18204--- a/arch/x86/kvm/paging_tmpl.h
18205+++ b/arch/x86/kvm/paging_tmpl.h
18206@@ -197,7 +197,7 @@ retry_walk:
18207 if (unlikely(kvm_is_error_hva(host_addr)))
18208 goto error;
18209
18210- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18211+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18212 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18213 goto error;
18214
18215@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
18216 unsigned long mmu_seq;
18217 bool map_writable;
18218
18219+ pax_track_stack();
18220+
18221 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18222
18223 if (unlikely(error_code & PFERR_RSVD_MASK))
18224@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18225 if (need_flush)
18226 kvm_flush_remote_tlbs(vcpu->kvm);
18227
18228- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18229+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18230
18231 spin_unlock(&vcpu->kvm->mmu_lock);
18232
18233diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18234index 475d1c9..33658ff 100644
18235--- a/arch/x86/kvm/svm.c
18236+++ b/arch/x86/kvm/svm.c
18237@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18238 int cpu = raw_smp_processor_id();
18239
18240 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18241+
18242+ pax_open_kernel();
18243 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18244+ pax_close_kernel();
18245+
18246 load_TR_desc();
18247 }
18248
18249@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18250 #endif
18251 #endif
18252
18253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18254+ __set_fs(current_thread_info()->addr_limit);
18255+#endif
18256+
18257 reload_tss(vcpu);
18258
18259 local_irq_disable();
18260diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18261index e65a158..656dc24 100644
18262--- a/arch/x86/kvm/vmx.c
18263+++ b/arch/x86/kvm/vmx.c
18264@@ -1251,7 +1251,11 @@ static void reload_tss(void)
18265 struct desc_struct *descs;
18266
18267 descs = (void *)gdt->address;
18268+
18269+ pax_open_kernel();
18270 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18271+ pax_close_kernel();
18272+
18273 load_TR_desc();
18274 }
18275
18276@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
18277 if (!cpu_has_vmx_flexpriority())
18278 flexpriority_enabled = 0;
18279
18280- if (!cpu_has_vmx_tpr_shadow())
18281- kvm_x86_ops->update_cr8_intercept = NULL;
18282+ if (!cpu_has_vmx_tpr_shadow()) {
18283+ pax_open_kernel();
18284+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18285+ pax_close_kernel();
18286+ }
18287
18288 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18289 kvm_disable_largepages();
18290@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(void)
18291 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18292
18293 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18294- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18295+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18296
18297 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18298 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18299@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18300 "jmp .Lkvm_vmx_return \n\t"
18301 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18302 ".Lkvm_vmx_return: "
18303+
18304+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18305+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18306+ ".Lkvm_vmx_return2: "
18307+#endif
18308+
18309 /* Save guest registers, load host registers, keep flags */
18310 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18311 "pop %0 \n\t"
18312@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18313 #endif
18314 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18315 [wordsize]"i"(sizeof(ulong))
18316+
18317+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18318+ ,[cs]"i"(__KERNEL_CS)
18319+#endif
18320+
18321 : "cc", "memory"
18322 , R"ax", R"bx", R"di", R"si"
18323 #ifdef CONFIG_X86_64
18324@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18325 }
18326 }
18327
18328- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18329+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18330+
18331+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18332+ loadsegment(fs, __KERNEL_PERCPU);
18333+#endif
18334+
18335+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18336+ __set_fs(current_thread_info()->addr_limit);
18337+#endif
18338+
18339 vmx->loaded_vmcs->launched = 1;
18340
18341 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18342diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18343index 84a28ea..9326501 100644
18344--- a/arch/x86/kvm/x86.c
18345+++ b/arch/x86/kvm/x86.c
18346@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18347 {
18348 struct kvm *kvm = vcpu->kvm;
18349 int lm = is_long_mode(vcpu);
18350- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18351- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18352+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18353+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18354 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18355 : kvm->arch.xen_hvm_config.blob_size_32;
18356 u32 page_num = data & ~PAGE_MASK;
18357@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18358 if (n < msr_list.nmsrs)
18359 goto out;
18360 r = -EFAULT;
18361+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18362+ goto out;
18363 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18364 num_msrs_to_save * sizeof(u32)))
18365 goto out;
18366@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18367 struct kvm_cpuid2 *cpuid,
18368 struct kvm_cpuid_entry2 __user *entries)
18369 {
18370- int r;
18371+ int r, i;
18372
18373 r = -E2BIG;
18374 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18375 goto out;
18376 r = -EFAULT;
18377- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18378- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18379+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18380 goto out;
18381+ for (i = 0; i < cpuid->nent; ++i) {
18382+ struct kvm_cpuid_entry2 cpuid_entry;
18383+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18384+ goto out;
18385+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18386+ }
18387 vcpu->arch.cpuid_nent = cpuid->nent;
18388 kvm_apic_set_version(vcpu);
18389 kvm_x86_ops->cpuid_update(vcpu);
18390@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18391 struct kvm_cpuid2 *cpuid,
18392 struct kvm_cpuid_entry2 __user *entries)
18393 {
18394- int r;
18395+ int r, i;
18396
18397 r = -E2BIG;
18398 if (cpuid->nent < vcpu->arch.cpuid_nent)
18399 goto out;
18400 r = -EFAULT;
18401- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18402- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18403+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18404 goto out;
18405+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18406+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18407+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18408+ goto out;
18409+ }
18410 return 0;
18411
18412 out:
18413@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18414 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18415 struct kvm_interrupt *irq)
18416 {
18417- if (irq->irq < 0 || irq->irq >= 256)
18418+ if (irq->irq >= 256)
18419 return -EINVAL;
18420 if (irqchip_in_kernel(vcpu->kvm))
18421 return -ENXIO;
18422@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
18423 kvm_mmu_set_mmio_spte_mask(mask);
18424 }
18425
18426-int kvm_arch_init(void *opaque)
18427+int kvm_arch_init(const void *opaque)
18428 {
18429 int r;
18430 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18431diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18432index 13ee258..b9632f6 100644
18433--- a/arch/x86/lguest/boot.c
18434+++ b/arch/x86/lguest/boot.c
18435@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18436 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18437 * Launcher to reboot us.
18438 */
18439-static void lguest_restart(char *reason)
18440+static __noreturn void lguest_restart(char *reason)
18441 {
18442 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18443+ BUG();
18444 }
18445
18446 /*G:050
18447diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18448index 042f682..c92afb6 100644
18449--- a/arch/x86/lib/atomic64_32.c
18450+++ b/arch/x86/lib/atomic64_32.c
18451@@ -8,18 +8,30 @@
18452
18453 long long atomic64_read_cx8(long long, const atomic64_t *v);
18454 EXPORT_SYMBOL(atomic64_read_cx8);
18455+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18456+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18457 long long atomic64_set_cx8(long long, const atomic64_t *v);
18458 EXPORT_SYMBOL(atomic64_set_cx8);
18459+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18460+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18461 long long atomic64_xchg_cx8(long long, unsigned high);
18462 EXPORT_SYMBOL(atomic64_xchg_cx8);
18463 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18464 EXPORT_SYMBOL(atomic64_add_return_cx8);
18465+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18466+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18467 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18468 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18469+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18470+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18471 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18472 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18473+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18474+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18475 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18476 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18477+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18478+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18479 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18480 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18481 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18482@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18483 #ifndef CONFIG_X86_CMPXCHG64
18484 long long atomic64_read_386(long long, const atomic64_t *v);
18485 EXPORT_SYMBOL(atomic64_read_386);
18486+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18487+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18488 long long atomic64_set_386(long long, const atomic64_t *v);
18489 EXPORT_SYMBOL(atomic64_set_386);
18490+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18491+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18492 long long atomic64_xchg_386(long long, unsigned high);
18493 EXPORT_SYMBOL(atomic64_xchg_386);
18494 long long atomic64_add_return_386(long long a, atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_add_return_386);
18496+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18497+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18498 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18499 EXPORT_SYMBOL(atomic64_sub_return_386);
18500+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18501+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18502 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18503 EXPORT_SYMBOL(atomic64_inc_return_386);
18504+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18505+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18506 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18507 EXPORT_SYMBOL(atomic64_dec_return_386);
18508+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18509+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18510 long long atomic64_add_386(long long a, atomic64_t *v);
18511 EXPORT_SYMBOL(atomic64_add_386);
18512+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18513+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18514 long long atomic64_sub_386(long long a, atomic64_t *v);
18515 EXPORT_SYMBOL(atomic64_sub_386);
18516+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18517+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18518 long long atomic64_inc_386(long long a, atomic64_t *v);
18519 EXPORT_SYMBOL(atomic64_inc_386);
18520+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18521+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18522 long long atomic64_dec_386(long long a, atomic64_t *v);
18523 EXPORT_SYMBOL(atomic64_dec_386);
18524+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18525+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18526 long long atomic64_dec_if_positive_386(atomic64_t *v);
18527 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18528 int atomic64_inc_not_zero_386(atomic64_t *v);
18529diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18530index e8e7e0d..56fd1b0 100644
18531--- a/arch/x86/lib/atomic64_386_32.S
18532+++ b/arch/x86/lib/atomic64_386_32.S
18533@@ -48,6 +48,10 @@ BEGIN(read)
18534 movl (v), %eax
18535 movl 4(v), %edx
18536 RET_ENDP
18537+BEGIN(read_unchecked)
18538+ movl (v), %eax
18539+ movl 4(v), %edx
18540+RET_ENDP
18541 #undef v
18542
18543 #define v %esi
18544@@ -55,6 +59,10 @@ BEGIN(set)
18545 movl %ebx, (v)
18546 movl %ecx, 4(v)
18547 RET_ENDP
18548+BEGIN(set_unchecked)
18549+ movl %ebx, (v)
18550+ movl %ecx, 4(v)
18551+RET_ENDP
18552 #undef v
18553
18554 #define v %esi
18555@@ -70,6 +78,20 @@ RET_ENDP
18556 BEGIN(add)
18557 addl %eax, (v)
18558 adcl %edx, 4(v)
18559+
18560+#ifdef CONFIG_PAX_REFCOUNT
18561+ jno 0f
18562+ subl %eax, (v)
18563+ sbbl %edx, 4(v)
18564+ int $4
18565+0:
18566+ _ASM_EXTABLE(0b, 0b)
18567+#endif
18568+
18569+RET_ENDP
18570+BEGIN(add_unchecked)
18571+ addl %eax, (v)
18572+ adcl %edx, 4(v)
18573 RET_ENDP
18574 #undef v
18575
18576@@ -77,6 +99,24 @@ RET_ENDP
18577 BEGIN(add_return)
18578 addl (v), %eax
18579 adcl 4(v), %edx
18580+
18581+#ifdef CONFIG_PAX_REFCOUNT
18582+ into
18583+1234:
18584+ _ASM_EXTABLE(1234b, 2f)
18585+#endif
18586+
18587+ movl %eax, (v)
18588+ movl %edx, 4(v)
18589+
18590+#ifdef CONFIG_PAX_REFCOUNT
18591+2:
18592+#endif
18593+
18594+RET_ENDP
18595+BEGIN(add_return_unchecked)
18596+ addl (v), %eax
18597+ adcl 4(v), %edx
18598 movl %eax, (v)
18599 movl %edx, 4(v)
18600 RET_ENDP
18601@@ -86,6 +126,20 @@ RET_ENDP
18602 BEGIN(sub)
18603 subl %eax, (v)
18604 sbbl %edx, 4(v)
18605+
18606+#ifdef CONFIG_PAX_REFCOUNT
18607+ jno 0f
18608+ addl %eax, (v)
18609+ adcl %edx, 4(v)
18610+ int $4
18611+0:
18612+ _ASM_EXTABLE(0b, 0b)
18613+#endif
18614+
18615+RET_ENDP
18616+BEGIN(sub_unchecked)
18617+ subl %eax, (v)
18618+ sbbl %edx, 4(v)
18619 RET_ENDP
18620 #undef v
18621
18622@@ -96,6 +150,27 @@ BEGIN(sub_return)
18623 sbbl $0, %edx
18624 addl (v), %eax
18625 adcl 4(v), %edx
18626+
18627+#ifdef CONFIG_PAX_REFCOUNT
18628+ into
18629+1234:
18630+ _ASM_EXTABLE(1234b, 2f)
18631+#endif
18632+
18633+ movl %eax, (v)
18634+ movl %edx, 4(v)
18635+
18636+#ifdef CONFIG_PAX_REFCOUNT
18637+2:
18638+#endif
18639+
18640+RET_ENDP
18641+BEGIN(sub_return_unchecked)
18642+ negl %edx
18643+ negl %eax
18644+ sbbl $0, %edx
18645+ addl (v), %eax
18646+ adcl 4(v), %edx
18647 movl %eax, (v)
18648 movl %edx, 4(v)
18649 RET_ENDP
18650@@ -105,6 +180,20 @@ RET_ENDP
18651 BEGIN(inc)
18652 addl $1, (v)
18653 adcl $0, 4(v)
18654+
18655+#ifdef CONFIG_PAX_REFCOUNT
18656+ jno 0f
18657+ subl $1, (v)
18658+ sbbl $0, 4(v)
18659+ int $4
18660+0:
18661+ _ASM_EXTABLE(0b, 0b)
18662+#endif
18663+
18664+RET_ENDP
18665+BEGIN(inc_unchecked)
18666+ addl $1, (v)
18667+ adcl $0, 4(v)
18668 RET_ENDP
18669 #undef v
18670
18671@@ -114,6 +203,26 @@ BEGIN(inc_return)
18672 movl 4(v), %edx
18673 addl $1, %eax
18674 adcl $0, %edx
18675+
18676+#ifdef CONFIG_PAX_REFCOUNT
18677+ into
18678+1234:
18679+ _ASM_EXTABLE(1234b, 2f)
18680+#endif
18681+
18682+ movl %eax, (v)
18683+ movl %edx, 4(v)
18684+
18685+#ifdef CONFIG_PAX_REFCOUNT
18686+2:
18687+#endif
18688+
18689+RET_ENDP
18690+BEGIN(inc_return_unchecked)
18691+ movl (v), %eax
18692+ movl 4(v), %edx
18693+ addl $1, %eax
18694+ adcl $0, %edx
18695 movl %eax, (v)
18696 movl %edx, 4(v)
18697 RET_ENDP
18698@@ -123,6 +232,20 @@ RET_ENDP
18699 BEGIN(dec)
18700 subl $1, (v)
18701 sbbl $0, 4(v)
18702+
18703+#ifdef CONFIG_PAX_REFCOUNT
18704+ jno 0f
18705+ addl $1, (v)
18706+ adcl $0, 4(v)
18707+ int $4
18708+0:
18709+ _ASM_EXTABLE(0b, 0b)
18710+#endif
18711+
18712+RET_ENDP
18713+BEGIN(dec_unchecked)
18714+ subl $1, (v)
18715+ sbbl $0, 4(v)
18716 RET_ENDP
18717 #undef v
18718
18719@@ -132,6 +255,26 @@ BEGIN(dec_return)
18720 movl 4(v), %edx
18721 subl $1, %eax
18722 sbbl $0, %edx
18723+
18724+#ifdef CONFIG_PAX_REFCOUNT
18725+ into
18726+1234:
18727+ _ASM_EXTABLE(1234b, 2f)
18728+#endif
18729+
18730+ movl %eax, (v)
18731+ movl %edx, 4(v)
18732+
18733+#ifdef CONFIG_PAX_REFCOUNT
18734+2:
18735+#endif
18736+
18737+RET_ENDP
18738+BEGIN(dec_return_unchecked)
18739+ movl (v), %eax
18740+ movl 4(v), %edx
18741+ subl $1, %eax
18742+ sbbl $0, %edx
18743 movl %eax, (v)
18744 movl %edx, 4(v)
18745 RET_ENDP
18746@@ -143,6 +286,13 @@ BEGIN(add_unless)
18747 adcl %edx, %edi
18748 addl (v), %eax
18749 adcl 4(v), %edx
18750+
18751+#ifdef CONFIG_PAX_REFCOUNT
18752+ into
18753+1234:
18754+ _ASM_EXTABLE(1234b, 2f)
18755+#endif
18756+
18757 cmpl %eax, %esi
18758 je 3f
18759 1:
18760@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18761 1:
18762 addl $1, %eax
18763 adcl $0, %edx
18764+
18765+#ifdef CONFIG_PAX_REFCOUNT
18766+ into
18767+1234:
18768+ _ASM_EXTABLE(1234b, 2f)
18769+#endif
18770+
18771 movl %eax, (v)
18772 movl %edx, 4(v)
18773 movl $1, %eax
18774@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18775 movl 4(v), %edx
18776 subl $1, %eax
18777 sbbl $0, %edx
18778+
18779+#ifdef CONFIG_PAX_REFCOUNT
18780+ into
18781+1234:
18782+ _ASM_EXTABLE(1234b, 1f)
18783+#endif
18784+
18785 js 1f
18786 movl %eax, (v)
18787 movl %edx, 4(v)
18788diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18789index 391a083..d658e9f 100644
18790--- a/arch/x86/lib/atomic64_cx8_32.S
18791+++ b/arch/x86/lib/atomic64_cx8_32.S
18792@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18793 CFI_STARTPROC
18794
18795 read64 %ecx
18796+ pax_force_retaddr
18797 ret
18798 CFI_ENDPROC
18799 ENDPROC(atomic64_read_cx8)
18800
18801+ENTRY(atomic64_read_unchecked_cx8)
18802+ CFI_STARTPROC
18803+
18804+ read64 %ecx
18805+ pax_force_retaddr
18806+ ret
18807+ CFI_ENDPROC
18808+ENDPROC(atomic64_read_unchecked_cx8)
18809+
18810 ENTRY(atomic64_set_cx8)
18811 CFI_STARTPROC
18812
18813@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18814 cmpxchg8b (%esi)
18815 jne 1b
18816
18817+ pax_force_retaddr
18818 ret
18819 CFI_ENDPROC
18820 ENDPROC(atomic64_set_cx8)
18821
18822+ENTRY(atomic64_set_unchecked_cx8)
18823+ CFI_STARTPROC
18824+
18825+1:
18826+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18827+ * are atomic on 586 and newer */
18828+ cmpxchg8b (%esi)
18829+ jne 1b
18830+
18831+ pax_force_retaddr
18832+ ret
18833+ CFI_ENDPROC
18834+ENDPROC(atomic64_set_unchecked_cx8)
18835+
18836 ENTRY(atomic64_xchg_cx8)
18837 CFI_STARTPROC
18838
18839@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18840 cmpxchg8b (%esi)
18841 jne 1b
18842
18843+ pax_force_retaddr
18844 ret
18845 CFI_ENDPROC
18846 ENDPROC(atomic64_xchg_cx8)
18847
18848-.macro addsub_return func ins insc
18849-ENTRY(atomic64_\func\()_return_cx8)
18850+.macro addsub_return func ins insc unchecked=""
18851+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18852 CFI_STARTPROC
18853 SAVE ebp
18854 SAVE ebx
18855@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18856 movl %edx, %ecx
18857 \ins\()l %esi, %ebx
18858 \insc\()l %edi, %ecx
18859+
18860+.ifb \unchecked
18861+#ifdef CONFIG_PAX_REFCOUNT
18862+ into
18863+2:
18864+ _ASM_EXTABLE(2b, 3f)
18865+#endif
18866+.endif
18867+
18868 LOCK_PREFIX
18869 cmpxchg8b (%ebp)
18870 jne 1b
18871-
18872-10:
18873 movl %ebx, %eax
18874 movl %ecx, %edx
18875+
18876+.ifb \unchecked
18877+#ifdef CONFIG_PAX_REFCOUNT
18878+3:
18879+#endif
18880+.endif
18881+
18882 RESTORE edi
18883 RESTORE esi
18884 RESTORE ebx
18885 RESTORE ebp
18886+ pax_force_retaddr
18887 ret
18888 CFI_ENDPROC
18889-ENDPROC(atomic64_\func\()_return_cx8)
18890+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18891 .endm
18892
18893 addsub_return add add adc
18894 addsub_return sub sub sbb
18895+addsub_return add add adc _unchecked
18896+addsub_return sub sub sbb _unchecked
18897
18898-.macro incdec_return func ins insc
18899-ENTRY(atomic64_\func\()_return_cx8)
18900+.macro incdec_return func ins insc unchecked
18901+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18902 CFI_STARTPROC
18903 SAVE ebx
18904
18905@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18906 movl %edx, %ecx
18907 \ins\()l $1, %ebx
18908 \insc\()l $0, %ecx
18909+
18910+.ifb \unchecked
18911+#ifdef CONFIG_PAX_REFCOUNT
18912+ into
18913+2:
18914+ _ASM_EXTABLE(2b, 3f)
18915+#endif
18916+.endif
18917+
18918 LOCK_PREFIX
18919 cmpxchg8b (%esi)
18920 jne 1b
18921
18922-10:
18923 movl %ebx, %eax
18924 movl %ecx, %edx
18925+
18926+.ifb \unchecked
18927+#ifdef CONFIG_PAX_REFCOUNT
18928+3:
18929+#endif
18930+.endif
18931+
18932 RESTORE ebx
18933+ pax_force_retaddr
18934 ret
18935 CFI_ENDPROC
18936-ENDPROC(atomic64_\func\()_return_cx8)
18937+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18938 .endm
18939
18940 incdec_return inc add adc
18941 incdec_return dec sub sbb
18942+incdec_return inc add adc _unchecked
18943+incdec_return dec sub sbb _unchecked
18944
18945 ENTRY(atomic64_dec_if_positive_cx8)
18946 CFI_STARTPROC
18947@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18948 movl %edx, %ecx
18949 subl $1, %ebx
18950 sbb $0, %ecx
18951+
18952+#ifdef CONFIG_PAX_REFCOUNT
18953+ into
18954+1234:
18955+ _ASM_EXTABLE(1234b, 2f)
18956+#endif
18957+
18958 js 2f
18959 LOCK_PREFIX
18960 cmpxchg8b (%esi)
18961@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18962 movl %ebx, %eax
18963 movl %ecx, %edx
18964 RESTORE ebx
18965+ pax_force_retaddr
18966 ret
18967 CFI_ENDPROC
18968 ENDPROC(atomic64_dec_if_positive_cx8)
18969@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18970 movl %edx, %ecx
18971 addl %esi, %ebx
18972 adcl %edi, %ecx
18973+
18974+#ifdef CONFIG_PAX_REFCOUNT
18975+ into
18976+1234:
18977+ _ASM_EXTABLE(1234b, 3f)
18978+#endif
18979+
18980 LOCK_PREFIX
18981 cmpxchg8b (%ebp)
18982 jne 1b
18983@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18984 CFI_ADJUST_CFA_OFFSET -8
18985 RESTORE ebx
18986 RESTORE ebp
18987+ pax_force_retaddr
18988 ret
18989 4:
18990 cmpl %edx, 4(%esp)
18991@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18992 movl %edx, %ecx
18993 addl $1, %ebx
18994 adcl $0, %ecx
18995+
18996+#ifdef CONFIG_PAX_REFCOUNT
18997+ into
18998+1234:
18999+ _ASM_EXTABLE(1234b, 3f)
19000+#endif
19001+
19002 LOCK_PREFIX
19003 cmpxchg8b (%esi)
19004 jne 1b
19005@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19006 movl $1, %eax
19007 3:
19008 RESTORE ebx
19009+ pax_force_retaddr
19010 ret
19011 4:
19012 testl %edx, %edx
19013diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19014index 78d16a5..fbcf666 100644
19015--- a/arch/x86/lib/checksum_32.S
19016+++ b/arch/x86/lib/checksum_32.S
19017@@ -28,7 +28,8 @@
19018 #include <linux/linkage.h>
19019 #include <asm/dwarf2.h>
19020 #include <asm/errno.h>
19021-
19022+#include <asm/segment.h>
19023+
19024 /*
19025 * computes a partial checksum, e.g. for TCP/UDP fragments
19026 */
19027@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19028
19029 #define ARGBASE 16
19030 #define FP 12
19031-
19032-ENTRY(csum_partial_copy_generic)
19033+
19034+ENTRY(csum_partial_copy_generic_to_user)
19035 CFI_STARTPROC
19036+
19037+#ifdef CONFIG_PAX_MEMORY_UDEREF
19038+ pushl_cfi %gs
19039+ popl_cfi %es
19040+ jmp csum_partial_copy_generic
19041+#endif
19042+
19043+ENTRY(csum_partial_copy_generic_from_user)
19044+
19045+#ifdef CONFIG_PAX_MEMORY_UDEREF
19046+ pushl_cfi %gs
19047+ popl_cfi %ds
19048+#endif
19049+
19050+ENTRY(csum_partial_copy_generic)
19051 subl $4,%esp
19052 CFI_ADJUST_CFA_OFFSET 4
19053 pushl_cfi %edi
19054@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19055 jmp 4f
19056 SRC(1: movw (%esi), %bx )
19057 addl $2, %esi
19058-DST( movw %bx, (%edi) )
19059+DST( movw %bx, %es:(%edi) )
19060 addl $2, %edi
19061 addw %bx, %ax
19062 adcl $0, %eax
19063@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19064 SRC(1: movl (%esi), %ebx )
19065 SRC( movl 4(%esi), %edx )
19066 adcl %ebx, %eax
19067-DST( movl %ebx, (%edi) )
19068+DST( movl %ebx, %es:(%edi) )
19069 adcl %edx, %eax
19070-DST( movl %edx, 4(%edi) )
19071+DST( movl %edx, %es:4(%edi) )
19072
19073 SRC( movl 8(%esi), %ebx )
19074 SRC( movl 12(%esi), %edx )
19075 adcl %ebx, %eax
19076-DST( movl %ebx, 8(%edi) )
19077+DST( movl %ebx, %es:8(%edi) )
19078 adcl %edx, %eax
19079-DST( movl %edx, 12(%edi) )
19080+DST( movl %edx, %es:12(%edi) )
19081
19082 SRC( movl 16(%esi), %ebx )
19083 SRC( movl 20(%esi), %edx )
19084 adcl %ebx, %eax
19085-DST( movl %ebx, 16(%edi) )
19086+DST( movl %ebx, %es:16(%edi) )
19087 adcl %edx, %eax
19088-DST( movl %edx, 20(%edi) )
19089+DST( movl %edx, %es:20(%edi) )
19090
19091 SRC( movl 24(%esi), %ebx )
19092 SRC( movl 28(%esi), %edx )
19093 adcl %ebx, %eax
19094-DST( movl %ebx, 24(%edi) )
19095+DST( movl %ebx, %es:24(%edi) )
19096 adcl %edx, %eax
19097-DST( movl %edx, 28(%edi) )
19098+DST( movl %edx, %es:28(%edi) )
19099
19100 lea 32(%esi), %esi
19101 lea 32(%edi), %edi
19102@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19103 shrl $2, %edx # This clears CF
19104 SRC(3: movl (%esi), %ebx )
19105 adcl %ebx, %eax
19106-DST( movl %ebx, (%edi) )
19107+DST( movl %ebx, %es:(%edi) )
19108 lea 4(%esi), %esi
19109 lea 4(%edi), %edi
19110 dec %edx
19111@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19112 jb 5f
19113 SRC( movw (%esi), %cx )
19114 leal 2(%esi), %esi
19115-DST( movw %cx, (%edi) )
19116+DST( movw %cx, %es:(%edi) )
19117 leal 2(%edi), %edi
19118 je 6f
19119 shll $16,%ecx
19120 SRC(5: movb (%esi), %cl )
19121-DST( movb %cl, (%edi) )
19122+DST( movb %cl, %es:(%edi) )
19123 6: addl %ecx, %eax
19124 adcl $0, %eax
19125 7:
19126@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19127
19128 6001:
19129 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19130- movl $-EFAULT, (%ebx)
19131+ movl $-EFAULT, %ss:(%ebx)
19132
19133 # zero the complete destination - computing the rest
19134 # is too much work
19135@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19136
19137 6002:
19138 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19139- movl $-EFAULT,(%ebx)
19140+ movl $-EFAULT,%ss:(%ebx)
19141 jmp 5000b
19142
19143 .previous
19144
19145+ pushl_cfi %ss
19146+ popl_cfi %ds
19147+ pushl_cfi %ss
19148+ popl_cfi %es
19149 popl_cfi %ebx
19150 CFI_RESTORE ebx
19151 popl_cfi %esi
19152@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19153 popl_cfi %ecx # equivalent to addl $4,%esp
19154 ret
19155 CFI_ENDPROC
19156-ENDPROC(csum_partial_copy_generic)
19157+ENDPROC(csum_partial_copy_generic_to_user)
19158
19159 #else
19160
19161 /* Version for PentiumII/PPro */
19162
19163 #define ROUND1(x) \
19164+ nop; nop; nop; \
19165 SRC(movl x(%esi), %ebx ) ; \
19166 addl %ebx, %eax ; \
19167- DST(movl %ebx, x(%edi) ) ;
19168+ DST(movl %ebx, %es:x(%edi)) ;
19169
19170 #define ROUND(x) \
19171+ nop; nop; nop; \
19172 SRC(movl x(%esi), %ebx ) ; \
19173 adcl %ebx, %eax ; \
19174- DST(movl %ebx, x(%edi) ) ;
19175+ DST(movl %ebx, %es:x(%edi)) ;
19176
19177 #define ARGBASE 12
19178-
19179-ENTRY(csum_partial_copy_generic)
19180+
19181+ENTRY(csum_partial_copy_generic_to_user)
19182 CFI_STARTPROC
19183+
19184+#ifdef CONFIG_PAX_MEMORY_UDEREF
19185+ pushl_cfi %gs
19186+ popl_cfi %es
19187+ jmp csum_partial_copy_generic
19188+#endif
19189+
19190+ENTRY(csum_partial_copy_generic_from_user)
19191+
19192+#ifdef CONFIG_PAX_MEMORY_UDEREF
19193+ pushl_cfi %gs
19194+ popl_cfi %ds
19195+#endif
19196+
19197+ENTRY(csum_partial_copy_generic)
19198 pushl_cfi %ebx
19199 CFI_REL_OFFSET ebx, 0
19200 pushl_cfi %edi
19201@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19202 subl %ebx, %edi
19203 lea -1(%esi),%edx
19204 andl $-32,%edx
19205- lea 3f(%ebx,%ebx), %ebx
19206+ lea 3f(%ebx,%ebx,2), %ebx
19207 testl %esi, %esi
19208 jmp *%ebx
19209 1: addl $64,%esi
19210@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19211 jb 5f
19212 SRC( movw (%esi), %dx )
19213 leal 2(%esi), %esi
19214-DST( movw %dx, (%edi) )
19215+DST( movw %dx, %es:(%edi) )
19216 leal 2(%edi), %edi
19217 je 6f
19218 shll $16,%edx
19219 5:
19220 SRC( movb (%esi), %dl )
19221-DST( movb %dl, (%edi) )
19222+DST( movb %dl, %es:(%edi) )
19223 6: addl %edx, %eax
19224 adcl $0, %eax
19225 7:
19226 .section .fixup, "ax"
19227 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19228- movl $-EFAULT, (%ebx)
19229+ movl $-EFAULT, %ss:(%ebx)
19230 # zero the complete destination (computing the rest is too much work)
19231 movl ARGBASE+8(%esp),%edi # dst
19232 movl ARGBASE+12(%esp),%ecx # len
19233@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19234 rep; stosb
19235 jmp 7b
19236 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19237- movl $-EFAULT, (%ebx)
19238+ movl $-EFAULT, %ss:(%ebx)
19239 jmp 7b
19240 .previous
19241
19242+#ifdef CONFIG_PAX_MEMORY_UDEREF
19243+ pushl_cfi %ss
19244+ popl_cfi %ds
19245+ pushl_cfi %ss
19246+ popl_cfi %es
19247+#endif
19248+
19249 popl_cfi %esi
19250 CFI_RESTORE esi
19251 popl_cfi %edi
19252@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19253 CFI_RESTORE ebx
19254 ret
19255 CFI_ENDPROC
19256-ENDPROC(csum_partial_copy_generic)
19257+ENDPROC(csum_partial_copy_generic_to_user)
19258
19259 #undef ROUND
19260 #undef ROUND1
19261diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19262index f2145cf..cea889d 100644
19263--- a/arch/x86/lib/clear_page_64.S
19264+++ b/arch/x86/lib/clear_page_64.S
19265@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19266 movl $4096/8,%ecx
19267 xorl %eax,%eax
19268 rep stosq
19269+ pax_force_retaddr
19270 ret
19271 CFI_ENDPROC
19272 ENDPROC(clear_page_c)
19273@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19274 movl $4096,%ecx
19275 xorl %eax,%eax
19276 rep stosb
19277+ pax_force_retaddr
19278 ret
19279 CFI_ENDPROC
19280 ENDPROC(clear_page_c_e)
19281@@ -43,6 +45,7 @@ ENTRY(clear_page)
19282 leaq 64(%rdi),%rdi
19283 jnz .Lloop
19284 nop
19285+ pax_force_retaddr
19286 ret
19287 CFI_ENDPROC
19288 .Lclear_page_end:
19289@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19290
19291 #include <asm/cpufeature.h>
19292
19293- .section .altinstr_replacement,"ax"
19294+ .section .altinstr_replacement,"a"
19295 1: .byte 0xeb /* jmp <disp8> */
19296 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19297 2: .byte 0xeb /* jmp <disp8> */
19298diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19299index 1e572c5..2a162cd 100644
19300--- a/arch/x86/lib/cmpxchg16b_emu.S
19301+++ b/arch/x86/lib/cmpxchg16b_emu.S
19302@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19303
19304 popf
19305 mov $1, %al
19306+ pax_force_retaddr
19307 ret
19308
19309 not_same:
19310 popf
19311 xor %al,%al
19312+ pax_force_retaddr
19313 ret
19314
19315 CFI_ENDPROC
19316diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19317index 01c805b..dccb07f 100644
19318--- a/arch/x86/lib/copy_page_64.S
19319+++ b/arch/x86/lib/copy_page_64.S
19320@@ -9,6 +9,7 @@ copy_page_c:
19321 CFI_STARTPROC
19322 movl $4096/8,%ecx
19323 rep movsq
19324+ pax_force_retaddr
19325 ret
19326 CFI_ENDPROC
19327 ENDPROC(copy_page_c)
19328@@ -39,7 +40,7 @@ ENTRY(copy_page)
19329 movq 16 (%rsi), %rdx
19330 movq 24 (%rsi), %r8
19331 movq 32 (%rsi), %r9
19332- movq 40 (%rsi), %r10
19333+ movq 40 (%rsi), %r13
19334 movq 48 (%rsi), %r11
19335 movq 56 (%rsi), %r12
19336
19337@@ -50,7 +51,7 @@ ENTRY(copy_page)
19338 movq %rdx, 16 (%rdi)
19339 movq %r8, 24 (%rdi)
19340 movq %r9, 32 (%rdi)
19341- movq %r10, 40 (%rdi)
19342+ movq %r13, 40 (%rdi)
19343 movq %r11, 48 (%rdi)
19344 movq %r12, 56 (%rdi)
19345
19346@@ -69,7 +70,7 @@ ENTRY(copy_page)
19347 movq 16 (%rsi), %rdx
19348 movq 24 (%rsi), %r8
19349 movq 32 (%rsi), %r9
19350- movq 40 (%rsi), %r10
19351+ movq 40 (%rsi), %r13
19352 movq 48 (%rsi), %r11
19353 movq 56 (%rsi), %r12
19354
19355@@ -78,7 +79,7 @@ ENTRY(copy_page)
19356 movq %rdx, 16 (%rdi)
19357 movq %r8, 24 (%rdi)
19358 movq %r9, 32 (%rdi)
19359- movq %r10, 40 (%rdi)
19360+ movq %r13, 40 (%rdi)
19361 movq %r11, 48 (%rdi)
19362 movq %r12, 56 (%rdi)
19363
19364@@ -95,6 +96,7 @@ ENTRY(copy_page)
19365 CFI_RESTORE r13
19366 addq $3*8,%rsp
19367 CFI_ADJUST_CFA_OFFSET -3*8
19368+ pax_force_retaddr
19369 ret
19370 .Lcopy_page_end:
19371 CFI_ENDPROC
19372@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19373
19374 #include <asm/cpufeature.h>
19375
19376- .section .altinstr_replacement,"ax"
19377+ .section .altinstr_replacement,"a"
19378 1: .byte 0xeb /* jmp <disp8> */
19379 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19380 2:
19381diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19382index 0248402..821c786 100644
19383--- a/arch/x86/lib/copy_user_64.S
19384+++ b/arch/x86/lib/copy_user_64.S
19385@@ -16,6 +16,7 @@
19386 #include <asm/thread_info.h>
19387 #include <asm/cpufeature.h>
19388 #include <asm/alternative-asm.h>
19389+#include <asm/pgtable.h>
19390
19391 /*
19392 * By placing feature2 after feature1 in altinstructions section, we logically
19393@@ -29,7 +30,7 @@
19394 .byte 0xe9 /* 32bit jump */
19395 .long \orig-1f /* by default jump to orig */
19396 1:
19397- .section .altinstr_replacement,"ax"
19398+ .section .altinstr_replacement,"a"
19399 2: .byte 0xe9 /* near jump with 32bit immediate */
19400 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19401 3: .byte 0xe9 /* near jump with 32bit immediate */
19402@@ -71,47 +72,20 @@
19403 #endif
19404 .endm
19405
19406-/* Standard copy_to_user with segment limit checking */
19407-ENTRY(_copy_to_user)
19408- CFI_STARTPROC
19409- GET_THREAD_INFO(%rax)
19410- movq %rdi,%rcx
19411- addq %rdx,%rcx
19412- jc bad_to_user
19413- cmpq TI_addr_limit(%rax),%rcx
19414- ja bad_to_user
19415- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19416- copy_user_generic_unrolled,copy_user_generic_string, \
19417- copy_user_enhanced_fast_string
19418- CFI_ENDPROC
19419-ENDPROC(_copy_to_user)
19420-
19421-/* Standard copy_from_user with segment limit checking */
19422-ENTRY(_copy_from_user)
19423- CFI_STARTPROC
19424- GET_THREAD_INFO(%rax)
19425- movq %rsi,%rcx
19426- addq %rdx,%rcx
19427- jc bad_from_user
19428- cmpq TI_addr_limit(%rax),%rcx
19429- ja bad_from_user
19430- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19431- copy_user_generic_unrolled,copy_user_generic_string, \
19432- copy_user_enhanced_fast_string
19433- CFI_ENDPROC
19434-ENDPROC(_copy_from_user)
19435-
19436 .section .fixup,"ax"
19437 /* must zero dest */
19438 ENTRY(bad_from_user)
19439 bad_from_user:
19440 CFI_STARTPROC
19441+ testl %edx,%edx
19442+ js bad_to_user
19443 movl %edx,%ecx
19444 xorl %eax,%eax
19445 rep
19446 stosb
19447 bad_to_user:
19448 movl %edx,%eax
19449+ pax_force_retaddr
19450 ret
19451 CFI_ENDPROC
19452 ENDPROC(bad_from_user)
19453@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19454 jz 17f
19455 1: movq (%rsi),%r8
19456 2: movq 1*8(%rsi),%r9
19457-3: movq 2*8(%rsi),%r10
19458+3: movq 2*8(%rsi),%rax
19459 4: movq 3*8(%rsi),%r11
19460 5: movq %r8,(%rdi)
19461 6: movq %r9,1*8(%rdi)
19462-7: movq %r10,2*8(%rdi)
19463+7: movq %rax,2*8(%rdi)
19464 8: movq %r11,3*8(%rdi)
19465 9: movq 4*8(%rsi),%r8
19466 10: movq 5*8(%rsi),%r9
19467-11: movq 6*8(%rsi),%r10
19468+11: movq 6*8(%rsi),%rax
19469 12: movq 7*8(%rsi),%r11
19470 13: movq %r8,4*8(%rdi)
19471 14: movq %r9,5*8(%rdi)
19472-15: movq %r10,6*8(%rdi)
19473+15: movq %rax,6*8(%rdi)
19474 16: movq %r11,7*8(%rdi)
19475 leaq 64(%rsi),%rsi
19476 leaq 64(%rdi),%rdi
19477@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19478 decl %ecx
19479 jnz 21b
19480 23: xor %eax,%eax
19481+ pax_force_retaddr
19482 ret
19483
19484 .section .fixup,"ax"
19485@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19486 3: rep
19487 movsb
19488 4: xorl %eax,%eax
19489+ pax_force_retaddr
19490 ret
19491
19492 .section .fixup,"ax"
19493@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19494 1: rep
19495 movsb
19496 2: xorl %eax,%eax
19497+ pax_force_retaddr
19498 ret
19499
19500 .section .fixup,"ax"
19501diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19502index cb0c112..e3a6895 100644
19503--- a/arch/x86/lib/copy_user_nocache_64.S
19504+++ b/arch/x86/lib/copy_user_nocache_64.S
19505@@ -8,12 +8,14 @@
19506
19507 #include <linux/linkage.h>
19508 #include <asm/dwarf2.h>
19509+#include <asm/alternative-asm.h>
19510
19511 #define FIX_ALIGNMENT 1
19512
19513 #include <asm/current.h>
19514 #include <asm/asm-offsets.h>
19515 #include <asm/thread_info.h>
19516+#include <asm/pgtable.h>
19517
19518 .macro ALIGN_DESTINATION
19519 #ifdef FIX_ALIGNMENT
19520@@ -50,6 +52,15 @@
19521 */
19522 ENTRY(__copy_user_nocache)
19523 CFI_STARTPROC
19524+
19525+#ifdef CONFIG_PAX_MEMORY_UDEREF
19526+ mov $PAX_USER_SHADOW_BASE,%rcx
19527+ cmp %rcx,%rsi
19528+ jae 1f
19529+ add %rcx,%rsi
19530+1:
19531+#endif
19532+
19533 cmpl $8,%edx
19534 jb 20f /* less then 8 bytes, go to byte copy loop */
19535 ALIGN_DESTINATION
19536@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19537 jz 17f
19538 1: movq (%rsi),%r8
19539 2: movq 1*8(%rsi),%r9
19540-3: movq 2*8(%rsi),%r10
19541+3: movq 2*8(%rsi),%rax
19542 4: movq 3*8(%rsi),%r11
19543 5: movnti %r8,(%rdi)
19544 6: movnti %r9,1*8(%rdi)
19545-7: movnti %r10,2*8(%rdi)
19546+7: movnti %rax,2*8(%rdi)
19547 8: movnti %r11,3*8(%rdi)
19548 9: movq 4*8(%rsi),%r8
19549 10: movq 5*8(%rsi),%r9
19550-11: movq 6*8(%rsi),%r10
19551+11: movq 6*8(%rsi),%rax
19552 12: movq 7*8(%rsi),%r11
19553 13: movnti %r8,4*8(%rdi)
19554 14: movnti %r9,5*8(%rdi)
19555-15: movnti %r10,6*8(%rdi)
19556+15: movnti %rax,6*8(%rdi)
19557 16: movnti %r11,7*8(%rdi)
19558 leaq 64(%rsi),%rsi
19559 leaq 64(%rdi),%rdi
19560@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19561 jnz 21b
19562 23: xorl %eax,%eax
19563 sfence
19564+ pax_force_retaddr
19565 ret
19566
19567 .section .fixup,"ax"
19568diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19569index fb903b7..c92b7f7 100644
19570--- a/arch/x86/lib/csum-copy_64.S
19571+++ b/arch/x86/lib/csum-copy_64.S
19572@@ -8,6 +8,7 @@
19573 #include <linux/linkage.h>
19574 #include <asm/dwarf2.h>
19575 #include <asm/errno.h>
19576+#include <asm/alternative-asm.h>
19577
19578 /*
19579 * Checksum copy with exception handling.
19580@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19581 CFI_RESTORE rbp
19582 addq $7*8, %rsp
19583 CFI_ADJUST_CFA_OFFSET -7*8
19584+ pax_force_retaddr 0, 1
19585 ret
19586 CFI_RESTORE_STATE
19587
19588diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19589index 459b58a..9570bc7 100644
19590--- a/arch/x86/lib/csum-wrappers_64.c
19591+++ b/arch/x86/lib/csum-wrappers_64.c
19592@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19593 len -= 2;
19594 }
19595 }
19596- isum = csum_partial_copy_generic((__force const void *)src,
19597+
19598+#ifdef CONFIG_PAX_MEMORY_UDEREF
19599+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19600+ src += PAX_USER_SHADOW_BASE;
19601+#endif
19602+
19603+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19604 dst, len, isum, errp, NULL);
19605 if (unlikely(*errp))
19606 goto out_err;
19607@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19608 }
19609
19610 *errp = 0;
19611- return csum_partial_copy_generic(src, (void __force *)dst,
19612+
19613+#ifdef CONFIG_PAX_MEMORY_UDEREF
19614+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19615+ dst += PAX_USER_SHADOW_BASE;
19616+#endif
19617+
19618+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19619 len, isum, NULL, errp);
19620 }
19621 EXPORT_SYMBOL(csum_partial_copy_to_user);
19622diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19623index 51f1504..ddac4c1 100644
19624--- a/arch/x86/lib/getuser.S
19625+++ b/arch/x86/lib/getuser.S
19626@@ -33,15 +33,38 @@
19627 #include <asm/asm-offsets.h>
19628 #include <asm/thread_info.h>
19629 #include <asm/asm.h>
19630+#include <asm/segment.h>
19631+#include <asm/pgtable.h>
19632+#include <asm/alternative-asm.h>
19633+
19634+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19635+#define __copyuser_seg gs;
19636+#else
19637+#define __copyuser_seg
19638+#endif
19639
19640 .text
19641 ENTRY(__get_user_1)
19642 CFI_STARTPROC
19643+
19644+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19645 GET_THREAD_INFO(%_ASM_DX)
19646 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19647 jae bad_get_user
19648-1: movzb (%_ASM_AX),%edx
19649+
19650+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19651+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19652+ cmp %_ASM_DX,%_ASM_AX
19653+ jae 1234f
19654+ add %_ASM_DX,%_ASM_AX
19655+1234:
19656+#endif
19657+
19658+#endif
19659+
19660+1: __copyuser_seg movzb (%_ASM_AX),%edx
19661 xor %eax,%eax
19662+ pax_force_retaddr
19663 ret
19664 CFI_ENDPROC
19665 ENDPROC(__get_user_1)
19666@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19667 ENTRY(__get_user_2)
19668 CFI_STARTPROC
19669 add $1,%_ASM_AX
19670+
19671+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19672 jc bad_get_user
19673 GET_THREAD_INFO(%_ASM_DX)
19674 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19675 jae bad_get_user
19676-2: movzwl -1(%_ASM_AX),%edx
19677+
19678+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19679+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19680+ cmp %_ASM_DX,%_ASM_AX
19681+ jae 1234f
19682+ add %_ASM_DX,%_ASM_AX
19683+1234:
19684+#endif
19685+
19686+#endif
19687+
19688+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19689 xor %eax,%eax
19690+ pax_force_retaddr
19691 ret
19692 CFI_ENDPROC
19693 ENDPROC(__get_user_2)
19694@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19695 ENTRY(__get_user_4)
19696 CFI_STARTPROC
19697 add $3,%_ASM_AX
19698+
19699+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19700 jc bad_get_user
19701 GET_THREAD_INFO(%_ASM_DX)
19702 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19703 jae bad_get_user
19704-3: mov -3(%_ASM_AX),%edx
19705+
19706+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19707+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19708+ cmp %_ASM_DX,%_ASM_AX
19709+ jae 1234f
19710+ add %_ASM_DX,%_ASM_AX
19711+1234:
19712+#endif
19713+
19714+#endif
19715+
19716+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19717 xor %eax,%eax
19718+ pax_force_retaddr
19719 ret
19720 CFI_ENDPROC
19721 ENDPROC(__get_user_4)
19722@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19723 GET_THREAD_INFO(%_ASM_DX)
19724 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19725 jae bad_get_user
19726+
19727+#ifdef CONFIG_PAX_MEMORY_UDEREF
19728+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19729+ cmp %_ASM_DX,%_ASM_AX
19730+ jae 1234f
19731+ add %_ASM_DX,%_ASM_AX
19732+1234:
19733+#endif
19734+
19735 4: movq -7(%_ASM_AX),%_ASM_DX
19736 xor %eax,%eax
19737+ pax_force_retaddr
19738 ret
19739 CFI_ENDPROC
19740 ENDPROC(__get_user_8)
19741@@ -91,6 +152,7 @@ bad_get_user:
19742 CFI_STARTPROC
19743 xor %edx,%edx
19744 mov $(-EFAULT),%_ASM_AX
19745+ pax_force_retaddr
19746 ret
19747 CFI_ENDPROC
19748 END(bad_get_user)
19749diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19750index 9f33b98..dfc7678 100644
19751--- a/arch/x86/lib/insn.c
19752+++ b/arch/x86/lib/insn.c
19753@@ -21,6 +21,11 @@
19754 #include <linux/string.h>
19755 #include <asm/inat.h>
19756 #include <asm/insn.h>
19757+#ifdef __KERNEL__
19758+#include <asm/pgtable_types.h>
19759+#else
19760+#define ktla_ktva(addr) addr
19761+#endif
19762
19763 #define get_next(t, insn) \
19764 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
19765@@ -40,8 +45,8 @@
19766 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19767 {
19768 memset(insn, 0, sizeof(*insn));
19769- insn->kaddr = kaddr;
19770- insn->next_byte = kaddr;
19771+ insn->kaddr = ktla_ktva(kaddr);
19772+ insn->next_byte = ktla_ktva(kaddr);
19773 insn->x86_64 = x86_64 ? 1 : 0;
19774 insn->opnd_bytes = 4;
19775 if (x86_64)
19776diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19777index 05a95e7..326f2fa 100644
19778--- a/arch/x86/lib/iomap_copy_64.S
19779+++ b/arch/x86/lib/iomap_copy_64.S
19780@@ -17,6 +17,7 @@
19781
19782 #include <linux/linkage.h>
19783 #include <asm/dwarf2.h>
19784+#include <asm/alternative-asm.h>
19785
19786 /*
19787 * override generic version in lib/iomap_copy.c
19788@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19789 CFI_STARTPROC
19790 movl %edx,%ecx
19791 rep movsd
19792+ pax_force_retaddr
19793 ret
19794 CFI_ENDPROC
19795 ENDPROC(__iowrite32_copy)
19796diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19797index efbf2a0..8893637 100644
19798--- a/arch/x86/lib/memcpy_64.S
19799+++ b/arch/x86/lib/memcpy_64.S
19800@@ -34,6 +34,7 @@
19801 rep movsq
19802 movl %edx, %ecx
19803 rep movsb
19804+ pax_force_retaddr
19805 ret
19806 .Lmemcpy_e:
19807 .previous
19808@@ -51,6 +52,7 @@
19809
19810 movl %edx, %ecx
19811 rep movsb
19812+ pax_force_retaddr
19813 ret
19814 .Lmemcpy_e_e:
19815 .previous
19816@@ -81,13 +83,13 @@ ENTRY(memcpy)
19817 */
19818 movq 0*8(%rsi), %r8
19819 movq 1*8(%rsi), %r9
19820- movq 2*8(%rsi), %r10
19821+ movq 2*8(%rsi), %rcx
19822 movq 3*8(%rsi), %r11
19823 leaq 4*8(%rsi), %rsi
19824
19825 movq %r8, 0*8(%rdi)
19826 movq %r9, 1*8(%rdi)
19827- movq %r10, 2*8(%rdi)
19828+ movq %rcx, 2*8(%rdi)
19829 movq %r11, 3*8(%rdi)
19830 leaq 4*8(%rdi), %rdi
19831 jae .Lcopy_forward_loop
19832@@ -110,12 +112,12 @@ ENTRY(memcpy)
19833 subq $0x20, %rdx
19834 movq -1*8(%rsi), %r8
19835 movq -2*8(%rsi), %r9
19836- movq -3*8(%rsi), %r10
19837+ movq -3*8(%rsi), %rcx
19838 movq -4*8(%rsi), %r11
19839 leaq -4*8(%rsi), %rsi
19840 movq %r8, -1*8(%rdi)
19841 movq %r9, -2*8(%rdi)
19842- movq %r10, -3*8(%rdi)
19843+ movq %rcx, -3*8(%rdi)
19844 movq %r11, -4*8(%rdi)
19845 leaq -4*8(%rdi), %rdi
19846 jae .Lcopy_backward_loop
19847@@ -135,12 +137,13 @@ ENTRY(memcpy)
19848 */
19849 movq 0*8(%rsi), %r8
19850 movq 1*8(%rsi), %r9
19851- movq -2*8(%rsi, %rdx), %r10
19852+ movq -2*8(%rsi, %rdx), %rcx
19853 movq -1*8(%rsi, %rdx), %r11
19854 movq %r8, 0*8(%rdi)
19855 movq %r9, 1*8(%rdi)
19856- movq %r10, -2*8(%rdi, %rdx)
19857+ movq %rcx, -2*8(%rdi, %rdx)
19858 movq %r11, -1*8(%rdi, %rdx)
19859+ pax_force_retaddr
19860 retq
19861 .p2align 4
19862 .Lless_16bytes:
19863@@ -153,6 +156,7 @@ ENTRY(memcpy)
19864 movq -1*8(%rsi, %rdx), %r9
19865 movq %r8, 0*8(%rdi)
19866 movq %r9, -1*8(%rdi, %rdx)
19867+ pax_force_retaddr
19868 retq
19869 .p2align 4
19870 .Lless_8bytes:
19871@@ -166,6 +170,7 @@ ENTRY(memcpy)
19872 movl -4(%rsi, %rdx), %r8d
19873 movl %ecx, (%rdi)
19874 movl %r8d, -4(%rdi, %rdx)
19875+ pax_force_retaddr
19876 retq
19877 .p2align 4
19878 .Lless_3bytes:
19879@@ -183,6 +188,7 @@ ENTRY(memcpy)
19880 jnz .Lloop_1
19881
19882 .Lend:
19883+ pax_force_retaddr
19884 retq
19885 CFI_ENDPROC
19886 ENDPROC(memcpy)
19887diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19888index ee16461..c39c199 100644
19889--- a/arch/x86/lib/memmove_64.S
19890+++ b/arch/x86/lib/memmove_64.S
19891@@ -61,13 +61,13 @@ ENTRY(memmove)
19892 5:
19893 sub $0x20, %rdx
19894 movq 0*8(%rsi), %r11
19895- movq 1*8(%rsi), %r10
19896+ movq 1*8(%rsi), %rcx
19897 movq 2*8(%rsi), %r9
19898 movq 3*8(%rsi), %r8
19899 leaq 4*8(%rsi), %rsi
19900
19901 movq %r11, 0*8(%rdi)
19902- movq %r10, 1*8(%rdi)
19903+ movq %rcx, 1*8(%rdi)
19904 movq %r9, 2*8(%rdi)
19905 movq %r8, 3*8(%rdi)
19906 leaq 4*8(%rdi), %rdi
19907@@ -81,10 +81,10 @@ ENTRY(memmove)
19908 4:
19909 movq %rdx, %rcx
19910 movq -8(%rsi, %rdx), %r11
19911- lea -8(%rdi, %rdx), %r10
19912+ lea -8(%rdi, %rdx), %r9
19913 shrq $3, %rcx
19914 rep movsq
19915- movq %r11, (%r10)
19916+ movq %r11, (%r9)
19917 jmp 13f
19918 .Lmemmove_end_forward:
19919
19920@@ -95,14 +95,14 @@ ENTRY(memmove)
19921 7:
19922 movq %rdx, %rcx
19923 movq (%rsi), %r11
19924- movq %rdi, %r10
19925+ movq %rdi, %r9
19926 leaq -8(%rsi, %rdx), %rsi
19927 leaq -8(%rdi, %rdx), %rdi
19928 shrq $3, %rcx
19929 std
19930 rep movsq
19931 cld
19932- movq %r11, (%r10)
19933+ movq %r11, (%r9)
19934 jmp 13f
19935
19936 /*
19937@@ -127,13 +127,13 @@ ENTRY(memmove)
19938 8:
19939 subq $0x20, %rdx
19940 movq -1*8(%rsi), %r11
19941- movq -2*8(%rsi), %r10
19942+ movq -2*8(%rsi), %rcx
19943 movq -3*8(%rsi), %r9
19944 movq -4*8(%rsi), %r8
19945 leaq -4*8(%rsi), %rsi
19946
19947 movq %r11, -1*8(%rdi)
19948- movq %r10, -2*8(%rdi)
19949+ movq %rcx, -2*8(%rdi)
19950 movq %r9, -3*8(%rdi)
19951 movq %r8, -4*8(%rdi)
19952 leaq -4*8(%rdi), %rdi
19953@@ -151,11 +151,11 @@ ENTRY(memmove)
19954 * Move data from 16 bytes to 31 bytes.
19955 */
19956 movq 0*8(%rsi), %r11
19957- movq 1*8(%rsi), %r10
19958+ movq 1*8(%rsi), %rcx
19959 movq -2*8(%rsi, %rdx), %r9
19960 movq -1*8(%rsi, %rdx), %r8
19961 movq %r11, 0*8(%rdi)
19962- movq %r10, 1*8(%rdi)
19963+ movq %rcx, 1*8(%rdi)
19964 movq %r9, -2*8(%rdi, %rdx)
19965 movq %r8, -1*8(%rdi, %rdx)
19966 jmp 13f
19967@@ -167,9 +167,9 @@ ENTRY(memmove)
19968 * Move data from 8 bytes to 15 bytes.
19969 */
19970 movq 0*8(%rsi), %r11
19971- movq -1*8(%rsi, %rdx), %r10
19972+ movq -1*8(%rsi, %rdx), %r9
19973 movq %r11, 0*8(%rdi)
19974- movq %r10, -1*8(%rdi, %rdx)
19975+ movq %r9, -1*8(%rdi, %rdx)
19976 jmp 13f
19977 10:
19978 cmpq $4, %rdx
19979@@ -178,9 +178,9 @@ ENTRY(memmove)
19980 * Move data from 4 bytes to 7 bytes.
19981 */
19982 movl (%rsi), %r11d
19983- movl -4(%rsi, %rdx), %r10d
19984+ movl -4(%rsi, %rdx), %r9d
19985 movl %r11d, (%rdi)
19986- movl %r10d, -4(%rdi, %rdx)
19987+ movl %r9d, -4(%rdi, %rdx)
19988 jmp 13f
19989 11:
19990 cmp $2, %rdx
19991@@ -189,9 +189,9 @@ ENTRY(memmove)
19992 * Move data from 2 bytes to 3 bytes.
19993 */
19994 movw (%rsi), %r11w
19995- movw -2(%rsi, %rdx), %r10w
19996+ movw -2(%rsi, %rdx), %r9w
19997 movw %r11w, (%rdi)
19998- movw %r10w, -2(%rdi, %rdx)
19999+ movw %r9w, -2(%rdi, %rdx)
20000 jmp 13f
20001 12:
20002 cmp $1, %rdx
20003@@ -202,6 +202,7 @@ ENTRY(memmove)
20004 movb (%rsi), %r11b
20005 movb %r11b, (%rdi)
20006 13:
20007+ pax_force_retaddr
20008 retq
20009 CFI_ENDPROC
20010
20011@@ -210,6 +211,7 @@ ENTRY(memmove)
20012 /* Forward moving data. */
20013 movq %rdx, %rcx
20014 rep movsb
20015+ pax_force_retaddr
20016 retq
20017 .Lmemmove_end_forward_efs:
20018 .previous
20019diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20020index 79bd454..dff325a 100644
20021--- a/arch/x86/lib/memset_64.S
20022+++ b/arch/x86/lib/memset_64.S
20023@@ -31,6 +31,7 @@
20024 movl %r8d,%ecx
20025 rep stosb
20026 movq %r9,%rax
20027+ pax_force_retaddr
20028 ret
20029 .Lmemset_e:
20030 .previous
20031@@ -53,6 +54,7 @@
20032 movl %edx,%ecx
20033 rep stosb
20034 movq %r9,%rax
20035+ pax_force_retaddr
20036 ret
20037 .Lmemset_e_e:
20038 .previous
20039@@ -60,13 +62,13 @@
20040 ENTRY(memset)
20041 ENTRY(__memset)
20042 CFI_STARTPROC
20043- movq %rdi,%r10
20044 movq %rdx,%r11
20045
20046 /* expand byte value */
20047 movzbl %sil,%ecx
20048 movabs $0x0101010101010101,%rax
20049 mul %rcx /* with rax, clobbers rdx */
20050+ movq %rdi,%rdx
20051
20052 /* align dst */
20053 movl %edi,%r9d
20054@@ -120,7 +122,8 @@ ENTRY(__memset)
20055 jnz .Lloop_1
20056
20057 .Lende:
20058- movq %r10,%rax
20059+ movq %rdx,%rax
20060+ pax_force_retaddr
20061 ret
20062
20063 CFI_RESTORE_STATE
20064diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20065index c9f2d9b..e7fd2c0 100644
20066--- a/arch/x86/lib/mmx_32.c
20067+++ b/arch/x86/lib/mmx_32.c
20068@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20069 {
20070 void *p;
20071 int i;
20072+ unsigned long cr0;
20073
20074 if (unlikely(in_interrupt()))
20075 return __memcpy(to, from, len);
20076@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20077 kernel_fpu_begin();
20078
20079 __asm__ __volatile__ (
20080- "1: prefetch (%0)\n" /* This set is 28 bytes */
20081- " prefetch 64(%0)\n"
20082- " prefetch 128(%0)\n"
20083- " prefetch 192(%0)\n"
20084- " prefetch 256(%0)\n"
20085+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20086+ " prefetch 64(%1)\n"
20087+ " prefetch 128(%1)\n"
20088+ " prefetch 192(%1)\n"
20089+ " prefetch 256(%1)\n"
20090 "2: \n"
20091 ".section .fixup, \"ax\"\n"
20092- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20093+ "3: \n"
20094+
20095+#ifdef CONFIG_PAX_KERNEXEC
20096+ " movl %%cr0, %0\n"
20097+ " movl %0, %%eax\n"
20098+ " andl $0xFFFEFFFF, %%eax\n"
20099+ " movl %%eax, %%cr0\n"
20100+#endif
20101+
20102+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20103+
20104+#ifdef CONFIG_PAX_KERNEXEC
20105+ " movl %0, %%cr0\n"
20106+#endif
20107+
20108 " jmp 2b\n"
20109 ".previous\n"
20110 _ASM_EXTABLE(1b, 3b)
20111- : : "r" (from));
20112+ : "=&r" (cr0) : "r" (from) : "ax");
20113
20114 for ( ; i > 5; i--) {
20115 __asm__ __volatile__ (
20116- "1: prefetch 320(%0)\n"
20117- "2: movq (%0), %%mm0\n"
20118- " movq 8(%0), %%mm1\n"
20119- " movq 16(%0), %%mm2\n"
20120- " movq 24(%0), %%mm3\n"
20121- " movq %%mm0, (%1)\n"
20122- " movq %%mm1, 8(%1)\n"
20123- " movq %%mm2, 16(%1)\n"
20124- " movq %%mm3, 24(%1)\n"
20125- " movq 32(%0), %%mm0\n"
20126- " movq 40(%0), %%mm1\n"
20127- " movq 48(%0), %%mm2\n"
20128- " movq 56(%0), %%mm3\n"
20129- " movq %%mm0, 32(%1)\n"
20130- " movq %%mm1, 40(%1)\n"
20131- " movq %%mm2, 48(%1)\n"
20132- " movq %%mm3, 56(%1)\n"
20133+ "1: prefetch 320(%1)\n"
20134+ "2: movq (%1), %%mm0\n"
20135+ " movq 8(%1), %%mm1\n"
20136+ " movq 16(%1), %%mm2\n"
20137+ " movq 24(%1), %%mm3\n"
20138+ " movq %%mm0, (%2)\n"
20139+ " movq %%mm1, 8(%2)\n"
20140+ " movq %%mm2, 16(%2)\n"
20141+ " movq %%mm3, 24(%2)\n"
20142+ " movq 32(%1), %%mm0\n"
20143+ " movq 40(%1), %%mm1\n"
20144+ " movq 48(%1), %%mm2\n"
20145+ " movq 56(%1), %%mm3\n"
20146+ " movq %%mm0, 32(%2)\n"
20147+ " movq %%mm1, 40(%2)\n"
20148+ " movq %%mm2, 48(%2)\n"
20149+ " movq %%mm3, 56(%2)\n"
20150 ".section .fixup, \"ax\"\n"
20151- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20152+ "3:\n"
20153+
20154+#ifdef CONFIG_PAX_KERNEXEC
20155+ " movl %%cr0, %0\n"
20156+ " movl %0, %%eax\n"
20157+ " andl $0xFFFEFFFF, %%eax\n"
20158+ " movl %%eax, %%cr0\n"
20159+#endif
20160+
20161+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20162+
20163+#ifdef CONFIG_PAX_KERNEXEC
20164+ " movl %0, %%cr0\n"
20165+#endif
20166+
20167 " jmp 2b\n"
20168 ".previous\n"
20169 _ASM_EXTABLE(1b, 3b)
20170- : : "r" (from), "r" (to) : "memory");
20171+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20172
20173 from += 64;
20174 to += 64;
20175@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20176 static void fast_copy_page(void *to, void *from)
20177 {
20178 int i;
20179+ unsigned long cr0;
20180
20181 kernel_fpu_begin();
20182
20183@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20184 * but that is for later. -AV
20185 */
20186 __asm__ __volatile__(
20187- "1: prefetch (%0)\n"
20188- " prefetch 64(%0)\n"
20189- " prefetch 128(%0)\n"
20190- " prefetch 192(%0)\n"
20191- " prefetch 256(%0)\n"
20192+ "1: prefetch (%1)\n"
20193+ " prefetch 64(%1)\n"
20194+ " prefetch 128(%1)\n"
20195+ " prefetch 192(%1)\n"
20196+ " prefetch 256(%1)\n"
20197 "2: \n"
20198 ".section .fixup, \"ax\"\n"
20199- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20200+ "3: \n"
20201+
20202+#ifdef CONFIG_PAX_KERNEXEC
20203+ " movl %%cr0, %0\n"
20204+ " movl %0, %%eax\n"
20205+ " andl $0xFFFEFFFF, %%eax\n"
20206+ " movl %%eax, %%cr0\n"
20207+#endif
20208+
20209+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20210+
20211+#ifdef CONFIG_PAX_KERNEXEC
20212+ " movl %0, %%cr0\n"
20213+#endif
20214+
20215 " jmp 2b\n"
20216 ".previous\n"
20217- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20218+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20219
20220 for (i = 0; i < (4096-320)/64; i++) {
20221 __asm__ __volatile__ (
20222- "1: prefetch 320(%0)\n"
20223- "2: movq (%0), %%mm0\n"
20224- " movntq %%mm0, (%1)\n"
20225- " movq 8(%0), %%mm1\n"
20226- " movntq %%mm1, 8(%1)\n"
20227- " movq 16(%0), %%mm2\n"
20228- " movntq %%mm2, 16(%1)\n"
20229- " movq 24(%0), %%mm3\n"
20230- " movntq %%mm3, 24(%1)\n"
20231- " movq 32(%0), %%mm4\n"
20232- " movntq %%mm4, 32(%1)\n"
20233- " movq 40(%0), %%mm5\n"
20234- " movntq %%mm5, 40(%1)\n"
20235- " movq 48(%0), %%mm6\n"
20236- " movntq %%mm6, 48(%1)\n"
20237- " movq 56(%0), %%mm7\n"
20238- " movntq %%mm7, 56(%1)\n"
20239+ "1: prefetch 320(%1)\n"
20240+ "2: movq (%1), %%mm0\n"
20241+ " movntq %%mm0, (%2)\n"
20242+ " movq 8(%1), %%mm1\n"
20243+ " movntq %%mm1, 8(%2)\n"
20244+ " movq 16(%1), %%mm2\n"
20245+ " movntq %%mm2, 16(%2)\n"
20246+ " movq 24(%1), %%mm3\n"
20247+ " movntq %%mm3, 24(%2)\n"
20248+ " movq 32(%1), %%mm4\n"
20249+ " movntq %%mm4, 32(%2)\n"
20250+ " movq 40(%1), %%mm5\n"
20251+ " movntq %%mm5, 40(%2)\n"
20252+ " movq 48(%1), %%mm6\n"
20253+ " movntq %%mm6, 48(%2)\n"
20254+ " movq 56(%1), %%mm7\n"
20255+ " movntq %%mm7, 56(%2)\n"
20256 ".section .fixup, \"ax\"\n"
20257- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20258+ "3:\n"
20259+
20260+#ifdef CONFIG_PAX_KERNEXEC
20261+ " movl %%cr0, %0\n"
20262+ " movl %0, %%eax\n"
20263+ " andl $0xFFFEFFFF, %%eax\n"
20264+ " movl %%eax, %%cr0\n"
20265+#endif
20266+
20267+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20268+
20269+#ifdef CONFIG_PAX_KERNEXEC
20270+ " movl %0, %%cr0\n"
20271+#endif
20272+
20273 " jmp 2b\n"
20274 ".previous\n"
20275- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20276+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20277
20278 from += 64;
20279 to += 64;
20280@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20281 static void fast_copy_page(void *to, void *from)
20282 {
20283 int i;
20284+ unsigned long cr0;
20285
20286 kernel_fpu_begin();
20287
20288 __asm__ __volatile__ (
20289- "1: prefetch (%0)\n"
20290- " prefetch 64(%0)\n"
20291- " prefetch 128(%0)\n"
20292- " prefetch 192(%0)\n"
20293- " prefetch 256(%0)\n"
20294+ "1: prefetch (%1)\n"
20295+ " prefetch 64(%1)\n"
20296+ " prefetch 128(%1)\n"
20297+ " prefetch 192(%1)\n"
20298+ " prefetch 256(%1)\n"
20299 "2: \n"
20300 ".section .fixup, \"ax\"\n"
20301- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20302+ "3: \n"
20303+
20304+#ifdef CONFIG_PAX_KERNEXEC
20305+ " movl %%cr0, %0\n"
20306+ " movl %0, %%eax\n"
20307+ " andl $0xFFFEFFFF, %%eax\n"
20308+ " movl %%eax, %%cr0\n"
20309+#endif
20310+
20311+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20312+
20313+#ifdef CONFIG_PAX_KERNEXEC
20314+ " movl %0, %%cr0\n"
20315+#endif
20316+
20317 " jmp 2b\n"
20318 ".previous\n"
20319- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20320+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20321
20322 for (i = 0; i < 4096/64; i++) {
20323 __asm__ __volatile__ (
20324- "1: prefetch 320(%0)\n"
20325- "2: movq (%0), %%mm0\n"
20326- " movq 8(%0), %%mm1\n"
20327- " movq 16(%0), %%mm2\n"
20328- " movq 24(%0), %%mm3\n"
20329- " movq %%mm0, (%1)\n"
20330- " movq %%mm1, 8(%1)\n"
20331- " movq %%mm2, 16(%1)\n"
20332- " movq %%mm3, 24(%1)\n"
20333- " movq 32(%0), %%mm0\n"
20334- " movq 40(%0), %%mm1\n"
20335- " movq 48(%0), %%mm2\n"
20336- " movq 56(%0), %%mm3\n"
20337- " movq %%mm0, 32(%1)\n"
20338- " movq %%mm1, 40(%1)\n"
20339- " movq %%mm2, 48(%1)\n"
20340- " movq %%mm3, 56(%1)\n"
20341+ "1: prefetch 320(%1)\n"
20342+ "2: movq (%1), %%mm0\n"
20343+ " movq 8(%1), %%mm1\n"
20344+ " movq 16(%1), %%mm2\n"
20345+ " movq 24(%1), %%mm3\n"
20346+ " movq %%mm0, (%2)\n"
20347+ " movq %%mm1, 8(%2)\n"
20348+ " movq %%mm2, 16(%2)\n"
20349+ " movq %%mm3, 24(%2)\n"
20350+ " movq 32(%1), %%mm0\n"
20351+ " movq 40(%1), %%mm1\n"
20352+ " movq 48(%1), %%mm2\n"
20353+ " movq 56(%1), %%mm3\n"
20354+ " movq %%mm0, 32(%2)\n"
20355+ " movq %%mm1, 40(%2)\n"
20356+ " movq %%mm2, 48(%2)\n"
20357+ " movq %%mm3, 56(%2)\n"
20358 ".section .fixup, \"ax\"\n"
20359- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20360+ "3:\n"
20361+
20362+#ifdef CONFIG_PAX_KERNEXEC
20363+ " movl %%cr0, %0\n"
20364+ " movl %0, %%eax\n"
20365+ " andl $0xFFFEFFFF, %%eax\n"
20366+ " movl %%eax, %%cr0\n"
20367+#endif
20368+
20369+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20370+
20371+#ifdef CONFIG_PAX_KERNEXEC
20372+ " movl %0, %%cr0\n"
20373+#endif
20374+
20375 " jmp 2b\n"
20376 ".previous\n"
20377 _ASM_EXTABLE(1b, 3b)
20378- : : "r" (from), "r" (to) : "memory");
20379+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20380
20381 from += 64;
20382 to += 64;
20383diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20384index 69fa106..adda88b 100644
20385--- a/arch/x86/lib/msr-reg.S
20386+++ b/arch/x86/lib/msr-reg.S
20387@@ -3,6 +3,7 @@
20388 #include <asm/dwarf2.h>
20389 #include <asm/asm.h>
20390 #include <asm/msr.h>
20391+#include <asm/alternative-asm.h>
20392
20393 #ifdef CONFIG_X86_64
20394 /*
20395@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20396 CFI_STARTPROC
20397 pushq_cfi %rbx
20398 pushq_cfi %rbp
20399- movq %rdi, %r10 /* Save pointer */
20400+ movq %rdi, %r9 /* Save pointer */
20401 xorl %r11d, %r11d /* Return value */
20402 movl (%rdi), %eax
20403 movl 4(%rdi), %ecx
20404@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20405 movl 28(%rdi), %edi
20406 CFI_REMEMBER_STATE
20407 1: \op
20408-2: movl %eax, (%r10)
20409+2: movl %eax, (%r9)
20410 movl %r11d, %eax /* Return value */
20411- movl %ecx, 4(%r10)
20412- movl %edx, 8(%r10)
20413- movl %ebx, 12(%r10)
20414- movl %ebp, 20(%r10)
20415- movl %esi, 24(%r10)
20416- movl %edi, 28(%r10)
20417+ movl %ecx, 4(%r9)
20418+ movl %edx, 8(%r9)
20419+ movl %ebx, 12(%r9)
20420+ movl %ebp, 20(%r9)
20421+ movl %esi, 24(%r9)
20422+ movl %edi, 28(%r9)
20423 popq_cfi %rbp
20424 popq_cfi %rbx
20425+ pax_force_retaddr
20426 ret
20427 3:
20428 CFI_RESTORE_STATE
20429diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20430index 36b0d15..d381858 100644
20431--- a/arch/x86/lib/putuser.S
20432+++ b/arch/x86/lib/putuser.S
20433@@ -15,7 +15,9 @@
20434 #include <asm/thread_info.h>
20435 #include <asm/errno.h>
20436 #include <asm/asm.h>
20437-
20438+#include <asm/segment.h>
20439+#include <asm/pgtable.h>
20440+#include <asm/alternative-asm.h>
20441
20442 /*
20443 * __put_user_X
20444@@ -29,52 +31,119 @@
20445 * as they get called from within inline assembly.
20446 */
20447
20448-#define ENTER CFI_STARTPROC ; \
20449- GET_THREAD_INFO(%_ASM_BX)
20450-#define EXIT ret ; \
20451+#define ENTER CFI_STARTPROC
20452+#define EXIT pax_force_retaddr; ret ; \
20453 CFI_ENDPROC
20454
20455+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20456+#define _DEST %_ASM_CX,%_ASM_BX
20457+#else
20458+#define _DEST %_ASM_CX
20459+#endif
20460+
20461+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20462+#define __copyuser_seg gs;
20463+#else
20464+#define __copyuser_seg
20465+#endif
20466+
20467 .text
20468 ENTRY(__put_user_1)
20469 ENTER
20470+
20471+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20472+ GET_THREAD_INFO(%_ASM_BX)
20473 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20474 jae bad_put_user
20475-1: movb %al,(%_ASM_CX)
20476+
20477+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20478+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20479+ cmp %_ASM_BX,%_ASM_CX
20480+ jb 1234f
20481+ xor %ebx,%ebx
20482+1234:
20483+#endif
20484+
20485+#endif
20486+
20487+1: __copyuser_seg movb %al,(_DEST)
20488 xor %eax,%eax
20489 EXIT
20490 ENDPROC(__put_user_1)
20491
20492 ENTRY(__put_user_2)
20493 ENTER
20494+
20495+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20496+ GET_THREAD_INFO(%_ASM_BX)
20497 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20498 sub $1,%_ASM_BX
20499 cmp %_ASM_BX,%_ASM_CX
20500 jae bad_put_user
20501-2: movw %ax,(%_ASM_CX)
20502+
20503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20504+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20505+ cmp %_ASM_BX,%_ASM_CX
20506+ jb 1234f
20507+ xor %ebx,%ebx
20508+1234:
20509+#endif
20510+
20511+#endif
20512+
20513+2: __copyuser_seg movw %ax,(_DEST)
20514 xor %eax,%eax
20515 EXIT
20516 ENDPROC(__put_user_2)
20517
20518 ENTRY(__put_user_4)
20519 ENTER
20520+
20521+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20522+ GET_THREAD_INFO(%_ASM_BX)
20523 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20524 sub $3,%_ASM_BX
20525 cmp %_ASM_BX,%_ASM_CX
20526 jae bad_put_user
20527-3: movl %eax,(%_ASM_CX)
20528+
20529+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20530+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20531+ cmp %_ASM_BX,%_ASM_CX
20532+ jb 1234f
20533+ xor %ebx,%ebx
20534+1234:
20535+#endif
20536+
20537+#endif
20538+
20539+3: __copyuser_seg movl %eax,(_DEST)
20540 xor %eax,%eax
20541 EXIT
20542 ENDPROC(__put_user_4)
20543
20544 ENTRY(__put_user_8)
20545 ENTER
20546+
20547+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20548+ GET_THREAD_INFO(%_ASM_BX)
20549 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20550 sub $7,%_ASM_BX
20551 cmp %_ASM_BX,%_ASM_CX
20552 jae bad_put_user
20553-4: mov %_ASM_AX,(%_ASM_CX)
20554+
20555+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20556+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20557+ cmp %_ASM_BX,%_ASM_CX
20558+ jb 1234f
20559+ xor %ebx,%ebx
20560+1234:
20561+#endif
20562+
20563+#endif
20564+
20565+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20566 #ifdef CONFIG_X86_32
20567-5: movl %edx,4(%_ASM_CX)
20568+5: __copyuser_seg movl %edx,4(_DEST)
20569 #endif
20570 xor %eax,%eax
20571 EXIT
20572diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20573index 1cad221..de671ee 100644
20574--- a/arch/x86/lib/rwlock.S
20575+++ b/arch/x86/lib/rwlock.S
20576@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20577 FRAME
20578 0: LOCK_PREFIX
20579 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20580+
20581+#ifdef CONFIG_PAX_REFCOUNT
20582+ jno 1234f
20583+ LOCK_PREFIX
20584+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20585+ int $4
20586+1234:
20587+ _ASM_EXTABLE(1234b, 1234b)
20588+#endif
20589+
20590 1: rep; nop
20591 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20592 jne 1b
20593 LOCK_PREFIX
20594 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20595+
20596+#ifdef CONFIG_PAX_REFCOUNT
20597+ jno 1234f
20598+ LOCK_PREFIX
20599+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20600+ int $4
20601+1234:
20602+ _ASM_EXTABLE(1234b, 1234b)
20603+#endif
20604+
20605 jnz 0b
20606 ENDFRAME
20607+ pax_force_retaddr
20608 ret
20609 CFI_ENDPROC
20610 END(__write_lock_failed)
20611@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20612 FRAME
20613 0: LOCK_PREFIX
20614 READ_LOCK_SIZE(inc) (%__lock_ptr)
20615+
20616+#ifdef CONFIG_PAX_REFCOUNT
20617+ jno 1234f
20618+ LOCK_PREFIX
20619+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20620+ int $4
20621+1234:
20622+ _ASM_EXTABLE(1234b, 1234b)
20623+#endif
20624+
20625 1: rep; nop
20626 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20627 js 1b
20628 LOCK_PREFIX
20629 READ_LOCK_SIZE(dec) (%__lock_ptr)
20630+
20631+#ifdef CONFIG_PAX_REFCOUNT
20632+ jno 1234f
20633+ LOCK_PREFIX
20634+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20635+ int $4
20636+1234:
20637+ _ASM_EXTABLE(1234b, 1234b)
20638+#endif
20639+
20640 js 0b
20641 ENDFRAME
20642+ pax_force_retaddr
20643 ret
20644 CFI_ENDPROC
20645 END(__read_lock_failed)
20646diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20647index 5dff5f0..cadebf4 100644
20648--- a/arch/x86/lib/rwsem.S
20649+++ b/arch/x86/lib/rwsem.S
20650@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20651 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20652 CFI_RESTORE __ASM_REG(dx)
20653 restore_common_regs
20654+ pax_force_retaddr
20655 ret
20656 CFI_ENDPROC
20657 ENDPROC(call_rwsem_down_read_failed)
20658@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20659 movq %rax,%rdi
20660 call rwsem_down_write_failed
20661 restore_common_regs
20662+ pax_force_retaddr
20663 ret
20664 CFI_ENDPROC
20665 ENDPROC(call_rwsem_down_write_failed)
20666@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20667 movq %rax,%rdi
20668 call rwsem_wake
20669 restore_common_regs
20670-1: ret
20671+1: pax_force_retaddr
20672+ ret
20673 CFI_ENDPROC
20674 ENDPROC(call_rwsem_wake)
20675
20676@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20677 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20678 CFI_RESTORE __ASM_REG(dx)
20679 restore_common_regs
20680+ pax_force_retaddr
20681 ret
20682 CFI_ENDPROC
20683 ENDPROC(call_rwsem_downgrade_wake)
20684diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20685index a63efd6..ccecad8 100644
20686--- a/arch/x86/lib/thunk_64.S
20687+++ b/arch/x86/lib/thunk_64.S
20688@@ -8,6 +8,7 @@
20689 #include <linux/linkage.h>
20690 #include <asm/dwarf2.h>
20691 #include <asm/calling.h>
20692+#include <asm/alternative-asm.h>
20693
20694 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20695 .macro THUNK name, func, put_ret_addr_in_rdi=0
20696@@ -41,5 +42,6 @@
20697 SAVE_ARGS
20698 restore:
20699 RESTORE_ARGS
20700+ pax_force_retaddr
20701 ret
20702 CFI_ENDPROC
20703diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20704index e218d5d..35679b4 100644
20705--- a/arch/x86/lib/usercopy_32.c
20706+++ b/arch/x86/lib/usercopy_32.c
20707@@ -43,7 +43,7 @@ do { \
20708 __asm__ __volatile__( \
20709 " testl %1,%1\n" \
20710 " jz 2f\n" \
20711- "0: lodsb\n" \
20712+ "0: "__copyuser_seg"lodsb\n" \
20713 " stosb\n" \
20714 " testb %%al,%%al\n" \
20715 " jz 1f\n" \
20716@@ -128,10 +128,12 @@ do { \
20717 int __d0; \
20718 might_fault(); \
20719 __asm__ __volatile__( \
20720+ __COPYUSER_SET_ES \
20721 "0: rep; stosl\n" \
20722 " movl %2,%0\n" \
20723 "1: rep; stosb\n" \
20724 "2:\n" \
20725+ __COPYUSER_RESTORE_ES \
20726 ".section .fixup,\"ax\"\n" \
20727 "3: lea 0(%2,%0,4),%0\n" \
20728 " jmp 2b\n" \
20729@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20730 might_fault();
20731
20732 __asm__ __volatile__(
20733+ __COPYUSER_SET_ES
20734 " testl %0, %0\n"
20735 " jz 3f\n"
20736 " andl %0,%%ecx\n"
20737@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20738 " subl %%ecx,%0\n"
20739 " addl %0,%%eax\n"
20740 "1:\n"
20741+ __COPYUSER_RESTORE_ES
20742 ".section .fixup,\"ax\"\n"
20743 "2: xorl %%eax,%%eax\n"
20744 " jmp 1b\n"
20745@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20746
20747 #ifdef CONFIG_X86_INTEL_USERCOPY
20748 static unsigned long
20749-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20750+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20751 {
20752 int d0, d1;
20753 __asm__ __volatile__(
20754@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20755 " .align 2,0x90\n"
20756 "3: movl 0(%4), %%eax\n"
20757 "4: movl 4(%4), %%edx\n"
20758- "5: movl %%eax, 0(%3)\n"
20759- "6: movl %%edx, 4(%3)\n"
20760+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20761+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20762 "7: movl 8(%4), %%eax\n"
20763 "8: movl 12(%4),%%edx\n"
20764- "9: movl %%eax, 8(%3)\n"
20765- "10: movl %%edx, 12(%3)\n"
20766+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20767+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20768 "11: movl 16(%4), %%eax\n"
20769 "12: movl 20(%4), %%edx\n"
20770- "13: movl %%eax, 16(%3)\n"
20771- "14: movl %%edx, 20(%3)\n"
20772+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20773+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20774 "15: movl 24(%4), %%eax\n"
20775 "16: movl 28(%4), %%edx\n"
20776- "17: movl %%eax, 24(%3)\n"
20777- "18: movl %%edx, 28(%3)\n"
20778+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20779+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20780 "19: movl 32(%4), %%eax\n"
20781 "20: movl 36(%4), %%edx\n"
20782- "21: movl %%eax, 32(%3)\n"
20783- "22: movl %%edx, 36(%3)\n"
20784+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20785+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20786 "23: movl 40(%4), %%eax\n"
20787 "24: movl 44(%4), %%edx\n"
20788- "25: movl %%eax, 40(%3)\n"
20789- "26: movl %%edx, 44(%3)\n"
20790+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20791+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20792 "27: movl 48(%4), %%eax\n"
20793 "28: movl 52(%4), %%edx\n"
20794- "29: movl %%eax, 48(%3)\n"
20795- "30: movl %%edx, 52(%3)\n"
20796+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20797+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20798 "31: movl 56(%4), %%eax\n"
20799 "32: movl 60(%4), %%edx\n"
20800- "33: movl %%eax, 56(%3)\n"
20801- "34: movl %%edx, 60(%3)\n"
20802+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20803+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20804 " addl $-64, %0\n"
20805 " addl $64, %4\n"
20806 " addl $64, %3\n"
20807@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20808 " shrl $2, %0\n"
20809 " andl $3, %%eax\n"
20810 " cld\n"
20811+ __COPYUSER_SET_ES
20812 "99: rep; movsl\n"
20813 "36: movl %%eax, %0\n"
20814 "37: rep; movsb\n"
20815 "100:\n"
20816+ __COPYUSER_RESTORE_ES
20817+ ".section .fixup,\"ax\"\n"
20818+ "101: lea 0(%%eax,%0,4),%0\n"
20819+ " jmp 100b\n"
20820+ ".previous\n"
20821+ ".section __ex_table,\"a\"\n"
20822+ " .align 4\n"
20823+ " .long 1b,100b\n"
20824+ " .long 2b,100b\n"
20825+ " .long 3b,100b\n"
20826+ " .long 4b,100b\n"
20827+ " .long 5b,100b\n"
20828+ " .long 6b,100b\n"
20829+ " .long 7b,100b\n"
20830+ " .long 8b,100b\n"
20831+ " .long 9b,100b\n"
20832+ " .long 10b,100b\n"
20833+ " .long 11b,100b\n"
20834+ " .long 12b,100b\n"
20835+ " .long 13b,100b\n"
20836+ " .long 14b,100b\n"
20837+ " .long 15b,100b\n"
20838+ " .long 16b,100b\n"
20839+ " .long 17b,100b\n"
20840+ " .long 18b,100b\n"
20841+ " .long 19b,100b\n"
20842+ " .long 20b,100b\n"
20843+ " .long 21b,100b\n"
20844+ " .long 22b,100b\n"
20845+ " .long 23b,100b\n"
20846+ " .long 24b,100b\n"
20847+ " .long 25b,100b\n"
20848+ " .long 26b,100b\n"
20849+ " .long 27b,100b\n"
20850+ " .long 28b,100b\n"
20851+ " .long 29b,100b\n"
20852+ " .long 30b,100b\n"
20853+ " .long 31b,100b\n"
20854+ " .long 32b,100b\n"
20855+ " .long 33b,100b\n"
20856+ " .long 34b,100b\n"
20857+ " .long 35b,100b\n"
20858+ " .long 36b,100b\n"
20859+ " .long 37b,100b\n"
20860+ " .long 99b,101b\n"
20861+ ".previous"
20862+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20863+ : "1"(to), "2"(from), "0"(size)
20864+ : "eax", "edx", "memory");
20865+ return size;
20866+}
20867+
20868+static unsigned long
20869+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20870+{
20871+ int d0, d1;
20872+ __asm__ __volatile__(
20873+ " .align 2,0x90\n"
20874+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20875+ " cmpl $67, %0\n"
20876+ " jbe 3f\n"
20877+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20878+ " .align 2,0x90\n"
20879+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20880+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20881+ "5: movl %%eax, 0(%3)\n"
20882+ "6: movl %%edx, 4(%3)\n"
20883+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20884+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20885+ "9: movl %%eax, 8(%3)\n"
20886+ "10: movl %%edx, 12(%3)\n"
20887+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20888+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20889+ "13: movl %%eax, 16(%3)\n"
20890+ "14: movl %%edx, 20(%3)\n"
20891+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20892+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20893+ "17: movl %%eax, 24(%3)\n"
20894+ "18: movl %%edx, 28(%3)\n"
20895+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20896+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20897+ "21: movl %%eax, 32(%3)\n"
20898+ "22: movl %%edx, 36(%3)\n"
20899+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20900+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20901+ "25: movl %%eax, 40(%3)\n"
20902+ "26: movl %%edx, 44(%3)\n"
20903+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20904+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20905+ "29: movl %%eax, 48(%3)\n"
20906+ "30: movl %%edx, 52(%3)\n"
20907+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20908+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20909+ "33: movl %%eax, 56(%3)\n"
20910+ "34: movl %%edx, 60(%3)\n"
20911+ " addl $-64, %0\n"
20912+ " addl $64, %4\n"
20913+ " addl $64, %3\n"
20914+ " cmpl $63, %0\n"
20915+ " ja 1b\n"
20916+ "35: movl %0, %%eax\n"
20917+ " shrl $2, %0\n"
20918+ " andl $3, %%eax\n"
20919+ " cld\n"
20920+ "99: rep; "__copyuser_seg" movsl\n"
20921+ "36: movl %%eax, %0\n"
20922+ "37: rep; "__copyuser_seg" movsb\n"
20923+ "100:\n"
20924 ".section .fixup,\"ax\"\n"
20925 "101: lea 0(%%eax,%0,4),%0\n"
20926 " jmp 100b\n"
20927@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20928 int d0, d1;
20929 __asm__ __volatile__(
20930 " .align 2,0x90\n"
20931- "0: movl 32(%4), %%eax\n"
20932+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20933 " cmpl $67, %0\n"
20934 " jbe 2f\n"
20935- "1: movl 64(%4), %%eax\n"
20936+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20937 " .align 2,0x90\n"
20938- "2: movl 0(%4), %%eax\n"
20939- "21: movl 4(%4), %%edx\n"
20940+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20941+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20942 " movl %%eax, 0(%3)\n"
20943 " movl %%edx, 4(%3)\n"
20944- "3: movl 8(%4), %%eax\n"
20945- "31: movl 12(%4),%%edx\n"
20946+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20947+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20948 " movl %%eax, 8(%3)\n"
20949 " movl %%edx, 12(%3)\n"
20950- "4: movl 16(%4), %%eax\n"
20951- "41: movl 20(%4), %%edx\n"
20952+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20953+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20954 " movl %%eax, 16(%3)\n"
20955 " movl %%edx, 20(%3)\n"
20956- "10: movl 24(%4), %%eax\n"
20957- "51: movl 28(%4), %%edx\n"
20958+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20959+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20960 " movl %%eax, 24(%3)\n"
20961 " movl %%edx, 28(%3)\n"
20962- "11: movl 32(%4), %%eax\n"
20963- "61: movl 36(%4), %%edx\n"
20964+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20965+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20966 " movl %%eax, 32(%3)\n"
20967 " movl %%edx, 36(%3)\n"
20968- "12: movl 40(%4), %%eax\n"
20969- "71: movl 44(%4), %%edx\n"
20970+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20971+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20972 " movl %%eax, 40(%3)\n"
20973 " movl %%edx, 44(%3)\n"
20974- "13: movl 48(%4), %%eax\n"
20975- "81: movl 52(%4), %%edx\n"
20976+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20977+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20978 " movl %%eax, 48(%3)\n"
20979 " movl %%edx, 52(%3)\n"
20980- "14: movl 56(%4), %%eax\n"
20981- "91: movl 60(%4), %%edx\n"
20982+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20983+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20984 " movl %%eax, 56(%3)\n"
20985 " movl %%edx, 60(%3)\n"
20986 " addl $-64, %0\n"
20987@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20988 " shrl $2, %0\n"
20989 " andl $3, %%eax\n"
20990 " cld\n"
20991- "6: rep; movsl\n"
20992+ "6: rep; "__copyuser_seg" movsl\n"
20993 " movl %%eax,%0\n"
20994- "7: rep; movsb\n"
20995+ "7: rep; "__copyuser_seg" movsb\n"
20996 "8:\n"
20997 ".section .fixup,\"ax\"\n"
20998 "9: lea 0(%%eax,%0,4),%0\n"
20999@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21000
21001 __asm__ __volatile__(
21002 " .align 2,0x90\n"
21003- "0: movl 32(%4), %%eax\n"
21004+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21005 " cmpl $67, %0\n"
21006 " jbe 2f\n"
21007- "1: movl 64(%4), %%eax\n"
21008+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21009 " .align 2,0x90\n"
21010- "2: movl 0(%4), %%eax\n"
21011- "21: movl 4(%4), %%edx\n"
21012+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21013+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21014 " movnti %%eax, 0(%3)\n"
21015 " movnti %%edx, 4(%3)\n"
21016- "3: movl 8(%4), %%eax\n"
21017- "31: movl 12(%4),%%edx\n"
21018+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21019+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21020 " movnti %%eax, 8(%3)\n"
21021 " movnti %%edx, 12(%3)\n"
21022- "4: movl 16(%4), %%eax\n"
21023- "41: movl 20(%4), %%edx\n"
21024+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21025+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21026 " movnti %%eax, 16(%3)\n"
21027 " movnti %%edx, 20(%3)\n"
21028- "10: movl 24(%4), %%eax\n"
21029- "51: movl 28(%4), %%edx\n"
21030+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21031+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21032 " movnti %%eax, 24(%3)\n"
21033 " movnti %%edx, 28(%3)\n"
21034- "11: movl 32(%4), %%eax\n"
21035- "61: movl 36(%4), %%edx\n"
21036+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21037+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21038 " movnti %%eax, 32(%3)\n"
21039 " movnti %%edx, 36(%3)\n"
21040- "12: movl 40(%4), %%eax\n"
21041- "71: movl 44(%4), %%edx\n"
21042+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21043+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21044 " movnti %%eax, 40(%3)\n"
21045 " movnti %%edx, 44(%3)\n"
21046- "13: movl 48(%4), %%eax\n"
21047- "81: movl 52(%4), %%edx\n"
21048+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21049+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21050 " movnti %%eax, 48(%3)\n"
21051 " movnti %%edx, 52(%3)\n"
21052- "14: movl 56(%4), %%eax\n"
21053- "91: movl 60(%4), %%edx\n"
21054+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21055+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21056 " movnti %%eax, 56(%3)\n"
21057 " movnti %%edx, 60(%3)\n"
21058 " addl $-64, %0\n"
21059@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21060 " shrl $2, %0\n"
21061 " andl $3, %%eax\n"
21062 " cld\n"
21063- "6: rep; movsl\n"
21064+ "6: rep; "__copyuser_seg" movsl\n"
21065 " movl %%eax,%0\n"
21066- "7: rep; movsb\n"
21067+ "7: rep; "__copyuser_seg" movsb\n"
21068 "8:\n"
21069 ".section .fixup,\"ax\"\n"
21070 "9: lea 0(%%eax,%0,4),%0\n"
21071@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21072
21073 __asm__ __volatile__(
21074 " .align 2,0x90\n"
21075- "0: movl 32(%4), %%eax\n"
21076+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21077 " cmpl $67, %0\n"
21078 " jbe 2f\n"
21079- "1: movl 64(%4), %%eax\n"
21080+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21081 " .align 2,0x90\n"
21082- "2: movl 0(%4), %%eax\n"
21083- "21: movl 4(%4), %%edx\n"
21084+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21085+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21086 " movnti %%eax, 0(%3)\n"
21087 " movnti %%edx, 4(%3)\n"
21088- "3: movl 8(%4), %%eax\n"
21089- "31: movl 12(%4),%%edx\n"
21090+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21091+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21092 " movnti %%eax, 8(%3)\n"
21093 " movnti %%edx, 12(%3)\n"
21094- "4: movl 16(%4), %%eax\n"
21095- "41: movl 20(%4), %%edx\n"
21096+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21097+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21098 " movnti %%eax, 16(%3)\n"
21099 " movnti %%edx, 20(%3)\n"
21100- "10: movl 24(%4), %%eax\n"
21101- "51: movl 28(%4), %%edx\n"
21102+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21103+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21104 " movnti %%eax, 24(%3)\n"
21105 " movnti %%edx, 28(%3)\n"
21106- "11: movl 32(%4), %%eax\n"
21107- "61: movl 36(%4), %%edx\n"
21108+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21109+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21110 " movnti %%eax, 32(%3)\n"
21111 " movnti %%edx, 36(%3)\n"
21112- "12: movl 40(%4), %%eax\n"
21113- "71: movl 44(%4), %%edx\n"
21114+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21115+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21116 " movnti %%eax, 40(%3)\n"
21117 " movnti %%edx, 44(%3)\n"
21118- "13: movl 48(%4), %%eax\n"
21119- "81: movl 52(%4), %%edx\n"
21120+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21121+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21122 " movnti %%eax, 48(%3)\n"
21123 " movnti %%edx, 52(%3)\n"
21124- "14: movl 56(%4), %%eax\n"
21125- "91: movl 60(%4), %%edx\n"
21126+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21127+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21128 " movnti %%eax, 56(%3)\n"
21129 " movnti %%edx, 60(%3)\n"
21130 " addl $-64, %0\n"
21131@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21132 " shrl $2, %0\n"
21133 " andl $3, %%eax\n"
21134 " cld\n"
21135- "6: rep; movsl\n"
21136+ "6: rep; "__copyuser_seg" movsl\n"
21137 " movl %%eax,%0\n"
21138- "7: rep; movsb\n"
21139+ "7: rep; "__copyuser_seg" movsb\n"
21140 "8:\n"
21141 ".section .fixup,\"ax\"\n"
21142 "9: lea 0(%%eax,%0,4),%0\n"
21143@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21144 */
21145 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21146 unsigned long size);
21147-unsigned long __copy_user_intel(void __user *to, const void *from,
21148+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21149+ unsigned long size);
21150+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21151 unsigned long size);
21152 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21153 const void __user *from, unsigned long size);
21154 #endif /* CONFIG_X86_INTEL_USERCOPY */
21155
21156 /* Generic arbitrary sized copy. */
21157-#define __copy_user(to, from, size) \
21158+#define __copy_user(to, from, size, prefix, set, restore) \
21159 do { \
21160 int __d0, __d1, __d2; \
21161 __asm__ __volatile__( \
21162+ set \
21163 " cmp $7,%0\n" \
21164 " jbe 1f\n" \
21165 " movl %1,%0\n" \
21166 " negl %0\n" \
21167 " andl $7,%0\n" \
21168 " subl %0,%3\n" \
21169- "4: rep; movsb\n" \
21170+ "4: rep; "prefix"movsb\n" \
21171 " movl %3,%0\n" \
21172 " shrl $2,%0\n" \
21173 " andl $3,%3\n" \
21174 " .align 2,0x90\n" \
21175- "0: rep; movsl\n" \
21176+ "0: rep; "prefix"movsl\n" \
21177 " movl %3,%0\n" \
21178- "1: rep; movsb\n" \
21179+ "1: rep; "prefix"movsb\n" \
21180 "2:\n" \
21181+ restore \
21182 ".section .fixup,\"ax\"\n" \
21183 "5: addl %3,%0\n" \
21184 " jmp 2b\n" \
21185@@ -682,14 +799,14 @@ do { \
21186 " negl %0\n" \
21187 " andl $7,%0\n" \
21188 " subl %0,%3\n" \
21189- "4: rep; movsb\n" \
21190+ "4: rep; "__copyuser_seg"movsb\n" \
21191 " movl %3,%0\n" \
21192 " shrl $2,%0\n" \
21193 " andl $3,%3\n" \
21194 " .align 2,0x90\n" \
21195- "0: rep; movsl\n" \
21196+ "0: rep; "__copyuser_seg"movsl\n" \
21197 " movl %3,%0\n" \
21198- "1: rep; movsb\n" \
21199+ "1: rep; "__copyuser_seg"movsb\n" \
21200 "2:\n" \
21201 ".section .fixup,\"ax\"\n" \
21202 "5: addl %3,%0\n" \
21203@@ -775,9 +892,9 @@ survive:
21204 }
21205 #endif
21206 if (movsl_is_ok(to, from, n))
21207- __copy_user(to, from, n);
21208+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21209 else
21210- n = __copy_user_intel(to, from, n);
21211+ n = __generic_copy_to_user_intel(to, from, n);
21212 return n;
21213 }
21214 EXPORT_SYMBOL(__copy_to_user_ll);
21215@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21216 unsigned long n)
21217 {
21218 if (movsl_is_ok(to, from, n))
21219- __copy_user(to, from, n);
21220+ __copy_user(to, from, n, __copyuser_seg, "", "");
21221 else
21222- n = __copy_user_intel((void __user *)to,
21223- (const void *)from, n);
21224+ n = __generic_copy_from_user_intel(to, from, n);
21225 return n;
21226 }
21227 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21228@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21229 if (n > 64 && cpu_has_xmm2)
21230 n = __copy_user_intel_nocache(to, from, n);
21231 else
21232- __copy_user(to, from, n);
21233+ __copy_user(to, from, n, __copyuser_seg, "", "");
21234 #else
21235- __copy_user(to, from, n);
21236+ __copy_user(to, from, n, __copyuser_seg, "", "");
21237 #endif
21238 return n;
21239 }
21240 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21241
21242-/**
21243- * copy_to_user: - Copy a block of data into user space.
21244- * @to: Destination address, in user space.
21245- * @from: Source address, in kernel space.
21246- * @n: Number of bytes to copy.
21247- *
21248- * Context: User context only. This function may sleep.
21249- *
21250- * Copy data from kernel space to user space.
21251- *
21252- * Returns number of bytes that could not be copied.
21253- * On success, this will be zero.
21254- */
21255-unsigned long
21256-copy_to_user(void __user *to, const void *from, unsigned long n)
21257-{
21258- if (access_ok(VERIFY_WRITE, to, n))
21259- n = __copy_to_user(to, from, n);
21260- return n;
21261-}
21262-EXPORT_SYMBOL(copy_to_user);
21263-
21264-/**
21265- * copy_from_user: - Copy a block of data from user space.
21266- * @to: Destination address, in kernel space.
21267- * @from: Source address, in user space.
21268- * @n: Number of bytes to copy.
21269- *
21270- * Context: User context only. This function may sleep.
21271- *
21272- * Copy data from user space to kernel space.
21273- *
21274- * Returns number of bytes that could not be copied.
21275- * On success, this will be zero.
21276- *
21277- * If some data could not be copied, this function will pad the copied
21278- * data to the requested size using zero bytes.
21279- */
21280-unsigned long
21281-_copy_from_user(void *to, const void __user *from, unsigned long n)
21282-{
21283- if (access_ok(VERIFY_READ, from, n))
21284- n = __copy_from_user(to, from, n);
21285- else
21286- memset(to, 0, n);
21287- return n;
21288-}
21289-EXPORT_SYMBOL(_copy_from_user);
21290-
21291 void copy_from_user_overflow(void)
21292 {
21293 WARN(1, "Buffer overflow detected!\n");
21294 }
21295 EXPORT_SYMBOL(copy_from_user_overflow);
21296+
21297+void copy_to_user_overflow(void)
21298+{
21299+ WARN(1, "Buffer overflow detected!\n");
21300+}
21301+EXPORT_SYMBOL(copy_to_user_overflow);
21302+
21303+#ifdef CONFIG_PAX_MEMORY_UDEREF
21304+void __set_fs(mm_segment_t x)
21305+{
21306+ switch (x.seg) {
21307+ case 0:
21308+ loadsegment(gs, 0);
21309+ break;
21310+ case TASK_SIZE_MAX:
21311+ loadsegment(gs, __USER_DS);
21312+ break;
21313+ case -1UL:
21314+ loadsegment(gs, __KERNEL_DS);
21315+ break;
21316+ default:
21317+ BUG();
21318+ }
21319+ return;
21320+}
21321+EXPORT_SYMBOL(__set_fs);
21322+
21323+void set_fs(mm_segment_t x)
21324+{
21325+ current_thread_info()->addr_limit = x;
21326+ __set_fs(x);
21327+}
21328+EXPORT_SYMBOL(set_fs);
21329+#endif
21330diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21331index b7c2849..8633ad8 100644
21332--- a/arch/x86/lib/usercopy_64.c
21333+++ b/arch/x86/lib/usercopy_64.c
21334@@ -42,6 +42,12 @@ long
21335 __strncpy_from_user(char *dst, const char __user *src, long count)
21336 {
21337 long res;
21338+
21339+#ifdef CONFIG_PAX_MEMORY_UDEREF
21340+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21341+ src += PAX_USER_SHADOW_BASE;
21342+#endif
21343+
21344 __do_strncpy_from_user(dst, src, count, res);
21345 return res;
21346 }
21347@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21348 {
21349 long __d0;
21350 might_fault();
21351+
21352+#ifdef CONFIG_PAX_MEMORY_UDEREF
21353+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21354+ addr += PAX_USER_SHADOW_BASE;
21355+#endif
21356+
21357 /* no memory constraint because it doesn't change any memory gcc knows
21358 about */
21359 asm volatile(
21360@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21361 }
21362 EXPORT_SYMBOL(strlen_user);
21363
21364-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21365+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21366 {
21367- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21368- return copy_user_generic((__force void *)to, (__force void *)from, len);
21369- }
21370- return len;
21371+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21372+
21373+#ifdef CONFIG_PAX_MEMORY_UDEREF
21374+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21375+ to += PAX_USER_SHADOW_BASE;
21376+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21377+ from += PAX_USER_SHADOW_BASE;
21378+#endif
21379+
21380+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21381+ }
21382+ return len;
21383 }
21384 EXPORT_SYMBOL(copy_in_user);
21385
21386@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21387 * it is not necessary to optimize tail handling.
21388 */
21389 unsigned long
21390-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21391+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21392 {
21393 char c;
21394 unsigned zero_len;
21395diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21396index d0474ad..36e9257 100644
21397--- a/arch/x86/mm/extable.c
21398+++ b/arch/x86/mm/extable.c
21399@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21400 const struct exception_table_entry *fixup;
21401
21402 #ifdef CONFIG_PNPBIOS
21403- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21404+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21405 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21406 extern u32 pnp_bios_is_utter_crap;
21407 pnp_bios_is_utter_crap = 1;
21408diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21409index 0d17c8c..c5d9925 100644
21410--- a/arch/x86/mm/fault.c
21411+++ b/arch/x86/mm/fault.c
21412@@ -13,11 +13,18 @@
21413 #include <linux/perf_event.h> /* perf_sw_event */
21414 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21415 #include <linux/prefetch.h> /* prefetchw */
21416+#include <linux/unistd.h>
21417+#include <linux/compiler.h>
21418
21419 #include <asm/traps.h> /* dotraplinkage, ... */
21420 #include <asm/pgalloc.h> /* pgd_*(), ... */
21421 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21422 #include <asm/vsyscall.h>
21423+#include <asm/tlbflush.h>
21424+
21425+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21426+#include <asm/stacktrace.h>
21427+#endif
21428
21429 /*
21430 * Page fault error code bits:
21431@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21432 int ret = 0;
21433
21434 /* kprobe_running() needs smp_processor_id() */
21435- if (kprobes_built_in() && !user_mode_vm(regs)) {
21436+ if (kprobes_built_in() && !user_mode(regs)) {
21437 preempt_disable();
21438 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21439 ret = 1;
21440@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21441 return !instr_lo || (instr_lo>>1) == 1;
21442 case 0x00:
21443 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21444- if (probe_kernel_address(instr, opcode))
21445+ if (user_mode(regs)) {
21446+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21447+ return 0;
21448+ } else if (probe_kernel_address(instr, opcode))
21449 return 0;
21450
21451 *prefetch = (instr_lo == 0xF) &&
21452@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21453 while (instr < max_instr) {
21454 unsigned char opcode;
21455
21456- if (probe_kernel_address(instr, opcode))
21457+ if (user_mode(regs)) {
21458+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21459+ break;
21460+ } else if (probe_kernel_address(instr, opcode))
21461 break;
21462
21463 instr++;
21464@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21465 force_sig_info(si_signo, &info, tsk);
21466 }
21467
21468+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21469+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21470+#endif
21471+
21472+#ifdef CONFIG_PAX_EMUTRAMP
21473+static int pax_handle_fetch_fault(struct pt_regs *regs);
21474+#endif
21475+
21476+#ifdef CONFIG_PAX_PAGEEXEC
21477+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21478+{
21479+ pgd_t *pgd;
21480+ pud_t *pud;
21481+ pmd_t *pmd;
21482+
21483+ pgd = pgd_offset(mm, address);
21484+ if (!pgd_present(*pgd))
21485+ return NULL;
21486+ pud = pud_offset(pgd, address);
21487+ if (!pud_present(*pud))
21488+ return NULL;
21489+ pmd = pmd_offset(pud, address);
21490+ if (!pmd_present(*pmd))
21491+ return NULL;
21492+ return pmd;
21493+}
21494+#endif
21495+
21496 DEFINE_SPINLOCK(pgd_lock);
21497 LIST_HEAD(pgd_list);
21498
21499@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21500 for (address = VMALLOC_START & PMD_MASK;
21501 address >= TASK_SIZE && address < FIXADDR_TOP;
21502 address += PMD_SIZE) {
21503+
21504+#ifdef CONFIG_PAX_PER_CPU_PGD
21505+ unsigned long cpu;
21506+#else
21507 struct page *page;
21508+#endif
21509
21510 spin_lock(&pgd_lock);
21511+
21512+#ifdef CONFIG_PAX_PER_CPU_PGD
21513+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21514+ pgd_t *pgd = get_cpu_pgd(cpu);
21515+ pmd_t *ret;
21516+#else
21517 list_for_each_entry(page, &pgd_list, lru) {
21518+ pgd_t *pgd = page_address(page);
21519 spinlock_t *pgt_lock;
21520 pmd_t *ret;
21521
21522@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21523 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21524
21525 spin_lock(pgt_lock);
21526- ret = vmalloc_sync_one(page_address(page), address);
21527+#endif
21528+
21529+ ret = vmalloc_sync_one(pgd, address);
21530+
21531+#ifndef CONFIG_PAX_PER_CPU_PGD
21532 spin_unlock(pgt_lock);
21533+#endif
21534
21535 if (!ret)
21536 break;
21537@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21538 * an interrupt in the middle of a task switch..
21539 */
21540 pgd_paddr = read_cr3();
21541+
21542+#ifdef CONFIG_PAX_PER_CPU_PGD
21543+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21544+#endif
21545+
21546 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21547 if (!pmd_k)
21548 return -1;
21549@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21550 * happen within a race in page table update. In the later
21551 * case just flush:
21552 */
21553+
21554+#ifdef CONFIG_PAX_PER_CPU_PGD
21555+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21556+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21557+#else
21558 pgd = pgd_offset(current->active_mm, address);
21559+#endif
21560+
21561 pgd_ref = pgd_offset_k(address);
21562 if (pgd_none(*pgd_ref))
21563 return -1;
21564@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21565 static int is_errata100(struct pt_regs *regs, unsigned long address)
21566 {
21567 #ifdef CONFIG_X86_64
21568- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21569+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21570 return 1;
21571 #endif
21572 return 0;
21573@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21574 }
21575
21576 static const char nx_warning[] = KERN_CRIT
21577-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21578+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21579
21580 static void
21581 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21582@@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21583 if (!oops_may_print())
21584 return;
21585
21586- if (error_code & PF_INSTR) {
21587+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21588 unsigned int level;
21589
21590 pte_t *pte = lookup_address(address, &level);
21591
21592 if (pte && pte_present(*pte) && !pte_exec(*pte))
21593- printk(nx_warning, current_uid());
21594+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21595 }
21596
21597+#ifdef CONFIG_PAX_KERNEXEC
21598+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21599+ if (current->signal->curr_ip)
21600+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21601+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21602+ else
21603+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21604+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21605+ }
21606+#endif
21607+
21608 printk(KERN_ALERT "BUG: unable to handle kernel ");
21609 if (address < PAGE_SIZE)
21610 printk(KERN_CONT "NULL pointer dereference");
21611@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21612 }
21613 #endif
21614
21615+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21616+ if (pax_is_fetch_fault(regs, error_code, address)) {
21617+
21618+#ifdef CONFIG_PAX_EMUTRAMP
21619+ switch (pax_handle_fetch_fault(regs)) {
21620+ case 2:
21621+ return;
21622+ }
21623+#endif
21624+
21625+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21626+ do_group_exit(SIGKILL);
21627+ }
21628+#endif
21629+
21630 if (unlikely(show_unhandled_signals))
21631 show_signal_msg(regs, error_code, address, tsk);
21632
21633@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21634 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21635 printk(KERN_ERR
21636 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21637- tsk->comm, tsk->pid, address);
21638+ tsk->comm, task_pid_nr(tsk), address);
21639 code = BUS_MCEERR_AR;
21640 }
21641 #endif
21642@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21643 return 1;
21644 }
21645
21646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21647+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21648+{
21649+ pte_t *pte;
21650+ pmd_t *pmd;
21651+ spinlock_t *ptl;
21652+ unsigned char pte_mask;
21653+
21654+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21655+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21656+ return 0;
21657+
21658+ /* PaX: it's our fault, let's handle it if we can */
21659+
21660+ /* PaX: take a look at read faults before acquiring any locks */
21661+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21662+ /* instruction fetch attempt from a protected page in user mode */
21663+ up_read(&mm->mmap_sem);
21664+
21665+#ifdef CONFIG_PAX_EMUTRAMP
21666+ switch (pax_handle_fetch_fault(regs)) {
21667+ case 2:
21668+ return 1;
21669+ }
21670+#endif
21671+
21672+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21673+ do_group_exit(SIGKILL);
21674+ }
21675+
21676+ pmd = pax_get_pmd(mm, address);
21677+ if (unlikely(!pmd))
21678+ return 0;
21679+
21680+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21681+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21682+ pte_unmap_unlock(pte, ptl);
21683+ return 0;
21684+ }
21685+
21686+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21687+ /* write attempt to a protected page in user mode */
21688+ pte_unmap_unlock(pte, ptl);
21689+ return 0;
21690+ }
21691+
21692+#ifdef CONFIG_SMP
21693+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21694+#else
21695+ if (likely(address > get_limit(regs->cs)))
21696+#endif
21697+ {
21698+ set_pte(pte, pte_mkread(*pte));
21699+ __flush_tlb_one(address);
21700+ pte_unmap_unlock(pte, ptl);
21701+ up_read(&mm->mmap_sem);
21702+ return 1;
21703+ }
21704+
21705+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21706+
21707+ /*
21708+ * PaX: fill DTLB with user rights and retry
21709+ */
21710+ __asm__ __volatile__ (
21711+ "orb %2,(%1)\n"
21712+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21713+/*
21714+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21715+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21716+ * page fault when examined during a TLB load attempt. this is true not only
21717+ * for PTEs holding a non-present entry but also present entries that will
21718+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21719+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21720+ * for our target pages since their PTEs are simply not in the TLBs at all.
21721+
21722+ * the best thing in omitting it is that we gain around 15-20% speed in the
21723+ * fast path of the page fault handler and can get rid of tracing since we
21724+ * can no longer flush unintended entries.
21725+ */
21726+ "invlpg (%0)\n"
21727+#endif
21728+ __copyuser_seg"testb $0,(%0)\n"
21729+ "xorb %3,(%1)\n"
21730+ :
21731+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21732+ : "memory", "cc");
21733+ pte_unmap_unlock(pte, ptl);
21734+ up_read(&mm->mmap_sem);
21735+ return 1;
21736+}
21737+#endif
21738+
21739 /*
21740 * Handle a spurious fault caused by a stale TLB entry.
21741 *
21742@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
21743 static inline int
21744 access_error(unsigned long error_code, struct vm_area_struct *vma)
21745 {
21746+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21747+ return 1;
21748+
21749 if (error_code & PF_WRITE) {
21750 /* write, present and write, not present: */
21751 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21752@@ -989,18 +1181,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21753 {
21754 struct vm_area_struct *vma;
21755 struct task_struct *tsk;
21756- unsigned long address;
21757 struct mm_struct *mm;
21758 int fault;
21759 int write = error_code & PF_WRITE;
21760 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21761 (write ? FAULT_FLAG_WRITE : 0);
21762
21763- tsk = current;
21764- mm = tsk->mm;
21765-
21766 /* Get the faulting address: */
21767- address = read_cr2();
21768+ unsigned long address = read_cr2();
21769+
21770+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21771+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21772+ if (!search_exception_tables(regs->ip)) {
21773+ bad_area_nosemaphore(regs, error_code, address);
21774+ return;
21775+ }
21776+ if (address < PAX_USER_SHADOW_BASE) {
21777+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21778+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21779+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21780+ } else
21781+ address -= PAX_USER_SHADOW_BASE;
21782+ }
21783+#endif
21784+
21785+ tsk = current;
21786+ mm = tsk->mm;
21787
21788 /*
21789 * Detect and handle instructions that would cause a page fault for
21790@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21791 * User-mode registers count as a user access even for any
21792 * potential system fault or CPU buglet:
21793 */
21794- if (user_mode_vm(regs)) {
21795+ if (user_mode(regs)) {
21796 local_irq_enable();
21797 error_code |= PF_USER;
21798 } else {
21799@@ -1116,6 +1322,11 @@ retry:
21800 might_sleep();
21801 }
21802
21803+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21804+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21805+ return;
21806+#endif
21807+
21808 vma = find_vma(mm, address);
21809 if (unlikely(!vma)) {
21810 bad_area(regs, error_code, address);
21811@@ -1127,18 +1338,24 @@ retry:
21812 bad_area(regs, error_code, address);
21813 return;
21814 }
21815- if (error_code & PF_USER) {
21816- /*
21817- * Accessing the stack below %sp is always a bug.
21818- * The large cushion allows instructions like enter
21819- * and pusha to work. ("enter $65535, $31" pushes
21820- * 32 pointers and then decrements %sp by 65535.)
21821- */
21822- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21823- bad_area(regs, error_code, address);
21824- return;
21825- }
21826+ /*
21827+ * Accessing the stack below %sp is always a bug.
21828+ * The large cushion allows instructions like enter
21829+ * and pusha to work. ("enter $65535, $31" pushes
21830+ * 32 pointers and then decrements %sp by 65535.)
21831+ */
21832+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21833+ bad_area(regs, error_code, address);
21834+ return;
21835 }
21836+
21837+#ifdef CONFIG_PAX_SEGMEXEC
21838+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21839+ bad_area(regs, error_code, address);
21840+ return;
21841+ }
21842+#endif
21843+
21844 if (unlikely(expand_stack(vma, address))) {
21845 bad_area(regs, error_code, address);
21846 return;
21847@@ -1193,3 +1410,292 @@ good_area:
21848
21849 up_read(&mm->mmap_sem);
21850 }
21851+
21852+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21853+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21854+{
21855+ struct mm_struct *mm = current->mm;
21856+ unsigned long ip = regs->ip;
21857+
21858+ if (v8086_mode(regs))
21859+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21860+
21861+#ifdef CONFIG_PAX_PAGEEXEC
21862+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21863+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21864+ return true;
21865+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21866+ return true;
21867+ return false;
21868+ }
21869+#endif
21870+
21871+#ifdef CONFIG_PAX_SEGMEXEC
21872+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21873+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21874+ return true;
21875+ return false;
21876+ }
21877+#endif
21878+
21879+ return false;
21880+}
21881+#endif
21882+
21883+#ifdef CONFIG_PAX_EMUTRAMP
21884+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21885+{
21886+ int err;
21887+
21888+ do { /* PaX: libffi trampoline emulation */
21889+ unsigned char mov, jmp;
21890+ unsigned int addr1, addr2;
21891+
21892+#ifdef CONFIG_X86_64
21893+ if ((regs->ip + 9) >> 32)
21894+ break;
21895+#endif
21896+
21897+ err = get_user(mov, (unsigned char __user *)regs->ip);
21898+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21899+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21900+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21901+
21902+ if (err)
21903+ break;
21904+
21905+ if (mov == 0xB8 && jmp == 0xE9) {
21906+ regs->ax = addr1;
21907+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21908+ return 2;
21909+ }
21910+ } while (0);
21911+
21912+ do { /* PaX: gcc trampoline emulation #1 */
21913+ unsigned char mov1, mov2;
21914+ unsigned short jmp;
21915+ unsigned int addr1, addr2;
21916+
21917+#ifdef CONFIG_X86_64
21918+ if ((regs->ip + 11) >> 32)
21919+ break;
21920+#endif
21921+
21922+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21923+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21924+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21925+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21926+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21927+
21928+ if (err)
21929+ break;
21930+
21931+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21932+ regs->cx = addr1;
21933+ regs->ax = addr2;
21934+ regs->ip = addr2;
21935+ return 2;
21936+ }
21937+ } while (0);
21938+
21939+ do { /* PaX: gcc trampoline emulation #2 */
21940+ unsigned char mov, jmp;
21941+ unsigned int addr1, addr2;
21942+
21943+#ifdef CONFIG_X86_64
21944+ if ((regs->ip + 9) >> 32)
21945+ break;
21946+#endif
21947+
21948+ err = get_user(mov, (unsigned char __user *)regs->ip);
21949+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21950+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21951+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21952+
21953+ if (err)
21954+ break;
21955+
21956+ if (mov == 0xB9 && jmp == 0xE9) {
21957+ regs->cx = addr1;
21958+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21959+ return 2;
21960+ }
21961+ } while (0);
21962+
21963+ return 1; /* PaX in action */
21964+}
21965+
21966+#ifdef CONFIG_X86_64
21967+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21968+{
21969+ int err;
21970+
21971+ do { /* PaX: libffi trampoline emulation */
21972+ unsigned short mov1, mov2, jmp1;
21973+ unsigned char stcclc, jmp2;
21974+ unsigned long addr1, addr2;
21975+
21976+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21977+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21978+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21979+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21980+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
21981+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
21982+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
21983+
21984+ if (err)
21985+ break;
21986+
21987+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21988+ regs->r11 = addr1;
21989+ regs->r10 = addr2;
21990+ if (stcclc == 0xF8)
21991+ regs->flags &= ~X86_EFLAGS_CF;
21992+ else
21993+ regs->flags |= X86_EFLAGS_CF;
21994+ regs->ip = addr1;
21995+ return 2;
21996+ }
21997+ } while (0);
21998+
21999+ do { /* PaX: gcc trampoline emulation #1 */
22000+ unsigned short mov1, mov2, jmp1;
22001+ unsigned char jmp2;
22002+ unsigned int addr1;
22003+ unsigned long addr2;
22004+
22005+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22006+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22007+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22008+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22009+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22010+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22011+
22012+ if (err)
22013+ break;
22014+
22015+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22016+ regs->r11 = addr1;
22017+ regs->r10 = addr2;
22018+ regs->ip = addr1;
22019+ return 2;
22020+ }
22021+ } while (0);
22022+
22023+ do { /* PaX: gcc trampoline emulation #2 */
22024+ unsigned short mov1, mov2, jmp1;
22025+ unsigned char jmp2;
22026+ unsigned long addr1, addr2;
22027+
22028+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22029+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22030+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22031+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22032+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22033+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22034+
22035+ if (err)
22036+ break;
22037+
22038+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22039+ regs->r11 = addr1;
22040+ regs->r10 = addr2;
22041+ regs->ip = addr1;
22042+ return 2;
22043+ }
22044+ } while (0);
22045+
22046+ return 1; /* PaX in action */
22047+}
22048+#endif
22049+
22050+/*
22051+ * PaX: decide what to do with offenders (regs->ip = fault address)
22052+ *
22053+ * returns 1 when task should be killed
22054+ * 2 when gcc trampoline was detected
22055+ */
22056+static int pax_handle_fetch_fault(struct pt_regs *regs)
22057+{
22058+ if (v8086_mode(regs))
22059+ return 1;
22060+
22061+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22062+ return 1;
22063+
22064+#ifdef CONFIG_X86_32
22065+ return pax_handle_fetch_fault_32(regs);
22066+#else
22067+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22068+ return pax_handle_fetch_fault_32(regs);
22069+ else
22070+ return pax_handle_fetch_fault_64(regs);
22071+#endif
22072+}
22073+#endif
22074+
22075+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22076+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22077+{
22078+ long i;
22079+
22080+ printk(KERN_ERR "PAX: bytes at PC: ");
22081+ for (i = 0; i < 20; i++) {
22082+ unsigned char c;
22083+ if (get_user(c, (unsigned char __force_user *)pc+i))
22084+ printk(KERN_CONT "?? ");
22085+ else
22086+ printk(KERN_CONT "%02x ", c);
22087+ }
22088+ printk("\n");
22089+
22090+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22091+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22092+ unsigned long c;
22093+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22094+#ifdef CONFIG_X86_32
22095+ printk(KERN_CONT "???????? ");
22096+#else
22097+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22098+ printk(KERN_CONT "???????? ???????? ");
22099+ else
22100+ printk(KERN_CONT "???????????????? ");
22101+#endif
22102+ } else {
22103+#ifdef CONFIG_X86_64
22104+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22105+ printk(KERN_CONT "%08x ", (unsigned int)c);
22106+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22107+ } else
22108+#endif
22109+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22110+ }
22111+ }
22112+ printk("\n");
22113+}
22114+#endif
22115+
22116+/**
22117+ * probe_kernel_write(): safely attempt to write to a location
22118+ * @dst: address to write to
22119+ * @src: pointer to the data that shall be written
22120+ * @size: size of the data chunk
22121+ *
22122+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22123+ * happens, handle that and return -EFAULT.
22124+ */
22125+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22126+{
22127+ long ret;
22128+ mm_segment_t old_fs = get_fs();
22129+
22130+ set_fs(KERNEL_DS);
22131+ pagefault_disable();
22132+ pax_open_kernel();
22133+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22134+ pax_close_kernel();
22135+ pagefault_enable();
22136+ set_fs(old_fs);
22137+
22138+ return ret ? -EFAULT : 0;
22139+}
22140diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22141index dd74e46..7d26398 100644
22142--- a/arch/x86/mm/gup.c
22143+++ b/arch/x86/mm/gup.c
22144@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22145 addr = start;
22146 len = (unsigned long) nr_pages << PAGE_SHIFT;
22147 end = start + len;
22148- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22149+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22150 (void __user *)start, len)))
22151 return 0;
22152
22153diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22154index f4f29b1..5cac4fb 100644
22155--- a/arch/x86/mm/highmem_32.c
22156+++ b/arch/x86/mm/highmem_32.c
22157@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22158 idx = type + KM_TYPE_NR*smp_processor_id();
22159 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22160 BUG_ON(!pte_none(*(kmap_pte-idx)));
22161+
22162+ pax_open_kernel();
22163 set_pte(kmap_pte-idx, mk_pte(page, prot));
22164+ pax_close_kernel();
22165+
22166 arch_flush_lazy_mmu_mode();
22167
22168 return (void *)vaddr;
22169diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22170index f581a18..29efd37 100644
22171--- a/arch/x86/mm/hugetlbpage.c
22172+++ b/arch/x86/mm/hugetlbpage.c
22173@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22174 struct hstate *h = hstate_file(file);
22175 struct mm_struct *mm = current->mm;
22176 struct vm_area_struct *vma;
22177- unsigned long start_addr;
22178+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22179+
22180+#ifdef CONFIG_PAX_SEGMEXEC
22181+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22182+ pax_task_size = SEGMEXEC_TASK_SIZE;
22183+#endif
22184+
22185+ pax_task_size -= PAGE_SIZE;
22186
22187 if (len > mm->cached_hole_size) {
22188- start_addr = mm->free_area_cache;
22189+ start_addr = mm->free_area_cache;
22190 } else {
22191- start_addr = TASK_UNMAPPED_BASE;
22192- mm->cached_hole_size = 0;
22193+ start_addr = mm->mmap_base;
22194+ mm->cached_hole_size = 0;
22195 }
22196
22197 full_search:
22198@@ -280,26 +287,27 @@ full_search:
22199
22200 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22201 /* At this point: (!vma || addr < vma->vm_end). */
22202- if (TASK_SIZE - len < addr) {
22203+ if (pax_task_size - len < addr) {
22204 /*
22205 * Start a new search - just in case we missed
22206 * some holes.
22207 */
22208- if (start_addr != TASK_UNMAPPED_BASE) {
22209- start_addr = TASK_UNMAPPED_BASE;
22210+ if (start_addr != mm->mmap_base) {
22211+ start_addr = mm->mmap_base;
22212 mm->cached_hole_size = 0;
22213 goto full_search;
22214 }
22215 return -ENOMEM;
22216 }
22217- if (!vma || addr + len <= vma->vm_start) {
22218- mm->free_area_cache = addr + len;
22219- return addr;
22220- }
22221+ if (check_heap_stack_gap(vma, addr, len))
22222+ break;
22223 if (addr + mm->cached_hole_size < vma->vm_start)
22224 mm->cached_hole_size = vma->vm_start - addr;
22225 addr = ALIGN(vma->vm_end, huge_page_size(h));
22226 }
22227+
22228+ mm->free_area_cache = addr + len;
22229+ return addr;
22230 }
22231
22232 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22233@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22234 {
22235 struct hstate *h = hstate_file(file);
22236 struct mm_struct *mm = current->mm;
22237- struct vm_area_struct *vma, *prev_vma;
22238- unsigned long base = mm->mmap_base, addr = addr0;
22239+ struct vm_area_struct *vma;
22240+ unsigned long base = mm->mmap_base, addr;
22241 unsigned long largest_hole = mm->cached_hole_size;
22242- int first_time = 1;
22243
22244 /* don't allow allocations above current base */
22245 if (mm->free_area_cache > base)
22246@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22247 largest_hole = 0;
22248 mm->free_area_cache = base;
22249 }
22250-try_again:
22251+
22252 /* make sure it can fit in the remaining address space */
22253 if (mm->free_area_cache < len)
22254 goto fail;
22255
22256 /* either no address requested or can't fit in requested address hole */
22257- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22258+ addr = (mm->free_area_cache - len);
22259 do {
22260+ addr &= huge_page_mask(h);
22261+ vma = find_vma(mm, addr);
22262 /*
22263 * Lookup failure means no vma is above this address,
22264 * i.e. return with success:
22265- */
22266- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22267- return addr;
22268-
22269- /*
22270 * new region fits between prev_vma->vm_end and
22271 * vma->vm_start, use it:
22272 */
22273- if (addr + len <= vma->vm_start &&
22274- (!prev_vma || (addr >= prev_vma->vm_end))) {
22275+ if (check_heap_stack_gap(vma, addr, len)) {
22276 /* remember the address as a hint for next time */
22277- mm->cached_hole_size = largest_hole;
22278- return (mm->free_area_cache = addr);
22279- } else {
22280- /* pull free_area_cache down to the first hole */
22281- if (mm->free_area_cache == vma->vm_end) {
22282- mm->free_area_cache = vma->vm_start;
22283- mm->cached_hole_size = largest_hole;
22284- }
22285+ mm->cached_hole_size = largest_hole;
22286+ return (mm->free_area_cache = addr);
22287+ }
22288+ /* pull free_area_cache down to the first hole */
22289+ if (mm->free_area_cache == vma->vm_end) {
22290+ mm->free_area_cache = vma->vm_start;
22291+ mm->cached_hole_size = largest_hole;
22292 }
22293
22294 /* remember the largest hole we saw so far */
22295 if (addr + largest_hole < vma->vm_start)
22296- largest_hole = vma->vm_start - addr;
22297+ largest_hole = vma->vm_start - addr;
22298
22299 /* try just below the current vma->vm_start */
22300- addr = (vma->vm_start - len) & huge_page_mask(h);
22301- } while (len <= vma->vm_start);
22302+ addr = skip_heap_stack_gap(vma, len);
22303+ } while (!IS_ERR_VALUE(addr));
22304
22305 fail:
22306 /*
22307- * if hint left us with no space for the requested
22308- * mapping then try again:
22309- */
22310- if (first_time) {
22311- mm->free_area_cache = base;
22312- largest_hole = 0;
22313- first_time = 0;
22314- goto try_again;
22315- }
22316- /*
22317 * A failed mmap() very likely causes application failure,
22318 * so fall back to the bottom-up function here. This scenario
22319 * can happen with large stack limits and large mmap()
22320 * allocations.
22321 */
22322- mm->free_area_cache = TASK_UNMAPPED_BASE;
22323+
22324+#ifdef CONFIG_PAX_SEGMEXEC
22325+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22326+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22327+ else
22328+#endif
22329+
22330+ mm->mmap_base = TASK_UNMAPPED_BASE;
22331+
22332+#ifdef CONFIG_PAX_RANDMMAP
22333+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22334+ mm->mmap_base += mm->delta_mmap;
22335+#endif
22336+
22337+ mm->free_area_cache = mm->mmap_base;
22338 mm->cached_hole_size = ~0UL;
22339 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22340 len, pgoff, flags);
22341@@ -386,6 +392,7 @@ fail:
22342 /*
22343 * Restore the topdown base:
22344 */
22345+ mm->mmap_base = base;
22346 mm->free_area_cache = base;
22347 mm->cached_hole_size = ~0UL;
22348
22349@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22350 struct hstate *h = hstate_file(file);
22351 struct mm_struct *mm = current->mm;
22352 struct vm_area_struct *vma;
22353+ unsigned long pax_task_size = TASK_SIZE;
22354
22355 if (len & ~huge_page_mask(h))
22356 return -EINVAL;
22357- if (len > TASK_SIZE)
22358+
22359+#ifdef CONFIG_PAX_SEGMEXEC
22360+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22361+ pax_task_size = SEGMEXEC_TASK_SIZE;
22362+#endif
22363+
22364+ pax_task_size -= PAGE_SIZE;
22365+
22366+ if (len > pax_task_size)
22367 return -ENOMEM;
22368
22369 if (flags & MAP_FIXED) {
22370@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22371 if (addr) {
22372 addr = ALIGN(addr, huge_page_size(h));
22373 vma = find_vma(mm, addr);
22374- if (TASK_SIZE - len >= addr &&
22375- (!vma || addr + len <= vma->vm_start))
22376+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22377 return addr;
22378 }
22379 if (mm->get_unmapped_area == arch_get_unmapped_area)
22380diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22381index 87488b9..7129f32 100644
22382--- a/arch/x86/mm/init.c
22383+++ b/arch/x86/mm/init.c
22384@@ -31,7 +31,7 @@ int direct_gbpages
22385 static void __init find_early_table_space(unsigned long end, int use_pse,
22386 int use_gbpages)
22387 {
22388- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22389+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22390 phys_addr_t base;
22391
22392 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22393@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22394 */
22395 int devmem_is_allowed(unsigned long pagenr)
22396 {
22397+#ifdef CONFIG_GRKERNSEC_KMEM
22398+ /* allow BDA */
22399+ if (!pagenr)
22400+ return 1;
22401+ /* allow EBDA */
22402+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22403+ return 1;
22404+#else
22405+ if (!pagenr)
22406+ return 1;
22407+#ifdef CONFIG_VM86
22408+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22409+ return 1;
22410+#endif
22411+#endif
22412+
22413+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22414+ return 1;
22415+#ifdef CONFIG_GRKERNSEC_KMEM
22416+ /* throw out everything else below 1MB */
22417 if (pagenr <= 256)
22418- return 1;
22419+ return 0;
22420+#endif
22421 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22422 return 0;
22423 if (!page_is_ram(pagenr))
22424@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22425
22426 void free_initmem(void)
22427 {
22428+
22429+#ifdef CONFIG_PAX_KERNEXEC
22430+#ifdef CONFIG_X86_32
22431+ /* PaX: limit KERNEL_CS to actual size */
22432+ unsigned long addr, limit;
22433+ struct desc_struct d;
22434+ int cpu;
22435+
22436+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22437+ limit = (limit - 1UL) >> PAGE_SHIFT;
22438+
22439+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22440+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22441+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22442+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22443+ }
22444+
22445+ /* PaX: make KERNEL_CS read-only */
22446+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22447+ if (!paravirt_enabled())
22448+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22449+/*
22450+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22451+ pgd = pgd_offset_k(addr);
22452+ pud = pud_offset(pgd, addr);
22453+ pmd = pmd_offset(pud, addr);
22454+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22455+ }
22456+*/
22457+#ifdef CONFIG_X86_PAE
22458+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22459+/*
22460+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22461+ pgd = pgd_offset_k(addr);
22462+ pud = pud_offset(pgd, addr);
22463+ pmd = pmd_offset(pud, addr);
22464+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22465+ }
22466+*/
22467+#endif
22468+
22469+#ifdef CONFIG_MODULES
22470+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22471+#endif
22472+
22473+#else
22474+ pgd_t *pgd;
22475+ pud_t *pud;
22476+ pmd_t *pmd;
22477+ unsigned long addr, end;
22478+
22479+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22480+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22481+ pgd = pgd_offset_k(addr);
22482+ pud = pud_offset(pgd, addr);
22483+ pmd = pmd_offset(pud, addr);
22484+ if (!pmd_present(*pmd))
22485+ continue;
22486+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22487+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22488+ else
22489+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22490+ }
22491+
22492+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22493+ end = addr + KERNEL_IMAGE_SIZE;
22494+ for (; addr < end; addr += PMD_SIZE) {
22495+ pgd = pgd_offset_k(addr);
22496+ pud = pud_offset(pgd, addr);
22497+ pmd = pmd_offset(pud, addr);
22498+ if (!pmd_present(*pmd))
22499+ continue;
22500+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22501+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22502+ }
22503+#endif
22504+
22505+ flush_tlb_all();
22506+#endif
22507+
22508 free_init_pages("unused kernel memory",
22509 (unsigned long)(&__init_begin),
22510 (unsigned long)(&__init_end));
22511diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22512index 29f7c6d..b46b35b 100644
22513--- a/arch/x86/mm/init_32.c
22514+++ b/arch/x86/mm/init_32.c
22515@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22516 }
22517
22518 /*
22519- * Creates a middle page table and puts a pointer to it in the
22520- * given global directory entry. This only returns the gd entry
22521- * in non-PAE compilation mode, since the middle layer is folded.
22522- */
22523-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22524-{
22525- pud_t *pud;
22526- pmd_t *pmd_table;
22527-
22528-#ifdef CONFIG_X86_PAE
22529- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22530- if (after_bootmem)
22531- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22532- else
22533- pmd_table = (pmd_t *)alloc_low_page();
22534- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22535- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22536- pud = pud_offset(pgd, 0);
22537- BUG_ON(pmd_table != pmd_offset(pud, 0));
22538-
22539- return pmd_table;
22540- }
22541-#endif
22542- pud = pud_offset(pgd, 0);
22543- pmd_table = pmd_offset(pud, 0);
22544-
22545- return pmd_table;
22546-}
22547-
22548-/*
22549 * Create a page table and place a pointer to it in a middle page
22550 * directory entry:
22551 */
22552@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22553 page_table = (pte_t *)alloc_low_page();
22554
22555 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22556+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22557+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22558+#else
22559 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22560+#endif
22561 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22562 }
22563
22564 return pte_offset_kernel(pmd, 0);
22565 }
22566
22567+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22568+{
22569+ pud_t *pud;
22570+ pmd_t *pmd_table;
22571+
22572+ pud = pud_offset(pgd, 0);
22573+ pmd_table = pmd_offset(pud, 0);
22574+
22575+ return pmd_table;
22576+}
22577+
22578 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22579 {
22580 int pgd_idx = pgd_index(vaddr);
22581@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22582 int pgd_idx, pmd_idx;
22583 unsigned long vaddr;
22584 pgd_t *pgd;
22585+ pud_t *pud;
22586 pmd_t *pmd;
22587 pte_t *pte = NULL;
22588
22589@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22590 pgd = pgd_base + pgd_idx;
22591
22592 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22593- pmd = one_md_table_init(pgd);
22594- pmd = pmd + pmd_index(vaddr);
22595+ pud = pud_offset(pgd, vaddr);
22596+ pmd = pmd_offset(pud, vaddr);
22597+
22598+#ifdef CONFIG_X86_PAE
22599+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22600+#endif
22601+
22602 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22603 pmd++, pmd_idx++) {
22604 pte = page_table_kmap_check(one_page_table_init(pmd),
22605@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22606 }
22607 }
22608
22609-static inline int is_kernel_text(unsigned long addr)
22610+static inline int is_kernel_text(unsigned long start, unsigned long end)
22611 {
22612- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22613- return 1;
22614- return 0;
22615+ if ((start > ktla_ktva((unsigned long)_etext) ||
22616+ end <= ktla_ktva((unsigned long)_stext)) &&
22617+ (start > ktla_ktva((unsigned long)_einittext) ||
22618+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22619+
22620+#ifdef CONFIG_ACPI_SLEEP
22621+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22622+#endif
22623+
22624+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22625+ return 0;
22626+ return 1;
22627 }
22628
22629 /*
22630@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22631 unsigned long last_map_addr = end;
22632 unsigned long start_pfn, end_pfn;
22633 pgd_t *pgd_base = swapper_pg_dir;
22634- int pgd_idx, pmd_idx, pte_ofs;
22635+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22636 unsigned long pfn;
22637 pgd_t *pgd;
22638+ pud_t *pud;
22639 pmd_t *pmd;
22640 pte_t *pte;
22641 unsigned pages_2m, pages_4k;
22642@@ -281,8 +282,13 @@ repeat:
22643 pfn = start_pfn;
22644 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22645 pgd = pgd_base + pgd_idx;
22646- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22647- pmd = one_md_table_init(pgd);
22648+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22649+ pud = pud_offset(pgd, 0);
22650+ pmd = pmd_offset(pud, 0);
22651+
22652+#ifdef CONFIG_X86_PAE
22653+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22654+#endif
22655
22656 if (pfn >= end_pfn)
22657 continue;
22658@@ -294,14 +300,13 @@ repeat:
22659 #endif
22660 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22661 pmd++, pmd_idx++) {
22662- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22663+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22664
22665 /*
22666 * Map with big pages if possible, otherwise
22667 * create normal page tables:
22668 */
22669 if (use_pse) {
22670- unsigned int addr2;
22671 pgprot_t prot = PAGE_KERNEL_LARGE;
22672 /*
22673 * first pass will use the same initial
22674@@ -311,11 +316,7 @@ repeat:
22675 __pgprot(PTE_IDENT_ATTR |
22676 _PAGE_PSE);
22677
22678- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22679- PAGE_OFFSET + PAGE_SIZE-1;
22680-
22681- if (is_kernel_text(addr) ||
22682- is_kernel_text(addr2))
22683+ if (is_kernel_text(address, address + PMD_SIZE))
22684 prot = PAGE_KERNEL_LARGE_EXEC;
22685
22686 pages_2m++;
22687@@ -332,7 +333,7 @@ repeat:
22688 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22689 pte += pte_ofs;
22690 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22691- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22692+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22693 pgprot_t prot = PAGE_KERNEL;
22694 /*
22695 * first pass will use the same initial
22696@@ -340,7 +341,7 @@ repeat:
22697 */
22698 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22699
22700- if (is_kernel_text(addr))
22701+ if (is_kernel_text(address, address + PAGE_SIZE))
22702 prot = PAGE_KERNEL_EXEC;
22703
22704 pages_4k++;
22705@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22706
22707 pud = pud_offset(pgd, va);
22708 pmd = pmd_offset(pud, va);
22709- if (!pmd_present(*pmd))
22710+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22711 break;
22712
22713 pte = pte_offset_kernel(pmd, va);
22714@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22715
22716 static void __init pagetable_init(void)
22717 {
22718- pgd_t *pgd_base = swapper_pg_dir;
22719-
22720- permanent_kmaps_init(pgd_base);
22721+ permanent_kmaps_init(swapper_pg_dir);
22722 }
22723
22724-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22725+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22726 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22727
22728 /* user-defined highmem size */
22729@@ -757,6 +756,12 @@ void __init mem_init(void)
22730
22731 pci_iommu_alloc();
22732
22733+#ifdef CONFIG_PAX_PER_CPU_PGD
22734+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22735+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22736+ KERNEL_PGD_PTRS);
22737+#endif
22738+
22739 #ifdef CONFIG_FLATMEM
22740 BUG_ON(!mem_map);
22741 #endif
22742@@ -774,7 +779,7 @@ void __init mem_init(void)
22743 set_highmem_pages_init();
22744
22745 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22746- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22747+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22748 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22749
22750 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22751@@ -815,10 +820,10 @@ void __init mem_init(void)
22752 ((unsigned long)&__init_end -
22753 (unsigned long)&__init_begin) >> 10,
22754
22755- (unsigned long)&_etext, (unsigned long)&_edata,
22756- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22757+ (unsigned long)&_sdata, (unsigned long)&_edata,
22758+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22759
22760- (unsigned long)&_text, (unsigned long)&_etext,
22761+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22762 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22763
22764 /*
22765@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22766 if (!kernel_set_to_readonly)
22767 return;
22768
22769+ start = ktla_ktva(start);
22770 pr_debug("Set kernel text: %lx - %lx for read write\n",
22771 start, start+size);
22772
22773@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22774 if (!kernel_set_to_readonly)
22775 return;
22776
22777+ start = ktla_ktva(start);
22778 pr_debug("Set kernel text: %lx - %lx for read only\n",
22779 start, start+size);
22780
22781@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22782 unsigned long start = PFN_ALIGN(_text);
22783 unsigned long size = PFN_ALIGN(_etext) - start;
22784
22785+ start = ktla_ktva(start);
22786 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22787 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22788 size >> 10);
22789diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22790index bbaaa00..16dffad 100644
22791--- a/arch/x86/mm/init_64.c
22792+++ b/arch/x86/mm/init_64.c
22793@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22794 * around without checking the pgd every time.
22795 */
22796
22797-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22798+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22799 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22800
22801 int force_personality32;
22802@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22803
22804 for (address = start; address <= end; address += PGDIR_SIZE) {
22805 const pgd_t *pgd_ref = pgd_offset_k(address);
22806+
22807+#ifdef CONFIG_PAX_PER_CPU_PGD
22808+ unsigned long cpu;
22809+#else
22810 struct page *page;
22811+#endif
22812
22813 if (pgd_none(*pgd_ref))
22814 continue;
22815
22816 spin_lock(&pgd_lock);
22817+
22818+#ifdef CONFIG_PAX_PER_CPU_PGD
22819+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22820+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22821+#else
22822 list_for_each_entry(page, &pgd_list, lru) {
22823 pgd_t *pgd;
22824 spinlock_t *pgt_lock;
22825@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22826 /* the pgt_lock only for Xen */
22827 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22828 spin_lock(pgt_lock);
22829+#endif
22830
22831 if (pgd_none(*pgd))
22832 set_pgd(pgd, *pgd_ref);
22833@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22834 BUG_ON(pgd_page_vaddr(*pgd)
22835 != pgd_page_vaddr(*pgd_ref));
22836
22837+#ifndef CONFIG_PAX_PER_CPU_PGD
22838 spin_unlock(pgt_lock);
22839+#endif
22840+
22841 }
22842 spin_unlock(&pgd_lock);
22843 }
22844@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22845 pmd = fill_pmd(pud, vaddr);
22846 pte = fill_pte(pmd, vaddr);
22847
22848+ pax_open_kernel();
22849 set_pte(pte, new_pte);
22850+ pax_close_kernel();
22851
22852 /*
22853 * It's enough to flush this one mapping.
22854@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22855 pgd = pgd_offset_k((unsigned long)__va(phys));
22856 if (pgd_none(*pgd)) {
22857 pud = (pud_t *) spp_getpage();
22858- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22859- _PAGE_USER));
22860+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22861 }
22862 pud = pud_offset(pgd, (unsigned long)__va(phys));
22863 if (pud_none(*pud)) {
22864 pmd = (pmd_t *) spp_getpage();
22865- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22866- _PAGE_USER));
22867+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22868 }
22869 pmd = pmd_offset(pud, phys);
22870 BUG_ON(!pmd_none(*pmd));
22871@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22872 if (pfn >= pgt_buf_top)
22873 panic("alloc_low_page: ran out of memory");
22874
22875- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22876+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22877 clear_page(adr);
22878 *phys = pfn * PAGE_SIZE;
22879 return adr;
22880@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22881
22882 phys = __pa(virt);
22883 left = phys & (PAGE_SIZE - 1);
22884- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22885+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22886 adr = (void *)(((unsigned long)adr) | left);
22887
22888 return adr;
22889@@ -693,6 +707,12 @@ void __init mem_init(void)
22890
22891 pci_iommu_alloc();
22892
22893+#ifdef CONFIG_PAX_PER_CPU_PGD
22894+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22895+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22896+ KERNEL_PGD_PTRS);
22897+#endif
22898+
22899 /* clear_bss() already clear the empty_zero_page */
22900
22901 reservedpages = 0;
22902@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22903 static struct vm_area_struct gate_vma = {
22904 .vm_start = VSYSCALL_START,
22905 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22906- .vm_page_prot = PAGE_READONLY_EXEC,
22907- .vm_flags = VM_READ | VM_EXEC
22908+ .vm_page_prot = PAGE_READONLY,
22909+ .vm_flags = VM_READ
22910 };
22911
22912 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22913@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22914
22915 const char *arch_vma_name(struct vm_area_struct *vma)
22916 {
22917- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22918+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22919 return "[vdso]";
22920 if (vma == &gate_vma)
22921 return "[vsyscall]";
22922diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22923index 7b179b4..6bd1777 100644
22924--- a/arch/x86/mm/iomap_32.c
22925+++ b/arch/x86/mm/iomap_32.c
22926@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22927 type = kmap_atomic_idx_push();
22928 idx = type + KM_TYPE_NR * smp_processor_id();
22929 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22930+
22931+ pax_open_kernel();
22932 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22933+ pax_close_kernel();
22934+
22935 arch_flush_lazy_mmu_mode();
22936
22937 return (void *)vaddr;
22938diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22939index be1ef57..9680edc 100644
22940--- a/arch/x86/mm/ioremap.c
22941+++ b/arch/x86/mm/ioremap.c
22942@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22943 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22944 int is_ram = page_is_ram(pfn);
22945
22946- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22947+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22948 return NULL;
22949 WARN_ON_ONCE(is_ram);
22950 }
22951@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22952 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22953
22954 static __initdata int after_paging_init;
22955-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22956+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22957
22958 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22959 {
22960@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22961 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22962
22963 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22964- memset(bm_pte, 0, sizeof(bm_pte));
22965- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22966+ pmd_populate_user(&init_mm, pmd, bm_pte);
22967
22968 /*
22969 * The boot-ioremap range spans multiple pmds, for which
22970diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22971index d87dd6d..bf3fa66 100644
22972--- a/arch/x86/mm/kmemcheck/kmemcheck.c
22973+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22974@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
22975 * memory (e.g. tracked pages)? For now, we need this to avoid
22976 * invoking kmemcheck for PnP BIOS calls.
22977 */
22978- if (regs->flags & X86_VM_MASK)
22979+ if (v8086_mode(regs))
22980 return false;
22981- if (regs->cs != __KERNEL_CS)
22982+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22983 return false;
22984
22985 pte = kmemcheck_pte_lookup(address);
22986diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22987index f927429..39c2947 100644
22988--- a/arch/x86/mm/mmap.c
22989+++ b/arch/x86/mm/mmap.c
22990@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
22991 * Leave an at least ~128 MB hole with possible stack randomization.
22992 */
22993 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22994-#define MAX_GAP (TASK_SIZE/6*5)
22995+#define MAX_GAP (pax_task_size/6*5)
22996
22997 /*
22998 * True on X86_32 or when emulating IA32 on X86_64
22999@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
23000 return rnd << PAGE_SHIFT;
23001 }
23002
23003-static unsigned long mmap_base(void)
23004+static unsigned long mmap_base(struct mm_struct *mm)
23005 {
23006 unsigned long gap = rlimit(RLIMIT_STACK);
23007+ unsigned long pax_task_size = TASK_SIZE;
23008+
23009+#ifdef CONFIG_PAX_SEGMEXEC
23010+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23011+ pax_task_size = SEGMEXEC_TASK_SIZE;
23012+#endif
23013
23014 if (gap < MIN_GAP)
23015 gap = MIN_GAP;
23016 else if (gap > MAX_GAP)
23017 gap = MAX_GAP;
23018
23019- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23020+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23021 }
23022
23023 /*
23024 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23025 * does, but not when emulating X86_32
23026 */
23027-static unsigned long mmap_legacy_base(void)
23028+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23029 {
23030- if (mmap_is_ia32())
23031+ if (mmap_is_ia32()) {
23032+
23033+#ifdef CONFIG_PAX_SEGMEXEC
23034+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23035+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23036+ else
23037+#endif
23038+
23039 return TASK_UNMAPPED_BASE;
23040- else
23041+ } else
23042 return TASK_UNMAPPED_BASE + mmap_rnd();
23043 }
23044
23045@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
23046 void arch_pick_mmap_layout(struct mm_struct *mm)
23047 {
23048 if (mmap_is_legacy()) {
23049- mm->mmap_base = mmap_legacy_base();
23050+ mm->mmap_base = mmap_legacy_base(mm);
23051+
23052+#ifdef CONFIG_PAX_RANDMMAP
23053+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23054+ mm->mmap_base += mm->delta_mmap;
23055+#endif
23056+
23057 mm->get_unmapped_area = arch_get_unmapped_area;
23058 mm->unmap_area = arch_unmap_area;
23059 } else {
23060- mm->mmap_base = mmap_base();
23061+ mm->mmap_base = mmap_base(mm);
23062+
23063+#ifdef CONFIG_PAX_RANDMMAP
23064+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23065+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23066+#endif
23067+
23068 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23069 mm->unmap_area = arch_unmap_area_topdown;
23070 }
23071diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23072index 67421f3..8d6b107 100644
23073--- a/arch/x86/mm/mmio-mod.c
23074+++ b/arch/x86/mm/mmio-mod.c
23075@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23076 break;
23077 default:
23078 {
23079- unsigned char *ip = (unsigned char *)instptr;
23080+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23081 my_trace->opcode = MMIO_UNKNOWN_OP;
23082 my_trace->width = 0;
23083 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23084@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23085 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23086 void __iomem *addr)
23087 {
23088- static atomic_t next_id;
23089+ static atomic_unchecked_t next_id;
23090 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23091 /* These are page-unaligned. */
23092 struct mmiotrace_map map = {
23093@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23094 .private = trace
23095 },
23096 .phys = offset,
23097- .id = atomic_inc_return(&next_id)
23098+ .id = atomic_inc_return_unchecked(&next_id)
23099 };
23100 map.map_id = trace->id;
23101
23102diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23103index b008656..773eac2 100644
23104--- a/arch/x86/mm/pageattr-test.c
23105+++ b/arch/x86/mm/pageattr-test.c
23106@@ -36,7 +36,7 @@ enum {
23107
23108 static int pte_testbit(pte_t pte)
23109 {
23110- return pte_flags(pte) & _PAGE_UNUSED1;
23111+ return pte_flags(pte) & _PAGE_CPA_TEST;
23112 }
23113
23114 struct split_state {
23115diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23116index f9e5267..6f6e27f 100644
23117--- a/arch/x86/mm/pageattr.c
23118+++ b/arch/x86/mm/pageattr.c
23119@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23120 */
23121 #ifdef CONFIG_PCI_BIOS
23122 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23123- pgprot_val(forbidden) |= _PAGE_NX;
23124+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23125 #endif
23126
23127 /*
23128@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23129 * Does not cover __inittext since that is gone later on. On
23130 * 64bit we do not enforce !NX on the low mapping
23131 */
23132- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23133- pgprot_val(forbidden) |= _PAGE_NX;
23134+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23135+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23136
23137+#ifdef CONFIG_DEBUG_RODATA
23138 /*
23139 * The .rodata section needs to be read-only. Using the pfn
23140 * catches all aliases.
23141@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23142 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23143 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23144 pgprot_val(forbidden) |= _PAGE_RW;
23145+#endif
23146
23147 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23148 /*
23149@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23150 }
23151 #endif
23152
23153+#ifdef CONFIG_PAX_KERNEXEC
23154+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23155+ pgprot_val(forbidden) |= _PAGE_RW;
23156+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23157+ }
23158+#endif
23159+
23160 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23161
23162 return prot;
23163@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23164 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23165 {
23166 /* change init_mm */
23167+ pax_open_kernel();
23168 set_pte_atomic(kpte, pte);
23169+
23170 #ifdef CONFIG_X86_32
23171 if (!SHARED_KERNEL_PMD) {
23172+
23173+#ifdef CONFIG_PAX_PER_CPU_PGD
23174+ unsigned long cpu;
23175+#else
23176 struct page *page;
23177+#endif
23178
23179+#ifdef CONFIG_PAX_PER_CPU_PGD
23180+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23181+ pgd_t *pgd = get_cpu_pgd(cpu);
23182+#else
23183 list_for_each_entry(page, &pgd_list, lru) {
23184- pgd_t *pgd;
23185+ pgd_t *pgd = (pgd_t *)page_address(page);
23186+#endif
23187+
23188 pud_t *pud;
23189 pmd_t *pmd;
23190
23191- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23192+ pgd += pgd_index(address);
23193 pud = pud_offset(pgd, address);
23194 pmd = pmd_offset(pud, address);
23195 set_pte_atomic((pte_t *)pmd, pte);
23196 }
23197 }
23198 #endif
23199+ pax_close_kernel();
23200 }
23201
23202 static int
23203diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23204index f6ff57b..481690f 100644
23205--- a/arch/x86/mm/pat.c
23206+++ b/arch/x86/mm/pat.c
23207@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23208
23209 if (!entry) {
23210 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23211- current->comm, current->pid, start, end);
23212+ current->comm, task_pid_nr(current), start, end);
23213 return -EINVAL;
23214 }
23215
23216@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23217 while (cursor < to) {
23218 if (!devmem_is_allowed(pfn)) {
23219 printk(KERN_INFO
23220- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23221- current->comm, from, to);
23222+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23223+ current->comm, from, to, cursor);
23224 return 0;
23225 }
23226 cursor += PAGE_SIZE;
23227@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23228 printk(KERN_INFO
23229 "%s:%d ioremap_change_attr failed %s "
23230 "for %Lx-%Lx\n",
23231- current->comm, current->pid,
23232+ current->comm, task_pid_nr(current),
23233 cattr_name(flags),
23234 base, (unsigned long long)(base + size));
23235 return -EINVAL;
23236@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23237 if (want_flags != flags) {
23238 printk(KERN_WARNING
23239 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23240- current->comm, current->pid,
23241+ current->comm, task_pid_nr(current),
23242 cattr_name(want_flags),
23243 (unsigned long long)paddr,
23244 (unsigned long long)(paddr + size),
23245@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23246 free_memtype(paddr, paddr + size);
23247 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23248 " for %Lx-%Lx, got %s\n",
23249- current->comm, current->pid,
23250+ current->comm, task_pid_nr(current),
23251 cattr_name(want_flags),
23252 (unsigned long long)paddr,
23253 (unsigned long long)(paddr + size),
23254diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23255index 9f0614d..92ae64a 100644
23256--- a/arch/x86/mm/pf_in.c
23257+++ b/arch/x86/mm/pf_in.c
23258@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23259 int i;
23260 enum reason_type rv = OTHERS;
23261
23262- p = (unsigned char *)ins_addr;
23263+ p = (unsigned char *)ktla_ktva(ins_addr);
23264 p += skip_prefix(p, &prf);
23265 p += get_opcode(p, &opcode);
23266
23267@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23268 struct prefix_bits prf;
23269 int i;
23270
23271- p = (unsigned char *)ins_addr;
23272+ p = (unsigned char *)ktla_ktva(ins_addr);
23273 p += skip_prefix(p, &prf);
23274 p += get_opcode(p, &opcode);
23275
23276@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23277 struct prefix_bits prf;
23278 int i;
23279
23280- p = (unsigned char *)ins_addr;
23281+ p = (unsigned char *)ktla_ktva(ins_addr);
23282 p += skip_prefix(p, &prf);
23283 p += get_opcode(p, &opcode);
23284
23285@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23286 struct prefix_bits prf;
23287 int i;
23288
23289- p = (unsigned char *)ins_addr;
23290+ p = (unsigned char *)ktla_ktva(ins_addr);
23291 p += skip_prefix(p, &prf);
23292 p += get_opcode(p, &opcode);
23293 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23294@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23295 struct prefix_bits prf;
23296 int i;
23297
23298- p = (unsigned char *)ins_addr;
23299+ p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23303diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23304index 8573b83..6372501 100644
23305--- a/arch/x86/mm/pgtable.c
23306+++ b/arch/x86/mm/pgtable.c
23307@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23308 list_del(&page->lru);
23309 }
23310
23311-#define UNSHARED_PTRS_PER_PGD \
23312- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23314+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23315
23316+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23317+{
23318+ while (count--)
23319+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23320+}
23321+#endif
23322
23323+#ifdef CONFIG_PAX_PER_CPU_PGD
23324+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23325+{
23326+ while (count--)
23327+
23328+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23329+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23330+#else
23331+ *dst++ = *src++;
23332+#endif
23333+
23334+}
23335+#endif
23336+
23337+#ifdef CONFIG_X86_64
23338+#define pxd_t pud_t
23339+#define pyd_t pgd_t
23340+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23341+#define pxd_free(mm, pud) pud_free((mm), (pud))
23342+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23343+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23344+#define PYD_SIZE PGDIR_SIZE
23345+#else
23346+#define pxd_t pmd_t
23347+#define pyd_t pud_t
23348+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23349+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23350+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23351+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23352+#define PYD_SIZE PUD_SIZE
23353+#endif
23354+
23355+#ifdef CONFIG_PAX_PER_CPU_PGD
23356+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23357+static inline void pgd_dtor(pgd_t *pgd) {}
23358+#else
23359 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23360 {
23361 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23362@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23363 pgd_list_del(pgd);
23364 spin_unlock(&pgd_lock);
23365 }
23366+#endif
23367
23368 /*
23369 * List of all pgd's needed for non-PAE so it can invalidate entries
23370@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23371 * -- wli
23372 */
23373
23374-#ifdef CONFIG_X86_PAE
23375+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23376 /*
23377 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23378 * updating the top-level pagetable entries to guarantee the
23379@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23380 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23381 * and initialize the kernel pmds here.
23382 */
23383-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23384+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23385
23386 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23387 {
23388@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23389 */
23390 flush_tlb_mm(mm);
23391 }
23392+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23393+#define PREALLOCATED_PXDS USER_PGD_PTRS
23394 #else /* !CONFIG_X86_PAE */
23395
23396 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23397-#define PREALLOCATED_PMDS 0
23398+#define PREALLOCATED_PXDS 0
23399
23400 #endif /* CONFIG_X86_PAE */
23401
23402-static void free_pmds(pmd_t *pmds[])
23403+static void free_pxds(pxd_t *pxds[])
23404 {
23405 int i;
23406
23407- for(i = 0; i < PREALLOCATED_PMDS; i++)
23408- if (pmds[i])
23409- free_page((unsigned long)pmds[i]);
23410+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23411+ if (pxds[i])
23412+ free_page((unsigned long)pxds[i]);
23413 }
23414
23415-static int preallocate_pmds(pmd_t *pmds[])
23416+static int preallocate_pxds(pxd_t *pxds[])
23417 {
23418 int i;
23419 bool failed = false;
23420
23421- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23422- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23423- if (pmd == NULL)
23424+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23425+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23426+ if (pxd == NULL)
23427 failed = true;
23428- pmds[i] = pmd;
23429+ pxds[i] = pxd;
23430 }
23431
23432 if (failed) {
23433- free_pmds(pmds);
23434+ free_pxds(pxds);
23435 return -ENOMEM;
23436 }
23437
23438@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23439 * preallocate which never got a corresponding vma will need to be
23440 * freed manually.
23441 */
23442-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23443+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23444 {
23445 int i;
23446
23447- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23448+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23449 pgd_t pgd = pgdp[i];
23450
23451 if (pgd_val(pgd) != 0) {
23452- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23453+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23454
23455- pgdp[i] = native_make_pgd(0);
23456+ set_pgd(pgdp + i, native_make_pgd(0));
23457
23458- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23459- pmd_free(mm, pmd);
23460+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23461+ pxd_free(mm, pxd);
23462 }
23463 }
23464 }
23465
23466-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23467+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23468 {
23469- pud_t *pud;
23470+ pyd_t *pyd;
23471 unsigned long addr;
23472 int i;
23473
23474- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23475+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23476 return;
23477
23478- pud = pud_offset(pgd, 0);
23479+#ifdef CONFIG_X86_64
23480+ pyd = pyd_offset(mm, 0L);
23481+#else
23482+ pyd = pyd_offset(pgd, 0L);
23483+#endif
23484
23485- for (addr = i = 0; i < PREALLOCATED_PMDS;
23486- i++, pud++, addr += PUD_SIZE) {
23487- pmd_t *pmd = pmds[i];
23488+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23489+ i++, pyd++, addr += PYD_SIZE) {
23490+ pxd_t *pxd = pxds[i];
23491
23492 if (i >= KERNEL_PGD_BOUNDARY)
23493- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23494- sizeof(pmd_t) * PTRS_PER_PMD);
23495+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23496+ sizeof(pxd_t) * PTRS_PER_PMD);
23497
23498- pud_populate(mm, pud, pmd);
23499+ pyd_populate(mm, pyd, pxd);
23500 }
23501 }
23502
23503 pgd_t *pgd_alloc(struct mm_struct *mm)
23504 {
23505 pgd_t *pgd;
23506- pmd_t *pmds[PREALLOCATED_PMDS];
23507+ pxd_t *pxds[PREALLOCATED_PXDS];
23508
23509 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23510
23511@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23512
23513 mm->pgd = pgd;
23514
23515- if (preallocate_pmds(pmds) != 0)
23516+ if (preallocate_pxds(pxds) != 0)
23517 goto out_free_pgd;
23518
23519 if (paravirt_pgd_alloc(mm) != 0)
23520- goto out_free_pmds;
23521+ goto out_free_pxds;
23522
23523 /*
23524 * Make sure that pre-populating the pmds is atomic with
23525@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23526 spin_lock(&pgd_lock);
23527
23528 pgd_ctor(mm, pgd);
23529- pgd_prepopulate_pmd(mm, pgd, pmds);
23530+ pgd_prepopulate_pxd(mm, pgd, pxds);
23531
23532 spin_unlock(&pgd_lock);
23533
23534 return pgd;
23535
23536-out_free_pmds:
23537- free_pmds(pmds);
23538+out_free_pxds:
23539+ free_pxds(pxds);
23540 out_free_pgd:
23541 free_page((unsigned long)pgd);
23542 out:
23543@@ -295,7 +344,7 @@ out:
23544
23545 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23546 {
23547- pgd_mop_up_pmds(mm, pgd);
23548+ pgd_mop_up_pxds(mm, pgd);
23549 pgd_dtor(pgd);
23550 paravirt_pgd_free(mm, pgd);
23551 free_page((unsigned long)pgd);
23552diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23553index cac7184..09a39fa 100644
23554--- a/arch/x86/mm/pgtable_32.c
23555+++ b/arch/x86/mm/pgtable_32.c
23556@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23557 return;
23558 }
23559 pte = pte_offset_kernel(pmd, vaddr);
23560+
23561+ pax_open_kernel();
23562 if (pte_val(pteval))
23563 set_pte_at(&init_mm, vaddr, pte, pteval);
23564 else
23565 pte_clear(&init_mm, vaddr, pte);
23566+ pax_close_kernel();
23567
23568 /*
23569 * It's enough to flush this one mapping.
23570diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23571index 410531d..0f16030 100644
23572--- a/arch/x86/mm/setup_nx.c
23573+++ b/arch/x86/mm/setup_nx.c
23574@@ -5,8 +5,10 @@
23575 #include <asm/pgtable.h>
23576 #include <asm/proto.h>
23577
23578+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23579 static int disable_nx __cpuinitdata;
23580
23581+#ifndef CONFIG_PAX_PAGEEXEC
23582 /*
23583 * noexec = on|off
23584 *
23585@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23586 return 0;
23587 }
23588 early_param("noexec", noexec_setup);
23589+#endif
23590+
23591+#endif
23592
23593 void __cpuinit x86_configure_nx(void)
23594 {
23595+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23596 if (cpu_has_nx && !disable_nx)
23597 __supported_pte_mask |= _PAGE_NX;
23598 else
23599+#endif
23600 __supported_pte_mask &= ~_PAGE_NX;
23601 }
23602
23603diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23604index d6c0418..06a0ad5 100644
23605--- a/arch/x86/mm/tlb.c
23606+++ b/arch/x86/mm/tlb.c
23607@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23608 BUG();
23609 cpumask_clear_cpu(cpu,
23610 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23611+
23612+#ifndef CONFIG_PAX_PER_CPU_PGD
23613 load_cr3(swapper_pg_dir);
23614+#endif
23615+
23616 }
23617 EXPORT_SYMBOL_GPL(leave_mm);
23618
23619diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23620index 6687022..ceabcfa 100644
23621--- a/arch/x86/net/bpf_jit.S
23622+++ b/arch/x86/net/bpf_jit.S
23623@@ -9,6 +9,7 @@
23624 */
23625 #include <linux/linkage.h>
23626 #include <asm/dwarf2.h>
23627+#include <asm/alternative-asm.h>
23628
23629 /*
23630 * Calling convention :
23631@@ -35,6 +36,7 @@ sk_load_word:
23632 jle bpf_slow_path_word
23633 mov (SKBDATA,%rsi),%eax
23634 bswap %eax /* ntohl() */
23635+ pax_force_retaddr
23636 ret
23637
23638
23639@@ -53,6 +55,7 @@ sk_load_half:
23640 jle bpf_slow_path_half
23641 movzwl (SKBDATA,%rsi),%eax
23642 rol $8,%ax # ntohs()
23643+ pax_force_retaddr
23644 ret
23645
23646 sk_load_byte_ind:
23647@@ -66,6 +69,7 @@ sk_load_byte:
23648 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23649 jle bpf_slow_path_byte
23650 movzbl (SKBDATA,%rsi),%eax
23651+ pax_force_retaddr
23652 ret
23653
23654 /**
23655@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23656 movzbl (SKBDATA,%rsi),%ebx
23657 and $15,%bl
23658 shl $2,%bl
23659+ pax_force_retaddr
23660 ret
23661 CFI_ENDPROC
23662 ENDPROC(sk_load_byte_msh)
23663@@ -91,6 +96,7 @@ bpf_error:
23664 xor %eax,%eax
23665 mov -8(%rbp),%rbx
23666 leaveq
23667+ pax_force_retaddr
23668 ret
23669
23670 /* rsi contains offset and can be scratched */
23671@@ -113,6 +119,7 @@ bpf_slow_path_word:
23672 js bpf_error
23673 mov -12(%rbp),%eax
23674 bswap %eax
23675+ pax_force_retaddr
23676 ret
23677
23678 bpf_slow_path_half:
23679@@ -121,12 +128,14 @@ bpf_slow_path_half:
23680 mov -12(%rbp),%ax
23681 rol $8,%ax
23682 movzwl %ax,%eax
23683+ pax_force_retaddr
23684 ret
23685
23686 bpf_slow_path_byte:
23687 bpf_slow_path_common(1)
23688 js bpf_error
23689 movzbl -12(%rbp),%eax
23690+ pax_force_retaddr
23691 ret
23692
23693 bpf_slow_path_byte_msh:
23694@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23695 and $15,%al
23696 shl $2,%al
23697 xchg %eax,%ebx
23698+ pax_force_retaddr
23699 ret
23700diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23701index 7b65f75..63097f6 100644
23702--- a/arch/x86/net/bpf_jit_comp.c
23703+++ b/arch/x86/net/bpf_jit_comp.c
23704@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23705 set_fs(old_fs);
23706 }
23707
23708+struct bpf_jit_work {
23709+ struct work_struct work;
23710+ void *image;
23711+};
23712
23713 void bpf_jit_compile(struct sk_filter *fp)
23714 {
23715@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23716 if (addrs == NULL)
23717 return;
23718
23719+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23720+ if (!fp->work)
23721+ goto out;
23722+
23723 /* Before first pass, make a rough estimation of addrs[]
23724 * each bpf instruction is translated to less than 64 bytes
23725 */
23726@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23727 if (image) {
23728 if (unlikely(proglen + ilen > oldproglen)) {
23729 pr_err("bpb_jit_compile fatal error\n");
23730- kfree(addrs);
23731- module_free(NULL, image);
23732- return;
23733+ module_free_exec(NULL, image);
23734+ goto out;
23735 }
23736+ pax_open_kernel();
23737 memcpy(image + proglen, temp, ilen);
23738+ pax_close_kernel();
23739 }
23740 proglen += ilen;
23741 addrs[i] = proglen;
23742@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23743 break;
23744 }
23745 if (proglen == oldproglen) {
23746- image = module_alloc(max_t(unsigned int,
23747+ image = module_alloc_exec(max_t(unsigned int,
23748 proglen,
23749 sizeof(struct work_struct)));
23750 if (!image)
23751@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23752 fp->bpf_func = (void *)image;
23753 }
23754 out:
23755+ kfree(fp->work);
23756 kfree(addrs);
23757 return;
23758 }
23759
23760 static void jit_free_defer(struct work_struct *arg)
23761 {
23762- module_free(NULL, arg);
23763+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23764+ kfree(arg);
23765 }
23766
23767 /* run from softirq, we must use a work_struct to call
23768- * module_free() from process context
23769+ * module_free_exec() from process context
23770 */
23771 void bpf_jit_free(struct sk_filter *fp)
23772 {
23773 if (fp->bpf_func != sk_run_filter) {
23774- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23775+ struct work_struct *work = &fp->work->work;
23776
23777 INIT_WORK(work, jit_free_defer);
23778+ fp->work->image = fp->bpf_func;
23779 schedule_work(work);
23780 }
23781 }
23782diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23783index bff89df..377758a 100644
23784--- a/arch/x86/oprofile/backtrace.c
23785+++ b/arch/x86/oprofile/backtrace.c
23786@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23787 struct stack_frame_ia32 *fp;
23788 unsigned long bytes;
23789
23790- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23791+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23792 if (bytes != sizeof(bufhead))
23793 return NULL;
23794
23795- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23796+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23797
23798 oprofile_add_trace(bufhead[0].return_address);
23799
23800@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23801 struct stack_frame bufhead[2];
23802 unsigned long bytes;
23803
23804- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23805+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23806 if (bytes != sizeof(bufhead))
23807 return NULL;
23808
23809@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23810 {
23811 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23812
23813- if (!user_mode_vm(regs)) {
23814+ if (!user_mode(regs)) {
23815 unsigned long stack = kernel_stack_pointer(regs);
23816 if (depth)
23817 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23818diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23819index cb29191..036766d 100644
23820--- a/arch/x86/pci/mrst.c
23821+++ b/arch/x86/pci/mrst.c
23822@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23823 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23824 pci_mmcfg_late_init();
23825 pcibios_enable_irq = mrst_pci_irq_enable;
23826- pci_root_ops = pci_mrst_ops;
23827+ pax_open_kernel();
23828+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23829+ pax_close_kernel();
23830 /* Continue with standard init */
23831 return 1;
23832 }
23833diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23834index f685535..2b76a81 100644
23835--- a/arch/x86/pci/pcbios.c
23836+++ b/arch/x86/pci/pcbios.c
23837@@ -79,50 +79,93 @@ union bios32 {
23838 static struct {
23839 unsigned long address;
23840 unsigned short segment;
23841-} bios32_indirect = { 0, __KERNEL_CS };
23842+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23843
23844 /*
23845 * Returns the entry point for the given service, NULL on error
23846 */
23847
23848-static unsigned long bios32_service(unsigned long service)
23849+static unsigned long __devinit bios32_service(unsigned long service)
23850 {
23851 unsigned char return_code; /* %al */
23852 unsigned long address; /* %ebx */
23853 unsigned long length; /* %ecx */
23854 unsigned long entry; /* %edx */
23855 unsigned long flags;
23856+ struct desc_struct d, *gdt;
23857
23858 local_irq_save(flags);
23859- __asm__("lcall *(%%edi); cld"
23860+
23861+ gdt = get_cpu_gdt_table(smp_processor_id());
23862+
23863+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23864+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23865+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23866+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23867+
23868+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23869 : "=a" (return_code),
23870 "=b" (address),
23871 "=c" (length),
23872 "=d" (entry)
23873 : "0" (service),
23874 "1" (0),
23875- "D" (&bios32_indirect));
23876+ "D" (&bios32_indirect),
23877+ "r"(__PCIBIOS_DS)
23878+ : "memory");
23879+
23880+ pax_open_kernel();
23881+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23882+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23883+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23884+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23885+ pax_close_kernel();
23886+
23887 local_irq_restore(flags);
23888
23889 switch (return_code) {
23890- case 0:
23891- return address + entry;
23892- case 0x80: /* Not present */
23893- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23894- return 0;
23895- default: /* Shouldn't happen */
23896- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23897- service, return_code);
23898+ case 0: {
23899+ int cpu;
23900+ unsigned char flags;
23901+
23902+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23903+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23904+ printk(KERN_WARNING "bios32_service: not valid\n");
23905 return 0;
23906+ }
23907+ address = address + PAGE_OFFSET;
23908+ length += 16UL; /* some BIOSs underreport this... */
23909+ flags = 4;
23910+ if (length >= 64*1024*1024) {
23911+ length >>= PAGE_SHIFT;
23912+ flags |= 8;
23913+ }
23914+
23915+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23916+ gdt = get_cpu_gdt_table(cpu);
23917+ pack_descriptor(&d, address, length, 0x9b, flags);
23918+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23919+ pack_descriptor(&d, address, length, 0x93, flags);
23920+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23921+ }
23922+ return entry;
23923+ }
23924+ case 0x80: /* Not present */
23925+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23926+ return 0;
23927+ default: /* Shouldn't happen */
23928+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23929+ service, return_code);
23930+ return 0;
23931 }
23932 }
23933
23934 static struct {
23935 unsigned long address;
23936 unsigned short segment;
23937-} pci_indirect = { 0, __KERNEL_CS };
23938+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23939
23940-static int pci_bios_present;
23941+static int pci_bios_present __read_only;
23942
23943 static int __devinit check_pcibios(void)
23944 {
23945@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23946 unsigned long flags, pcibios_entry;
23947
23948 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23949- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23950+ pci_indirect.address = pcibios_entry;
23951
23952 local_irq_save(flags);
23953- __asm__(
23954- "lcall *(%%edi); cld\n\t"
23955+ __asm__("movw %w6, %%ds\n\t"
23956+ "lcall *%%ss:(%%edi); cld\n\t"
23957+ "push %%ss\n\t"
23958+ "pop %%ds\n\t"
23959 "jc 1f\n\t"
23960 "xor %%ah, %%ah\n"
23961 "1:"
23962@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23963 "=b" (ebx),
23964 "=c" (ecx)
23965 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23966- "D" (&pci_indirect)
23967+ "D" (&pci_indirect),
23968+ "r" (__PCIBIOS_DS)
23969 : "memory");
23970 local_irq_restore(flags);
23971
23972@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23973
23974 switch (len) {
23975 case 1:
23976- __asm__("lcall *(%%esi); cld\n\t"
23977+ __asm__("movw %w6, %%ds\n\t"
23978+ "lcall *%%ss:(%%esi); cld\n\t"
23979+ "push %%ss\n\t"
23980+ "pop %%ds\n\t"
23981 "jc 1f\n\t"
23982 "xor %%ah, %%ah\n"
23983 "1:"
23984@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23985 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23986 "b" (bx),
23987 "D" ((long)reg),
23988- "S" (&pci_indirect));
23989+ "S" (&pci_indirect),
23990+ "r" (__PCIBIOS_DS));
23991 /*
23992 * Zero-extend the result beyond 8 bits, do not trust the
23993 * BIOS having done it:
23994@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23995 *value &= 0xff;
23996 break;
23997 case 2:
23998- __asm__("lcall *(%%esi); cld\n\t"
23999+ __asm__("movw %w6, %%ds\n\t"
24000+ "lcall *%%ss:(%%esi); cld\n\t"
24001+ "push %%ss\n\t"
24002+ "pop %%ds\n\t"
24003 "jc 1f\n\t"
24004 "xor %%ah, %%ah\n"
24005 "1:"
24006@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24007 : "1" (PCIBIOS_READ_CONFIG_WORD),
24008 "b" (bx),
24009 "D" ((long)reg),
24010- "S" (&pci_indirect));
24011+ "S" (&pci_indirect),
24012+ "r" (__PCIBIOS_DS));
24013 /*
24014 * Zero-extend the result beyond 16 bits, do not trust the
24015 * BIOS having done it:
24016@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24017 *value &= 0xffff;
24018 break;
24019 case 4:
24020- __asm__("lcall *(%%esi); cld\n\t"
24021+ __asm__("movw %w6, %%ds\n\t"
24022+ "lcall *%%ss:(%%esi); cld\n\t"
24023+ "push %%ss\n\t"
24024+ "pop %%ds\n\t"
24025 "jc 1f\n\t"
24026 "xor %%ah, %%ah\n"
24027 "1:"
24028@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24029 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24030 "b" (bx),
24031 "D" ((long)reg),
24032- "S" (&pci_indirect));
24033+ "S" (&pci_indirect),
24034+ "r" (__PCIBIOS_DS));
24035 break;
24036 }
24037
24038@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24039
24040 switch (len) {
24041 case 1:
24042- __asm__("lcall *(%%esi); cld\n\t"
24043+ __asm__("movw %w6, %%ds\n\t"
24044+ "lcall *%%ss:(%%esi); cld\n\t"
24045+ "push %%ss\n\t"
24046+ "pop %%ds\n\t"
24047 "jc 1f\n\t"
24048 "xor %%ah, %%ah\n"
24049 "1:"
24050@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24051 "c" (value),
24052 "b" (bx),
24053 "D" ((long)reg),
24054- "S" (&pci_indirect));
24055+ "S" (&pci_indirect),
24056+ "r" (__PCIBIOS_DS));
24057 break;
24058 case 2:
24059- __asm__("lcall *(%%esi); cld\n\t"
24060+ __asm__("movw %w6, %%ds\n\t"
24061+ "lcall *%%ss:(%%esi); cld\n\t"
24062+ "push %%ss\n\t"
24063+ "pop %%ds\n\t"
24064 "jc 1f\n\t"
24065 "xor %%ah, %%ah\n"
24066 "1:"
24067@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24068 "c" (value),
24069 "b" (bx),
24070 "D" ((long)reg),
24071- "S" (&pci_indirect));
24072+ "S" (&pci_indirect),
24073+ "r" (__PCIBIOS_DS));
24074 break;
24075 case 4:
24076- __asm__("lcall *(%%esi); cld\n\t"
24077+ __asm__("movw %w6, %%ds\n\t"
24078+ "lcall *%%ss:(%%esi); cld\n\t"
24079+ "push %%ss\n\t"
24080+ "pop %%ds\n\t"
24081 "jc 1f\n\t"
24082 "xor %%ah, %%ah\n"
24083 "1:"
24084@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24085 "c" (value),
24086 "b" (bx),
24087 "D" ((long)reg),
24088- "S" (&pci_indirect));
24089+ "S" (&pci_indirect),
24090+ "r" (__PCIBIOS_DS));
24091 break;
24092 }
24093
24094@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24095
24096 DBG("PCI: Fetching IRQ routing table... ");
24097 __asm__("push %%es\n\t"
24098+ "movw %w8, %%ds\n\t"
24099 "push %%ds\n\t"
24100 "pop %%es\n\t"
24101- "lcall *(%%esi); cld\n\t"
24102+ "lcall *%%ss:(%%esi); cld\n\t"
24103 "pop %%es\n\t"
24104+ "push %%ss\n\t"
24105+ "pop %%ds\n"
24106 "jc 1f\n\t"
24107 "xor %%ah, %%ah\n"
24108 "1:"
24109@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24110 "1" (0),
24111 "D" ((long) &opt),
24112 "S" (&pci_indirect),
24113- "m" (opt)
24114+ "m" (opt),
24115+ "r" (__PCIBIOS_DS)
24116 : "memory");
24117 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24118 if (ret & 0xff00)
24119@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24120 {
24121 int ret;
24122
24123- __asm__("lcall *(%%esi); cld\n\t"
24124+ __asm__("movw %w5, %%ds\n\t"
24125+ "lcall *%%ss:(%%esi); cld\n\t"
24126+ "push %%ss\n\t"
24127+ "pop %%ds\n"
24128 "jc 1f\n\t"
24129 "xor %%ah, %%ah\n"
24130 "1:"
24131@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24132 : "0" (PCIBIOS_SET_PCI_HW_INT),
24133 "b" ((dev->bus->number << 8) | dev->devfn),
24134 "c" ((irq << 8) | (pin + 10)),
24135- "S" (&pci_indirect));
24136+ "S" (&pci_indirect),
24137+ "r" (__PCIBIOS_DS));
24138 return !(ret & 0xff00);
24139 }
24140 EXPORT_SYMBOL(pcibios_set_irq_routing);
24141diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24142index 5cab48e..b025f9b 100644
24143--- a/arch/x86/platform/efi/efi_32.c
24144+++ b/arch/x86/platform/efi/efi_32.c
24145@@ -38,70 +38,56 @@
24146 */
24147
24148 static unsigned long efi_rt_eflags;
24149-static pgd_t efi_bak_pg_dir_pointer[2];
24150+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
24151
24152-void efi_call_phys_prelog(void)
24153+void __init efi_call_phys_prelog(void)
24154 {
24155- unsigned long cr4;
24156- unsigned long temp;
24157 struct desc_ptr gdt_descr;
24158
24159+#ifdef CONFIG_PAX_KERNEXEC
24160+ struct desc_struct d;
24161+#endif
24162+
24163 local_irq_save(efi_rt_eflags);
24164
24165- /*
24166- * If I don't have PAE, I should just duplicate two entries in page
24167- * directory. If I have PAE, I just need to duplicate one entry in
24168- * page directory.
24169- */
24170- cr4 = read_cr4_safe();
24171-
24172- if (cr4 & X86_CR4_PAE) {
24173- efi_bak_pg_dir_pointer[0].pgd =
24174- swapper_pg_dir[pgd_index(0)].pgd;
24175- swapper_pg_dir[0].pgd =
24176- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24177- } else {
24178- efi_bak_pg_dir_pointer[0].pgd =
24179- swapper_pg_dir[pgd_index(0)].pgd;
24180- efi_bak_pg_dir_pointer[1].pgd =
24181- swapper_pg_dir[pgd_index(0x400000)].pgd;
24182- swapper_pg_dir[pgd_index(0)].pgd =
24183- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24184- temp = PAGE_OFFSET + 0x400000;
24185- swapper_pg_dir[pgd_index(0x400000)].pgd =
24186- swapper_pg_dir[pgd_index(temp)].pgd;
24187- }
24188+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
24189+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24190+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
24191
24192 /*
24193 * After the lock is released, the original page table is restored.
24194 */
24195 __flush_tlb_all();
24196
24197+#ifdef CONFIG_PAX_KERNEXEC
24198+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24199+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24200+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24201+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24202+#endif
24203+
24204 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24205 gdt_descr.size = GDT_SIZE - 1;
24206 load_gdt(&gdt_descr);
24207 }
24208
24209-void efi_call_phys_epilog(void)
24210+void __init efi_call_phys_epilog(void)
24211 {
24212- unsigned long cr4;
24213 struct desc_ptr gdt_descr;
24214
24215+#ifdef CONFIG_PAX_KERNEXEC
24216+ struct desc_struct d;
24217+
24218+ memset(&d, 0, sizeof d);
24219+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24220+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24221+#endif
24222+
24223 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24224 gdt_descr.size = GDT_SIZE - 1;
24225 load_gdt(&gdt_descr);
24226
24227- cr4 = read_cr4_safe();
24228-
24229- if (cr4 & X86_CR4_PAE) {
24230- swapper_pg_dir[pgd_index(0)].pgd =
24231- efi_bak_pg_dir_pointer[0].pgd;
24232- } else {
24233- swapper_pg_dir[pgd_index(0)].pgd =
24234- efi_bak_pg_dir_pointer[0].pgd;
24235- swapper_pg_dir[pgd_index(0x400000)].pgd =
24236- efi_bak_pg_dir_pointer[1].pgd;
24237- }
24238+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
24239
24240 /*
24241 * After the lock is released, the original page table is restored.
24242diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24243index fbe66e6..c5c0dd2 100644
24244--- a/arch/x86/platform/efi/efi_stub_32.S
24245+++ b/arch/x86/platform/efi/efi_stub_32.S
24246@@ -6,7 +6,9 @@
24247 */
24248
24249 #include <linux/linkage.h>
24250+#include <linux/init.h>
24251 #include <asm/page_types.h>
24252+#include <asm/segment.h>
24253
24254 /*
24255 * efi_call_phys(void *, ...) is a function with variable parameters.
24256@@ -20,7 +22,7 @@
24257 * service functions will comply with gcc calling convention, too.
24258 */
24259
24260-.text
24261+__INIT
24262 ENTRY(efi_call_phys)
24263 /*
24264 * 0. The function can only be called in Linux kernel. So CS has been
24265@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24266 * The mapping of lower virtual memory has been created in prelog and
24267 * epilog.
24268 */
24269- movl $1f, %edx
24270- subl $__PAGE_OFFSET, %edx
24271- jmp *%edx
24272+ movl $(__KERNEXEC_EFI_DS), %edx
24273+ mov %edx, %ds
24274+ mov %edx, %es
24275+ mov %edx, %ss
24276+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24277 1:
24278
24279 /*
24280@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24281 * parameter 2, ..., param n. To make things easy, we save the return
24282 * address of efi_call_phys in a global variable.
24283 */
24284- popl %edx
24285- movl %edx, saved_return_addr
24286- /* get the function pointer into ECX*/
24287- popl %ecx
24288- movl %ecx, efi_rt_function_ptr
24289- movl $2f, %edx
24290- subl $__PAGE_OFFSET, %edx
24291- pushl %edx
24292+ popl (saved_return_addr)
24293+ popl (efi_rt_function_ptr)
24294
24295 /*
24296 * 3. Clear PG bit in %CR0.
24297@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24298 /*
24299 * 5. Call the physical function.
24300 */
24301- jmp *%ecx
24302+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24303
24304-2:
24305 /*
24306 * 6. After EFI runtime service returns, control will return to
24307 * following instruction. We'd better readjust stack pointer first.
24308@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24309 movl %cr0, %edx
24310 orl $0x80000000, %edx
24311 movl %edx, %cr0
24312- jmp 1f
24313-1:
24314+
24315 /*
24316 * 8. Now restore the virtual mode from flat mode by
24317 * adding EIP with PAGE_OFFSET.
24318 */
24319- movl $1f, %edx
24320- jmp *%edx
24321+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24322 1:
24323+ movl $(__KERNEL_DS), %edx
24324+ mov %edx, %ds
24325+ mov %edx, %es
24326+ mov %edx, %ss
24327
24328 /*
24329 * 9. Balance the stack. And because EAX contain the return value,
24330 * we'd better not clobber it.
24331 */
24332- leal efi_rt_function_ptr, %edx
24333- movl (%edx), %ecx
24334- pushl %ecx
24335+ pushl (efi_rt_function_ptr)
24336
24337 /*
24338- * 10. Push the saved return address onto the stack and return.
24339+ * 10. Return to the saved return address.
24340 */
24341- leal saved_return_addr, %edx
24342- movl (%edx), %ecx
24343- pushl %ecx
24344- ret
24345+ jmpl *(saved_return_addr)
24346 ENDPROC(efi_call_phys)
24347 .previous
24348
24349-.data
24350+__INITDATA
24351 saved_return_addr:
24352 .long 0
24353 efi_rt_function_ptr:
24354diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24355index 4c07cca..2c8427d 100644
24356--- a/arch/x86/platform/efi/efi_stub_64.S
24357+++ b/arch/x86/platform/efi/efi_stub_64.S
24358@@ -7,6 +7,7 @@
24359 */
24360
24361 #include <linux/linkage.h>
24362+#include <asm/alternative-asm.h>
24363
24364 #define SAVE_XMM \
24365 mov %rsp, %rax; \
24366@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24367 call *%rdi
24368 addq $32, %rsp
24369 RESTORE_XMM
24370+ pax_force_retaddr 0, 1
24371 ret
24372 ENDPROC(efi_call0)
24373
24374@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24375 call *%rdi
24376 addq $32, %rsp
24377 RESTORE_XMM
24378+ pax_force_retaddr 0, 1
24379 ret
24380 ENDPROC(efi_call1)
24381
24382@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24383 call *%rdi
24384 addq $32, %rsp
24385 RESTORE_XMM
24386+ pax_force_retaddr 0, 1
24387 ret
24388 ENDPROC(efi_call2)
24389
24390@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24391 call *%rdi
24392 addq $32, %rsp
24393 RESTORE_XMM
24394+ pax_force_retaddr 0, 1
24395 ret
24396 ENDPROC(efi_call3)
24397
24398@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24399 call *%rdi
24400 addq $32, %rsp
24401 RESTORE_XMM
24402+ pax_force_retaddr 0, 1
24403 ret
24404 ENDPROC(efi_call4)
24405
24406@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24407 call *%rdi
24408 addq $48, %rsp
24409 RESTORE_XMM
24410+ pax_force_retaddr 0, 1
24411 ret
24412 ENDPROC(efi_call5)
24413
24414@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24415 call *%rdi
24416 addq $48, %rsp
24417 RESTORE_XMM
24418+ pax_force_retaddr 0, 1
24419 ret
24420 ENDPROC(efi_call6)
24421diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24422index fe73276..70fe25a 100644
24423--- a/arch/x86/platform/mrst/mrst.c
24424+++ b/arch/x86/platform/mrst/mrst.c
24425@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
24426 }
24427
24428 /* Reboot and power off are handled by the SCU on a MID device */
24429-static void mrst_power_off(void)
24430+static __noreturn void mrst_power_off(void)
24431 {
24432 intel_scu_ipc_simple_command(0xf1, 1);
24433+ BUG();
24434 }
24435
24436-static void mrst_reboot(void)
24437+static __noreturn void mrst_reboot(void)
24438 {
24439 intel_scu_ipc_simple_command(0xf1, 0);
24440+ BUG();
24441 }
24442
24443 /*
24444diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
24445index 5b55219..b326540 100644
24446--- a/arch/x86/platform/uv/tlb_uv.c
24447+++ b/arch/x86/platform/uv/tlb_uv.c
24448@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
24449 struct bau_control *smaster = bcp->socket_master;
24450 struct reset_args reset_args;
24451
24452+ pax_track_stack();
24453+
24454 reset_args.sender = sender;
24455 cpus_clear(*mask);
24456 /* find a single cpu for each uvhub in this distribution mask */
24457diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24458index 87bb35e..eff2da8 100644
24459--- a/arch/x86/power/cpu.c
24460+++ b/arch/x86/power/cpu.c
24461@@ -130,7 +130,7 @@ static void do_fpu_end(void)
24462 static void fix_processor_context(void)
24463 {
24464 int cpu = smp_processor_id();
24465- struct tss_struct *t = &per_cpu(init_tss, cpu);
24466+ struct tss_struct *t = init_tss + cpu;
24467
24468 set_tss_desc(cpu, t); /*
24469 * This just modifies memory; should not be
24470@@ -140,7 +140,9 @@ static void fix_processor_context(void)
24471 */
24472
24473 #ifdef CONFIG_X86_64
24474+ pax_open_kernel();
24475 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24476+ pax_close_kernel();
24477
24478 syscall_init(); /* This sets MSR_*STAR and related */
24479 #endif
24480diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24481index 5d17950..2253fc9 100644
24482--- a/arch/x86/vdso/Makefile
24483+++ b/arch/x86/vdso/Makefile
24484@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24485 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24486 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24487
24488-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24489+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24490 GCOV_PROFILE := n
24491
24492 #
24493diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24494index 468d591..8e80a0a 100644
24495--- a/arch/x86/vdso/vdso32-setup.c
24496+++ b/arch/x86/vdso/vdso32-setup.c
24497@@ -25,6 +25,7 @@
24498 #include <asm/tlbflush.h>
24499 #include <asm/vdso.h>
24500 #include <asm/proto.h>
24501+#include <asm/mman.h>
24502
24503 enum {
24504 VDSO_DISABLED = 0,
24505@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24506 void enable_sep_cpu(void)
24507 {
24508 int cpu = get_cpu();
24509- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24510+ struct tss_struct *tss = init_tss + cpu;
24511
24512 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24513 put_cpu();
24514@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24515 gate_vma.vm_start = FIXADDR_USER_START;
24516 gate_vma.vm_end = FIXADDR_USER_END;
24517 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24518- gate_vma.vm_page_prot = __P101;
24519+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24520 /*
24521 * Make sure the vDSO gets into every core dump.
24522 * Dumping its contents makes post-mortem fully interpretable later
24523@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24524 if (compat)
24525 addr = VDSO_HIGH_BASE;
24526 else {
24527- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24528+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24529 if (IS_ERR_VALUE(addr)) {
24530 ret = addr;
24531 goto up_fail;
24532 }
24533 }
24534
24535- current->mm->context.vdso = (void *)addr;
24536+ current->mm->context.vdso = addr;
24537
24538 if (compat_uses_vma || !compat) {
24539 /*
24540@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24541 }
24542
24543 current_thread_info()->sysenter_return =
24544- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24545+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24546
24547 up_fail:
24548 if (ret)
24549- current->mm->context.vdso = NULL;
24550+ current->mm->context.vdso = 0;
24551
24552 up_write(&mm->mmap_sem);
24553
24554@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24555
24556 const char *arch_vma_name(struct vm_area_struct *vma)
24557 {
24558- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24559+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24560 return "[vdso]";
24561+
24562+#ifdef CONFIG_PAX_SEGMEXEC
24563+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24564+ return "[vdso]";
24565+#endif
24566+
24567 return NULL;
24568 }
24569
24570@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24571 * Check to see if the corresponding task was created in compat vdso
24572 * mode.
24573 */
24574- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24575+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24576 return &gate_vma;
24577 return NULL;
24578 }
24579diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24580index 316fbca..4638633 100644
24581--- a/arch/x86/vdso/vma.c
24582+++ b/arch/x86/vdso/vma.c
24583@@ -16,8 +16,6 @@
24584 #include <asm/vdso.h>
24585 #include <asm/page.h>
24586
24587-unsigned int __read_mostly vdso_enabled = 1;
24588-
24589 extern char vdso_start[], vdso_end[];
24590 extern unsigned short vdso_sync_cpuid;
24591
24592@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24593 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24594 {
24595 struct mm_struct *mm = current->mm;
24596- unsigned long addr;
24597+ unsigned long addr = 0;
24598 int ret;
24599
24600- if (!vdso_enabled)
24601- return 0;
24602-
24603 down_write(&mm->mmap_sem);
24604+
24605+#ifdef CONFIG_PAX_RANDMMAP
24606+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24607+#endif
24608+
24609 addr = vdso_addr(mm->start_stack, vdso_size);
24610 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24611 if (IS_ERR_VALUE(addr)) {
24612@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24613 goto up_fail;
24614 }
24615
24616- current->mm->context.vdso = (void *)addr;
24617+ mm->context.vdso = addr;
24618
24619 ret = install_special_mapping(mm, addr, vdso_size,
24620 VM_READ|VM_EXEC|
24621 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24622 VM_ALWAYSDUMP,
24623 vdso_pages);
24624- if (ret) {
24625- current->mm->context.vdso = NULL;
24626- goto up_fail;
24627- }
24628+
24629+ if (ret)
24630+ mm->context.vdso = 0;
24631
24632 up_fail:
24633 up_write(&mm->mmap_sem);
24634 return ret;
24635 }
24636-
24637-static __init int vdso_setup(char *s)
24638-{
24639- vdso_enabled = simple_strtoul(s, NULL, 0);
24640- return 0;
24641-}
24642-__setup("vdso=", vdso_setup);
24643diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24644index 46c8069..6330d3c 100644
24645--- a/arch/x86/xen/enlighten.c
24646+++ b/arch/x86/xen/enlighten.c
24647@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24648
24649 struct shared_info xen_dummy_shared_info;
24650
24651-void *xen_initial_gdt;
24652-
24653 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24654 __read_mostly int xen_have_vector_callback;
24655 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24656@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24657 #endif
24658 };
24659
24660-static void xen_reboot(int reason)
24661+static __noreturn void xen_reboot(int reason)
24662 {
24663 struct sched_shutdown r = { .reason = reason };
24664
24665@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
24666 BUG();
24667 }
24668
24669-static void xen_restart(char *msg)
24670+static __noreturn void xen_restart(char *msg)
24671 {
24672 xen_reboot(SHUTDOWN_reboot);
24673 }
24674
24675-static void xen_emergency_restart(void)
24676+static __noreturn void xen_emergency_restart(void)
24677 {
24678 xen_reboot(SHUTDOWN_reboot);
24679 }
24680
24681-static void xen_machine_halt(void)
24682+static __noreturn void xen_machine_halt(void)
24683 {
24684 xen_reboot(SHUTDOWN_poweroff);
24685 }
24686@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void)
24687 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24688
24689 /* Work out if we support NX */
24690- x86_configure_nx();
24691+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24692+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24693+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24694+ unsigned l, h;
24695+
24696+ __supported_pte_mask |= _PAGE_NX;
24697+ rdmsr(MSR_EFER, l, h);
24698+ l |= EFER_NX;
24699+ wrmsr(MSR_EFER, l, h);
24700+ }
24701+#endif
24702
24703 xen_setup_features();
24704
24705@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void)
24706
24707 machine_ops = xen_machine_ops;
24708
24709- /*
24710- * The only reliable way to retain the initial address of the
24711- * percpu gdt_page is to remember it here, so we can go and
24712- * mark it RW later, when the initial percpu area is freed.
24713- */
24714- xen_initial_gdt = &per_cpu(gdt_page, 0);
24715-
24716 xen_smp_init();
24717
24718 #ifdef CONFIG_ACPI_NUMA
24719diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24720index 3dd53f9..5aa5df3 100644
24721--- a/arch/x86/xen/mmu.c
24722+++ b/arch/x86/xen/mmu.c
24723@@ -1768,6 +1768,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24724 convert_pfn_mfn(init_level4_pgt);
24725 convert_pfn_mfn(level3_ident_pgt);
24726 convert_pfn_mfn(level3_kernel_pgt);
24727+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24728+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24729+ convert_pfn_mfn(level3_vmemmap_pgt);
24730
24731 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24732 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24733@@ -1786,7 +1789,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24734 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24735 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24736 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24737+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24738+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24739+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24740 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24741+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24742 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24743 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24744
24745@@ -2000,6 +2007,7 @@ static void __init xen_post_allocator_init(void)
24746 pv_mmu_ops.set_pud = xen_set_pud;
24747 #if PAGETABLE_LEVELS == 4
24748 pv_mmu_ops.set_pgd = xen_set_pgd;
24749+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24750 #endif
24751
24752 /* This will work as long as patching hasn't happened yet
24753@@ -2081,6 +2089,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24754 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24755 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24756 .set_pgd = xen_set_pgd_hyper,
24757+ .set_pgd_batched = xen_set_pgd_hyper,
24758
24759 .alloc_pud = xen_alloc_pmd_init,
24760 .release_pud = xen_release_pmd_init,
24761diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24762index 041d4fe..7666b7e 100644
24763--- a/arch/x86/xen/smp.c
24764+++ b/arch/x86/xen/smp.c
24765@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24766 {
24767 BUG_ON(smp_processor_id() != 0);
24768 native_smp_prepare_boot_cpu();
24769-
24770- /* We've switched to the "real" per-cpu gdt, so make sure the
24771- old memory can be recycled */
24772- make_lowmem_page_readwrite(xen_initial_gdt);
24773-
24774 xen_filter_cpu_maps();
24775 xen_setup_vcpu_info_placement();
24776 }
24777@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24778 gdt = get_cpu_gdt_table(cpu);
24779
24780 ctxt->flags = VGCF_IN_KERNEL;
24781- ctxt->user_regs.ds = __USER_DS;
24782- ctxt->user_regs.es = __USER_DS;
24783+ ctxt->user_regs.ds = __KERNEL_DS;
24784+ ctxt->user_regs.es = __KERNEL_DS;
24785 ctxt->user_regs.ss = __KERNEL_DS;
24786 #ifdef CONFIG_X86_32
24787 ctxt->user_regs.fs = __KERNEL_PERCPU;
24788- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24789+ savesegment(gs, ctxt->user_regs.gs);
24790 #else
24791 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24792 #endif
24793@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24794 int rc;
24795
24796 per_cpu(current_task, cpu) = idle;
24797+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24798 #ifdef CONFIG_X86_32
24799 irq_ctx_init(cpu);
24800 #else
24801 clear_tsk_thread_flag(idle, TIF_FORK);
24802- per_cpu(kernel_stack, cpu) =
24803- (unsigned long)task_stack_page(idle) -
24804- KERNEL_STACK_OFFSET + THREAD_SIZE;
24805+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24806 #endif
24807 xen_setup_runstate_info(cpu);
24808 xen_setup_timer(cpu);
24809diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24810index b040b0e..8cc4fe0 100644
24811--- a/arch/x86/xen/xen-asm_32.S
24812+++ b/arch/x86/xen/xen-asm_32.S
24813@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24814 ESP_OFFSET=4 # bytes pushed onto stack
24815
24816 /*
24817- * Store vcpu_info pointer for easy access. Do it this way to
24818- * avoid having to reload %fs
24819+ * Store vcpu_info pointer for easy access.
24820 */
24821 #ifdef CONFIG_SMP
24822- GET_THREAD_INFO(%eax)
24823- movl TI_cpu(%eax), %eax
24824- movl __per_cpu_offset(,%eax,4), %eax
24825- mov xen_vcpu(%eax), %eax
24826+ push %fs
24827+ mov $(__KERNEL_PERCPU), %eax
24828+ mov %eax, %fs
24829+ mov PER_CPU_VAR(xen_vcpu), %eax
24830+ pop %fs
24831 #else
24832 movl xen_vcpu, %eax
24833 #endif
24834diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24835index aaa7291..3f77960 100644
24836--- a/arch/x86/xen/xen-head.S
24837+++ b/arch/x86/xen/xen-head.S
24838@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24839 #ifdef CONFIG_X86_32
24840 mov %esi,xen_start_info
24841 mov $init_thread_union+THREAD_SIZE,%esp
24842+#ifdef CONFIG_SMP
24843+ movl $cpu_gdt_table,%edi
24844+ movl $__per_cpu_load,%eax
24845+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24846+ rorl $16,%eax
24847+ movb %al,__KERNEL_PERCPU + 4(%edi)
24848+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24849+ movl $__per_cpu_end - 1,%eax
24850+ subl $__per_cpu_start,%eax
24851+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24852+#endif
24853 #else
24854 mov %rsi,xen_start_info
24855 mov $init_thread_union+THREAD_SIZE,%rsp
24856diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24857index b095739..8c17bcd 100644
24858--- a/arch/x86/xen/xen-ops.h
24859+++ b/arch/x86/xen/xen-ops.h
24860@@ -10,8 +10,6 @@
24861 extern const char xen_hypervisor_callback[];
24862 extern const char xen_failsafe_callback[];
24863
24864-extern void *xen_initial_gdt;
24865-
24866 struct trap_info;
24867 void xen_copy_trap_info(struct trap_info *traps);
24868
24869diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24870index 58916af..9cb880b 100644
24871--- a/block/blk-iopoll.c
24872+++ b/block/blk-iopoll.c
24873@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24874 }
24875 EXPORT_SYMBOL(blk_iopoll_complete);
24876
24877-static void blk_iopoll_softirq(struct softirq_action *h)
24878+static void blk_iopoll_softirq(void)
24879 {
24880 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24881 int rearm = 0, budget = blk_iopoll_budget;
24882diff --git a/block/blk-map.c b/block/blk-map.c
24883index 164cd00..6d96fc1 100644
24884--- a/block/blk-map.c
24885+++ b/block/blk-map.c
24886@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24887 if (!len || !kbuf)
24888 return -EINVAL;
24889
24890- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24891+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24892 if (do_copy)
24893 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24894 else
24895diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24896index 1366a89..e17f54b 100644
24897--- a/block/blk-softirq.c
24898+++ b/block/blk-softirq.c
24899@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24900 * Softirq action handler - move entries to local list and loop over them
24901 * while passing them to the queue registered handler.
24902 */
24903-static void blk_done_softirq(struct softirq_action *h)
24904+static void blk_done_softirq(void)
24905 {
24906 struct list_head *cpu_list, local_list;
24907
24908diff --git a/block/bsg.c b/block/bsg.c
24909index 702f131..37808bf 100644
24910--- a/block/bsg.c
24911+++ b/block/bsg.c
24912@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24913 struct sg_io_v4 *hdr, struct bsg_device *bd,
24914 fmode_t has_write_perm)
24915 {
24916+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24917+ unsigned char *cmdptr;
24918+
24919 if (hdr->request_len > BLK_MAX_CDB) {
24920 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24921 if (!rq->cmd)
24922 return -ENOMEM;
24923- }
24924+ cmdptr = rq->cmd;
24925+ } else
24926+ cmdptr = tmpcmd;
24927
24928- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24929+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24930 hdr->request_len))
24931 return -EFAULT;
24932
24933+ if (cmdptr != rq->cmd)
24934+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24935+
24936 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24937 if (blk_verify_command(rq->cmd, has_write_perm))
24938 return -EPERM;
24939diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24940index 7b72502..646105c 100644
24941--- a/block/compat_ioctl.c
24942+++ b/block/compat_ioctl.c
24943@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24944 err |= __get_user(f->spec1, &uf->spec1);
24945 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24946 err |= __get_user(name, &uf->name);
24947- f->name = compat_ptr(name);
24948+ f->name = (void __force_kernel *)compat_ptr(name);
24949 if (err) {
24950 err = -EFAULT;
24951 goto out;
24952diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24953index 4f4230b..2ac96e7 100644
24954--- a/block/scsi_ioctl.c
24955+++ b/block/scsi_ioctl.c
24956@@ -24,6 +24,7 @@
24957 #include <linux/capability.h>
24958 #include <linux/completion.h>
24959 #include <linux/cdrom.h>
24960+#include <linux/ratelimit.h>
24961 #include <linux/slab.h>
24962 #include <linux/times.h>
24963 #include <asm/uaccess.h>
24964@@ -222,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24965 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24966 struct sg_io_hdr *hdr, fmode_t mode)
24967 {
24968- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24969+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24970+ unsigned char *cmdptr;
24971+
24972+ if (rq->cmd != rq->__cmd)
24973+ cmdptr = rq->cmd;
24974+ else
24975+ cmdptr = tmpcmd;
24976+
24977+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24978 return -EFAULT;
24979+
24980+ if (cmdptr != rq->cmd)
24981+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24982+
24983 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24984 return -EPERM;
24985
24986@@ -432,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24987 int err;
24988 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24989 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24990+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24991+ unsigned char *cmdptr;
24992
24993 if (!sic)
24994 return -EINVAL;
24995@@ -465,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24996 */
24997 err = -EFAULT;
24998 rq->cmd_len = cmdlen;
24999- if (copy_from_user(rq->cmd, sic->data, cmdlen))
25000+
25001+ if (rq->cmd != rq->__cmd)
25002+ cmdptr = rq->cmd;
25003+ else
25004+ cmdptr = tmpcmd;
25005+
25006+ if (copy_from_user(cmdptr, sic->data, cmdlen))
25007 goto error;
25008
25009+ if (rq->cmd != cmdptr)
25010+ memcpy(rq->cmd, cmdptr, cmdlen);
25011+
25012 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25013 goto error;
25014
25015@@ -691,6 +715,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
25016 }
25017 EXPORT_SYMBOL(scsi_cmd_ioctl);
25018
25019+int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
25020+{
25021+ if (bd && bd == bd->bd_contains)
25022+ return 0;
25023+
25024+ /* Actually none of these is particularly useful on a partition,
25025+ * but they are safe.
25026+ */
25027+ switch (cmd) {
25028+ case SCSI_IOCTL_GET_IDLUN:
25029+ case SCSI_IOCTL_GET_BUS_NUMBER:
25030+ case SCSI_IOCTL_GET_PCI:
25031+ case SCSI_IOCTL_PROBE_HOST:
25032+ case SG_GET_VERSION_NUM:
25033+ case SG_SET_TIMEOUT:
25034+ case SG_GET_TIMEOUT:
25035+ case SG_GET_RESERVED_SIZE:
25036+ case SG_SET_RESERVED_SIZE:
25037+ case SG_EMULATED_HOST:
25038+ return 0;
25039+ case CDROM_GET_CAPABILITY:
25040+ /* Keep this until we remove the printk below. udev sends it
25041+ * and we do not want to spam dmesg about it. CD-ROMs do
25042+ * not have partitions, so we get here only for disks.
25043+ */
25044+ return -ENOIOCTLCMD;
25045+ default:
25046+ break;
25047+ }
25048+
25049+ /* In particular, rule out all resets and host-specific ioctls. */
25050+ printk_ratelimited(KERN_WARNING
25051+ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
25052+
25053+ return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
25054+}
25055+EXPORT_SYMBOL(scsi_verify_blk_ioctl);
25056+
25057+int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
25058+ unsigned int cmd, void __user *arg)
25059+{
25060+ int ret;
25061+
25062+ ret = scsi_verify_blk_ioctl(bd, cmd);
25063+ if (ret < 0)
25064+ return ret;
25065+
25066+ return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
25067+}
25068+EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
25069+
25070 static int __init blk_scsi_ioctl_init(void)
25071 {
25072 blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
25073diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25074index 671d4d6..5f24030 100644
25075--- a/crypto/cryptd.c
25076+++ b/crypto/cryptd.c
25077@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25078
25079 struct cryptd_blkcipher_request_ctx {
25080 crypto_completion_t complete;
25081-};
25082+} __no_const;
25083
25084 struct cryptd_hash_ctx {
25085 struct crypto_shash *child;
25086@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25087
25088 struct cryptd_aead_request_ctx {
25089 crypto_completion_t complete;
25090-};
25091+} __no_const;
25092
25093 static void cryptd_queue_worker(struct work_struct *work);
25094
25095diff --git a/crypto/serpent.c b/crypto/serpent.c
25096index b651a55..a9ddd79b 100644
25097--- a/crypto/serpent.c
25098+++ b/crypto/serpent.c
25099@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
25100 u32 r0,r1,r2,r3,r4;
25101 int i;
25102
25103+ pax_track_stack();
25104+
25105 /* Copy key, add padding */
25106
25107 for (i = 0; i < keylen; ++i)
25108diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25109index 5d41894..22021e4 100644
25110--- a/drivers/acpi/apei/cper.c
25111+++ b/drivers/acpi/apei/cper.c
25112@@ -38,12 +38,12 @@
25113 */
25114 u64 cper_next_record_id(void)
25115 {
25116- static atomic64_t seq;
25117+ static atomic64_unchecked_t seq;
25118
25119- if (!atomic64_read(&seq))
25120- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25121+ if (!atomic64_read_unchecked(&seq))
25122+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25123
25124- return atomic64_inc_return(&seq);
25125+ return atomic64_inc_return_unchecked(&seq);
25126 }
25127 EXPORT_SYMBOL_GPL(cper_next_record_id);
25128
25129diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25130index 22f918b..9fafb84 100644
25131--- a/drivers/acpi/ec_sys.c
25132+++ b/drivers/acpi/ec_sys.c
25133@@ -11,6 +11,7 @@
25134 #include <linux/kernel.h>
25135 #include <linux/acpi.h>
25136 #include <linux/debugfs.h>
25137+#include <asm/uaccess.h>
25138 #include "internal.h"
25139
25140 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25141@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25142 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25143 */
25144 unsigned int size = EC_SPACE_SIZE;
25145- u8 *data = (u8 *) buf;
25146+ u8 data;
25147 loff_t init_off = *off;
25148 int err = 0;
25149
25150@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25151 size = count;
25152
25153 while (size) {
25154- err = ec_read(*off, &data[*off - init_off]);
25155+ err = ec_read(*off, &data);
25156 if (err)
25157 return err;
25158+ if (put_user(data, &buf[*off - init_off]))
25159+ return -EFAULT;
25160 *off += 1;
25161 size--;
25162 }
25163@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25164
25165 unsigned int size = count;
25166 loff_t init_off = *off;
25167- u8 *data = (u8 *) buf;
25168 int err = 0;
25169
25170 if (*off >= EC_SPACE_SIZE)
25171@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25172 }
25173
25174 while (size) {
25175- u8 byte_write = data[*off - init_off];
25176+ u8 byte_write;
25177+ if (get_user(byte_write, &buf[*off - init_off]))
25178+ return -EFAULT;
25179 err = ec_write(*off, byte_write);
25180 if (err)
25181 return err;
25182diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25183index f5f9869..da87aeb 100644
25184--- a/drivers/acpi/proc.c
25185+++ b/drivers/acpi/proc.c
25186@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file,
25187 size_t count, loff_t * ppos)
25188 {
25189 struct list_head *node, *next;
25190- char strbuf[5];
25191- char str[5] = "";
25192- unsigned int len = count;
25193+ char strbuf[5] = {0};
25194
25195- if (len > 4)
25196- len = 4;
25197- if (len < 0)
25198+ if (count > 4)
25199+ count = 4;
25200+ if (copy_from_user(strbuf, buffer, count))
25201 return -EFAULT;
25202-
25203- if (copy_from_user(strbuf, buffer, len))
25204- return -EFAULT;
25205- strbuf[len] = '\0';
25206- sscanf(strbuf, "%s", str);
25207+ strbuf[count] = '\0';
25208
25209 mutex_lock(&acpi_device_lock);
25210 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25211@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file,
25212 if (!dev->wakeup.flags.valid)
25213 continue;
25214
25215- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25216+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25217 if (device_can_wakeup(&dev->dev)) {
25218 bool enable = !device_may_wakeup(&dev->dev);
25219 device_set_wakeup_enable(&dev->dev, enable);
25220diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25221index a4e0f1b..9793b28 100644
25222--- a/drivers/acpi/processor_driver.c
25223+++ b/drivers/acpi/processor_driver.c
25224@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25225 return 0;
25226 #endif
25227
25228- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25229+ BUG_ON(pr->id >= nr_cpu_ids);
25230
25231 /*
25232 * Buggy BIOS check
25233diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25234index 4a3a5ae..cbee192 100644
25235--- a/drivers/ata/libata-core.c
25236+++ b/drivers/ata/libata-core.c
25237@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25238 struct ata_port *ap;
25239 unsigned int tag;
25240
25241- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25242+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25243 ap = qc->ap;
25244
25245 qc->flags = 0;
25246@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25247 struct ata_port *ap;
25248 struct ata_link *link;
25249
25250- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25251+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25252 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25253 ap = qc->ap;
25254 link = qc->dev->link;
25255@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25256 return;
25257
25258 spin_lock(&lock);
25259+ pax_open_kernel();
25260
25261 for (cur = ops->inherits; cur; cur = cur->inherits) {
25262 void **inherit = (void **)cur;
25263@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25264 if (IS_ERR(*pp))
25265 *pp = NULL;
25266
25267- ops->inherits = NULL;
25268+ *(struct ata_port_operations **)&ops->inherits = NULL;
25269
25270+ pax_close_kernel();
25271 spin_unlock(&lock);
25272 }
25273
25274diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
25275index ed16fbe..fc92cb8 100644
25276--- a/drivers/ata/libata-eh.c
25277+++ b/drivers/ata/libata-eh.c
25278@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
25279 {
25280 struct ata_link *link;
25281
25282+ pax_track_stack();
25283+
25284 ata_for_each_link(link, ap, HOST_FIRST)
25285 ata_eh_link_report(link);
25286 }
25287diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25288index 719bb73..79ce858 100644
25289--- a/drivers/ata/pata_arasan_cf.c
25290+++ b/drivers/ata/pata_arasan_cf.c
25291@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25292 /* Handle platform specific quirks */
25293 if (pdata->quirk) {
25294 if (pdata->quirk & CF_BROKEN_PIO) {
25295- ap->ops->set_piomode = NULL;
25296+ pax_open_kernel();
25297+ *(void **)&ap->ops->set_piomode = NULL;
25298+ pax_close_kernel();
25299 ap->pio_mask = 0;
25300 }
25301 if (pdata->quirk & CF_BROKEN_MWDMA)
25302diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25303index f9b983a..887b9d8 100644
25304--- a/drivers/atm/adummy.c
25305+++ b/drivers/atm/adummy.c
25306@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25307 vcc->pop(vcc, skb);
25308 else
25309 dev_kfree_skb_any(skb);
25310- atomic_inc(&vcc->stats->tx);
25311+ atomic_inc_unchecked(&vcc->stats->tx);
25312
25313 return 0;
25314 }
25315diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25316index f8f41e0..1f987dd 100644
25317--- a/drivers/atm/ambassador.c
25318+++ b/drivers/atm/ambassador.c
25319@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25320 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25321
25322 // VC layer stats
25323- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25324+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25325
25326 // free the descriptor
25327 kfree (tx_descr);
25328@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25329 dump_skb ("<<<", vc, skb);
25330
25331 // VC layer stats
25332- atomic_inc(&atm_vcc->stats->rx);
25333+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25334 __net_timestamp(skb);
25335 // end of our responsibility
25336 atm_vcc->push (atm_vcc, skb);
25337@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25338 } else {
25339 PRINTK (KERN_INFO, "dropped over-size frame");
25340 // should we count this?
25341- atomic_inc(&atm_vcc->stats->rx_drop);
25342+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25343 }
25344
25345 } else {
25346@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25347 }
25348
25349 if (check_area (skb->data, skb->len)) {
25350- atomic_inc(&atm_vcc->stats->tx_err);
25351+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25352 return -ENOMEM; // ?
25353 }
25354
25355diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25356index b22d71c..d6e1049 100644
25357--- a/drivers/atm/atmtcp.c
25358+++ b/drivers/atm/atmtcp.c
25359@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25360 if (vcc->pop) vcc->pop(vcc,skb);
25361 else dev_kfree_skb(skb);
25362 if (dev_data) return 0;
25363- atomic_inc(&vcc->stats->tx_err);
25364+ atomic_inc_unchecked(&vcc->stats->tx_err);
25365 return -ENOLINK;
25366 }
25367 size = skb->len+sizeof(struct atmtcp_hdr);
25368@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25369 if (!new_skb) {
25370 if (vcc->pop) vcc->pop(vcc,skb);
25371 else dev_kfree_skb(skb);
25372- atomic_inc(&vcc->stats->tx_err);
25373+ atomic_inc_unchecked(&vcc->stats->tx_err);
25374 return -ENOBUFS;
25375 }
25376 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25377@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25378 if (vcc->pop) vcc->pop(vcc,skb);
25379 else dev_kfree_skb(skb);
25380 out_vcc->push(out_vcc,new_skb);
25381- atomic_inc(&vcc->stats->tx);
25382- atomic_inc(&out_vcc->stats->rx);
25383+ atomic_inc_unchecked(&vcc->stats->tx);
25384+ atomic_inc_unchecked(&out_vcc->stats->rx);
25385 return 0;
25386 }
25387
25388@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25389 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25390 read_unlock(&vcc_sklist_lock);
25391 if (!out_vcc) {
25392- atomic_inc(&vcc->stats->tx_err);
25393+ atomic_inc_unchecked(&vcc->stats->tx_err);
25394 goto done;
25395 }
25396 skb_pull(skb,sizeof(struct atmtcp_hdr));
25397@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25398 __net_timestamp(new_skb);
25399 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25400 out_vcc->push(out_vcc,new_skb);
25401- atomic_inc(&vcc->stats->tx);
25402- atomic_inc(&out_vcc->stats->rx);
25403+ atomic_inc_unchecked(&vcc->stats->tx);
25404+ atomic_inc_unchecked(&out_vcc->stats->rx);
25405 done:
25406 if (vcc->pop) vcc->pop(vcc,skb);
25407 else dev_kfree_skb(skb);
25408diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25409index 9307141..d8521bf 100644
25410--- a/drivers/atm/eni.c
25411+++ b/drivers/atm/eni.c
25412@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25413 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25414 vcc->dev->number);
25415 length = 0;
25416- atomic_inc(&vcc->stats->rx_err);
25417+ atomic_inc_unchecked(&vcc->stats->rx_err);
25418 }
25419 else {
25420 length = ATM_CELL_SIZE-1; /* no HEC */
25421@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25422 size);
25423 }
25424 eff = length = 0;
25425- atomic_inc(&vcc->stats->rx_err);
25426+ atomic_inc_unchecked(&vcc->stats->rx_err);
25427 }
25428 else {
25429 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25430@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25431 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25432 vcc->dev->number,vcc->vci,length,size << 2,descr);
25433 length = eff = 0;
25434- atomic_inc(&vcc->stats->rx_err);
25435+ atomic_inc_unchecked(&vcc->stats->rx_err);
25436 }
25437 }
25438 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25439@@ -771,7 +771,7 @@ rx_dequeued++;
25440 vcc->push(vcc,skb);
25441 pushed++;
25442 }
25443- atomic_inc(&vcc->stats->rx);
25444+ atomic_inc_unchecked(&vcc->stats->rx);
25445 }
25446 wake_up(&eni_dev->rx_wait);
25447 }
25448@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev)
25449 PCI_DMA_TODEVICE);
25450 if (vcc->pop) vcc->pop(vcc,skb);
25451 else dev_kfree_skb_irq(skb);
25452- atomic_inc(&vcc->stats->tx);
25453+ atomic_inc_unchecked(&vcc->stats->tx);
25454 wake_up(&eni_dev->tx_wait);
25455 dma_complete++;
25456 }
25457@@ -1568,7 +1568,7 @@ tx_complete++;
25458 /*--------------------------------- entries ---------------------------------*/
25459
25460
25461-static const char *media_name[] __devinitdata = {
25462+static const char *media_name[] __devinitconst = {
25463 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25464 "UTP", "05?", "06?", "07?", /* 4- 7 */
25465 "TAXI","09?", "10?", "11?", /* 8-11 */
25466diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25467index 5072f8a..fa52520d 100644
25468--- a/drivers/atm/firestream.c
25469+++ b/drivers/atm/firestream.c
25470@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25471 }
25472 }
25473
25474- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25475+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25476
25477 fs_dprintk (FS_DEBUG_TXMEM, "i");
25478 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25479@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25480 #endif
25481 skb_put (skb, qe->p1 & 0xffff);
25482 ATM_SKB(skb)->vcc = atm_vcc;
25483- atomic_inc(&atm_vcc->stats->rx);
25484+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25485 __net_timestamp(skb);
25486 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25487 atm_vcc->push (atm_vcc, skb);
25488@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25489 kfree (pe);
25490 }
25491 if (atm_vcc)
25492- atomic_inc(&atm_vcc->stats->rx_drop);
25493+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25494 break;
25495 case 0x1f: /* Reassembly abort: no buffers. */
25496 /* Silently increment error counter. */
25497 if (atm_vcc)
25498- atomic_inc(&atm_vcc->stats->rx_drop);
25499+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25500 break;
25501 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25502 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25503diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25504index 361f5ae..7fc552d 100644
25505--- a/drivers/atm/fore200e.c
25506+++ b/drivers/atm/fore200e.c
25507@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25508 #endif
25509 /* check error condition */
25510 if (*entry->status & STATUS_ERROR)
25511- atomic_inc(&vcc->stats->tx_err);
25512+ atomic_inc_unchecked(&vcc->stats->tx_err);
25513 else
25514- atomic_inc(&vcc->stats->tx);
25515+ atomic_inc_unchecked(&vcc->stats->tx);
25516 }
25517 }
25518
25519@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25520 if (skb == NULL) {
25521 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25522
25523- atomic_inc(&vcc->stats->rx_drop);
25524+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25525 return -ENOMEM;
25526 }
25527
25528@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25529
25530 dev_kfree_skb_any(skb);
25531
25532- atomic_inc(&vcc->stats->rx_drop);
25533+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25534 return -ENOMEM;
25535 }
25536
25537 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25538
25539 vcc->push(vcc, skb);
25540- atomic_inc(&vcc->stats->rx);
25541+ atomic_inc_unchecked(&vcc->stats->rx);
25542
25543 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25544
25545@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25546 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25547 fore200e->atm_dev->number,
25548 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25549- atomic_inc(&vcc->stats->rx_err);
25550+ atomic_inc_unchecked(&vcc->stats->rx_err);
25551 }
25552 }
25553
25554@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25555 goto retry_here;
25556 }
25557
25558- atomic_inc(&vcc->stats->tx_err);
25559+ atomic_inc_unchecked(&vcc->stats->tx_err);
25560
25561 fore200e->tx_sat++;
25562 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25563diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25564index 9a51df4..f3bb5f8 100644
25565--- a/drivers/atm/he.c
25566+++ b/drivers/atm/he.c
25567@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25568
25569 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25570 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25571- atomic_inc(&vcc->stats->rx_drop);
25572+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25573 goto return_host_buffers;
25574 }
25575
25576@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25577 RBRQ_LEN_ERR(he_dev->rbrq_head)
25578 ? "LEN_ERR" : "",
25579 vcc->vpi, vcc->vci);
25580- atomic_inc(&vcc->stats->rx_err);
25581+ atomic_inc_unchecked(&vcc->stats->rx_err);
25582 goto return_host_buffers;
25583 }
25584
25585@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25586 vcc->push(vcc, skb);
25587 spin_lock(&he_dev->global_lock);
25588
25589- atomic_inc(&vcc->stats->rx);
25590+ atomic_inc_unchecked(&vcc->stats->rx);
25591
25592 return_host_buffers:
25593 ++pdus_assembled;
25594@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25595 tpd->vcc->pop(tpd->vcc, tpd->skb);
25596 else
25597 dev_kfree_skb_any(tpd->skb);
25598- atomic_inc(&tpd->vcc->stats->tx_err);
25599+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25600 }
25601 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25602 return;
25603@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25604 vcc->pop(vcc, skb);
25605 else
25606 dev_kfree_skb_any(skb);
25607- atomic_inc(&vcc->stats->tx_err);
25608+ atomic_inc_unchecked(&vcc->stats->tx_err);
25609 return -EINVAL;
25610 }
25611
25612@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25613 vcc->pop(vcc, skb);
25614 else
25615 dev_kfree_skb_any(skb);
25616- atomic_inc(&vcc->stats->tx_err);
25617+ atomic_inc_unchecked(&vcc->stats->tx_err);
25618 return -EINVAL;
25619 }
25620 #endif
25621@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25622 vcc->pop(vcc, skb);
25623 else
25624 dev_kfree_skb_any(skb);
25625- atomic_inc(&vcc->stats->tx_err);
25626+ atomic_inc_unchecked(&vcc->stats->tx_err);
25627 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25628 return -ENOMEM;
25629 }
25630@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25631 vcc->pop(vcc, skb);
25632 else
25633 dev_kfree_skb_any(skb);
25634- atomic_inc(&vcc->stats->tx_err);
25635+ atomic_inc_unchecked(&vcc->stats->tx_err);
25636 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25637 return -ENOMEM;
25638 }
25639@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25640 __enqueue_tpd(he_dev, tpd, cid);
25641 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25642
25643- atomic_inc(&vcc->stats->tx);
25644+ atomic_inc_unchecked(&vcc->stats->tx);
25645
25646 return 0;
25647 }
25648diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25649index b812103..e391a49 100644
25650--- a/drivers/atm/horizon.c
25651+++ b/drivers/atm/horizon.c
25652@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25653 {
25654 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25655 // VC layer stats
25656- atomic_inc(&vcc->stats->rx);
25657+ atomic_inc_unchecked(&vcc->stats->rx);
25658 __net_timestamp(skb);
25659 // end of our responsibility
25660 vcc->push (vcc, skb);
25661@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25662 dev->tx_iovec = NULL;
25663
25664 // VC layer stats
25665- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25666+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25667
25668 // free the skb
25669 hrz_kfree_skb (skb);
25670diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25671index db06f34..dcebb61 100644
25672--- a/drivers/atm/idt77252.c
25673+++ b/drivers/atm/idt77252.c
25674@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25675 else
25676 dev_kfree_skb(skb);
25677
25678- atomic_inc(&vcc->stats->tx);
25679+ atomic_inc_unchecked(&vcc->stats->tx);
25680 }
25681
25682 atomic_dec(&scq->used);
25683@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25684 if ((sb = dev_alloc_skb(64)) == NULL) {
25685 printk("%s: Can't allocate buffers for aal0.\n",
25686 card->name);
25687- atomic_add(i, &vcc->stats->rx_drop);
25688+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25689 break;
25690 }
25691 if (!atm_charge(vcc, sb->truesize)) {
25692 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25693 card->name);
25694- atomic_add(i - 1, &vcc->stats->rx_drop);
25695+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25696 dev_kfree_skb(sb);
25697 break;
25698 }
25699@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25700 ATM_SKB(sb)->vcc = vcc;
25701 __net_timestamp(sb);
25702 vcc->push(vcc, sb);
25703- atomic_inc(&vcc->stats->rx);
25704+ atomic_inc_unchecked(&vcc->stats->rx);
25705
25706 cell += ATM_CELL_PAYLOAD;
25707 }
25708@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25709 "(CDC: %08x)\n",
25710 card->name, len, rpp->len, readl(SAR_REG_CDC));
25711 recycle_rx_pool_skb(card, rpp);
25712- atomic_inc(&vcc->stats->rx_err);
25713+ atomic_inc_unchecked(&vcc->stats->rx_err);
25714 return;
25715 }
25716 if (stat & SAR_RSQE_CRC) {
25717 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25718 recycle_rx_pool_skb(card, rpp);
25719- atomic_inc(&vcc->stats->rx_err);
25720+ atomic_inc_unchecked(&vcc->stats->rx_err);
25721 return;
25722 }
25723 if (skb_queue_len(&rpp->queue) > 1) {
25724@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25725 RXPRINTK("%s: Can't alloc RX skb.\n",
25726 card->name);
25727 recycle_rx_pool_skb(card, rpp);
25728- atomic_inc(&vcc->stats->rx_err);
25729+ atomic_inc_unchecked(&vcc->stats->rx_err);
25730 return;
25731 }
25732 if (!atm_charge(vcc, skb->truesize)) {
25733@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25734 __net_timestamp(skb);
25735
25736 vcc->push(vcc, skb);
25737- atomic_inc(&vcc->stats->rx);
25738+ atomic_inc_unchecked(&vcc->stats->rx);
25739
25740 return;
25741 }
25742@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25743 __net_timestamp(skb);
25744
25745 vcc->push(vcc, skb);
25746- atomic_inc(&vcc->stats->rx);
25747+ atomic_inc_unchecked(&vcc->stats->rx);
25748
25749 if (skb->truesize > SAR_FB_SIZE_3)
25750 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25751@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25752 if (vcc->qos.aal != ATM_AAL0) {
25753 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25754 card->name, vpi, vci);
25755- atomic_inc(&vcc->stats->rx_drop);
25756+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25757 goto drop;
25758 }
25759
25760 if ((sb = dev_alloc_skb(64)) == NULL) {
25761 printk("%s: Can't allocate buffers for AAL0.\n",
25762 card->name);
25763- atomic_inc(&vcc->stats->rx_err);
25764+ atomic_inc_unchecked(&vcc->stats->rx_err);
25765 goto drop;
25766 }
25767
25768@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25769 ATM_SKB(sb)->vcc = vcc;
25770 __net_timestamp(sb);
25771 vcc->push(vcc, sb);
25772- atomic_inc(&vcc->stats->rx);
25773+ atomic_inc_unchecked(&vcc->stats->rx);
25774
25775 drop:
25776 skb_pull(queue, 64);
25777@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25778
25779 if (vc == NULL) {
25780 printk("%s: NULL connection in send().\n", card->name);
25781- atomic_inc(&vcc->stats->tx_err);
25782+ atomic_inc_unchecked(&vcc->stats->tx_err);
25783 dev_kfree_skb(skb);
25784 return -EINVAL;
25785 }
25786 if (!test_bit(VCF_TX, &vc->flags)) {
25787 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25788- atomic_inc(&vcc->stats->tx_err);
25789+ atomic_inc_unchecked(&vcc->stats->tx_err);
25790 dev_kfree_skb(skb);
25791 return -EINVAL;
25792 }
25793@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25794 break;
25795 default:
25796 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25797- atomic_inc(&vcc->stats->tx_err);
25798+ atomic_inc_unchecked(&vcc->stats->tx_err);
25799 dev_kfree_skb(skb);
25800 return -EINVAL;
25801 }
25802
25803 if (skb_shinfo(skb)->nr_frags != 0) {
25804 printk("%s: No scatter-gather yet.\n", card->name);
25805- atomic_inc(&vcc->stats->tx_err);
25806+ atomic_inc_unchecked(&vcc->stats->tx_err);
25807 dev_kfree_skb(skb);
25808 return -EINVAL;
25809 }
25810@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25811
25812 err = queue_skb(card, vc, skb, oam);
25813 if (err) {
25814- atomic_inc(&vcc->stats->tx_err);
25815+ atomic_inc_unchecked(&vcc->stats->tx_err);
25816 dev_kfree_skb(skb);
25817 return err;
25818 }
25819@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25820 skb = dev_alloc_skb(64);
25821 if (!skb) {
25822 printk("%s: Out of memory in send_oam().\n", card->name);
25823- atomic_inc(&vcc->stats->tx_err);
25824+ atomic_inc_unchecked(&vcc->stats->tx_err);
25825 return -ENOMEM;
25826 }
25827 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25828diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25829index cb90f7a..bd33566 100644
25830--- a/drivers/atm/iphase.c
25831+++ b/drivers/atm/iphase.c
25832@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
25833 status = (u_short) (buf_desc_ptr->desc_mode);
25834 if (status & (RX_CER | RX_PTE | RX_OFL))
25835 {
25836- atomic_inc(&vcc->stats->rx_err);
25837+ atomic_inc_unchecked(&vcc->stats->rx_err);
25838 IF_ERR(printk("IA: bad packet, dropping it");)
25839 if (status & RX_CER) {
25840 IF_ERR(printk(" cause: packet CRC error\n");)
25841@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25842 len = dma_addr - buf_addr;
25843 if (len > iadev->rx_buf_sz) {
25844 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25845- atomic_inc(&vcc->stats->rx_err);
25846+ atomic_inc_unchecked(&vcc->stats->rx_err);
25847 goto out_free_desc;
25848 }
25849
25850@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25851 ia_vcc = INPH_IA_VCC(vcc);
25852 if (ia_vcc == NULL)
25853 {
25854- atomic_inc(&vcc->stats->rx_err);
25855+ atomic_inc_unchecked(&vcc->stats->rx_err);
25856 dev_kfree_skb_any(skb);
25857 atm_return(vcc, atm_guess_pdu2truesize(len));
25858 goto INCR_DLE;
25859@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25860 if ((length > iadev->rx_buf_sz) || (length >
25861 (skb->len - sizeof(struct cpcs_trailer))))
25862 {
25863- atomic_inc(&vcc->stats->rx_err);
25864+ atomic_inc_unchecked(&vcc->stats->rx_err);
25865 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25866 length, skb->len);)
25867 dev_kfree_skb_any(skb);
25868@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25869
25870 IF_RX(printk("rx_dle_intr: skb push");)
25871 vcc->push(vcc,skb);
25872- atomic_inc(&vcc->stats->rx);
25873+ atomic_inc_unchecked(&vcc->stats->rx);
25874 iadev->rx_pkt_cnt++;
25875 }
25876 INCR_DLE:
25877@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25878 {
25879 struct k_sonet_stats *stats;
25880 stats = &PRIV(_ia_dev[board])->sonet_stats;
25881- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25882- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25883- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25884- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25885- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25886- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25887- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25888- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25889- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25890+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25891+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25892+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25893+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25894+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25895+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25896+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25897+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25898+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25899 }
25900 ia_cmds.status = 0;
25901 break;
25902@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25903 if ((desc == 0) || (desc > iadev->num_tx_desc))
25904 {
25905 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25906- atomic_inc(&vcc->stats->tx);
25907+ atomic_inc_unchecked(&vcc->stats->tx);
25908 if (vcc->pop)
25909 vcc->pop(vcc, skb);
25910 else
25911@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25912 ATM_DESC(skb) = vcc->vci;
25913 skb_queue_tail(&iadev->tx_dma_q, skb);
25914
25915- atomic_inc(&vcc->stats->tx);
25916+ atomic_inc_unchecked(&vcc->stats->tx);
25917 iadev->tx_pkt_cnt++;
25918 /* Increment transaction counter */
25919 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25920
25921 #if 0
25922 /* add flow control logic */
25923- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25924+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25925 if (iavcc->vc_desc_cnt > 10) {
25926 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25927 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25928diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25929index e828c54..ae83976 100644
25930--- a/drivers/atm/lanai.c
25931+++ b/drivers/atm/lanai.c
25932@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25933 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25934 lanai_endtx(lanai, lvcc);
25935 lanai_free_skb(lvcc->tx.atmvcc, skb);
25936- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25937+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25938 }
25939
25940 /* Try to fill the buffer - don't call unless there is backlog */
25941@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25942 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25943 __net_timestamp(skb);
25944 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25945- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25946+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25947 out:
25948 lvcc->rx.buf.ptr = end;
25949 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25950@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25951 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25952 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25953 lanai->stats.service_rxnotaal5++;
25954- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25955+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25956 return 0;
25957 }
25958 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25959@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25960 int bytes;
25961 read_unlock(&vcc_sklist_lock);
25962 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25963- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25964+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25965 lvcc->stats.x.aal5.service_trash++;
25966 bytes = (SERVICE_GET_END(s) * 16) -
25967 (((unsigned long) lvcc->rx.buf.ptr) -
25968@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25969 }
25970 if (s & SERVICE_STREAM) {
25971 read_unlock(&vcc_sklist_lock);
25972- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25973+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25974 lvcc->stats.x.aal5.service_stream++;
25975 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25976 "PDU on VCI %d!\n", lanai->number, vci);
25977@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25978 return 0;
25979 }
25980 DPRINTK("got rx crc error on vci %d\n", vci);
25981- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25982+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25983 lvcc->stats.x.aal5.service_rxcrc++;
25984 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25985 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25986diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25987index 1c70c45..300718d 100644
25988--- a/drivers/atm/nicstar.c
25989+++ b/drivers/atm/nicstar.c
25990@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25991 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25992 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25993 card->index);
25994- atomic_inc(&vcc->stats->tx_err);
25995+ atomic_inc_unchecked(&vcc->stats->tx_err);
25996 dev_kfree_skb_any(skb);
25997 return -EINVAL;
25998 }
25999@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26000 if (!vc->tx) {
26001 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26002 card->index);
26003- atomic_inc(&vcc->stats->tx_err);
26004+ atomic_inc_unchecked(&vcc->stats->tx_err);
26005 dev_kfree_skb_any(skb);
26006 return -EINVAL;
26007 }
26008@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26009 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26010 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26011 card->index);
26012- atomic_inc(&vcc->stats->tx_err);
26013+ atomic_inc_unchecked(&vcc->stats->tx_err);
26014 dev_kfree_skb_any(skb);
26015 return -EINVAL;
26016 }
26017
26018 if (skb_shinfo(skb)->nr_frags != 0) {
26019 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26020- atomic_inc(&vcc->stats->tx_err);
26021+ atomic_inc_unchecked(&vcc->stats->tx_err);
26022 dev_kfree_skb_any(skb);
26023 return -EINVAL;
26024 }
26025@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26026 }
26027
26028 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26029- atomic_inc(&vcc->stats->tx_err);
26030+ atomic_inc_unchecked(&vcc->stats->tx_err);
26031 dev_kfree_skb_any(skb);
26032 return -EIO;
26033 }
26034- atomic_inc(&vcc->stats->tx);
26035+ atomic_inc_unchecked(&vcc->stats->tx);
26036
26037 return 0;
26038 }
26039@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26040 printk
26041 ("nicstar%d: Can't allocate buffers for aal0.\n",
26042 card->index);
26043- atomic_add(i, &vcc->stats->rx_drop);
26044+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26045 break;
26046 }
26047 if (!atm_charge(vcc, sb->truesize)) {
26048 RXPRINTK
26049 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26050 card->index);
26051- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26052+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26053 dev_kfree_skb_any(sb);
26054 break;
26055 }
26056@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26057 ATM_SKB(sb)->vcc = vcc;
26058 __net_timestamp(sb);
26059 vcc->push(vcc, sb);
26060- atomic_inc(&vcc->stats->rx);
26061+ atomic_inc_unchecked(&vcc->stats->rx);
26062 cell += ATM_CELL_PAYLOAD;
26063 }
26064
26065@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26066 if (iovb == NULL) {
26067 printk("nicstar%d: Out of iovec buffers.\n",
26068 card->index);
26069- atomic_inc(&vcc->stats->rx_drop);
26070+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26071 recycle_rx_buf(card, skb);
26072 return;
26073 }
26074@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26075 small or large buffer itself. */
26076 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26077 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26078- atomic_inc(&vcc->stats->rx_err);
26079+ atomic_inc_unchecked(&vcc->stats->rx_err);
26080 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26081 NS_MAX_IOVECS);
26082 NS_PRV_IOVCNT(iovb) = 0;
26083@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26084 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26085 card->index);
26086 which_list(card, skb);
26087- atomic_inc(&vcc->stats->rx_err);
26088+ atomic_inc_unchecked(&vcc->stats->rx_err);
26089 recycle_rx_buf(card, skb);
26090 vc->rx_iov = NULL;
26091 recycle_iov_buf(card, iovb);
26092@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26093 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26094 card->index);
26095 which_list(card, skb);
26096- atomic_inc(&vcc->stats->rx_err);
26097+ atomic_inc_unchecked(&vcc->stats->rx_err);
26098 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26099 NS_PRV_IOVCNT(iovb));
26100 vc->rx_iov = NULL;
26101@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26102 printk(" - PDU size mismatch.\n");
26103 else
26104 printk(".\n");
26105- atomic_inc(&vcc->stats->rx_err);
26106+ atomic_inc_unchecked(&vcc->stats->rx_err);
26107 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26108 NS_PRV_IOVCNT(iovb));
26109 vc->rx_iov = NULL;
26110@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26111 /* skb points to a small buffer */
26112 if (!atm_charge(vcc, skb->truesize)) {
26113 push_rxbufs(card, skb);
26114- atomic_inc(&vcc->stats->rx_drop);
26115+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26116 } else {
26117 skb_put(skb, len);
26118 dequeue_sm_buf(card, skb);
26119@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26120 ATM_SKB(skb)->vcc = vcc;
26121 __net_timestamp(skb);
26122 vcc->push(vcc, skb);
26123- atomic_inc(&vcc->stats->rx);
26124+ atomic_inc_unchecked(&vcc->stats->rx);
26125 }
26126 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26127 struct sk_buff *sb;
26128@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26129 if (len <= NS_SMBUFSIZE) {
26130 if (!atm_charge(vcc, sb->truesize)) {
26131 push_rxbufs(card, sb);
26132- atomic_inc(&vcc->stats->rx_drop);
26133+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26134 } else {
26135 skb_put(sb, len);
26136 dequeue_sm_buf(card, sb);
26137@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26138 ATM_SKB(sb)->vcc = vcc;
26139 __net_timestamp(sb);
26140 vcc->push(vcc, sb);
26141- atomic_inc(&vcc->stats->rx);
26142+ atomic_inc_unchecked(&vcc->stats->rx);
26143 }
26144
26145 push_rxbufs(card, skb);
26146@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26147
26148 if (!atm_charge(vcc, skb->truesize)) {
26149 push_rxbufs(card, skb);
26150- atomic_inc(&vcc->stats->rx_drop);
26151+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26152 } else {
26153 dequeue_lg_buf(card, skb);
26154 #ifdef NS_USE_DESTRUCTORS
26155@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26156 ATM_SKB(skb)->vcc = vcc;
26157 __net_timestamp(skb);
26158 vcc->push(vcc, skb);
26159- atomic_inc(&vcc->stats->rx);
26160+ atomic_inc_unchecked(&vcc->stats->rx);
26161 }
26162
26163 push_rxbufs(card, sb);
26164@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26165 printk
26166 ("nicstar%d: Out of huge buffers.\n",
26167 card->index);
26168- atomic_inc(&vcc->stats->rx_drop);
26169+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26170 recycle_iovec_rx_bufs(card,
26171 (struct iovec *)
26172 iovb->data,
26173@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26174 card->hbpool.count++;
26175 } else
26176 dev_kfree_skb_any(hb);
26177- atomic_inc(&vcc->stats->rx_drop);
26178+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26179 } else {
26180 /* Copy the small buffer to the huge buffer */
26181 sb = (struct sk_buff *)iov->iov_base;
26182@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26183 #endif /* NS_USE_DESTRUCTORS */
26184 __net_timestamp(hb);
26185 vcc->push(vcc, hb);
26186- atomic_inc(&vcc->stats->rx);
26187+ atomic_inc_unchecked(&vcc->stats->rx);
26188 }
26189 }
26190
26191diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26192index 5d1d076..4f31f42 100644
26193--- a/drivers/atm/solos-pci.c
26194+++ b/drivers/atm/solos-pci.c
26195@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26196 }
26197 atm_charge(vcc, skb->truesize);
26198 vcc->push(vcc, skb);
26199- atomic_inc(&vcc->stats->rx);
26200+ atomic_inc_unchecked(&vcc->stats->rx);
26201 break;
26202
26203 case PKT_STATUS:
26204@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf)
26205 char msg[500];
26206 char item[10];
26207
26208+ pax_track_stack();
26209+
26210 len = buf->len;
26211 for (i = 0; i < len; i++){
26212 if(i % 8 == 0)
26213@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26214 vcc = SKB_CB(oldskb)->vcc;
26215
26216 if (vcc) {
26217- atomic_inc(&vcc->stats->tx);
26218+ atomic_inc_unchecked(&vcc->stats->tx);
26219 solos_pop(vcc, oldskb);
26220 } else
26221 dev_kfree_skb_irq(oldskb);
26222diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26223index 90f1ccc..04c4a1e 100644
26224--- a/drivers/atm/suni.c
26225+++ b/drivers/atm/suni.c
26226@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26227
26228
26229 #define ADD_LIMITED(s,v) \
26230- atomic_add((v),&stats->s); \
26231- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26232+ atomic_add_unchecked((v),&stats->s); \
26233+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26234
26235
26236 static void suni_hz(unsigned long from_timer)
26237diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26238index 5120a96..e2572bd 100644
26239--- a/drivers/atm/uPD98402.c
26240+++ b/drivers/atm/uPD98402.c
26241@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26242 struct sonet_stats tmp;
26243 int error = 0;
26244
26245- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26246+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26247 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26248 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26249 if (zero && !error) {
26250@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26251
26252
26253 #define ADD_LIMITED(s,v) \
26254- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26255- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26256- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26257+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26258+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26259+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26260
26261
26262 static void stat_event(struct atm_dev *dev)
26263@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26264 if (reason & uPD98402_INT_PFM) stat_event(dev);
26265 if (reason & uPD98402_INT_PCO) {
26266 (void) GET(PCOCR); /* clear interrupt cause */
26267- atomic_add(GET(HECCT),
26268+ atomic_add_unchecked(GET(HECCT),
26269 &PRIV(dev)->sonet_stats.uncorr_hcs);
26270 }
26271 if ((reason & uPD98402_INT_RFO) &&
26272@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26273 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26274 uPD98402_INT_LOS),PIMR); /* enable them */
26275 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26276- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26277- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26278- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26279+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26280+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26281+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26282 return 0;
26283 }
26284
26285diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26286index d889f56..17eb71e 100644
26287--- a/drivers/atm/zatm.c
26288+++ b/drivers/atm/zatm.c
26289@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26290 }
26291 if (!size) {
26292 dev_kfree_skb_irq(skb);
26293- if (vcc) atomic_inc(&vcc->stats->rx_err);
26294+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26295 continue;
26296 }
26297 if (!atm_charge(vcc,skb->truesize)) {
26298@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26299 skb->len = size;
26300 ATM_SKB(skb)->vcc = vcc;
26301 vcc->push(vcc,skb);
26302- atomic_inc(&vcc->stats->rx);
26303+ atomic_inc_unchecked(&vcc->stats->rx);
26304 }
26305 zout(pos & 0xffff,MTA(mbx));
26306 #if 0 /* probably a stupid idea */
26307@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26308 skb_queue_head(&zatm_vcc->backlog,skb);
26309 break;
26310 }
26311- atomic_inc(&vcc->stats->tx);
26312+ atomic_inc_unchecked(&vcc->stats->tx);
26313 wake_up(&zatm_vcc->tx_wait);
26314 }
26315
26316diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26317index a4760e0..51283cf 100644
26318--- a/drivers/base/devtmpfs.c
26319+++ b/drivers/base/devtmpfs.c
26320@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26321 if (!thread)
26322 return 0;
26323
26324- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26325+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26326 if (err)
26327 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26328 else
26329diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26330index 84f7c7d..37cfd87 100644
26331--- a/drivers/base/power/wakeup.c
26332+++ b/drivers/base/power/wakeup.c
26333@@ -29,14 +29,14 @@ bool events_check_enabled;
26334 * They need to be modified together atomically, so it's better to use one
26335 * atomic variable to hold them both.
26336 */
26337-static atomic_t combined_event_count = ATOMIC_INIT(0);
26338+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26339
26340 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26341 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26342
26343 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26344 {
26345- unsigned int comb = atomic_read(&combined_event_count);
26346+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26347
26348 *cnt = (comb >> IN_PROGRESS_BITS);
26349 *inpr = comb & MAX_IN_PROGRESS;
26350@@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26351 ws->last_time = ktime_get();
26352
26353 /* Increment the counter of events in progress. */
26354- atomic_inc(&combined_event_count);
26355+ atomic_inc_unchecked(&combined_event_count);
26356 }
26357
26358 /**
26359@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26360 * Increment the counter of registered wakeup events and decrement the
26361 * couter of wakeup events in progress simultaneously.
26362 */
26363- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26364+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26365 }
26366
26367 /**
26368diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
26369index e086fbb..398e1fe 100644
26370--- a/drivers/block/DAC960.c
26371+++ b/drivers/block/DAC960.c
26372@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
26373 unsigned long flags;
26374 int Channel, TargetID;
26375
26376+ pax_track_stack();
26377+
26378 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26379 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26380 sizeof(DAC960_SCSI_Inquiry_T) +
26381diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26382index c2f9b3e..11b8693 100644
26383--- a/drivers/block/cciss.c
26384+++ b/drivers/block/cciss.c
26385@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26386 int err;
26387 u32 cp;
26388
26389+ memset(&arg64, 0, sizeof(arg64));
26390+
26391 err = 0;
26392 err |=
26393 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26394@@ -1716,7 +1718,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
26395 case CCISS_BIG_PASSTHRU:
26396 return cciss_bigpassthru(h, argp);
26397
26398- /* scsi_cmd_ioctl handles these, below, though some are not */
26399+ /* scsi_cmd_blk_ioctl handles these, below, though some are not */
26400 /* very meaningful for cciss. SG_IO is the main one people want. */
26401
26402 case SG_GET_VERSION_NUM:
26403@@ -1727,9 +1729,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
26404 case SG_EMULATED_HOST:
26405 case SG_IO:
26406 case SCSI_IOCTL_SEND_COMMAND:
26407- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
26408+ return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
26409
26410- /* scsi_cmd_ioctl would normally handle these, below, but */
26411+ /* scsi_cmd_blk_ioctl would normally handle these, below, but */
26412 /* they aren't a good fit for cciss, as CD-ROMs are */
26413 /* not supported, and we don't have any bus/target/lun */
26414 /* which we present to the kernel. */
26415@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
26416 while (!list_empty(&h->reqQ)) {
26417 c = list_entry(h->reqQ.next, CommandList_struct, list);
26418 /* can't do anything if fifo is full */
26419- if ((h->access.fifo_full(h))) {
26420+ if ((h->access->fifo_full(h))) {
26421 dev_warn(&h->pdev->dev, "fifo full\n");
26422 break;
26423 }
26424@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
26425 h->Qdepth--;
26426
26427 /* Tell the controller execute command */
26428- h->access.submit_command(h, c);
26429+ h->access->submit_command(h, c);
26430
26431 /* Put job onto the completed Q */
26432 addQ(&h->cmpQ, c);
26433@@ -3422,17 +3424,17 @@ startio:
26434
26435 static inline unsigned long get_next_completion(ctlr_info_t *h)
26436 {
26437- return h->access.command_completed(h);
26438+ return h->access->command_completed(h);
26439 }
26440
26441 static inline int interrupt_pending(ctlr_info_t *h)
26442 {
26443- return h->access.intr_pending(h);
26444+ return h->access->intr_pending(h);
26445 }
26446
26447 static inline long interrupt_not_for_us(ctlr_info_t *h)
26448 {
26449- return ((h->access.intr_pending(h) == 0) ||
26450+ return ((h->access->intr_pending(h) == 0) ||
26451 (h->interrupts_enabled == 0));
26452 }
26453
26454@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h)
26455 u32 a;
26456
26457 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26458- return h->access.command_completed(h);
26459+ return h->access->command_completed(h);
26460
26461 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26462 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26463@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26464 trans_support & CFGTBL_Trans_use_short_tags);
26465
26466 /* Change the access methods to the performant access methods */
26467- h->access = SA5_performant_access;
26468+ h->access = &SA5_performant_access;
26469 h->transMethod = CFGTBL_Trans_Performant;
26470
26471 return;
26472@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26473 if (prod_index < 0)
26474 return -ENODEV;
26475 h->product_name = products[prod_index].product_name;
26476- h->access = *(products[prod_index].access);
26477+ h->access = products[prod_index].access;
26478
26479 if (cciss_board_disabled(h)) {
26480 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26481@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
26482 }
26483
26484 /* make sure the board interrupts are off */
26485- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26486+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26487 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26488 if (rc)
26489 goto clean2;
26490@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
26491 * fake ones to scoop up any residual completions.
26492 */
26493 spin_lock_irqsave(&h->lock, flags);
26494- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26495+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26496 spin_unlock_irqrestore(&h->lock, flags);
26497 free_irq(h->intr[PERF_MODE_INT], h);
26498 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26499@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
26500 dev_info(&h->pdev->dev, "Board READY.\n");
26501 dev_info(&h->pdev->dev,
26502 "Waiting for stale completions to drain.\n");
26503- h->access.set_intr_mask(h, CCISS_INTR_ON);
26504+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26505 msleep(10000);
26506- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26507+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26508
26509 rc = controller_reset_failed(h->cfgtable);
26510 if (rc)
26511@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
26512 cciss_scsi_setup(h);
26513
26514 /* Turn the interrupts on so we can service requests */
26515- h->access.set_intr_mask(h, CCISS_INTR_ON);
26516+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26517
26518 /* Get the firmware version */
26519 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26520@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26521 kfree(flush_buf);
26522 if (return_code != IO_OK)
26523 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26524- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26525+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26526 free_irq(h->intr[PERF_MODE_INT], h);
26527 }
26528
26529diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26530index c049548..a09cb6e 100644
26531--- a/drivers/block/cciss.h
26532+++ b/drivers/block/cciss.h
26533@@ -100,7 +100,7 @@ struct ctlr_info
26534 /* information about each logical volume */
26535 drive_info_struct *drv[CISS_MAX_LUN];
26536
26537- struct access_method access;
26538+ struct access_method *access;
26539
26540 /* queue and queue Info */
26541 struct list_head reqQ;
26542diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26543index b2fceb5..87fec83 100644
26544--- a/drivers/block/cpqarray.c
26545+++ b/drivers/block/cpqarray.c
26546@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26547 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26548 goto Enomem4;
26549 }
26550- hba[i]->access.set_intr_mask(hba[i], 0);
26551+ hba[i]->access->set_intr_mask(hba[i], 0);
26552 if (request_irq(hba[i]->intr, do_ida_intr,
26553 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26554 {
26555@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26556 add_timer(&hba[i]->timer);
26557
26558 /* Enable IRQ now that spinlock and rate limit timer are set up */
26559- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26560+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26561
26562 for(j=0; j<NWD; j++) {
26563 struct gendisk *disk = ida_gendisk[i][j];
26564@@ -694,7 +694,7 @@ DBGINFO(
26565 for(i=0; i<NR_PRODUCTS; i++) {
26566 if (board_id == products[i].board_id) {
26567 c->product_name = products[i].product_name;
26568- c->access = *(products[i].access);
26569+ c->access = products[i].access;
26570 break;
26571 }
26572 }
26573@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26574 hba[ctlr]->intr = intr;
26575 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26576 hba[ctlr]->product_name = products[j].product_name;
26577- hba[ctlr]->access = *(products[j].access);
26578+ hba[ctlr]->access = products[j].access;
26579 hba[ctlr]->ctlr = ctlr;
26580 hba[ctlr]->board_id = board_id;
26581 hba[ctlr]->pci_dev = NULL; /* not PCI */
26582@@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q)
26583 struct scatterlist tmp_sg[SG_MAX];
26584 int i, dir, seg;
26585
26586+ pax_track_stack();
26587+
26588 queue_next:
26589 creq = blk_peek_request(q);
26590 if (!creq)
26591@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
26592
26593 while((c = h->reqQ) != NULL) {
26594 /* Can't do anything if we're busy */
26595- if (h->access.fifo_full(h) == 0)
26596+ if (h->access->fifo_full(h) == 0)
26597 return;
26598
26599 /* Get the first entry from the request Q */
26600@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
26601 h->Qdepth--;
26602
26603 /* Tell the controller to do our bidding */
26604- h->access.submit_command(h, c);
26605+ h->access->submit_command(h, c);
26606
26607 /* Get onto the completion Q */
26608 addQ(&h->cmpQ, c);
26609@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26610 unsigned long flags;
26611 __u32 a,a1;
26612
26613- istat = h->access.intr_pending(h);
26614+ istat = h->access->intr_pending(h);
26615 /* Is this interrupt for us? */
26616 if (istat == 0)
26617 return IRQ_NONE;
26618@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26619 */
26620 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26621 if (istat & FIFO_NOT_EMPTY) {
26622- while((a = h->access.command_completed(h))) {
26623+ while((a = h->access->command_completed(h))) {
26624 a1 = a; a &= ~3;
26625 if ((c = h->cmpQ) == NULL)
26626 {
26627@@ -1449,11 +1451,11 @@ static int sendcmd(
26628 /*
26629 * Disable interrupt
26630 */
26631- info_p->access.set_intr_mask(info_p, 0);
26632+ info_p->access->set_intr_mask(info_p, 0);
26633 /* Make sure there is room in the command FIFO */
26634 /* Actually it should be completely empty at this time. */
26635 for (i = 200000; i > 0; i--) {
26636- temp = info_p->access.fifo_full(info_p);
26637+ temp = info_p->access->fifo_full(info_p);
26638 if (temp != 0) {
26639 break;
26640 }
26641@@ -1466,7 +1468,7 @@ DBG(
26642 /*
26643 * Send the cmd
26644 */
26645- info_p->access.submit_command(info_p, c);
26646+ info_p->access->submit_command(info_p, c);
26647 complete = pollcomplete(ctlr);
26648
26649 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26650@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26651 * we check the new geometry. Then turn interrupts back on when
26652 * we're done.
26653 */
26654- host->access.set_intr_mask(host, 0);
26655+ host->access->set_intr_mask(host, 0);
26656 getgeometry(ctlr);
26657- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26658+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26659
26660 for(i=0; i<NWD; i++) {
26661 struct gendisk *disk = ida_gendisk[ctlr][i];
26662@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
26663 /* Wait (up to 2 seconds) for a command to complete */
26664
26665 for (i = 200000; i > 0; i--) {
26666- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26667+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26668 if (done == 0) {
26669 udelay(10); /* a short fixed delay */
26670 } else
26671diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26672index be73e9d..7fbf140 100644
26673--- a/drivers/block/cpqarray.h
26674+++ b/drivers/block/cpqarray.h
26675@@ -99,7 +99,7 @@ struct ctlr_info {
26676 drv_info_t drv[NWD];
26677 struct proc_dir_entry *proc;
26678
26679- struct access_method access;
26680+ struct access_method *access;
26681
26682 cmdlist_t *reqQ;
26683 cmdlist_t *cmpQ;
26684diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26685index ef2ceed..c9cb18e 100644
26686--- a/drivers/block/drbd/drbd_int.h
26687+++ b/drivers/block/drbd/drbd_int.h
26688@@ -737,7 +737,7 @@ struct drbd_request;
26689 struct drbd_epoch {
26690 struct list_head list;
26691 unsigned int barrier_nr;
26692- atomic_t epoch_size; /* increased on every request added. */
26693+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26694 atomic_t active; /* increased on every req. added, and dec on every finished. */
26695 unsigned long flags;
26696 };
26697@@ -1109,7 +1109,7 @@ struct drbd_conf {
26698 void *int_dig_in;
26699 void *int_dig_vv;
26700 wait_queue_head_t seq_wait;
26701- atomic_t packet_seq;
26702+ atomic_unchecked_t packet_seq;
26703 unsigned int peer_seq;
26704 spinlock_t peer_seq_lock;
26705 unsigned int minor;
26706@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26707
26708 static inline void drbd_tcp_cork(struct socket *sock)
26709 {
26710- int __user val = 1;
26711+ int val = 1;
26712 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26713- (char __user *)&val, sizeof(val));
26714+ (char __force_user *)&val, sizeof(val));
26715 }
26716
26717 static inline void drbd_tcp_uncork(struct socket *sock)
26718 {
26719- int __user val = 0;
26720+ int val = 0;
26721 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26722- (char __user *)&val, sizeof(val));
26723+ (char __force_user *)&val, sizeof(val));
26724 }
26725
26726 static inline void drbd_tcp_nodelay(struct socket *sock)
26727 {
26728- int __user val = 1;
26729+ int val = 1;
26730 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26731- (char __user *)&val, sizeof(val));
26732+ (char __force_user *)&val, sizeof(val));
26733 }
26734
26735 static inline void drbd_tcp_quickack(struct socket *sock)
26736 {
26737- int __user val = 2;
26738+ int val = 2;
26739 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26740- (char __user *)&val, sizeof(val));
26741+ (char __force_user *)&val, sizeof(val));
26742 }
26743
26744 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26745diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26746index 0358e55..bc33689 100644
26747--- a/drivers/block/drbd/drbd_main.c
26748+++ b/drivers/block/drbd/drbd_main.c
26749@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26750 p.sector = sector;
26751 p.block_id = block_id;
26752 p.blksize = blksize;
26753- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26754+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26755
26756 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26757 return false;
26758@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26759 p.sector = cpu_to_be64(req->sector);
26760 p.block_id = (unsigned long)req;
26761 p.seq_num = cpu_to_be32(req->seq_num =
26762- atomic_add_return(1, &mdev->packet_seq));
26763+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26764
26765 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26766
26767@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26768 atomic_set(&mdev->unacked_cnt, 0);
26769 atomic_set(&mdev->local_cnt, 0);
26770 atomic_set(&mdev->net_cnt, 0);
26771- atomic_set(&mdev->packet_seq, 0);
26772+ atomic_set_unchecked(&mdev->packet_seq, 0);
26773 atomic_set(&mdev->pp_in_use, 0);
26774 atomic_set(&mdev->pp_in_use_by_net, 0);
26775 atomic_set(&mdev->rs_sect_in, 0);
26776@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26777 mdev->receiver.t_state);
26778
26779 /* no need to lock it, I'm the only thread alive */
26780- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26781- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26782+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26783+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26784 mdev->al_writ_cnt =
26785 mdev->bm_writ_cnt =
26786 mdev->read_cnt =
26787diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26788index 0feab26..5d9b3dd 100644
26789--- a/drivers/block/drbd/drbd_nl.c
26790+++ b/drivers/block/drbd/drbd_nl.c
26791@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26792 module_put(THIS_MODULE);
26793 }
26794
26795-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26796+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26797
26798 static unsigned short *
26799 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26800@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26801 cn_reply->id.idx = CN_IDX_DRBD;
26802 cn_reply->id.val = CN_VAL_DRBD;
26803
26804- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26805+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26806 cn_reply->ack = 0; /* not used here. */
26807 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26808 (int)((char *)tl - (char *)reply->tag_list);
26809@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26810 cn_reply->id.idx = CN_IDX_DRBD;
26811 cn_reply->id.val = CN_VAL_DRBD;
26812
26813- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26814+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26815 cn_reply->ack = 0; /* not used here. */
26816 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26817 (int)((char *)tl - (char *)reply->tag_list);
26818@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26819 cn_reply->id.idx = CN_IDX_DRBD;
26820 cn_reply->id.val = CN_VAL_DRBD;
26821
26822- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26823+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26824 cn_reply->ack = 0; // not used here.
26825 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26826 (int)((char*)tl - (char*)reply->tag_list);
26827@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26828 cn_reply->id.idx = CN_IDX_DRBD;
26829 cn_reply->id.val = CN_VAL_DRBD;
26830
26831- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26832+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26833 cn_reply->ack = 0; /* not used here. */
26834 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26835 (int)((char *)tl - (char *)reply->tag_list);
26836diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26837index 43beaca..4a5b1dd 100644
26838--- a/drivers/block/drbd/drbd_receiver.c
26839+++ b/drivers/block/drbd/drbd_receiver.c
26840@@ -894,7 +894,7 @@ retry:
26841 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26842 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26843
26844- atomic_set(&mdev->packet_seq, 0);
26845+ atomic_set_unchecked(&mdev->packet_seq, 0);
26846 mdev->peer_seq = 0;
26847
26848 drbd_thread_start(&mdev->asender);
26849@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26850 do {
26851 next_epoch = NULL;
26852
26853- epoch_size = atomic_read(&epoch->epoch_size);
26854+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26855
26856 switch (ev & ~EV_CLEANUP) {
26857 case EV_PUT:
26858@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26859 rv = FE_DESTROYED;
26860 } else {
26861 epoch->flags = 0;
26862- atomic_set(&epoch->epoch_size, 0);
26863+ atomic_set_unchecked(&epoch->epoch_size, 0);
26864 /* atomic_set(&epoch->active, 0); is already zero */
26865 if (rv == FE_STILL_LIVE)
26866 rv = FE_RECYCLED;
26867@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26868 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26869 drbd_flush(mdev);
26870
26871- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26872+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26873 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26874 if (epoch)
26875 break;
26876 }
26877
26878 epoch = mdev->current_epoch;
26879- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26880+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26881
26882 D_ASSERT(atomic_read(&epoch->active) == 0);
26883 D_ASSERT(epoch->flags == 0);
26884@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26885 }
26886
26887 epoch->flags = 0;
26888- atomic_set(&epoch->epoch_size, 0);
26889+ atomic_set_unchecked(&epoch->epoch_size, 0);
26890 atomic_set(&epoch->active, 0);
26891
26892 spin_lock(&mdev->epoch_lock);
26893- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26894+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26895 list_add(&epoch->list, &mdev->current_epoch->list);
26896 mdev->current_epoch = epoch;
26897 mdev->epochs++;
26898@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26899 spin_unlock(&mdev->peer_seq_lock);
26900
26901 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26902- atomic_inc(&mdev->current_epoch->epoch_size);
26903+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26904 return drbd_drain_block(mdev, data_size);
26905 }
26906
26907@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26908
26909 spin_lock(&mdev->epoch_lock);
26910 e->epoch = mdev->current_epoch;
26911- atomic_inc(&e->epoch->epoch_size);
26912+ atomic_inc_unchecked(&e->epoch->epoch_size);
26913 atomic_inc(&e->epoch->active);
26914 spin_unlock(&mdev->epoch_lock);
26915
26916@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26917 D_ASSERT(list_empty(&mdev->done_ee));
26918
26919 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26920- atomic_set(&mdev->current_epoch->epoch_size, 0);
26921+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26922 D_ASSERT(list_empty(&mdev->current_epoch->list));
26923 }
26924
26925diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26926index 4720c7a..2c49af1 100644
26927--- a/drivers/block/loop.c
26928+++ b/drivers/block/loop.c
26929@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct file *file,
26930 mm_segment_t old_fs = get_fs();
26931
26932 set_fs(get_ds());
26933- bw = file->f_op->write(file, buf, len, &pos);
26934+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26935 set_fs(old_fs);
26936 if (likely(bw == len))
26937 return 0;
26938diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
26939index f533f33..6177bcb 100644
26940--- a/drivers/block/nbd.c
26941+++ b/drivers/block/nbd.c
26942@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
26943 struct kvec iov;
26944 sigset_t blocked, oldset;
26945
26946+ pax_track_stack();
26947+
26948 if (unlikely(!sock)) {
26949 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26950 lo->disk->disk_name, (send ? "send" : "recv"));
26951@@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q)
26952 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26953 unsigned int cmd, unsigned long arg)
26954 {
26955+ pax_track_stack();
26956+
26957 switch (cmd) {
26958 case NBD_DISCONNECT: {
26959 struct request sreq;
26960diff --git a/drivers/block/ub.c b/drivers/block/ub.c
26961index 0e376d4..7333b9e 100644
26962--- a/drivers/block/ub.c
26963+++ b/drivers/block/ub.c
26964@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
26965 static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
26966 unsigned int cmd, unsigned long arg)
26967 {
26968- struct gendisk *disk = bdev->bd_disk;
26969 void __user *usermem = (void __user *) arg;
26970 int ret;
26971
26972 mutex_lock(&ub_mutex);
26973- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
26974+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
26975 mutex_unlock(&ub_mutex);
26976
26977 return ret;
26978diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
26979index 079c088..5d7a934 100644
26980--- a/drivers/block/virtio_blk.c
26981+++ b/drivers/block/virtio_blk.c
26982@@ -236,8 +236,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
26983 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
26984 return -ENOTTY;
26985
26986- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
26987- (void __user *)data);
26988+ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
26989+ (void __user *)data);
26990 }
26991
26992 /* We provide getgeo only to please some old bootloader/partitioning tools */
26993diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
26994index f997c27..cedb231 100644
26995--- a/drivers/cdrom/cdrom.c
26996+++ b/drivers/cdrom/cdrom.c
26997@@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
26998 {
26999 void __user *argp = (void __user *)arg;
27000 int ret;
27001- struct gendisk *disk = bdev->bd_disk;
27002
27003 /*
27004 * Try the generic SCSI command ioctl's first.
27005 */
27006- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
27007+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
27008 if (ret != -ENOTTY)
27009 return ret;
27010
27011diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27012index 423fd56..06d3be0 100644
27013--- a/drivers/char/Kconfig
27014+++ b/drivers/char/Kconfig
27015@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27016
27017 config DEVKMEM
27018 bool "/dev/kmem virtual device support"
27019- default y
27020+ default n
27021+ depends on !GRKERNSEC_KMEM
27022 help
27023 Say Y here if you want to support the /dev/kmem device. The
27024 /dev/kmem device is rarely used, but can be used for certain
27025@@ -596,6 +597,7 @@ config DEVPORT
27026 bool
27027 depends on !M68K
27028 depends on ISA || PCI
27029+ depends on !GRKERNSEC_KMEM
27030 default y
27031
27032 source "drivers/s390/char/Kconfig"
27033diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27034index 2e04433..22afc64 100644
27035--- a/drivers/char/agp/frontend.c
27036+++ b/drivers/char/agp/frontend.c
27037@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27038 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27039 return -EFAULT;
27040
27041- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27042+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27043 return -EFAULT;
27044
27045 client = agp_find_client_by_pid(reserve.pid);
27046diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27047index 095ab90..afad0a4 100644
27048--- a/drivers/char/briq_panel.c
27049+++ b/drivers/char/briq_panel.c
27050@@ -9,6 +9,7 @@
27051 #include <linux/types.h>
27052 #include <linux/errno.h>
27053 #include <linux/tty.h>
27054+#include <linux/mutex.h>
27055 #include <linux/timer.h>
27056 #include <linux/kernel.h>
27057 #include <linux/wait.h>
27058@@ -34,6 +35,7 @@ static int vfd_is_open;
27059 static unsigned char vfd[40];
27060 static int vfd_cursor;
27061 static unsigned char ledpb, led;
27062+static DEFINE_MUTEX(vfd_mutex);
27063
27064 static void update_vfd(void)
27065 {
27066@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27067 if (!vfd_is_open)
27068 return -EBUSY;
27069
27070+ mutex_lock(&vfd_mutex);
27071 for (;;) {
27072 char c;
27073 if (!indx)
27074 break;
27075- if (get_user(c, buf))
27076+ if (get_user(c, buf)) {
27077+ mutex_unlock(&vfd_mutex);
27078 return -EFAULT;
27079+ }
27080 if (esc) {
27081 set_led(c);
27082 esc = 0;
27083@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27084 buf++;
27085 }
27086 update_vfd();
27087+ mutex_unlock(&vfd_mutex);
27088
27089 return len;
27090 }
27091diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27092index f773a9d..65cd683 100644
27093--- a/drivers/char/genrtc.c
27094+++ b/drivers/char/genrtc.c
27095@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27096 switch (cmd) {
27097
27098 case RTC_PLL_GET:
27099+ memset(&pll, 0, sizeof(pll));
27100 if (get_rtc_pll(&pll))
27101 return -EINVAL;
27102 else
27103diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27104index 0833896..cccce52 100644
27105--- a/drivers/char/hpet.c
27106+++ b/drivers/char/hpet.c
27107@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27108 }
27109
27110 static int
27111-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27112+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27113 struct hpet_info *info)
27114 {
27115 struct hpet_timer __iomem *timer;
27116diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27117index 58c0e63..25aed94 100644
27118--- a/drivers/char/ipmi/ipmi_msghandler.c
27119+++ b/drivers/char/ipmi/ipmi_msghandler.c
27120@@ -415,7 +415,7 @@ struct ipmi_smi {
27121 struct proc_dir_entry *proc_dir;
27122 char proc_dir_name[10];
27123
27124- atomic_t stats[IPMI_NUM_STATS];
27125+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27126
27127 /*
27128 * run_to_completion duplicate of smb_info, smi_info
27129@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27130
27131
27132 #define ipmi_inc_stat(intf, stat) \
27133- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27134+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27135 #define ipmi_get_stat(intf, stat) \
27136- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27137+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27138
27139 static int is_lan_addr(struct ipmi_addr *addr)
27140 {
27141@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27142 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27143 init_waitqueue_head(&intf->waitq);
27144 for (i = 0; i < IPMI_NUM_STATS; i++)
27145- atomic_set(&intf->stats[i], 0);
27146+ atomic_set_unchecked(&intf->stats[i], 0);
27147
27148 intf->proc_dir = NULL;
27149
27150@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
27151 struct ipmi_smi_msg smi_msg;
27152 struct ipmi_recv_msg recv_msg;
27153
27154+ pax_track_stack();
27155+
27156 si = (struct ipmi_system_interface_addr *) &addr;
27157 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27158 si->channel = IPMI_BMC_CHANNEL;
27159diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27160index 9397ab4..d01bee1 100644
27161--- a/drivers/char/ipmi/ipmi_si_intf.c
27162+++ b/drivers/char/ipmi/ipmi_si_intf.c
27163@@ -277,7 +277,7 @@ struct smi_info {
27164 unsigned char slave_addr;
27165
27166 /* Counters and things for the proc filesystem. */
27167- atomic_t stats[SI_NUM_STATS];
27168+ atomic_unchecked_t stats[SI_NUM_STATS];
27169
27170 struct task_struct *thread;
27171
27172@@ -286,9 +286,9 @@ struct smi_info {
27173 };
27174
27175 #define smi_inc_stat(smi, stat) \
27176- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27177+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27178 #define smi_get_stat(smi, stat) \
27179- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27180+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27181
27182 #define SI_MAX_PARMS 4
27183
27184@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27185 atomic_set(&new_smi->req_events, 0);
27186 new_smi->run_to_completion = 0;
27187 for (i = 0; i < SI_NUM_STATS; i++)
27188- atomic_set(&new_smi->stats[i], 0);
27189+ atomic_set_unchecked(&new_smi->stats[i], 0);
27190
27191 new_smi->interrupt_disabled = 1;
27192 atomic_set(&new_smi->stop_operation, 0);
27193diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27194index 1aeaaba..e018570 100644
27195--- a/drivers/char/mbcs.c
27196+++ b/drivers/char/mbcs.c
27197@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27198 return 0;
27199 }
27200
27201-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27202+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27203 {
27204 .part_num = MBCS_PART_NUM,
27205 .mfg_num = MBCS_MFG_NUM,
27206diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27207index 8fc04b4..cebdeec 100644
27208--- a/drivers/char/mem.c
27209+++ b/drivers/char/mem.c
27210@@ -18,6 +18,7 @@
27211 #include <linux/raw.h>
27212 #include <linux/tty.h>
27213 #include <linux/capability.h>
27214+#include <linux/security.h>
27215 #include <linux/ptrace.h>
27216 #include <linux/device.h>
27217 #include <linux/highmem.h>
27218@@ -34,6 +35,10 @@
27219 # include <linux/efi.h>
27220 #endif
27221
27222+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27223+extern const struct file_operations grsec_fops;
27224+#endif
27225+
27226 static inline unsigned long size_inside_page(unsigned long start,
27227 unsigned long size)
27228 {
27229@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27230
27231 while (cursor < to) {
27232 if (!devmem_is_allowed(pfn)) {
27233+#ifdef CONFIG_GRKERNSEC_KMEM
27234+ gr_handle_mem_readwrite(from, to);
27235+#else
27236 printk(KERN_INFO
27237 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27238 current->comm, from, to);
27239+#endif
27240 return 0;
27241 }
27242 cursor += PAGE_SIZE;
27243@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27244 }
27245 return 1;
27246 }
27247+#elif defined(CONFIG_GRKERNSEC_KMEM)
27248+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27249+{
27250+ return 0;
27251+}
27252 #else
27253 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27254 {
27255@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27256
27257 while (count > 0) {
27258 unsigned long remaining;
27259+ char *temp;
27260
27261 sz = size_inside_page(p, count);
27262
27263@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27264 if (!ptr)
27265 return -EFAULT;
27266
27267- remaining = copy_to_user(buf, ptr, sz);
27268+#ifdef CONFIG_PAX_USERCOPY
27269+ temp = kmalloc(sz, GFP_KERNEL);
27270+ if (!temp) {
27271+ unxlate_dev_mem_ptr(p, ptr);
27272+ return -ENOMEM;
27273+ }
27274+ memcpy(temp, ptr, sz);
27275+#else
27276+ temp = ptr;
27277+#endif
27278+
27279+ remaining = copy_to_user(buf, temp, sz);
27280+
27281+#ifdef CONFIG_PAX_USERCOPY
27282+ kfree(temp);
27283+#endif
27284+
27285 unxlate_dev_mem_ptr(p, ptr);
27286 if (remaining)
27287 return -EFAULT;
27288@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27289 size_t count, loff_t *ppos)
27290 {
27291 unsigned long p = *ppos;
27292- ssize_t low_count, read, sz;
27293+ ssize_t low_count, read, sz, err = 0;
27294 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27295- int err = 0;
27296
27297 read = 0;
27298 if (p < (unsigned long) high_memory) {
27299@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27300 }
27301 #endif
27302 while (low_count > 0) {
27303+ char *temp;
27304+
27305 sz = size_inside_page(p, low_count);
27306
27307 /*
27308@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27309 */
27310 kbuf = xlate_dev_kmem_ptr((char *)p);
27311
27312- if (copy_to_user(buf, kbuf, sz))
27313+#ifdef CONFIG_PAX_USERCOPY
27314+ temp = kmalloc(sz, GFP_KERNEL);
27315+ if (!temp)
27316+ return -ENOMEM;
27317+ memcpy(temp, kbuf, sz);
27318+#else
27319+ temp = kbuf;
27320+#endif
27321+
27322+ err = copy_to_user(buf, temp, sz);
27323+
27324+#ifdef CONFIG_PAX_USERCOPY
27325+ kfree(temp);
27326+#endif
27327+
27328+ if (err)
27329 return -EFAULT;
27330 buf += sz;
27331 p += sz;
27332@@ -866,6 +913,9 @@ static const struct memdev {
27333 #ifdef CONFIG_CRASH_DUMP
27334 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27335 #endif
27336+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27337+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27338+#endif
27339 };
27340
27341 static int memory_open(struct inode *inode, struct file *filp)
27342diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27343index da3cfee..a5a6606 100644
27344--- a/drivers/char/nvram.c
27345+++ b/drivers/char/nvram.c
27346@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27347
27348 spin_unlock_irq(&rtc_lock);
27349
27350- if (copy_to_user(buf, contents, tmp - contents))
27351+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27352 return -EFAULT;
27353
27354 *ppos = i;
27355diff --git a/drivers/char/random.c b/drivers/char/random.c
27356index c35a785..6d82202 100644
27357--- a/drivers/char/random.c
27358+++ b/drivers/char/random.c
27359@@ -261,8 +261,13 @@
27360 /*
27361 * Configuration information
27362 */
27363+#ifdef CONFIG_GRKERNSEC_RANDNET
27364+#define INPUT_POOL_WORDS 512
27365+#define OUTPUT_POOL_WORDS 128
27366+#else
27367 #define INPUT_POOL_WORDS 128
27368 #define OUTPUT_POOL_WORDS 32
27369+#endif
27370 #define SEC_XFER_SIZE 512
27371 #define EXTRACT_SIZE 10
27372
27373@@ -300,10 +305,17 @@ static struct poolinfo {
27374 int poolwords;
27375 int tap1, tap2, tap3, tap4, tap5;
27376 } poolinfo_table[] = {
27377+#ifdef CONFIG_GRKERNSEC_RANDNET
27378+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27379+ { 512, 411, 308, 208, 104, 1 },
27380+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27381+ { 128, 103, 76, 51, 25, 1 },
27382+#else
27383 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27384 { 128, 103, 76, 51, 25, 1 },
27385 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27386 { 32, 26, 20, 14, 7, 1 },
27387+#endif
27388 #if 0
27389 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27390 { 2048, 1638, 1231, 819, 411, 1 },
27391@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27392
27393 extract_buf(r, tmp);
27394 i = min_t(int, nbytes, EXTRACT_SIZE);
27395- if (copy_to_user(buf, tmp, i)) {
27396+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27397 ret = -EFAULT;
27398 break;
27399 }
27400@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27401 #include <linux/sysctl.h>
27402
27403 static int min_read_thresh = 8, min_write_thresh;
27404-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27405+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27406 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27407 static char sysctl_bootid[16];
27408
27409diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27410index 1ee8ce7..b778bef 100644
27411--- a/drivers/char/sonypi.c
27412+++ b/drivers/char/sonypi.c
27413@@ -55,6 +55,7 @@
27414 #include <asm/uaccess.h>
27415 #include <asm/io.h>
27416 #include <asm/system.h>
27417+#include <asm/local.h>
27418
27419 #include <linux/sonypi.h>
27420
27421@@ -491,7 +492,7 @@ static struct sonypi_device {
27422 spinlock_t fifo_lock;
27423 wait_queue_head_t fifo_proc_list;
27424 struct fasync_struct *fifo_async;
27425- int open_count;
27426+ local_t open_count;
27427 int model;
27428 struct input_dev *input_jog_dev;
27429 struct input_dev *input_key_dev;
27430@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27431 static int sonypi_misc_release(struct inode *inode, struct file *file)
27432 {
27433 mutex_lock(&sonypi_device.lock);
27434- sonypi_device.open_count--;
27435+ local_dec(&sonypi_device.open_count);
27436 mutex_unlock(&sonypi_device.lock);
27437 return 0;
27438 }
27439@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27440 {
27441 mutex_lock(&sonypi_device.lock);
27442 /* Flush input queue on first open */
27443- if (!sonypi_device.open_count)
27444+ if (!local_read(&sonypi_device.open_count))
27445 kfifo_reset(&sonypi_device.fifo);
27446- sonypi_device.open_count++;
27447+ local_inc(&sonypi_device.open_count);
27448 mutex_unlock(&sonypi_device.lock);
27449
27450 return 0;
27451diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27452index 9ca5c02..7ce352c 100644
27453--- a/drivers/char/tpm/tpm.c
27454+++ b/drivers/char/tpm/tpm.c
27455@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27456 chip->vendor.req_complete_val)
27457 goto out_recv;
27458
27459- if ((status == chip->vendor.req_canceled)) {
27460+ if (status == chip->vendor.req_canceled) {
27461 dev_err(chip->dev, "Operation Canceled\n");
27462 rc = -ECANCELED;
27463 goto out;
27464@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
27465
27466 struct tpm_chip *chip = dev_get_drvdata(dev);
27467
27468+ pax_track_stack();
27469+
27470 tpm_cmd.header.in = tpm_readpubek_header;
27471 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27472 "attempting to read the PUBEK");
27473diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27474index 0636520..169c1d0 100644
27475--- a/drivers/char/tpm/tpm_bios.c
27476+++ b/drivers/char/tpm/tpm_bios.c
27477@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27478 event = addr;
27479
27480 if ((event->event_type == 0 && event->event_size == 0) ||
27481- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27482+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27483 return NULL;
27484
27485 return addr;
27486@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27487 return NULL;
27488
27489 if ((event->event_type == 0 && event->event_size == 0) ||
27490- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27491+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27492 return NULL;
27493
27494 (*pos)++;
27495@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27496 int i;
27497
27498 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27499- seq_putc(m, data[i]);
27500+ if (!seq_putc(m, data[i]))
27501+ return -EFAULT;
27502
27503 return 0;
27504 }
27505@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27506 log->bios_event_log_end = log->bios_event_log + len;
27507
27508 virt = acpi_os_map_memory(start, len);
27509+ if (!virt) {
27510+ kfree(log->bios_event_log);
27511+ log->bios_event_log = NULL;
27512+ return -EFAULT;
27513+ }
27514
27515- memcpy(log->bios_event_log, virt, len);
27516+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27517
27518 acpi_os_unmap_memory(virt, len);
27519 return 0;
27520diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27521index fb68b12..0f6c6ca 100644
27522--- a/drivers/char/virtio_console.c
27523+++ b/drivers/char/virtio_console.c
27524@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27525 if (to_user) {
27526 ssize_t ret;
27527
27528- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27529+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27530 if (ret)
27531 return -EFAULT;
27532 } else {
27533@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27534 if (!port_has_data(port) && !port->host_connected)
27535 return 0;
27536
27537- return fill_readbuf(port, ubuf, count, true);
27538+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27539 }
27540
27541 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27542diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
27543index a84250a..68c725e 100644
27544--- a/drivers/crypto/hifn_795x.c
27545+++ b/drivers/crypto/hifn_795x.c
27546@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
27547 0xCA, 0x34, 0x2B, 0x2E};
27548 struct scatterlist sg;
27549
27550+ pax_track_stack();
27551+
27552 memset(src, 0, sizeof(src));
27553 memset(ctx.key, 0, sizeof(ctx.key));
27554
27555diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
27556index db33d30..7823369 100644
27557--- a/drivers/crypto/padlock-aes.c
27558+++ b/drivers/crypto/padlock-aes.c
27559@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
27560 struct crypto_aes_ctx gen_aes;
27561 int cpu;
27562
27563+ pax_track_stack();
27564+
27565 if (key_len % 8) {
27566 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27567 return -EINVAL;
27568diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27569index 9a8bebc..b1e4989 100644
27570--- a/drivers/edac/amd64_edac.c
27571+++ b/drivers/edac/amd64_edac.c
27572@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27573 * PCI core identifies what devices are on a system during boot, and then
27574 * inquiry this table to see if this driver is for a given device found.
27575 */
27576-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27577+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27578 {
27579 .vendor = PCI_VENDOR_ID_AMD,
27580 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27581diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27582index e47e73b..348e0bd 100644
27583--- a/drivers/edac/amd76x_edac.c
27584+++ b/drivers/edac/amd76x_edac.c
27585@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27586 edac_mc_free(mci);
27587 }
27588
27589-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27590+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27591 {
27592 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27593 AMD762},
27594diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27595index 1af531a..3a8ff27 100644
27596--- a/drivers/edac/e752x_edac.c
27597+++ b/drivers/edac/e752x_edac.c
27598@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27599 edac_mc_free(mci);
27600 }
27601
27602-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27603+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27604 {
27605 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27606 E7520},
27607diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27608index 6ffb6d2..383d8d7 100644
27609--- a/drivers/edac/e7xxx_edac.c
27610+++ b/drivers/edac/e7xxx_edac.c
27611@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27612 edac_mc_free(mci);
27613 }
27614
27615-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27616+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27617 {
27618 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27619 E7205},
27620diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27621index 495198a..ac08c85 100644
27622--- a/drivers/edac/edac_pci_sysfs.c
27623+++ b/drivers/edac/edac_pci_sysfs.c
27624@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27625 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27626 static int edac_pci_poll_msec = 1000; /* one second workq period */
27627
27628-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27629-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27630+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27631+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27632
27633 static struct kobject *edac_pci_top_main_kobj;
27634 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27635@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27636 edac_printk(KERN_CRIT, EDAC_PCI,
27637 "Signaled System Error on %s\n",
27638 pci_name(dev));
27639- atomic_inc(&pci_nonparity_count);
27640+ atomic_inc_unchecked(&pci_nonparity_count);
27641 }
27642
27643 if (status & (PCI_STATUS_PARITY)) {
27644@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27645 "Master Data Parity Error on %s\n",
27646 pci_name(dev));
27647
27648- atomic_inc(&pci_parity_count);
27649+ atomic_inc_unchecked(&pci_parity_count);
27650 }
27651
27652 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27653@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27654 "Detected Parity Error on %s\n",
27655 pci_name(dev));
27656
27657- atomic_inc(&pci_parity_count);
27658+ atomic_inc_unchecked(&pci_parity_count);
27659 }
27660 }
27661
27662@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27663 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27664 "Signaled System Error on %s\n",
27665 pci_name(dev));
27666- atomic_inc(&pci_nonparity_count);
27667+ atomic_inc_unchecked(&pci_nonparity_count);
27668 }
27669
27670 if (status & (PCI_STATUS_PARITY)) {
27671@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27672 "Master Data Parity Error on "
27673 "%s\n", pci_name(dev));
27674
27675- atomic_inc(&pci_parity_count);
27676+ atomic_inc_unchecked(&pci_parity_count);
27677 }
27678
27679 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27680@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27681 "Detected Parity Error on %s\n",
27682 pci_name(dev));
27683
27684- atomic_inc(&pci_parity_count);
27685+ atomic_inc_unchecked(&pci_parity_count);
27686 }
27687 }
27688 }
27689@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27690 if (!check_pci_errors)
27691 return;
27692
27693- before_count = atomic_read(&pci_parity_count);
27694+ before_count = atomic_read_unchecked(&pci_parity_count);
27695
27696 /* scan all PCI devices looking for a Parity Error on devices and
27697 * bridges.
27698@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27699 /* Only if operator has selected panic on PCI Error */
27700 if (edac_pci_get_panic_on_pe()) {
27701 /* If the count is different 'after' from 'before' */
27702- if (before_count != atomic_read(&pci_parity_count))
27703+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27704 panic("EDAC: PCI Parity Error");
27705 }
27706 }
27707diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27708index c0510b3..6e2a954 100644
27709--- a/drivers/edac/i3000_edac.c
27710+++ b/drivers/edac/i3000_edac.c
27711@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27712 edac_mc_free(mci);
27713 }
27714
27715-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27716+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27717 {
27718 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27719 I3000},
27720diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27721index aa08497..7e6822a 100644
27722--- a/drivers/edac/i3200_edac.c
27723+++ b/drivers/edac/i3200_edac.c
27724@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27725 edac_mc_free(mci);
27726 }
27727
27728-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27729+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27730 {
27731 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27732 I3200},
27733diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27734index 4dc3ac2..67d05a6 100644
27735--- a/drivers/edac/i5000_edac.c
27736+++ b/drivers/edac/i5000_edac.c
27737@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27738 *
27739 * The "E500P" device is the first device supported.
27740 */
27741-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27742+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27743 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27744 .driver_data = I5000P},
27745
27746diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27747index bcbdeec..9886d16 100644
27748--- a/drivers/edac/i5100_edac.c
27749+++ b/drivers/edac/i5100_edac.c
27750@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27751 edac_mc_free(mci);
27752 }
27753
27754-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27755+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27756 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27757 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27758 { 0, }
27759diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27760index 74d6ec34..baff517 100644
27761--- a/drivers/edac/i5400_edac.c
27762+++ b/drivers/edac/i5400_edac.c
27763@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27764 *
27765 * The "E500P" device is the first device supported.
27766 */
27767-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27768+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27769 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27770 {0,} /* 0 terminated list. */
27771 };
27772diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27773index a76fe83..15479e6 100644
27774--- a/drivers/edac/i7300_edac.c
27775+++ b/drivers/edac/i7300_edac.c
27776@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27777 *
27778 * Has only 8086:360c PCI ID
27779 */
27780-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27781+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27782 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27783 {0,} /* 0 terminated list. */
27784 };
27785diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27786index f6cf448..3f612e9 100644
27787--- a/drivers/edac/i7core_edac.c
27788+++ b/drivers/edac/i7core_edac.c
27789@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = {
27790 /*
27791 * pci_device_id table for which devices we are looking for
27792 */
27793-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27794+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27795 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27796 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27797 {0,} /* 0 terminated list. */
27798diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27799index 4329d39..f3022ef 100644
27800--- a/drivers/edac/i82443bxgx_edac.c
27801+++ b/drivers/edac/i82443bxgx_edac.c
27802@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27803
27804 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27805
27806-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27807+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27808 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27809 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27810 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27811diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27812index 931a057..fd28340 100644
27813--- a/drivers/edac/i82860_edac.c
27814+++ b/drivers/edac/i82860_edac.c
27815@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27816 edac_mc_free(mci);
27817 }
27818
27819-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27820+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27821 {
27822 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27823 I82860},
27824diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27825index 33864c6..01edc61 100644
27826--- a/drivers/edac/i82875p_edac.c
27827+++ b/drivers/edac/i82875p_edac.c
27828@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27829 edac_mc_free(mci);
27830 }
27831
27832-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27833+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27834 {
27835 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27836 I82875P},
27837diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27838index a5da732..983363b 100644
27839--- a/drivers/edac/i82975x_edac.c
27840+++ b/drivers/edac/i82975x_edac.c
27841@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27842 edac_mc_free(mci);
27843 }
27844
27845-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27846+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27847 {
27848 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27849 I82975X
27850diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27851index 795a320..3bbc3d3 100644
27852--- a/drivers/edac/mce_amd.h
27853+++ b/drivers/edac/mce_amd.h
27854@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27855 bool (*dc_mce)(u16, u8);
27856 bool (*ic_mce)(u16, u8);
27857 bool (*nb_mce)(u16, u8);
27858-};
27859+} __no_const;
27860
27861 void amd_report_gart_errors(bool);
27862 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
27863diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27864index b153674..ad2ba9b 100644
27865--- a/drivers/edac/r82600_edac.c
27866+++ b/drivers/edac/r82600_edac.c
27867@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27868 edac_mc_free(mci);
27869 }
27870
27871-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27872+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27873 {
27874 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27875 },
27876diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27877index b6f47de..c5acf3a 100644
27878--- a/drivers/edac/x38_edac.c
27879+++ b/drivers/edac/x38_edac.c
27880@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27881 edac_mc_free(mci);
27882 }
27883
27884-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27885+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27886 {
27887 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27888 X38},
27889diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27890index 85661b0..c784559a 100644
27891--- a/drivers/firewire/core-card.c
27892+++ b/drivers/firewire/core-card.c
27893@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27894
27895 void fw_core_remove_card(struct fw_card *card)
27896 {
27897- struct fw_card_driver dummy_driver = dummy_driver_template;
27898+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27899
27900 card->driver->update_phy_reg(card, 4,
27901 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27902diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27903index 4799393..37bd3ab 100644
27904--- a/drivers/firewire/core-cdev.c
27905+++ b/drivers/firewire/core-cdev.c
27906@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27907 int ret;
27908
27909 if ((request->channels == 0 && request->bandwidth == 0) ||
27910- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27911- request->bandwidth < 0)
27912+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27913 return -EINVAL;
27914
27915 r = kmalloc(sizeof(*r), GFP_KERNEL);
27916diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27917index 334b82a..ea5261d 100644
27918--- a/drivers/firewire/core-transaction.c
27919+++ b/drivers/firewire/core-transaction.c
27920@@ -37,6 +37,7 @@
27921 #include <linux/timer.h>
27922 #include <linux/types.h>
27923 #include <linux/workqueue.h>
27924+#include <linux/sched.h>
27925
27926 #include <asm/byteorder.h>
27927
27928@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
27929 struct transaction_callback_data d;
27930 struct fw_transaction t;
27931
27932+ pax_track_stack();
27933+
27934 init_timer_on_stack(&t.split_timeout_timer);
27935 init_completion(&d.done);
27936 d.payload = payload;
27937diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27938index b45be57..5fad18b 100644
27939--- a/drivers/firewire/core.h
27940+++ b/drivers/firewire/core.h
27941@@ -101,6 +101,7 @@ struct fw_card_driver {
27942
27943 int (*stop_iso)(struct fw_iso_context *ctx);
27944 };
27945+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27946
27947 void fw_card_initialize(struct fw_card *card,
27948 const struct fw_card_driver *driver, struct device *device);
27949diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27950index bcb1126..2cc2121 100644
27951--- a/drivers/firmware/dmi_scan.c
27952+++ b/drivers/firmware/dmi_scan.c
27953@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27954 }
27955 }
27956 else {
27957- /*
27958- * no iounmap() for that ioremap(); it would be a no-op, but
27959- * it's so early in setup that sucker gets confused into doing
27960- * what it shouldn't if we actually call it.
27961- */
27962 p = dmi_ioremap(0xF0000, 0x10000);
27963 if (p == NULL)
27964 goto error;
27965@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27966 if (buf == NULL)
27967 return -1;
27968
27969- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27970+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27971
27972 iounmap(buf);
27973 return 0;
27974diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27975index 98723cb..10ca85b 100644
27976--- a/drivers/gpio/gpio-vr41xx.c
27977+++ b/drivers/gpio/gpio-vr41xx.c
27978@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27979 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27980 maskl, pendl, maskh, pendh);
27981
27982- atomic_inc(&irq_err_count);
27983+ atomic_inc_unchecked(&irq_err_count);
27984
27985 return -EINVAL;
27986 }
27987diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27988index 2410c40..2d03563 100644
27989--- a/drivers/gpu/drm/drm_crtc.c
27990+++ b/drivers/gpu/drm/drm_crtc.c
27991@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27992 */
27993 if ((out_resp->count_modes >= mode_count) && mode_count) {
27994 copied = 0;
27995- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27996+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27997 list_for_each_entry(mode, &connector->modes, head) {
27998 drm_crtc_convert_to_umode(&u_mode, mode);
27999 if (copy_to_user(mode_ptr + copied,
28000@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28001
28002 if ((out_resp->count_props >= props_count) && props_count) {
28003 copied = 0;
28004- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28005- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28006+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28007+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28008 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28009 if (connector->property_ids[i] != 0) {
28010 if (put_user(connector->property_ids[i],
28011@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28012
28013 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28014 copied = 0;
28015- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28016+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28017 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28018 if (connector->encoder_ids[i] != 0) {
28019 if (put_user(connector->encoder_ids[i],
28020@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28021 }
28022
28023 for (i = 0; i < crtc_req->count_connectors; i++) {
28024- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28025+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28026 if (get_user(out_id, &set_connectors_ptr[i])) {
28027 ret = -EFAULT;
28028 goto out;
28029@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28030 fb = obj_to_fb(obj);
28031
28032 num_clips = r->num_clips;
28033- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28034+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28035
28036 if (!num_clips != !clips_ptr) {
28037 ret = -EINVAL;
28038@@ -2276,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28039 out_resp->flags = property->flags;
28040
28041 if ((out_resp->count_values >= value_count) && value_count) {
28042- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28043+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28044 for (i = 0; i < value_count; i++) {
28045 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28046 ret = -EFAULT;
28047@@ -2289,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28048 if (property->flags & DRM_MODE_PROP_ENUM) {
28049 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28050 copied = 0;
28051- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28052+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28053 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28054
28055 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28056@@ -2312,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28057 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28058 copied = 0;
28059 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28060- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28061+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28062
28063 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28064 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28065@@ -2373,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28066 struct drm_mode_get_blob *out_resp = data;
28067 struct drm_property_blob *blob;
28068 int ret = 0;
28069- void *blob_ptr;
28070+ void __user *blob_ptr;
28071
28072 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28073 return -EINVAL;
28074@@ -2387,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28075 blob = obj_to_blob(obj);
28076
28077 if (out_resp->length == blob->length) {
28078- blob_ptr = (void *)(unsigned long)out_resp->data;
28079+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
28080 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28081 ret = -EFAULT;
28082 goto done;
28083diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28084index f88a9b2..8f4078f 100644
28085--- a/drivers/gpu/drm/drm_crtc_helper.c
28086+++ b/drivers/gpu/drm/drm_crtc_helper.c
28087@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28088 struct drm_crtc *tmp;
28089 int crtc_mask = 1;
28090
28091- WARN(!crtc, "checking null crtc?\n");
28092+ BUG_ON(!crtc);
28093
28094 dev = crtc->dev;
28095
28096@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
28097 struct drm_encoder *encoder;
28098 bool ret = true;
28099
28100+ pax_track_stack();
28101+
28102 crtc->enabled = drm_helper_crtc_in_use(crtc);
28103 if (!crtc->enabled)
28104 return true;
28105diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28106index 93a112d..c8b065d 100644
28107--- a/drivers/gpu/drm/drm_drv.c
28108+++ b/drivers/gpu/drm/drm_drv.c
28109@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
28110 /**
28111 * Copy and IOCTL return string to user space
28112 */
28113-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28114+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28115 {
28116 int len;
28117
28118@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
28119
28120 dev = file_priv->minor->dev;
28121 atomic_inc(&dev->ioctl_count);
28122- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28123+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28124 ++file_priv->ioctl_count;
28125
28126 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28127diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28128index 2ec7d48..be14bb1 100644
28129--- a/drivers/gpu/drm/drm_fops.c
28130+++ b/drivers/gpu/drm/drm_fops.c
28131@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev)
28132 }
28133
28134 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28135- atomic_set(&dev->counts[i], 0);
28136+ atomic_set_unchecked(&dev->counts[i], 0);
28137
28138 dev->sigdata.lock = NULL;
28139
28140@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
28141
28142 retcode = drm_open_helper(inode, filp, dev);
28143 if (!retcode) {
28144- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28145- if (!dev->open_count++)
28146+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28147+ if (local_inc_return(&dev->open_count) == 1)
28148 retcode = drm_setup(dev);
28149 }
28150 if (!retcode) {
28151@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp)
28152
28153 mutex_lock(&drm_global_mutex);
28154
28155- DRM_DEBUG("open_count = %d\n", dev->open_count);
28156+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28157
28158 if (dev->driver->preclose)
28159 dev->driver->preclose(dev, file_priv);
28160@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
28161 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28162 task_pid_nr(current),
28163 (long)old_encode_dev(file_priv->minor->device),
28164- dev->open_count);
28165+ local_read(&dev->open_count));
28166
28167 /* if the master has gone away we can't do anything with the lock */
28168 if (file_priv->minor->master)
28169@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp)
28170 * End inline drm_release
28171 */
28172
28173- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28174- if (!--dev->open_count) {
28175+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28176+ if (local_dec_and_test(&dev->open_count)) {
28177 if (atomic_read(&dev->ioctl_count)) {
28178 DRM_ERROR("Device busy: %d\n",
28179 atomic_read(&dev->ioctl_count));
28180diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28181index c87dc96..326055d 100644
28182--- a/drivers/gpu/drm/drm_global.c
28183+++ b/drivers/gpu/drm/drm_global.c
28184@@ -36,7 +36,7 @@
28185 struct drm_global_item {
28186 struct mutex mutex;
28187 void *object;
28188- int refcount;
28189+ atomic_t refcount;
28190 };
28191
28192 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28193@@ -49,7 +49,7 @@ void drm_global_init(void)
28194 struct drm_global_item *item = &glob[i];
28195 mutex_init(&item->mutex);
28196 item->object = NULL;
28197- item->refcount = 0;
28198+ atomic_set(&item->refcount, 0);
28199 }
28200 }
28201
28202@@ -59,7 +59,7 @@ void drm_global_release(void)
28203 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28204 struct drm_global_item *item = &glob[i];
28205 BUG_ON(item->object != NULL);
28206- BUG_ON(item->refcount != 0);
28207+ BUG_ON(atomic_read(&item->refcount) != 0);
28208 }
28209 }
28210
28211@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28212 void *object;
28213
28214 mutex_lock(&item->mutex);
28215- if (item->refcount == 0) {
28216+ if (atomic_read(&item->refcount) == 0) {
28217 item->object = kzalloc(ref->size, GFP_KERNEL);
28218 if (unlikely(item->object == NULL)) {
28219 ret = -ENOMEM;
28220@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28221 goto out_err;
28222
28223 }
28224- ++item->refcount;
28225+ atomic_inc(&item->refcount);
28226 ref->object = item->object;
28227 object = item->object;
28228 mutex_unlock(&item->mutex);
28229@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28230 struct drm_global_item *item = &glob[ref->global_type];
28231
28232 mutex_lock(&item->mutex);
28233- BUG_ON(item->refcount == 0);
28234+ BUG_ON(atomic_read(&item->refcount) == 0);
28235 BUG_ON(ref->object != item->object);
28236- if (--item->refcount == 0) {
28237+ if (atomic_dec_and_test(&item->refcount)) {
28238 ref->release(ref);
28239 item->object = NULL;
28240 }
28241diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28242index ab1162d..42587b2 100644
28243--- a/drivers/gpu/drm/drm_info.c
28244+++ b/drivers/gpu/drm/drm_info.c
28245@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28246 struct drm_local_map *map;
28247 struct drm_map_list *r_list;
28248
28249- /* Hardcoded from _DRM_FRAME_BUFFER,
28250- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28251- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28252- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28253+ static const char * const types[] = {
28254+ [_DRM_FRAME_BUFFER] = "FB",
28255+ [_DRM_REGISTERS] = "REG",
28256+ [_DRM_SHM] = "SHM",
28257+ [_DRM_AGP] = "AGP",
28258+ [_DRM_SCATTER_GATHER] = "SG",
28259+ [_DRM_CONSISTENT] = "PCI",
28260+ [_DRM_GEM] = "GEM" };
28261 const char *type;
28262 int i;
28263
28264@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28265 map = r_list->map;
28266 if (!map)
28267 continue;
28268- if (map->type < 0 || map->type > 5)
28269+ if (map->type >= ARRAY_SIZE(types))
28270 type = "??";
28271 else
28272 type = types[map->type];
28273@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28274 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28275 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28276 vma->vm_flags & VM_IO ? 'i' : '-',
28277+#ifdef CONFIG_GRKERNSEC_HIDESYM
28278+ 0);
28279+#else
28280 vma->vm_pgoff);
28281+#endif
28282
28283 #if defined(__i386__)
28284 pgprot = pgprot_val(vma->vm_page_prot);
28285diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28286index 4a058c7..b42cd92 100644
28287--- a/drivers/gpu/drm/drm_ioc32.c
28288+++ b/drivers/gpu/drm/drm_ioc32.c
28289@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28290 request = compat_alloc_user_space(nbytes);
28291 if (!access_ok(VERIFY_WRITE, request, nbytes))
28292 return -EFAULT;
28293- list = (struct drm_buf_desc *) (request + 1);
28294+ list = (struct drm_buf_desc __user *) (request + 1);
28295
28296 if (__put_user(count, &request->count)
28297 || __put_user(list, &request->list))
28298@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28299 request = compat_alloc_user_space(nbytes);
28300 if (!access_ok(VERIFY_WRITE, request, nbytes))
28301 return -EFAULT;
28302- list = (struct drm_buf_pub *) (request + 1);
28303+ list = (struct drm_buf_pub __user *) (request + 1);
28304
28305 if (__put_user(count, &request->count)
28306 || __put_user(list, &request->list))
28307diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28308index 904d7e9..ab88581 100644
28309--- a/drivers/gpu/drm/drm_ioctl.c
28310+++ b/drivers/gpu/drm/drm_ioctl.c
28311@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28312 stats->data[i].value =
28313 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28314 else
28315- stats->data[i].value = atomic_read(&dev->counts[i]);
28316+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28317 stats->data[i].type = dev->types[i];
28318 }
28319
28320diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28321index 632ae24..244cf4a 100644
28322--- a/drivers/gpu/drm/drm_lock.c
28323+++ b/drivers/gpu/drm/drm_lock.c
28324@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28325 if (drm_lock_take(&master->lock, lock->context)) {
28326 master->lock.file_priv = file_priv;
28327 master->lock.lock_time = jiffies;
28328- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28329+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28330 break; /* Got lock */
28331 }
28332
28333@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28334 return -EINVAL;
28335 }
28336
28337- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28338+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28339
28340 if (drm_lock_free(&master->lock, lock->context)) {
28341 /* FIXME: Should really bail out here. */
28342diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28343index 8f371e8..9f85d52 100644
28344--- a/drivers/gpu/drm/i810/i810_dma.c
28345+++ b/drivers/gpu/drm/i810/i810_dma.c
28346@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28347 dma->buflist[vertex->idx],
28348 vertex->discard, vertex->used);
28349
28350- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28351- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28352+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28353+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28354 sarea_priv->last_enqueue = dev_priv->counter - 1;
28355 sarea_priv->last_dispatch = (int)hw_status[5];
28356
28357@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28358 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28359 mc->last_render);
28360
28361- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28362- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28363+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28364+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28365 sarea_priv->last_enqueue = dev_priv->counter - 1;
28366 sarea_priv->last_dispatch = (int)hw_status[5];
28367
28368diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28369index c9339f4..f5e1b9d 100644
28370--- a/drivers/gpu/drm/i810/i810_drv.h
28371+++ b/drivers/gpu/drm/i810/i810_drv.h
28372@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28373 int page_flipping;
28374
28375 wait_queue_head_t irq_queue;
28376- atomic_t irq_received;
28377- atomic_t irq_emitted;
28378+ atomic_unchecked_t irq_received;
28379+ atomic_unchecked_t irq_emitted;
28380
28381 int front_offset;
28382 } drm_i810_private_t;
28383diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28384index 3c395a5..02889c2 100644
28385--- a/drivers/gpu/drm/i915/i915_debugfs.c
28386+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28387@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28388 I915_READ(GTIMR));
28389 }
28390 seq_printf(m, "Interrupts received: %d\n",
28391- atomic_read(&dev_priv->irq_received));
28392+ atomic_read_unchecked(&dev_priv->irq_received));
28393 for (i = 0; i < I915_NUM_RINGS; i++) {
28394 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28395 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28396@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28397 return ret;
28398
28399 if (opregion->header)
28400- seq_write(m, opregion->header, OPREGION_SIZE);
28401+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28402
28403 mutex_unlock(&dev->struct_mutex);
28404
28405diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28406index c72b590..aa86f0a 100644
28407--- a/drivers/gpu/drm/i915/i915_dma.c
28408+++ b/drivers/gpu/drm/i915/i915_dma.c
28409@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28410 bool can_switch;
28411
28412 spin_lock(&dev->count_lock);
28413- can_switch = (dev->open_count == 0);
28414+ can_switch = (local_read(&dev->open_count) == 0);
28415 spin_unlock(&dev->count_lock);
28416 return can_switch;
28417 }
28418diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28419index 1a2a2d1..f280182 100644
28420--- a/drivers/gpu/drm/i915/i915_drv.h
28421+++ b/drivers/gpu/drm/i915/i915_drv.h
28422@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
28423 /* render clock increase/decrease */
28424 /* display clock increase/decrease */
28425 /* pll clock increase/decrease */
28426-};
28427+} __no_const;
28428
28429 struct intel_device_info {
28430 u8 gen;
28431@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
28432 int current_page;
28433 int page_flipping;
28434
28435- atomic_t irq_received;
28436+ atomic_unchecked_t irq_received;
28437
28438 /* protects the irq masks */
28439 spinlock_t irq_lock;
28440@@ -883,7 +883,7 @@ struct drm_i915_gem_object {
28441 * will be page flipped away on the next vblank. When it
28442 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28443 */
28444- atomic_t pending_flip;
28445+ atomic_unchecked_t pending_flip;
28446 };
28447
28448 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28449@@ -1263,7 +1263,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28450 extern void intel_teardown_gmbus(struct drm_device *dev);
28451 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28452 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28453-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28454+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28455 {
28456 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28457 }
28458diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28459index 4934cf8..1da9c84 100644
28460--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28461+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28462@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28463 i915_gem_clflush_object(obj);
28464
28465 if (obj->base.pending_write_domain)
28466- cd->flips |= atomic_read(&obj->pending_flip);
28467+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28468
28469 /* The actual obj->write_domain will be updated with
28470 * pending_write_domain after we emit the accumulated flush for all
28471@@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28472
28473 static int
28474 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28475- int count)
28476+ unsigned int count)
28477 {
28478- int i;
28479+ unsigned int i;
28480
28481 for (i = 0; i < count; i++) {
28482 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28483diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28484index 73248d0..f7bac29 100644
28485--- a/drivers/gpu/drm/i915/i915_irq.c
28486+++ b/drivers/gpu/drm/i915/i915_irq.c
28487@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28488 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28489 struct drm_i915_master_private *master_priv;
28490
28491- atomic_inc(&dev_priv->irq_received);
28492+ atomic_inc_unchecked(&dev_priv->irq_received);
28493
28494 /* disable master interrupt before clearing iir */
28495 de_ier = I915_READ(DEIER);
28496@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28497 struct drm_i915_master_private *master_priv;
28498 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28499
28500- atomic_inc(&dev_priv->irq_received);
28501+ atomic_inc_unchecked(&dev_priv->irq_received);
28502
28503 if (IS_GEN6(dev))
28504 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28505@@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28506 int ret = IRQ_NONE, pipe;
28507 bool blc_event = false;
28508
28509- atomic_inc(&dev_priv->irq_received);
28510+ atomic_inc_unchecked(&dev_priv->irq_received);
28511
28512 iir = I915_READ(IIR);
28513
28514@@ -1741,7 +1741,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28515 {
28516 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28517
28518- atomic_set(&dev_priv->irq_received, 0);
28519+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28520
28521 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28522 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28523@@ -1905,7 +1905,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28524 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28525 int pipe;
28526
28527- atomic_set(&dev_priv->irq_received, 0);
28528+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28529
28530 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28531 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28532diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28533index 07e7cf3..c75f312 100644
28534--- a/drivers/gpu/drm/i915/intel_display.c
28535+++ b/drivers/gpu/drm/i915/intel_display.c
28536@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28537
28538 wait_event(dev_priv->pending_flip_queue,
28539 atomic_read(&dev_priv->mm.wedged) ||
28540- atomic_read(&obj->pending_flip) == 0);
28541+ atomic_read_unchecked(&obj->pending_flip) == 0);
28542
28543 /* Big Hammer, we also need to ensure that any pending
28544 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28545@@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28546 obj = to_intel_framebuffer(crtc->fb)->obj;
28547 dev_priv = crtc->dev->dev_private;
28548 wait_event(dev_priv->pending_flip_queue,
28549- atomic_read(&obj->pending_flip) == 0);
28550+ atomic_read_unchecked(&obj->pending_flip) == 0);
28551 }
28552
28553 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28554@@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28555
28556 atomic_clear_mask(1 << intel_crtc->plane,
28557 &obj->pending_flip.counter);
28558- if (atomic_read(&obj->pending_flip) == 0)
28559+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28560 wake_up(&dev_priv->pending_flip_queue);
28561
28562 schedule_work(&work->work);
28563@@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28564 /* Block clients from rendering to the new back buffer until
28565 * the flip occurs and the object is no longer visible.
28566 */
28567- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28568+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28569
28570 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28571 if (ret)
28572@@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28573 return 0;
28574
28575 cleanup_pending:
28576- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28577+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28578 cleanup_objs:
28579 drm_gem_object_unreference(&work->old_fb_obj->base);
28580 drm_gem_object_unreference(&obj->base);
28581diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28582index 54558a0..2d97005 100644
28583--- a/drivers/gpu/drm/mga/mga_drv.h
28584+++ b/drivers/gpu/drm/mga/mga_drv.h
28585@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28586 u32 clear_cmd;
28587 u32 maccess;
28588
28589- atomic_t vbl_received; /**< Number of vblanks received. */
28590+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28591 wait_queue_head_t fence_queue;
28592- atomic_t last_fence_retired;
28593+ atomic_unchecked_t last_fence_retired;
28594 u32 next_fence_to_post;
28595
28596 unsigned int fb_cpp;
28597diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28598index 2581202..f230a8d9 100644
28599--- a/drivers/gpu/drm/mga/mga_irq.c
28600+++ b/drivers/gpu/drm/mga/mga_irq.c
28601@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28602 if (crtc != 0)
28603 return 0;
28604
28605- return atomic_read(&dev_priv->vbl_received);
28606+ return atomic_read_unchecked(&dev_priv->vbl_received);
28607 }
28608
28609
28610@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28611 /* VBLANK interrupt */
28612 if (status & MGA_VLINEPEN) {
28613 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28614- atomic_inc(&dev_priv->vbl_received);
28615+ atomic_inc_unchecked(&dev_priv->vbl_received);
28616 drm_handle_vblank(dev, 0);
28617 handled = 1;
28618 }
28619@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28620 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28621 MGA_WRITE(MGA_PRIMEND, prim_end);
28622
28623- atomic_inc(&dev_priv->last_fence_retired);
28624+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28625 DRM_WAKEUP(&dev_priv->fence_queue);
28626 handled = 1;
28627 }
28628@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28629 * using fences.
28630 */
28631 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28632- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28633+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28634 - *sequence) <= (1 << 23)));
28635
28636 *sequence = cur_fence;
28637diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28638index b311fab..dc11d6a 100644
28639--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28640+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28641@@ -201,7 +201,7 @@ struct methods {
28642 const char desc[8];
28643 void (*loadbios)(struct drm_device *, uint8_t *);
28644 const bool rw;
28645-};
28646+} __do_const;
28647
28648 static struct methods shadow_methods[] = {
28649 { "PRAMIN", load_vbios_pramin, true },
28650@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28651 struct bit_table {
28652 const char id;
28653 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28654-};
28655+} __no_const;
28656
28657 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28658
28659diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28660index d7d51de..7c6a7f1 100644
28661--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28662+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28663@@ -238,7 +238,7 @@ struct nouveau_channel {
28664 struct list_head pending;
28665 uint32_t sequence;
28666 uint32_t sequence_ack;
28667- atomic_t last_sequence_irq;
28668+ atomic_unchecked_t last_sequence_irq;
28669 struct nouveau_vma vma;
28670 } fence;
28671
28672@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28673 u32 handle, u16 class);
28674 void (*set_tile_region)(struct drm_device *dev, int i);
28675 void (*tlb_flush)(struct drm_device *, int engine);
28676-};
28677+} __no_const;
28678
28679 struct nouveau_instmem_engine {
28680 void *priv;
28681@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28682 struct nouveau_mc_engine {
28683 int (*init)(struct drm_device *dev);
28684 void (*takedown)(struct drm_device *dev);
28685-};
28686+} __no_const;
28687
28688 struct nouveau_timer_engine {
28689 int (*init)(struct drm_device *dev);
28690 void (*takedown)(struct drm_device *dev);
28691 uint64_t (*read)(struct drm_device *dev);
28692-};
28693+} __no_const;
28694
28695 struct nouveau_fb_engine {
28696 int num_tiles;
28697@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
28698 void (*put)(struct drm_device *, struct nouveau_mem **);
28699
28700 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28701-};
28702+} __no_const;
28703
28704 struct nouveau_engine {
28705 struct nouveau_instmem_engine instmem;
28706@@ -660,7 +660,7 @@ struct drm_nouveau_private {
28707 struct drm_global_reference mem_global_ref;
28708 struct ttm_bo_global_ref bo_global_ref;
28709 struct ttm_bo_device bdev;
28710- atomic_t validate_sequence;
28711+ atomic_unchecked_t validate_sequence;
28712 } ttm;
28713
28714 struct {
28715diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28716index ae22dfa..4f09960 100644
28717--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28718+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28719@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28720 if (USE_REFCNT(dev))
28721 sequence = nvchan_rd32(chan, 0x48);
28722 else
28723- sequence = atomic_read(&chan->fence.last_sequence_irq);
28724+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28725
28726 if (chan->fence.sequence_ack == sequence)
28727 goto out;
28728@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28729 return ret;
28730 }
28731
28732- atomic_set(&chan->fence.last_sequence_irq, 0);
28733+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28734 return 0;
28735 }
28736
28737diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28738index 5f0bc57..eb9fac8 100644
28739--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28740+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28741@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28742 int trycnt = 0;
28743 int ret, i;
28744
28745- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28746+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28747 retry:
28748 if (++trycnt > 100000) {
28749 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28750diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28751index 10656e4..59bf2a4 100644
28752--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28753+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28754@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28755 bool can_switch;
28756
28757 spin_lock(&dev->count_lock);
28758- can_switch = (dev->open_count == 0);
28759+ can_switch = (local_read(&dev->open_count) == 0);
28760 spin_unlock(&dev->count_lock);
28761 return can_switch;
28762 }
28763diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28764index dbdea8e..cd6eeeb 100644
28765--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28766+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28767@@ -554,7 +554,7 @@ static int
28768 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28769 u32 class, u32 mthd, u32 data)
28770 {
28771- atomic_set(&chan->fence.last_sequence_irq, data);
28772+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28773 return 0;
28774 }
28775
28776diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28777index 570e190..084a31a 100644
28778--- a/drivers/gpu/drm/r128/r128_cce.c
28779+++ b/drivers/gpu/drm/r128/r128_cce.c
28780@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28781
28782 /* GH: Simple idle check.
28783 */
28784- atomic_set(&dev_priv->idle_count, 0);
28785+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28786
28787 /* We don't support anything other than bus-mastering ring mode,
28788 * but the ring can be in either AGP or PCI space for the ring
28789diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28790index 930c71b..499aded 100644
28791--- a/drivers/gpu/drm/r128/r128_drv.h
28792+++ b/drivers/gpu/drm/r128/r128_drv.h
28793@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28794 int is_pci;
28795 unsigned long cce_buffers_offset;
28796
28797- atomic_t idle_count;
28798+ atomic_unchecked_t idle_count;
28799
28800 int page_flipping;
28801 int current_page;
28802 u32 crtc_offset;
28803 u32 crtc_offset_cntl;
28804
28805- atomic_t vbl_received;
28806+ atomic_unchecked_t vbl_received;
28807
28808 u32 color_fmt;
28809 unsigned int front_offset;
28810diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28811index 429d5a0..7e899ed 100644
28812--- a/drivers/gpu/drm/r128/r128_irq.c
28813+++ b/drivers/gpu/drm/r128/r128_irq.c
28814@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28815 if (crtc != 0)
28816 return 0;
28817
28818- return atomic_read(&dev_priv->vbl_received);
28819+ return atomic_read_unchecked(&dev_priv->vbl_received);
28820 }
28821
28822 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28823@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28824 /* VBLANK interrupt */
28825 if (status & R128_CRTC_VBLANK_INT) {
28826 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28827- atomic_inc(&dev_priv->vbl_received);
28828+ atomic_inc_unchecked(&dev_priv->vbl_received);
28829 drm_handle_vblank(dev, 0);
28830 return IRQ_HANDLED;
28831 }
28832diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28833index a9e33ce..09edd4b 100644
28834--- a/drivers/gpu/drm/r128/r128_state.c
28835+++ b/drivers/gpu/drm/r128/r128_state.c
28836@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28837
28838 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28839 {
28840- if (atomic_read(&dev_priv->idle_count) == 0)
28841+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28842 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28843 else
28844- atomic_set(&dev_priv->idle_count, 0);
28845+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28846 }
28847
28848 #endif
28849diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
28850index 14cc88a..cc7b3a5 100644
28851--- a/drivers/gpu/drm/radeon/atom.c
28852+++ b/drivers/gpu/drm/radeon/atom.c
28853@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
28854 char name[512];
28855 int i;
28856
28857+ pax_track_stack();
28858+
28859 if (!ctx)
28860 return NULL;
28861
28862diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28863index 5a82b6b..9e69c73 100644
28864--- a/drivers/gpu/drm/radeon/mkregtable.c
28865+++ b/drivers/gpu/drm/radeon/mkregtable.c
28866@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28867 regex_t mask_rex;
28868 regmatch_t match[4];
28869 char buf[1024];
28870- size_t end;
28871+ long end;
28872 int len;
28873 int done = 0;
28874 int r;
28875 unsigned o;
28876 struct offset *offset;
28877 char last_reg_s[10];
28878- int last_reg;
28879+ unsigned long last_reg;
28880
28881 if (regcomp
28882 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28883diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28884index 184628c..30e1725 100644
28885--- a/drivers/gpu/drm/radeon/radeon.h
28886+++ b/drivers/gpu/drm/radeon/radeon.h
28887@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28888 */
28889 struct radeon_fence_driver {
28890 uint32_t scratch_reg;
28891- atomic_t seq;
28892+ atomic_unchecked_t seq;
28893 uint32_t last_seq;
28894 unsigned long last_jiffies;
28895 unsigned long last_timeout;
28896@@ -962,7 +962,7 @@ struct radeon_asic {
28897 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28898 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28899 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28900-};
28901+} __no_const;
28902
28903 /*
28904 * Asic structures
28905diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
28906index a098edc..d001c09 100644
28907--- a/drivers/gpu/drm/radeon/radeon_atombios.c
28908+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
28909@@ -569,6 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
28910 struct radeon_gpio_rec gpio;
28911 struct radeon_hpd hpd;
28912
28913+ pax_track_stack();
28914+
28915 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
28916 return false;
28917
28918diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28919index 50d105a..355cf8d 100644
28920--- a/drivers/gpu/drm/radeon/radeon_device.c
28921+++ b/drivers/gpu/drm/radeon/radeon_device.c
28922@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28923 bool can_switch;
28924
28925 spin_lock(&dev->count_lock);
28926- can_switch = (dev->open_count == 0);
28927+ can_switch = (local_read(&dev->open_count) == 0);
28928 spin_unlock(&dev->count_lock);
28929 return can_switch;
28930 }
28931diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
28932index 07ac481..41cb437 100644
28933--- a/drivers/gpu/drm/radeon/radeon_display.c
28934+++ b/drivers/gpu/drm/radeon/radeon_display.c
28935@@ -926,6 +926,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
28936 uint32_t post_div;
28937 u32 pll_out_min, pll_out_max;
28938
28939+ pax_track_stack();
28940+
28941 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
28942 freq = freq * 1000;
28943
28944diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28945index a1b59ca..86f2d44 100644
28946--- a/drivers/gpu/drm/radeon/radeon_drv.h
28947+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28948@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28949
28950 /* SW interrupt */
28951 wait_queue_head_t swi_queue;
28952- atomic_t swi_emitted;
28953+ atomic_unchecked_t swi_emitted;
28954 int vblank_crtc;
28955 uint32_t irq_enable_reg;
28956 uint32_t r500_disp_irq_reg;
28957diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28958index 7fd4e3e..9748ab5 100644
28959--- a/drivers/gpu/drm/radeon/radeon_fence.c
28960+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28961@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28962 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28963 return 0;
28964 }
28965- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28966+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28967 if (!rdev->cp.ready)
28968 /* FIXME: cp is not running assume everythings is done right
28969 * away
28970@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28971 return r;
28972 }
28973 radeon_fence_write(rdev, 0);
28974- atomic_set(&rdev->fence_drv.seq, 0);
28975+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28976 INIT_LIST_HEAD(&rdev->fence_drv.created);
28977 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28978 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28979diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28980index 48b7cea..342236f 100644
28981--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28982+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28983@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28984 request = compat_alloc_user_space(sizeof(*request));
28985 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28986 || __put_user(req32.param, &request->param)
28987- || __put_user((void __user *)(unsigned long)req32.value,
28988+ || __put_user((unsigned long)req32.value,
28989 &request->value))
28990 return -EFAULT;
28991
28992diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28993index 465746b..cb2b055 100644
28994--- a/drivers/gpu/drm/radeon/radeon_irq.c
28995+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28996@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28997 unsigned int ret;
28998 RING_LOCALS;
28999
29000- atomic_inc(&dev_priv->swi_emitted);
29001- ret = atomic_read(&dev_priv->swi_emitted);
29002+ atomic_inc_unchecked(&dev_priv->swi_emitted);
29003+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29004
29005 BEGIN_RING(4);
29006 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29007@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
29008 drm_radeon_private_t *dev_priv =
29009 (drm_radeon_private_t *) dev->dev_private;
29010
29011- atomic_set(&dev_priv->swi_emitted, 0);
29012+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29013 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29014
29015 dev->max_vblank_count = 0x001fffff;
29016diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
29017index 92e7ea7..147ffad 100644
29018--- a/drivers/gpu/drm/radeon/radeon_state.c
29019+++ b/drivers/gpu/drm/radeon/radeon_state.c
29020@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
29021 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
29022 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
29023
29024- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29025+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29026 sarea_priv->nbox * sizeof(depth_boxes[0])))
29027 return -EFAULT;
29028
29029@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
29030 {
29031 drm_radeon_private_t *dev_priv = dev->dev_private;
29032 drm_radeon_getparam_t *param = data;
29033- int value;
29034+ int value = 0;
29035
29036 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29037
29038diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
29039index 0b5468b..9c4b308 100644
29040--- a/drivers/gpu/drm/radeon/radeon_ttm.c
29041+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29042@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29043 }
29044 if (unlikely(ttm_vm_ops == NULL)) {
29045 ttm_vm_ops = vma->vm_ops;
29046- radeon_ttm_vm_ops = *ttm_vm_ops;
29047- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29048+ pax_open_kernel();
29049+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29050+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29051+ pax_close_kernel();
29052 }
29053 vma->vm_ops = &radeon_ttm_vm_ops;
29054 return 0;
29055diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29056index a9049ed..501f284 100644
29057--- a/drivers/gpu/drm/radeon/rs690.c
29058+++ b/drivers/gpu/drm/radeon/rs690.c
29059@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29060 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29061 rdev->pm.sideport_bandwidth.full)
29062 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29063- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29064+ read_delay_latency.full = dfixed_const(800 * 1000);
29065 read_delay_latency.full = dfixed_div(read_delay_latency,
29066 rdev->pm.igp_sideport_mclk);
29067+ a.full = dfixed_const(370);
29068+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29069 } else {
29070 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29071 rdev->pm.k8_bandwidth.full)
29072diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29073index 727e93d..1565650 100644
29074--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29075+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29076@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29077 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29078 struct shrink_control *sc)
29079 {
29080- static atomic_t start_pool = ATOMIC_INIT(0);
29081+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29082 unsigned i;
29083- unsigned pool_offset = atomic_add_return(1, &start_pool);
29084+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29085 struct ttm_page_pool *pool;
29086 int shrink_pages = sc->nr_to_scan;
29087
29088diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29089index 9cf87d9..2000b7d 100644
29090--- a/drivers/gpu/drm/via/via_drv.h
29091+++ b/drivers/gpu/drm/via/via_drv.h
29092@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29093 typedef uint32_t maskarray_t[5];
29094
29095 typedef struct drm_via_irq {
29096- atomic_t irq_received;
29097+ atomic_unchecked_t irq_received;
29098 uint32_t pending_mask;
29099 uint32_t enable_mask;
29100 wait_queue_head_t irq_queue;
29101@@ -75,7 +75,7 @@ typedef struct drm_via_private {
29102 struct timeval last_vblank;
29103 int last_vblank_valid;
29104 unsigned usec_per_vblank;
29105- atomic_t vbl_received;
29106+ atomic_unchecked_t vbl_received;
29107 drm_via_state_t hc_state;
29108 char pci_buf[VIA_PCI_BUF_SIZE];
29109 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29110diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29111index d391f48..10c8ca3 100644
29112--- a/drivers/gpu/drm/via/via_irq.c
29113+++ b/drivers/gpu/drm/via/via_irq.c
29114@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29115 if (crtc != 0)
29116 return 0;
29117
29118- return atomic_read(&dev_priv->vbl_received);
29119+ return atomic_read_unchecked(&dev_priv->vbl_received);
29120 }
29121
29122 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29123@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29124
29125 status = VIA_READ(VIA_REG_INTERRUPT);
29126 if (status & VIA_IRQ_VBLANK_PENDING) {
29127- atomic_inc(&dev_priv->vbl_received);
29128- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29129+ atomic_inc_unchecked(&dev_priv->vbl_received);
29130+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29131 do_gettimeofday(&cur_vblank);
29132 if (dev_priv->last_vblank_valid) {
29133 dev_priv->usec_per_vblank =
29134@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29135 dev_priv->last_vblank = cur_vblank;
29136 dev_priv->last_vblank_valid = 1;
29137 }
29138- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29139+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29140 DRM_DEBUG("US per vblank is: %u\n",
29141 dev_priv->usec_per_vblank);
29142 }
29143@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29144
29145 for (i = 0; i < dev_priv->num_irqs; ++i) {
29146 if (status & cur_irq->pending_mask) {
29147- atomic_inc(&cur_irq->irq_received);
29148+ atomic_inc_unchecked(&cur_irq->irq_received);
29149 DRM_WAKEUP(&cur_irq->irq_queue);
29150 handled = 1;
29151 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29152@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29153 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29154 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29155 masks[irq][4]));
29156- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29157+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29158 } else {
29159 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29160 (((cur_irq_sequence =
29161- atomic_read(&cur_irq->irq_received)) -
29162+ atomic_read_unchecked(&cur_irq->irq_received)) -
29163 *sequence) <= (1 << 23)));
29164 }
29165 *sequence = cur_irq_sequence;
29166@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29167 }
29168
29169 for (i = 0; i < dev_priv->num_irqs; ++i) {
29170- atomic_set(&cur_irq->irq_received, 0);
29171+ atomic_set_unchecked(&cur_irq->irq_received, 0);
29172 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29173 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29174 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29175@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29176 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29177 case VIA_IRQ_RELATIVE:
29178 irqwait->request.sequence +=
29179- atomic_read(&cur_irq->irq_received);
29180+ atomic_read_unchecked(&cur_irq->irq_received);
29181 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29182 case VIA_IRQ_ABSOLUTE:
29183 break;
29184diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29185index 10fc01f..b4e9822 100644
29186--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29187+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29188@@ -240,7 +240,7 @@ struct vmw_private {
29189 * Fencing and IRQs.
29190 */
29191
29192- atomic_t fence_seq;
29193+ atomic_unchecked_t fence_seq;
29194 wait_queue_head_t fence_queue;
29195 wait_queue_head_t fifo_queue;
29196 atomic_t fence_queue_waiters;
29197diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
29198index 41b95ed..69ea504 100644
29199--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
29200+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
29201@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
29202 struct drm_vmw_fence_rep fence_rep;
29203 struct drm_vmw_fence_rep __user *user_fence_rep;
29204 int ret;
29205- void *user_cmd;
29206+ void __user *user_cmd;
29207 void *cmd;
29208 uint32_t sequence;
29209 struct vmw_sw_context *sw_context = &dev_priv->ctx;
29210diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29211index 61eacc1..ee38ce8 100644
29212--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29213+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29214@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29215 while (!vmw_lag_lt(queue, us)) {
29216 spin_lock(&queue->lock);
29217 if (list_empty(&queue->head))
29218- sequence = atomic_read(&dev_priv->fence_seq);
29219+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29220 else {
29221 fence = list_first_entry(&queue->head,
29222 struct vmw_fence, head);
29223diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29224index 635c0ff..2641bbb 100644
29225--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29226+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29227@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29228 (unsigned int) min,
29229 (unsigned int) fifo->capabilities);
29230
29231- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29232+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29233 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
29234 vmw_fence_queue_init(&fifo->fence_queue);
29235 return vmw_fifo_send_fence(dev_priv, &dummy);
29236@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29237 if (reserveable)
29238 iowrite32(bytes, fifo_mem +
29239 SVGA_FIFO_RESERVED);
29240- return fifo_mem + (next_cmd >> 2);
29241+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29242 } else {
29243 need_bounce = true;
29244 }
29245@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29246
29247 fm = vmw_fifo_reserve(dev_priv, bytes);
29248 if (unlikely(fm == NULL)) {
29249- *sequence = atomic_read(&dev_priv->fence_seq);
29250+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29251 ret = -ENOMEM;
29252 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
29253 false, 3*HZ);
29254@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29255 }
29256
29257 do {
29258- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
29259+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
29260 } while (*sequence == 0);
29261
29262 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29263diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29264index e92298a..f68f2d6 100644
29265--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29266+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29267@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
29268 * emitted. Then the fence is stale and signaled.
29269 */
29270
29271- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
29272+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
29273 > VMW_FENCE_WRAP);
29274
29275 return ret;
29276@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29277
29278 if (fifo_idle)
29279 down_read(&fifo_state->rwsem);
29280- signal_seq = atomic_read(&dev_priv->fence_seq);
29281+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
29282 ret = 0;
29283
29284 for (;;) {
29285diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29286index dfe32e6..dd18a00 100644
29287--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29288+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29289@@ -843,7 +843,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
29290 struct vmw_framebuffer *vfb = NULL;
29291 struct vmw_surface *surface = NULL;
29292 struct vmw_dma_buffer *bo = NULL;
29293- u64 required_size;
29294 int ret;
29295
29296 /**
29297@@ -852,8 +851,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
29298 * requested framebuffer.
29299 */
29300
29301- required_size = mode_cmd->pitch * mode_cmd->height;
29302- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
29303+ if (!vmw_kms_validate_mode_vram(dev_priv,
29304+ mode_cmd->pitch,
29305+ mode_cmd->height)) {
29306 DRM_ERROR("VRAM size is too small for requested mode.\n");
29307 return NULL;
29308 }
29309diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
29310index c72f1c0..18376f1 100644
29311--- a/drivers/gpu/vga/vgaarb.c
29312+++ b/drivers/gpu/vga/vgaarb.c
29313@@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
29314 uc = &priv->cards[i];
29315 }
29316
29317- if (!uc)
29318- return -EINVAL;
29319+ if (!uc) {
29320+ ret_val = -EINVAL;
29321+ goto done;
29322+ }
29323
29324- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
29325- return -EINVAL;
29326+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
29327+ ret_val = -EINVAL;
29328+ goto done;
29329+ }
29330
29331- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
29332- return -EINVAL;
29333+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
29334+ ret_val = -EINVAL;
29335+ goto done;
29336+ }
29337
29338 vga_put(pdev, io_state);
29339
29340diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29341index e9c8f80..427d61e 100644
29342--- a/drivers/hid/hid-core.c
29343+++ b/drivers/hid/hid-core.c
29344@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev)
29345
29346 int hid_add_device(struct hid_device *hdev)
29347 {
29348- static atomic_t id = ATOMIC_INIT(0);
29349+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29350 int ret;
29351
29352 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29353@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev)
29354 /* XXX hack, any other cleaner solution after the driver core
29355 * is converted to allow more than 20 bytes as the device name? */
29356 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29357- hdev->vendor, hdev->product, atomic_inc_return(&id));
29358+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29359
29360 hid_debug_register(hdev, dev_name(&hdev->dev));
29361 ret = device_add(&hdev->dev);
29362diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29363index 7c1188b..5a64357 100644
29364--- a/drivers/hid/usbhid/hiddev.c
29365+++ b/drivers/hid/usbhid/hiddev.c
29366@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29367 break;
29368
29369 case HIDIOCAPPLICATION:
29370- if (arg < 0 || arg >= hid->maxapplication)
29371+ if (arg >= hid->maxapplication)
29372 break;
29373
29374 for (i = 0; i < hid->maxcollection; i++)
29375diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29376index 66f6729..2d6de0a 100644
29377--- a/drivers/hwmon/acpi_power_meter.c
29378+++ b/drivers/hwmon/acpi_power_meter.c
29379@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29380 return res;
29381
29382 temp /= 1000;
29383- if (temp < 0)
29384- return -EINVAL;
29385
29386 mutex_lock(&resource->lock);
29387 resource->trip[attr->index - 7] = temp;
29388diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29389index fe4104c..346febb 100644
29390--- a/drivers/hwmon/sht15.c
29391+++ b/drivers/hwmon/sht15.c
29392@@ -166,7 +166,7 @@ struct sht15_data {
29393 int supply_uV;
29394 bool supply_uV_valid;
29395 struct work_struct update_supply_work;
29396- atomic_t interrupt_handled;
29397+ atomic_unchecked_t interrupt_handled;
29398 };
29399
29400 /**
29401@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29402 return ret;
29403
29404 gpio_direction_input(data->pdata->gpio_data);
29405- atomic_set(&data->interrupt_handled, 0);
29406+ atomic_set_unchecked(&data->interrupt_handled, 0);
29407
29408 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29409 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29410 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29411 /* Only relevant if the interrupt hasn't occurred. */
29412- if (!atomic_read(&data->interrupt_handled))
29413+ if (!atomic_read_unchecked(&data->interrupt_handled))
29414 schedule_work(&data->read_work);
29415 }
29416 ret = wait_event_timeout(data->wait_queue,
29417@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29418
29419 /* First disable the interrupt */
29420 disable_irq_nosync(irq);
29421- atomic_inc(&data->interrupt_handled);
29422+ atomic_inc_unchecked(&data->interrupt_handled);
29423 /* Then schedule a reading work struct */
29424 if (data->state != SHT15_READING_NOTHING)
29425 schedule_work(&data->read_work);
29426@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29427 * If not, then start the interrupt again - care here as could
29428 * have gone low in meantime so verify it hasn't!
29429 */
29430- atomic_set(&data->interrupt_handled, 0);
29431+ atomic_set_unchecked(&data->interrupt_handled, 0);
29432 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29433 /* If still not occurred or another handler has been scheduled */
29434 if (gpio_get_value(data->pdata->gpio_data)
29435- || atomic_read(&data->interrupt_handled))
29436+ || atomic_read_unchecked(&data->interrupt_handled))
29437 return;
29438 }
29439
29440diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29441index 378fcb5..5e91fa8 100644
29442--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29443+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29444@@ -43,7 +43,7 @@
29445 extern struct i2c_adapter amd756_smbus;
29446
29447 static struct i2c_adapter *s4882_adapter;
29448-static struct i2c_algorithm *s4882_algo;
29449+static i2c_algorithm_no_const *s4882_algo;
29450
29451 /* Wrapper access functions for multiplexed SMBus */
29452 static DEFINE_MUTEX(amd756_lock);
29453diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29454index 29015eb..af2d8e9 100644
29455--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29456+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29457@@ -41,7 +41,7 @@
29458 extern struct i2c_adapter *nforce2_smbus;
29459
29460 static struct i2c_adapter *s4985_adapter;
29461-static struct i2c_algorithm *s4985_algo;
29462+static i2c_algorithm_no_const *s4985_algo;
29463
29464 /* Wrapper access functions for multiplexed SMBus */
29465 static DEFINE_MUTEX(nforce2_lock);
29466diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29467index d7a4833..7fae376 100644
29468--- a/drivers/i2c/i2c-mux.c
29469+++ b/drivers/i2c/i2c-mux.c
29470@@ -28,7 +28,7 @@
29471 /* multiplexer per channel data */
29472 struct i2c_mux_priv {
29473 struct i2c_adapter adap;
29474- struct i2c_algorithm algo;
29475+ i2c_algorithm_no_const algo;
29476
29477 struct i2c_adapter *parent;
29478 void *mux_dev; /* the mux chip/device */
29479diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29480index 57d00ca..0145194 100644
29481--- a/drivers/ide/aec62xx.c
29482+++ b/drivers/ide/aec62xx.c
29483@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29484 .cable_detect = atp86x_cable_detect,
29485 };
29486
29487-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29488+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29489 { /* 0: AEC6210 */
29490 .name = DRV_NAME,
29491 .init_chipset = init_chipset_aec62xx,
29492diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29493index 2c8016a..911a27c 100644
29494--- a/drivers/ide/alim15x3.c
29495+++ b/drivers/ide/alim15x3.c
29496@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29497 .dma_sff_read_status = ide_dma_sff_read_status,
29498 };
29499
29500-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29501+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29502 .name = DRV_NAME,
29503 .init_chipset = init_chipset_ali15x3,
29504 .init_hwif = init_hwif_ali15x3,
29505diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29506index 3747b25..56fc995 100644
29507--- a/drivers/ide/amd74xx.c
29508+++ b/drivers/ide/amd74xx.c
29509@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29510 .udma_mask = udma, \
29511 }
29512
29513-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29514+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29515 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29516 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29517 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29518diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29519index 15f0ead..cb43480 100644
29520--- a/drivers/ide/atiixp.c
29521+++ b/drivers/ide/atiixp.c
29522@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29523 .cable_detect = atiixp_cable_detect,
29524 };
29525
29526-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29527+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29528 { /* 0: IXP200/300/400/700 */
29529 .name = DRV_NAME,
29530 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29531diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29532index 5f80312..d1fc438 100644
29533--- a/drivers/ide/cmd64x.c
29534+++ b/drivers/ide/cmd64x.c
29535@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29536 .dma_sff_read_status = ide_dma_sff_read_status,
29537 };
29538
29539-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29540+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29541 { /* 0: CMD643 */
29542 .name = DRV_NAME,
29543 .init_chipset = init_chipset_cmd64x,
29544diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29545index 2c1e5f7..1444762 100644
29546--- a/drivers/ide/cs5520.c
29547+++ b/drivers/ide/cs5520.c
29548@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29549 .set_dma_mode = cs5520_set_dma_mode,
29550 };
29551
29552-static const struct ide_port_info cyrix_chipset __devinitdata = {
29553+static const struct ide_port_info cyrix_chipset __devinitconst = {
29554 .name = DRV_NAME,
29555 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29556 .port_ops = &cs5520_port_ops,
29557diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29558index 4dc4eb9..49b40ad 100644
29559--- a/drivers/ide/cs5530.c
29560+++ b/drivers/ide/cs5530.c
29561@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29562 .udma_filter = cs5530_udma_filter,
29563 };
29564
29565-static const struct ide_port_info cs5530_chipset __devinitdata = {
29566+static const struct ide_port_info cs5530_chipset __devinitconst = {
29567 .name = DRV_NAME,
29568 .init_chipset = init_chipset_cs5530,
29569 .init_hwif = init_hwif_cs5530,
29570diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29571index 5059faf..18d4c85 100644
29572--- a/drivers/ide/cs5535.c
29573+++ b/drivers/ide/cs5535.c
29574@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29575 .cable_detect = cs5535_cable_detect,
29576 };
29577
29578-static const struct ide_port_info cs5535_chipset __devinitdata = {
29579+static const struct ide_port_info cs5535_chipset __devinitconst = {
29580 .name = DRV_NAME,
29581 .port_ops = &cs5535_port_ops,
29582 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29583diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29584index 67cbcfa..37ea151 100644
29585--- a/drivers/ide/cy82c693.c
29586+++ b/drivers/ide/cy82c693.c
29587@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29588 .set_dma_mode = cy82c693_set_dma_mode,
29589 };
29590
29591-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29592+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29593 .name = DRV_NAME,
29594 .init_iops = init_iops_cy82c693,
29595 .port_ops = &cy82c693_port_ops,
29596diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29597index 58c51cd..4aec3b8 100644
29598--- a/drivers/ide/hpt366.c
29599+++ b/drivers/ide/hpt366.c
29600@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29601 }
29602 };
29603
29604-static const struct hpt_info hpt36x __devinitdata = {
29605+static const struct hpt_info hpt36x __devinitconst = {
29606 .chip_name = "HPT36x",
29607 .chip_type = HPT36x,
29608 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29609@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29610 .timings = &hpt36x_timings
29611 };
29612
29613-static const struct hpt_info hpt370 __devinitdata = {
29614+static const struct hpt_info hpt370 __devinitconst = {
29615 .chip_name = "HPT370",
29616 .chip_type = HPT370,
29617 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29618@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29619 .timings = &hpt37x_timings
29620 };
29621
29622-static const struct hpt_info hpt370a __devinitdata = {
29623+static const struct hpt_info hpt370a __devinitconst = {
29624 .chip_name = "HPT370A",
29625 .chip_type = HPT370A,
29626 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29627@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29628 .timings = &hpt37x_timings
29629 };
29630
29631-static const struct hpt_info hpt374 __devinitdata = {
29632+static const struct hpt_info hpt374 __devinitconst = {
29633 .chip_name = "HPT374",
29634 .chip_type = HPT374,
29635 .udma_mask = ATA_UDMA5,
29636@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29637 .timings = &hpt37x_timings
29638 };
29639
29640-static const struct hpt_info hpt372 __devinitdata = {
29641+static const struct hpt_info hpt372 __devinitconst = {
29642 .chip_name = "HPT372",
29643 .chip_type = HPT372,
29644 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29645@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29646 .timings = &hpt37x_timings
29647 };
29648
29649-static const struct hpt_info hpt372a __devinitdata = {
29650+static const struct hpt_info hpt372a __devinitconst = {
29651 .chip_name = "HPT372A",
29652 .chip_type = HPT372A,
29653 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29654@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29655 .timings = &hpt37x_timings
29656 };
29657
29658-static const struct hpt_info hpt302 __devinitdata = {
29659+static const struct hpt_info hpt302 __devinitconst = {
29660 .chip_name = "HPT302",
29661 .chip_type = HPT302,
29662 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29663@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29664 .timings = &hpt37x_timings
29665 };
29666
29667-static const struct hpt_info hpt371 __devinitdata = {
29668+static const struct hpt_info hpt371 __devinitconst = {
29669 .chip_name = "HPT371",
29670 .chip_type = HPT371,
29671 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29672@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29673 .timings = &hpt37x_timings
29674 };
29675
29676-static const struct hpt_info hpt372n __devinitdata = {
29677+static const struct hpt_info hpt372n __devinitconst = {
29678 .chip_name = "HPT372N",
29679 .chip_type = HPT372N,
29680 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29681@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29682 .timings = &hpt37x_timings
29683 };
29684
29685-static const struct hpt_info hpt302n __devinitdata = {
29686+static const struct hpt_info hpt302n __devinitconst = {
29687 .chip_name = "HPT302N",
29688 .chip_type = HPT302N,
29689 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29690@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29691 .timings = &hpt37x_timings
29692 };
29693
29694-static const struct hpt_info hpt371n __devinitdata = {
29695+static const struct hpt_info hpt371n __devinitconst = {
29696 .chip_name = "HPT371N",
29697 .chip_type = HPT371N,
29698 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29699@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29700 .dma_sff_read_status = ide_dma_sff_read_status,
29701 };
29702
29703-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29704+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29705 { /* 0: HPT36x */
29706 .name = DRV_NAME,
29707 .init_chipset = init_chipset_hpt366,
29708diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29709index 04b0956..f5b47dc 100644
29710--- a/drivers/ide/ide-cd.c
29711+++ b/drivers/ide/ide-cd.c
29712@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29713 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29714 if ((unsigned long)buf & alignment
29715 || blk_rq_bytes(rq) & q->dma_pad_mask
29716- || object_is_on_stack(buf))
29717+ || object_starts_on_stack(buf))
29718 drive->dma = 0;
29719 }
29720 }
29721diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
29722index 61fdf54..2834ea6 100644
29723--- a/drivers/ide/ide-floppy.c
29724+++ b/drivers/ide/ide-floppy.c
29725@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
29726 u8 pc_buf[256], header_len, desc_cnt;
29727 int i, rc = 1, blocks, length;
29728
29729+ pax_track_stack();
29730+
29731 ide_debug_log(IDE_DBG_FUNC, "enter");
29732
29733 drive->bios_cyl = 0;
29734diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
29735index d267b7a..a22ca84 100644
29736--- a/drivers/ide/ide-floppy_ioctl.c
29737+++ b/drivers/ide/ide-floppy_ioctl.c
29738@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
29739 * and CDROM_SEND_PACKET (legacy) ioctls
29740 */
29741 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
29742- err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
29743- mode, cmd, argp);
29744+ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
29745
29746 if (err == -ENOTTY)
29747 err = generic_ide_ioctl(drive, bdev, cmd, arg);
29748diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29749index a743e68..1cfd674 100644
29750--- a/drivers/ide/ide-pci-generic.c
29751+++ b/drivers/ide/ide-pci-generic.c
29752@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29753 .udma_mask = ATA_UDMA6, \
29754 }
29755
29756-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29757+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29758 /* 0: Unknown */
29759 DECLARE_GENERIC_PCI_DEV(0),
29760
29761diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29762index 560e66d..d5dd180 100644
29763--- a/drivers/ide/it8172.c
29764+++ b/drivers/ide/it8172.c
29765@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29766 .set_dma_mode = it8172_set_dma_mode,
29767 };
29768
29769-static const struct ide_port_info it8172_port_info __devinitdata = {
29770+static const struct ide_port_info it8172_port_info __devinitconst = {
29771 .name = DRV_NAME,
29772 .port_ops = &it8172_port_ops,
29773 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29774diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29775index 46816ba..1847aeb 100644
29776--- a/drivers/ide/it8213.c
29777+++ b/drivers/ide/it8213.c
29778@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29779 .cable_detect = it8213_cable_detect,
29780 };
29781
29782-static const struct ide_port_info it8213_chipset __devinitdata = {
29783+static const struct ide_port_info it8213_chipset __devinitconst = {
29784 .name = DRV_NAME,
29785 .enablebits = { {0x41, 0x80, 0x80} },
29786 .port_ops = &it8213_port_ops,
29787diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29788index 2e3169f..c5611db 100644
29789--- a/drivers/ide/it821x.c
29790+++ b/drivers/ide/it821x.c
29791@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29792 .cable_detect = it821x_cable_detect,
29793 };
29794
29795-static const struct ide_port_info it821x_chipset __devinitdata = {
29796+static const struct ide_port_info it821x_chipset __devinitconst = {
29797 .name = DRV_NAME,
29798 .init_chipset = init_chipset_it821x,
29799 .init_hwif = init_hwif_it821x,
29800diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29801index 74c2c4a..efddd7d 100644
29802--- a/drivers/ide/jmicron.c
29803+++ b/drivers/ide/jmicron.c
29804@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29805 .cable_detect = jmicron_cable_detect,
29806 };
29807
29808-static const struct ide_port_info jmicron_chipset __devinitdata = {
29809+static const struct ide_port_info jmicron_chipset __devinitconst = {
29810 .name = DRV_NAME,
29811 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29812 .port_ops = &jmicron_port_ops,
29813diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29814index 95327a2..73f78d8 100644
29815--- a/drivers/ide/ns87415.c
29816+++ b/drivers/ide/ns87415.c
29817@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29818 .dma_sff_read_status = superio_dma_sff_read_status,
29819 };
29820
29821-static const struct ide_port_info ns87415_chipset __devinitdata = {
29822+static const struct ide_port_info ns87415_chipset __devinitconst = {
29823 .name = DRV_NAME,
29824 .init_hwif = init_hwif_ns87415,
29825 .tp_ops = &ns87415_tp_ops,
29826diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29827index 1a53a4c..39edc66 100644
29828--- a/drivers/ide/opti621.c
29829+++ b/drivers/ide/opti621.c
29830@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29831 .set_pio_mode = opti621_set_pio_mode,
29832 };
29833
29834-static const struct ide_port_info opti621_chipset __devinitdata = {
29835+static const struct ide_port_info opti621_chipset __devinitconst = {
29836 .name = DRV_NAME,
29837 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29838 .port_ops = &opti621_port_ops,
29839diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29840index 9546fe2..2e5ceb6 100644
29841--- a/drivers/ide/pdc202xx_new.c
29842+++ b/drivers/ide/pdc202xx_new.c
29843@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29844 .udma_mask = udma, \
29845 }
29846
29847-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29848+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29849 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29850 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29851 };
29852diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29853index 3a35ec6..5634510 100644
29854--- a/drivers/ide/pdc202xx_old.c
29855+++ b/drivers/ide/pdc202xx_old.c
29856@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29857 .max_sectors = sectors, \
29858 }
29859
29860-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29861+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29862 { /* 0: PDC20246 */
29863 .name = DRV_NAME,
29864 .init_chipset = init_chipset_pdc202xx,
29865diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29866index b59d04c..368c2a7 100644
29867--- a/drivers/ide/piix.c
29868+++ b/drivers/ide/piix.c
29869@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29870 .udma_mask = udma, \
29871 }
29872
29873-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29874+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29875 /* 0: MPIIX */
29876 { /*
29877 * MPIIX actually has only a single IDE channel mapped to
29878diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29879index a6414a8..c04173e 100644
29880--- a/drivers/ide/rz1000.c
29881+++ b/drivers/ide/rz1000.c
29882@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29883 }
29884 }
29885
29886-static const struct ide_port_info rz1000_chipset __devinitdata = {
29887+static const struct ide_port_info rz1000_chipset __devinitconst = {
29888 .name = DRV_NAME,
29889 .host_flags = IDE_HFLAG_NO_DMA,
29890 };
29891diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29892index 356b9b5..d4758eb 100644
29893--- a/drivers/ide/sc1200.c
29894+++ b/drivers/ide/sc1200.c
29895@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29896 .dma_sff_read_status = ide_dma_sff_read_status,
29897 };
29898
29899-static const struct ide_port_info sc1200_chipset __devinitdata = {
29900+static const struct ide_port_info sc1200_chipset __devinitconst = {
29901 .name = DRV_NAME,
29902 .port_ops = &sc1200_port_ops,
29903 .dma_ops = &sc1200_dma_ops,
29904diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29905index b7f5b0c..9701038 100644
29906--- a/drivers/ide/scc_pata.c
29907+++ b/drivers/ide/scc_pata.c
29908@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29909 .dma_sff_read_status = scc_dma_sff_read_status,
29910 };
29911
29912-static const struct ide_port_info scc_chipset __devinitdata = {
29913+static const struct ide_port_info scc_chipset __devinitconst = {
29914 .name = "sccIDE",
29915 .init_iops = init_iops_scc,
29916 .init_dma = scc_init_dma,
29917diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29918index 35fb8da..24d72ef 100644
29919--- a/drivers/ide/serverworks.c
29920+++ b/drivers/ide/serverworks.c
29921@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29922 .cable_detect = svwks_cable_detect,
29923 };
29924
29925-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29926+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29927 { /* 0: OSB4 */
29928 .name = DRV_NAME,
29929 .init_chipset = init_chipset_svwks,
29930diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
29931index ab3db61..afed580 100644
29932--- a/drivers/ide/setup-pci.c
29933+++ b/drivers/ide/setup-pci.c
29934@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
29935 int ret, i, n_ports = dev2 ? 4 : 2;
29936 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29937
29938+ pax_track_stack();
29939+
29940 for (i = 0; i < n_ports / 2; i++) {
29941 ret = ide_setup_pci_controller(pdev[i], d, !i);
29942 if (ret < 0)
29943diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29944index ddeda44..46f7e30 100644
29945--- a/drivers/ide/siimage.c
29946+++ b/drivers/ide/siimage.c
29947@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29948 .udma_mask = ATA_UDMA6, \
29949 }
29950
29951-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29952+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29953 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29954 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29955 };
29956diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29957index 4a00225..09e61b4 100644
29958--- a/drivers/ide/sis5513.c
29959+++ b/drivers/ide/sis5513.c
29960@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29961 .cable_detect = sis_cable_detect,
29962 };
29963
29964-static const struct ide_port_info sis5513_chipset __devinitdata = {
29965+static const struct ide_port_info sis5513_chipset __devinitconst = {
29966 .name = DRV_NAME,
29967 .init_chipset = init_chipset_sis5513,
29968 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29969diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29970index f21dc2a..d051cd2 100644
29971--- a/drivers/ide/sl82c105.c
29972+++ b/drivers/ide/sl82c105.c
29973@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29974 .dma_sff_read_status = ide_dma_sff_read_status,
29975 };
29976
29977-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29978+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29979 .name = DRV_NAME,
29980 .init_chipset = init_chipset_sl82c105,
29981 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29982diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29983index 864ffe0..863a5e9 100644
29984--- a/drivers/ide/slc90e66.c
29985+++ b/drivers/ide/slc90e66.c
29986@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29987 .cable_detect = slc90e66_cable_detect,
29988 };
29989
29990-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29991+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29992 .name = DRV_NAME,
29993 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29994 .port_ops = &slc90e66_port_ops,
29995diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29996index e444d24..ba577de 100644
29997--- a/drivers/ide/tc86c001.c
29998+++ b/drivers/ide/tc86c001.c
29999@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
30000 .dma_sff_read_status = ide_dma_sff_read_status,
30001 };
30002
30003-static const struct ide_port_info tc86c001_chipset __devinitdata = {
30004+static const struct ide_port_info tc86c001_chipset __devinitconst = {
30005 .name = DRV_NAME,
30006 .init_hwif = init_hwif_tc86c001,
30007 .port_ops = &tc86c001_port_ops,
30008diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
30009index e53a1b7..d11aff7 100644
30010--- a/drivers/ide/triflex.c
30011+++ b/drivers/ide/triflex.c
30012@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
30013 .set_dma_mode = triflex_set_mode,
30014 };
30015
30016-static const struct ide_port_info triflex_device __devinitdata = {
30017+static const struct ide_port_info triflex_device __devinitconst = {
30018 .name = DRV_NAME,
30019 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
30020 .port_ops = &triflex_port_ops,
30021diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
30022index 4b42ca0..e494a98 100644
30023--- a/drivers/ide/trm290.c
30024+++ b/drivers/ide/trm290.c
30025@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
30026 .dma_check = trm290_dma_check,
30027 };
30028
30029-static const struct ide_port_info trm290_chipset __devinitdata = {
30030+static const struct ide_port_info trm290_chipset __devinitconst = {
30031 .name = DRV_NAME,
30032 .init_hwif = init_hwif_trm290,
30033 .tp_ops = &trm290_tp_ops,
30034diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
30035index f46f49c..eb77678 100644
30036--- a/drivers/ide/via82cxxx.c
30037+++ b/drivers/ide/via82cxxx.c
30038@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
30039 .cable_detect = via82cxxx_cable_detect,
30040 };
30041
30042-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
30043+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
30044 .name = DRV_NAME,
30045 .init_chipset = init_chipset_via82cxxx,
30046 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
30047diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
30048index fc0f2bd..ac2f8a5 100644
30049--- a/drivers/infiniband/core/cm.c
30050+++ b/drivers/infiniband/core/cm.c
30051@@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
30052
30053 struct cm_counter_group {
30054 struct kobject obj;
30055- atomic_long_t counter[CM_ATTR_COUNT];
30056+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30057 };
30058
30059 struct cm_counter_attribute {
30060@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work,
30061 struct ib_mad_send_buf *msg = NULL;
30062 int ret;
30063
30064- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30065+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30066 counter[CM_REQ_COUNTER]);
30067
30068 /* Quick state check to discard duplicate REQs. */
30069@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
30070 if (!cm_id_priv)
30071 return;
30072
30073- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30074+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30075 counter[CM_REP_COUNTER]);
30076 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30077 if (ret)
30078@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work)
30079 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30080 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30081 spin_unlock_irq(&cm_id_priv->lock);
30082- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30083+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30084 counter[CM_RTU_COUNTER]);
30085 goto out;
30086 }
30087@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work)
30088 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30089 dreq_msg->local_comm_id);
30090 if (!cm_id_priv) {
30091- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30092+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30093 counter[CM_DREQ_COUNTER]);
30094 cm_issue_drep(work->port, work->mad_recv_wc);
30095 return -EINVAL;
30096@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work)
30097 case IB_CM_MRA_REP_RCVD:
30098 break;
30099 case IB_CM_TIMEWAIT:
30100- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30101+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30102 counter[CM_DREQ_COUNTER]);
30103 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30104 goto unlock;
30105@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
30106 cm_free_msg(msg);
30107 goto deref;
30108 case IB_CM_DREQ_RCVD:
30109- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30110+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30111 counter[CM_DREQ_COUNTER]);
30112 goto unlock;
30113 default:
30114@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work)
30115 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30116 cm_id_priv->msg, timeout)) {
30117 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30118- atomic_long_inc(&work->port->
30119+ atomic_long_inc_unchecked(&work->port->
30120 counter_group[CM_RECV_DUPLICATES].
30121 counter[CM_MRA_COUNTER]);
30122 goto out;
30123@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work)
30124 break;
30125 case IB_CM_MRA_REQ_RCVD:
30126 case IB_CM_MRA_REP_RCVD:
30127- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30128+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30129 counter[CM_MRA_COUNTER]);
30130 /* fall through */
30131 default:
30132@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work)
30133 case IB_CM_LAP_IDLE:
30134 break;
30135 case IB_CM_MRA_LAP_SENT:
30136- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30137+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30138 counter[CM_LAP_COUNTER]);
30139 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30140 goto unlock;
30141@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work)
30142 cm_free_msg(msg);
30143 goto deref;
30144 case IB_CM_LAP_RCVD:
30145- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30146+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30147 counter[CM_LAP_COUNTER]);
30148 goto unlock;
30149 default:
30150@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30151 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30152 if (cur_cm_id_priv) {
30153 spin_unlock_irq(&cm.lock);
30154- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30155+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30156 counter[CM_SIDR_REQ_COUNTER]);
30157 goto out; /* Duplicate message. */
30158 }
30159@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30160 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30161 msg->retries = 1;
30162
30163- atomic_long_add(1 + msg->retries,
30164+ atomic_long_add_unchecked(1 + msg->retries,
30165 &port->counter_group[CM_XMIT].counter[attr_index]);
30166 if (msg->retries)
30167- atomic_long_add(msg->retries,
30168+ atomic_long_add_unchecked(msg->retries,
30169 &port->counter_group[CM_XMIT_RETRIES].
30170 counter[attr_index]);
30171
30172@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30173 }
30174
30175 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30176- atomic_long_inc(&port->counter_group[CM_RECV].
30177+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30178 counter[attr_id - CM_ATTR_ID_OFFSET]);
30179
30180 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30181@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30182 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30183
30184 return sprintf(buf, "%ld\n",
30185- atomic_long_read(&group->counter[cm_attr->index]));
30186+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30187 }
30188
30189 static const struct sysfs_ops cm_counter_ops = {
30190diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
30191index ca4c5dc..572d1ae 100644
30192--- a/drivers/infiniband/core/cma.c
30193+++ b/drivers/infiniband/core/cma.c
30194@@ -2492,6 +2492,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
30195
30196 req.private_data_len = sizeof(struct cma_hdr) +
30197 conn_param->private_data_len;
30198+ if (req.private_data_len < conn_param->private_data_len)
30199+ return -EINVAL;
30200+
30201 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
30202 if (!req.private_data)
30203 return -ENOMEM;
30204@@ -2541,6 +2544,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
30205 memset(&req, 0, sizeof req);
30206 offset = cma_user_data_offset(id_priv->id.ps);
30207 req.private_data_len = offset + conn_param->private_data_len;
30208+ if (req.private_data_len < conn_param->private_data_len)
30209+ return -EINVAL;
30210+
30211 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
30212 if (!private_data)
30213 return -ENOMEM;
30214diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30215index 4507043..14ad522 100644
30216--- a/drivers/infiniband/core/fmr_pool.c
30217+++ b/drivers/infiniband/core/fmr_pool.c
30218@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30219
30220 struct task_struct *thread;
30221
30222- atomic_t req_ser;
30223- atomic_t flush_ser;
30224+ atomic_unchecked_t req_ser;
30225+ atomic_unchecked_t flush_ser;
30226
30227 wait_queue_head_t force_wait;
30228 };
30229@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30230 struct ib_fmr_pool *pool = pool_ptr;
30231
30232 do {
30233- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30234+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30235 ib_fmr_batch_release(pool);
30236
30237- atomic_inc(&pool->flush_ser);
30238+ atomic_inc_unchecked(&pool->flush_ser);
30239 wake_up_interruptible(&pool->force_wait);
30240
30241 if (pool->flush_function)
30242@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30243 }
30244
30245 set_current_state(TASK_INTERRUPTIBLE);
30246- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30247+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30248 !kthread_should_stop())
30249 schedule();
30250 __set_current_state(TASK_RUNNING);
30251@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30252 pool->dirty_watermark = params->dirty_watermark;
30253 pool->dirty_len = 0;
30254 spin_lock_init(&pool->pool_lock);
30255- atomic_set(&pool->req_ser, 0);
30256- atomic_set(&pool->flush_ser, 0);
30257+ atomic_set_unchecked(&pool->req_ser, 0);
30258+ atomic_set_unchecked(&pool->flush_ser, 0);
30259 init_waitqueue_head(&pool->force_wait);
30260
30261 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30262@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30263 }
30264 spin_unlock_irq(&pool->pool_lock);
30265
30266- serial = atomic_inc_return(&pool->req_ser);
30267+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30268 wake_up_process(pool->thread);
30269
30270 if (wait_event_interruptible(pool->force_wait,
30271- atomic_read(&pool->flush_ser) - serial >= 0))
30272+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30273 return -EINTR;
30274
30275 return 0;
30276@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30277 } else {
30278 list_add_tail(&fmr->list, &pool->dirty_list);
30279 if (++pool->dirty_len >= pool->dirty_watermark) {
30280- atomic_inc(&pool->req_ser);
30281+ atomic_inc_unchecked(&pool->req_ser);
30282 wake_up_process(pool->thread);
30283 }
30284 }
30285diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30286index 40c8353..946b0e4 100644
30287--- a/drivers/infiniband/hw/cxgb4/mem.c
30288+++ b/drivers/infiniband/hw/cxgb4/mem.c
30289@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30290 int err;
30291 struct fw_ri_tpte tpt;
30292 u32 stag_idx;
30293- static atomic_t key;
30294+ static atomic_unchecked_t key;
30295
30296 if (c4iw_fatal_error(rdev))
30297 return -EIO;
30298@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30299 &rdev->resource.tpt_fifo_lock);
30300 if (!stag_idx)
30301 return -ENOMEM;
30302- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30303+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30304 }
30305 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30306 __func__, stag_state, type, pdid, stag_idx);
30307diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
30308index 31ae1b1..2f5b038 100644
30309--- a/drivers/infiniband/hw/ipath/ipath_fs.c
30310+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
30311@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
30312 struct infinipath_counters counters;
30313 struct ipath_devdata *dd;
30314
30315+ pax_track_stack();
30316+
30317 dd = file->f_path.dentry->d_inode->i_private;
30318 dd->ipath_f_read_counters(dd, &counters);
30319
30320diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30321index 79b3dbc..96e5fcc 100644
30322--- a/drivers/infiniband/hw/ipath/ipath_rc.c
30323+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30324@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30325 struct ib_atomic_eth *ateth;
30326 struct ipath_ack_entry *e;
30327 u64 vaddr;
30328- atomic64_t *maddr;
30329+ atomic64_unchecked_t *maddr;
30330 u64 sdata;
30331 u32 rkey;
30332 u8 next;
30333@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30334 IB_ACCESS_REMOTE_ATOMIC)))
30335 goto nack_acc_unlck;
30336 /* Perform atomic OP and save result. */
30337- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30338+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30339 sdata = be64_to_cpu(ateth->swap_data);
30340 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30341 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30342- (u64) atomic64_add_return(sdata, maddr) - sdata :
30343+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30344 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30345 be64_to_cpu(ateth->compare_data),
30346 sdata);
30347diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30348index 1f95bba..9530f87 100644
30349--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30350+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30351@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30352 unsigned long flags;
30353 struct ib_wc wc;
30354 u64 sdata;
30355- atomic64_t *maddr;
30356+ atomic64_unchecked_t *maddr;
30357 enum ib_wc_status send_status;
30358
30359 /*
30360@@ -382,11 +382,11 @@ again:
30361 IB_ACCESS_REMOTE_ATOMIC)))
30362 goto acc_err;
30363 /* Perform atomic OP and save result. */
30364- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30365+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30366 sdata = wqe->wr.wr.atomic.compare_add;
30367 *(u64 *) sqp->s_sge.sge.vaddr =
30368 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30369- (u64) atomic64_add_return(sdata, maddr) - sdata :
30370+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30371 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30372 sdata, wqe->wr.wr.atomic.swap);
30373 goto send_comp;
30374diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30375index 2d668c6..3312bb7 100644
30376--- a/drivers/infiniband/hw/nes/nes.c
30377+++ b/drivers/infiniband/hw/nes/nes.c
30378@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30379 LIST_HEAD(nes_adapter_list);
30380 static LIST_HEAD(nes_dev_list);
30381
30382-atomic_t qps_destroyed;
30383+atomic_unchecked_t qps_destroyed;
30384
30385 static unsigned int ee_flsh_adapter;
30386 static unsigned int sysfs_nonidx_addr;
30387@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30388 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30389 struct nes_adapter *nesadapter = nesdev->nesadapter;
30390
30391- atomic_inc(&qps_destroyed);
30392+ atomic_inc_unchecked(&qps_destroyed);
30393
30394 /* Free the control structures */
30395
30396diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30397index 6fe7987..68637b5 100644
30398--- a/drivers/infiniband/hw/nes/nes.h
30399+++ b/drivers/infiniband/hw/nes/nes.h
30400@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
30401 extern unsigned int wqm_quanta;
30402 extern struct list_head nes_adapter_list;
30403
30404-extern atomic_t cm_connects;
30405-extern atomic_t cm_accepts;
30406-extern atomic_t cm_disconnects;
30407-extern atomic_t cm_closes;
30408-extern atomic_t cm_connecteds;
30409-extern atomic_t cm_connect_reqs;
30410-extern atomic_t cm_rejects;
30411-extern atomic_t mod_qp_timouts;
30412-extern atomic_t qps_created;
30413-extern atomic_t qps_destroyed;
30414-extern atomic_t sw_qps_destroyed;
30415+extern atomic_unchecked_t cm_connects;
30416+extern atomic_unchecked_t cm_accepts;
30417+extern atomic_unchecked_t cm_disconnects;
30418+extern atomic_unchecked_t cm_closes;
30419+extern atomic_unchecked_t cm_connecteds;
30420+extern atomic_unchecked_t cm_connect_reqs;
30421+extern atomic_unchecked_t cm_rejects;
30422+extern atomic_unchecked_t mod_qp_timouts;
30423+extern atomic_unchecked_t qps_created;
30424+extern atomic_unchecked_t qps_destroyed;
30425+extern atomic_unchecked_t sw_qps_destroyed;
30426 extern u32 mh_detected;
30427 extern u32 mh_pauses_sent;
30428 extern u32 cm_packets_sent;
30429@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
30430 extern u32 cm_packets_received;
30431 extern u32 cm_packets_dropped;
30432 extern u32 cm_packets_retrans;
30433-extern atomic_t cm_listens_created;
30434-extern atomic_t cm_listens_destroyed;
30435+extern atomic_unchecked_t cm_listens_created;
30436+extern atomic_unchecked_t cm_listens_destroyed;
30437 extern u32 cm_backlog_drops;
30438-extern atomic_t cm_loopbacks;
30439-extern atomic_t cm_nodes_created;
30440-extern atomic_t cm_nodes_destroyed;
30441-extern atomic_t cm_accel_dropped_pkts;
30442-extern atomic_t cm_resets_recvd;
30443+extern atomic_unchecked_t cm_loopbacks;
30444+extern atomic_unchecked_t cm_nodes_created;
30445+extern atomic_unchecked_t cm_nodes_destroyed;
30446+extern atomic_unchecked_t cm_accel_dropped_pkts;
30447+extern atomic_unchecked_t cm_resets_recvd;
30448
30449 extern u32 int_mod_timer_init;
30450 extern u32 int_mod_cq_depth_256;
30451diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30452index a237547..28a9819 100644
30453--- a/drivers/infiniband/hw/nes/nes_cm.c
30454+++ b/drivers/infiniband/hw/nes/nes_cm.c
30455@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30456 u32 cm_packets_retrans;
30457 u32 cm_packets_created;
30458 u32 cm_packets_received;
30459-atomic_t cm_listens_created;
30460-atomic_t cm_listens_destroyed;
30461+atomic_unchecked_t cm_listens_created;
30462+atomic_unchecked_t cm_listens_destroyed;
30463 u32 cm_backlog_drops;
30464-atomic_t cm_loopbacks;
30465-atomic_t cm_nodes_created;
30466-atomic_t cm_nodes_destroyed;
30467-atomic_t cm_accel_dropped_pkts;
30468-atomic_t cm_resets_recvd;
30469+atomic_unchecked_t cm_loopbacks;
30470+atomic_unchecked_t cm_nodes_created;
30471+atomic_unchecked_t cm_nodes_destroyed;
30472+atomic_unchecked_t cm_accel_dropped_pkts;
30473+atomic_unchecked_t cm_resets_recvd;
30474
30475 static inline int mini_cm_accelerated(struct nes_cm_core *,
30476 struct nes_cm_node *);
30477@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
30478
30479 static struct nes_cm_core *g_cm_core;
30480
30481-atomic_t cm_connects;
30482-atomic_t cm_accepts;
30483-atomic_t cm_disconnects;
30484-atomic_t cm_closes;
30485-atomic_t cm_connecteds;
30486-atomic_t cm_connect_reqs;
30487-atomic_t cm_rejects;
30488+atomic_unchecked_t cm_connects;
30489+atomic_unchecked_t cm_accepts;
30490+atomic_unchecked_t cm_disconnects;
30491+atomic_unchecked_t cm_closes;
30492+atomic_unchecked_t cm_connecteds;
30493+atomic_unchecked_t cm_connect_reqs;
30494+atomic_unchecked_t cm_rejects;
30495
30496
30497 /**
30498@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30499 kfree(listener);
30500 listener = NULL;
30501 ret = 0;
30502- atomic_inc(&cm_listens_destroyed);
30503+ atomic_inc_unchecked(&cm_listens_destroyed);
30504 } else {
30505 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30506 }
30507@@ -1242,7 +1242,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30508 cm_node->rem_mac);
30509
30510 add_hte_node(cm_core, cm_node);
30511- atomic_inc(&cm_nodes_created);
30512+ atomic_inc_unchecked(&cm_nodes_created);
30513
30514 return cm_node;
30515 }
30516@@ -1300,7 +1300,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30517 }
30518
30519 atomic_dec(&cm_core->node_cnt);
30520- atomic_inc(&cm_nodes_destroyed);
30521+ atomic_inc_unchecked(&cm_nodes_destroyed);
30522 nesqp = cm_node->nesqp;
30523 if (nesqp) {
30524 nesqp->cm_node = NULL;
30525@@ -1367,7 +1367,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30526
30527 static void drop_packet(struct sk_buff *skb)
30528 {
30529- atomic_inc(&cm_accel_dropped_pkts);
30530+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30531 dev_kfree_skb_any(skb);
30532 }
30533
30534@@ -1430,7 +1430,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30535 {
30536
30537 int reset = 0; /* whether to send reset in case of err.. */
30538- atomic_inc(&cm_resets_recvd);
30539+ atomic_inc_unchecked(&cm_resets_recvd);
30540 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30541 " refcnt=%d\n", cm_node, cm_node->state,
30542 atomic_read(&cm_node->ref_count));
30543@@ -2059,7 +2059,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30544 rem_ref_cm_node(cm_node->cm_core, cm_node);
30545 return NULL;
30546 }
30547- atomic_inc(&cm_loopbacks);
30548+ atomic_inc_unchecked(&cm_loopbacks);
30549 loopbackremotenode->loopbackpartner = cm_node;
30550 loopbackremotenode->tcp_cntxt.rcv_wscale =
30551 NES_CM_DEFAULT_RCV_WND_SCALE;
30552@@ -2334,7 +2334,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30553 add_ref_cm_node(cm_node);
30554 } else if (cm_node->state == NES_CM_STATE_TSA) {
30555 rem_ref_cm_node(cm_core, cm_node);
30556- atomic_inc(&cm_accel_dropped_pkts);
30557+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30558 dev_kfree_skb_any(skb);
30559 break;
30560 }
30561@@ -2640,7 +2640,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30562
30563 if ((cm_id) && (cm_id->event_handler)) {
30564 if (issue_disconn) {
30565- atomic_inc(&cm_disconnects);
30566+ atomic_inc_unchecked(&cm_disconnects);
30567 cm_event.event = IW_CM_EVENT_DISCONNECT;
30568 cm_event.status = disconn_status;
30569 cm_event.local_addr = cm_id->local_addr;
30570@@ -2662,7 +2662,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30571 }
30572
30573 if (issue_close) {
30574- atomic_inc(&cm_closes);
30575+ atomic_inc_unchecked(&cm_closes);
30576 nes_disconnect(nesqp, 1);
30577
30578 cm_id->provider_data = nesqp;
30579@@ -2793,7 +2793,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30580
30581 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30582 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30583- atomic_inc(&cm_accepts);
30584+ atomic_inc_unchecked(&cm_accepts);
30585
30586 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30587 netdev_refcnt_read(nesvnic->netdev));
30588@@ -3003,7 +3003,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30589
30590 struct nes_cm_core *cm_core;
30591
30592- atomic_inc(&cm_rejects);
30593+ atomic_inc_unchecked(&cm_rejects);
30594 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30595 loopback = cm_node->loopbackpartner;
30596 cm_core = cm_node->cm_core;
30597@@ -3069,7 +3069,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30598 ntohl(cm_id->local_addr.sin_addr.s_addr),
30599 ntohs(cm_id->local_addr.sin_port));
30600
30601- atomic_inc(&cm_connects);
30602+ atomic_inc_unchecked(&cm_connects);
30603 nesqp->active_conn = 1;
30604
30605 /* cache the cm_id in the qp */
30606@@ -3175,7 +3175,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30607 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30608 return err;
30609 }
30610- atomic_inc(&cm_listens_created);
30611+ atomic_inc_unchecked(&cm_listens_created);
30612 }
30613
30614 cm_id->add_ref(cm_id);
30615@@ -3280,7 +3280,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30616 if (nesqp->destroyed) {
30617 return;
30618 }
30619- atomic_inc(&cm_connecteds);
30620+ atomic_inc_unchecked(&cm_connecteds);
30621 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30622 " local port 0x%04X. jiffies = %lu.\n",
30623 nesqp->hwqp.qp_id,
30624@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30625
30626 cm_id->add_ref(cm_id);
30627 ret = cm_id->event_handler(cm_id, &cm_event);
30628- atomic_inc(&cm_closes);
30629+ atomic_inc_unchecked(&cm_closes);
30630 cm_event.event = IW_CM_EVENT_CLOSE;
30631 cm_event.status = 0;
30632 cm_event.provider_data = cm_id->provider_data;
30633@@ -3531,7 +3531,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30634 return;
30635 cm_id = cm_node->cm_id;
30636
30637- atomic_inc(&cm_connect_reqs);
30638+ atomic_inc_unchecked(&cm_connect_reqs);
30639 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30640 cm_node, cm_id, jiffies);
30641
30642@@ -3569,7 +3569,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30643 return;
30644 cm_id = cm_node->cm_id;
30645
30646- atomic_inc(&cm_connect_reqs);
30647+ atomic_inc_unchecked(&cm_connect_reqs);
30648 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30649 cm_node, cm_id, jiffies);
30650
30651diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30652index 9d7ffeb..a95dd7d 100644
30653--- a/drivers/infiniband/hw/nes/nes_nic.c
30654+++ b/drivers/infiniband/hw/nes/nes_nic.c
30655@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30656 target_stat_values[++index] = mh_detected;
30657 target_stat_values[++index] = mh_pauses_sent;
30658 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30659- target_stat_values[++index] = atomic_read(&cm_connects);
30660- target_stat_values[++index] = atomic_read(&cm_accepts);
30661- target_stat_values[++index] = atomic_read(&cm_disconnects);
30662- target_stat_values[++index] = atomic_read(&cm_connecteds);
30663- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30664- target_stat_values[++index] = atomic_read(&cm_rejects);
30665- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30666- target_stat_values[++index] = atomic_read(&qps_created);
30667- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30668- target_stat_values[++index] = atomic_read(&qps_destroyed);
30669- target_stat_values[++index] = atomic_read(&cm_closes);
30670+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30671+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30672+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30673+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30674+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30675+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30676+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30677+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30678+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30679+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30680+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30681 target_stat_values[++index] = cm_packets_sent;
30682 target_stat_values[++index] = cm_packets_bounced;
30683 target_stat_values[++index] = cm_packets_created;
30684 target_stat_values[++index] = cm_packets_received;
30685 target_stat_values[++index] = cm_packets_dropped;
30686 target_stat_values[++index] = cm_packets_retrans;
30687- target_stat_values[++index] = atomic_read(&cm_listens_created);
30688- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30689+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30690+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30691 target_stat_values[++index] = cm_backlog_drops;
30692- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30693- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30694- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30695- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30696- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30697+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30698+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30699+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30700+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30701+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30702 target_stat_values[++index] = nesadapter->free_4kpbl;
30703 target_stat_values[++index] = nesadapter->free_256pbl;
30704 target_stat_values[++index] = int_mod_timer_init;
30705diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30706index 9f2f7d4..6d2fee2 100644
30707--- a/drivers/infiniband/hw/nes/nes_verbs.c
30708+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30709@@ -46,9 +46,9 @@
30710
30711 #include <rdma/ib_umem.h>
30712
30713-atomic_t mod_qp_timouts;
30714-atomic_t qps_created;
30715-atomic_t sw_qps_destroyed;
30716+atomic_unchecked_t mod_qp_timouts;
30717+atomic_unchecked_t qps_created;
30718+atomic_unchecked_t sw_qps_destroyed;
30719
30720 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30721
30722@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30723 if (init_attr->create_flags)
30724 return ERR_PTR(-EINVAL);
30725
30726- atomic_inc(&qps_created);
30727+ atomic_inc_unchecked(&qps_created);
30728 switch (init_attr->qp_type) {
30729 case IB_QPT_RC:
30730 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30731@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30732 struct iw_cm_event cm_event;
30733 int ret;
30734
30735- atomic_inc(&sw_qps_destroyed);
30736+ atomic_inc_unchecked(&sw_qps_destroyed);
30737 nesqp->destroyed = 1;
30738
30739 /* Blow away the connection if it exists. */
30740diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30741index c9624ea..e025b66 100644
30742--- a/drivers/infiniband/hw/qib/qib.h
30743+++ b/drivers/infiniband/hw/qib/qib.h
30744@@ -51,6 +51,7 @@
30745 #include <linux/completion.h>
30746 #include <linux/kref.h>
30747 #include <linux/sched.h>
30748+#include <linux/slab.h>
30749
30750 #include "qib_common.h"
30751 #include "qib_verbs.h"
30752diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30753index c351aa4..e6967c2 100644
30754--- a/drivers/input/gameport/gameport.c
30755+++ b/drivers/input/gameport/gameport.c
30756@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30757 */
30758 static void gameport_init_port(struct gameport *gameport)
30759 {
30760- static atomic_t gameport_no = ATOMIC_INIT(0);
30761+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30762
30763 __module_get(THIS_MODULE);
30764
30765 mutex_init(&gameport->drv_mutex);
30766 device_initialize(&gameport->dev);
30767 dev_set_name(&gameport->dev, "gameport%lu",
30768- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30769+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30770 gameport->dev.bus = &gameport_bus;
30771 gameport->dev.release = gameport_release_port;
30772 if (gameport->parent)
30773diff --git a/drivers/input/input.c b/drivers/input/input.c
30774index da38d97..2aa0b79 100644
30775--- a/drivers/input/input.c
30776+++ b/drivers/input/input.c
30777@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30778 */
30779 int input_register_device(struct input_dev *dev)
30780 {
30781- static atomic_t input_no = ATOMIC_INIT(0);
30782+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30783 struct input_handler *handler;
30784 const char *path;
30785 int error;
30786@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30787 dev->setkeycode = input_default_setkeycode;
30788
30789 dev_set_name(&dev->dev, "input%ld",
30790- (unsigned long) atomic_inc_return(&input_no) - 1);
30791+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30792
30793 error = device_add(&dev->dev);
30794 if (error)
30795diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30796index b8d8611..15f8d2c 100644
30797--- a/drivers/input/joystick/sidewinder.c
30798+++ b/drivers/input/joystick/sidewinder.c
30799@@ -30,6 +30,7 @@
30800 #include <linux/kernel.h>
30801 #include <linux/module.h>
30802 #include <linux/slab.h>
30803+#include <linux/sched.h>
30804 #include <linux/init.h>
30805 #include <linux/input.h>
30806 #include <linux/gameport.h>
30807@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30808 unsigned char buf[SW_LENGTH];
30809 int i;
30810
30811+ pax_track_stack();
30812+
30813 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30814
30815 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30816diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30817index d728875..844c89b 100644
30818--- a/drivers/input/joystick/xpad.c
30819+++ b/drivers/input/joystick/xpad.c
30820@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30821
30822 static int xpad_led_probe(struct usb_xpad *xpad)
30823 {
30824- static atomic_t led_seq = ATOMIC_INIT(0);
30825+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30826 long led_no;
30827 struct xpad_led *led;
30828 struct led_classdev *led_cdev;
30829@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30830 if (!led)
30831 return -ENOMEM;
30832
30833- led_no = (long)atomic_inc_return(&led_seq) - 1;
30834+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30835
30836 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30837 led->xpad = xpad;
30838diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
30839index 1633b63..09f8f20 100644
30840--- a/drivers/input/misc/cma3000_d0x.c
30841+++ b/drivers/input/misc/cma3000_d0x.c
30842@@ -114,8 +114,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
30843 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
30844 {
30845 struct cma3000_accl_data *data = dev_id;
30846- int datax, datay, dataz;
30847- u8 ctrl, mode, range, intr_status;
30848+ int datax, datay, dataz, intr_status;
30849+ u8 ctrl, mode, range;
30850
30851 intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
30852 if (intr_status < 0)
30853diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30854index 0110b5a..d3ad144 100644
30855--- a/drivers/input/mousedev.c
30856+++ b/drivers/input/mousedev.c
30857@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30858
30859 spin_unlock_irq(&client->packet_lock);
30860
30861- if (copy_to_user(buffer, data, count))
30862+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30863 return -EFAULT;
30864
30865 return count;
30866diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30867index ba70058..571d25d 100644
30868--- a/drivers/input/serio/serio.c
30869+++ b/drivers/input/serio/serio.c
30870@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30871 */
30872 static void serio_init_port(struct serio *serio)
30873 {
30874- static atomic_t serio_no = ATOMIC_INIT(0);
30875+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30876
30877 __module_get(THIS_MODULE);
30878
30879@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30880 mutex_init(&serio->drv_mutex);
30881 device_initialize(&serio->dev);
30882 dev_set_name(&serio->dev, "serio%ld",
30883- (long)atomic_inc_return(&serio_no) - 1);
30884+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30885 serio->dev.bus = &serio_bus;
30886 serio->dev.release = serio_release_port;
30887 serio->dev.groups = serio_device_attr_groups;
30888diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30889index e44933d..9ba484a 100644
30890--- a/drivers/isdn/capi/capi.c
30891+++ b/drivers/isdn/capi/capi.c
30892@@ -83,8 +83,8 @@ struct capiminor {
30893
30894 struct capi20_appl *ap;
30895 u32 ncci;
30896- atomic_t datahandle;
30897- atomic_t msgid;
30898+ atomic_unchecked_t datahandle;
30899+ atomic_unchecked_t msgid;
30900
30901 struct tty_port port;
30902 int ttyinstop;
30903@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30904 capimsg_setu16(s, 2, mp->ap->applid);
30905 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30906 capimsg_setu8 (s, 5, CAPI_RESP);
30907- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30908+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30909 capimsg_setu32(s, 8, mp->ncci);
30910 capimsg_setu16(s, 12, datahandle);
30911 }
30912@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30913 mp->outbytes -= len;
30914 spin_unlock_bh(&mp->outlock);
30915
30916- datahandle = atomic_inc_return(&mp->datahandle);
30917+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30918 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30919 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30920 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30921 capimsg_setu16(skb->data, 2, mp->ap->applid);
30922 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30923 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30924- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30925+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30926 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30927 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30928 capimsg_setu16(skb->data, 16, len); /* Data length */
30929diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30930index db621db..825ea1a 100644
30931--- a/drivers/isdn/gigaset/common.c
30932+++ b/drivers/isdn/gigaset/common.c
30933@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30934 cs->commands_pending = 0;
30935 cs->cur_at_seq = 0;
30936 cs->gotfwver = -1;
30937- cs->open_count = 0;
30938+ local_set(&cs->open_count, 0);
30939 cs->dev = NULL;
30940 cs->tty = NULL;
30941 cs->tty_dev = NULL;
30942diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30943index 212efaf..f187c6b 100644
30944--- a/drivers/isdn/gigaset/gigaset.h
30945+++ b/drivers/isdn/gigaset/gigaset.h
30946@@ -35,6 +35,7 @@
30947 #include <linux/tty_driver.h>
30948 #include <linux/list.h>
30949 #include <linux/atomic.h>
30950+#include <asm/local.h>
30951
30952 #define GIG_VERSION {0, 5, 0, 0}
30953 #define GIG_COMPAT {0, 4, 0, 0}
30954@@ -433,7 +434,7 @@ struct cardstate {
30955 spinlock_t cmdlock;
30956 unsigned curlen, cmdbytes;
30957
30958- unsigned open_count;
30959+ local_t open_count;
30960 struct tty_struct *tty;
30961 struct tasklet_struct if_wake_tasklet;
30962 unsigned control_state;
30963diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30964index e35058b..5898a8b 100644
30965--- a/drivers/isdn/gigaset/interface.c
30966+++ b/drivers/isdn/gigaset/interface.c
30967@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30968 }
30969 tty->driver_data = cs;
30970
30971- ++cs->open_count;
30972-
30973- if (cs->open_count == 1) {
30974+ if (local_inc_return(&cs->open_count) == 1) {
30975 spin_lock_irqsave(&cs->lock, flags);
30976 cs->tty = tty;
30977 spin_unlock_irqrestore(&cs->lock, flags);
30978@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30979
30980 if (!cs->connected)
30981 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30982- else if (!cs->open_count)
30983+ else if (!local_read(&cs->open_count))
30984 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30985 else {
30986- if (!--cs->open_count) {
30987+ if (!local_dec_return(&cs->open_count)) {
30988 spin_lock_irqsave(&cs->lock, flags);
30989 cs->tty = NULL;
30990 spin_unlock_irqrestore(&cs->lock, flags);
30991@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty,
30992 if (!cs->connected) {
30993 gig_dbg(DEBUG_IF, "not connected");
30994 retval = -ENODEV;
30995- } else if (!cs->open_count)
30996+ } else if (!local_read(&cs->open_count))
30997 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30998 else {
30999 retval = 0;
31000@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
31001 retval = -ENODEV;
31002 goto done;
31003 }
31004- if (!cs->open_count) {
31005+ if (!local_read(&cs->open_count)) {
31006 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31007 retval = -ENODEV;
31008 goto done;
31009@@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty)
31010 if (!cs->connected) {
31011 gig_dbg(DEBUG_IF, "not connected");
31012 retval = -ENODEV;
31013- } else if (!cs->open_count)
31014+ } else if (!local_read(&cs->open_count))
31015 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31016 else if (cs->mstate != MS_LOCKED) {
31017 dev_warn(cs->dev, "can't write to unlocked device\n");
31018@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
31019
31020 if (!cs->connected)
31021 gig_dbg(DEBUG_IF, "not connected");
31022- else if (!cs->open_count)
31023+ else if (!local_read(&cs->open_count))
31024 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31025 else if (cs->mstate != MS_LOCKED)
31026 dev_warn(cs->dev, "can't write to unlocked device\n");
31027@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty)
31028
31029 if (!cs->connected)
31030 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31031- else if (!cs->open_count)
31032+ else if (!local_read(&cs->open_count))
31033 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31034 else
31035 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
31036@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty)
31037
31038 if (!cs->connected)
31039 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31040- else if (!cs->open_count)
31041+ else if (!local_read(&cs->open_count))
31042 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31043 else
31044 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
31045@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
31046 goto out;
31047 }
31048
31049- if (!cs->open_count) {
31050+ if (!local_read(&cs->open_count)) {
31051 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31052 goto out;
31053 }
31054diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
31055index 2a57da59..e7a12ed 100644
31056--- a/drivers/isdn/hardware/avm/b1.c
31057+++ b/drivers/isdn/hardware/avm/b1.c
31058@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
31059 }
31060 if (left) {
31061 if (t4file->user) {
31062- if (copy_from_user(buf, dp, left))
31063+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31064 return -EFAULT;
31065 } else {
31066 memcpy(buf, dp, left);
31067@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
31068 }
31069 if (left) {
31070 if (config->user) {
31071- if (copy_from_user(buf, dp, left))
31072+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31073 return -EFAULT;
31074 } else {
31075 memcpy(buf, dp, left);
31076diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
31077index f130724..c373c68 100644
31078--- a/drivers/isdn/hardware/eicon/capidtmf.c
31079+++ b/drivers/isdn/hardware/eicon/capidtmf.c
31080@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
31081 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31082 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31083
31084+ pax_track_stack();
31085
31086 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31087 {
31088diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
31089index 4d425c6..a9be6c4 100644
31090--- a/drivers/isdn/hardware/eicon/capifunc.c
31091+++ b/drivers/isdn/hardware/eicon/capifunc.c
31092@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31093 IDI_SYNC_REQ req;
31094 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31095
31096+ pax_track_stack();
31097+
31098 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31099
31100 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31101diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
31102index 3029234..ef0d9e2 100644
31103--- a/drivers/isdn/hardware/eicon/diddfunc.c
31104+++ b/drivers/isdn/hardware/eicon/diddfunc.c
31105@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
31106 IDI_SYNC_REQ req;
31107 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31108
31109+ pax_track_stack();
31110+
31111 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31112
31113 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31114diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
31115index 0bbee78..a0d0a01 100644
31116--- a/drivers/isdn/hardware/eicon/divasfunc.c
31117+++ b/drivers/isdn/hardware/eicon/divasfunc.c
31118@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
31119 IDI_SYNC_REQ req;
31120 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31121
31122+ pax_track_stack();
31123+
31124 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31125
31126 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31127diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
31128index 85784a7..a19ca98 100644
31129--- a/drivers/isdn/hardware/eicon/divasync.h
31130+++ b/drivers/isdn/hardware/eicon/divasync.h
31131@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31132 } diva_didd_add_adapter_t;
31133 typedef struct _diva_didd_remove_adapter {
31134 IDI_CALL p_request;
31135-} diva_didd_remove_adapter_t;
31136+} __no_const diva_didd_remove_adapter_t;
31137 typedef struct _diva_didd_read_adapter_array {
31138 void * buffer;
31139 dword length;
31140diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
31141index db87d51..7d09acf 100644
31142--- a/drivers/isdn/hardware/eicon/idifunc.c
31143+++ b/drivers/isdn/hardware/eicon/idifunc.c
31144@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
31145 IDI_SYNC_REQ req;
31146 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31147
31148+ pax_track_stack();
31149+
31150 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31151
31152 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31153diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
31154index a339598..b6a8bfc 100644
31155--- a/drivers/isdn/hardware/eicon/message.c
31156+++ b/drivers/isdn/hardware/eicon/message.c
31157@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
31158 dword d;
31159 word w;
31160
31161+ pax_track_stack();
31162+
31163 a = plci->adapter;
31164 Id = ((word)plci->Id<<8)|a->Id;
31165 PUT_WORD(&SS_Ind[4],0x0000);
31166@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
31167 word j, n, w;
31168 dword d;
31169
31170+ pax_track_stack();
31171+
31172
31173 for(i=0;i<8;i++) bp_parms[i].length = 0;
31174 for(i=0;i<2;i++) global_config[i].length = 0;
31175@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
31176 const byte llc3[] = {4,3,2,2,6,6,0};
31177 const byte header[] = {0,2,3,3,0,0,0};
31178
31179+ pax_track_stack();
31180+
31181 for(i=0;i<8;i++) bp_parms[i].length = 0;
31182 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31183 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31184@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
31185 word appl_number_group_type[MAX_APPL];
31186 PLCI *auxplci;
31187
31188+ pax_track_stack();
31189+
31190 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31191
31192 if(!a->group_optimization_enabled)
31193diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
31194index a564b75..f3cf8b5 100644
31195--- a/drivers/isdn/hardware/eicon/mntfunc.c
31196+++ b/drivers/isdn/hardware/eicon/mntfunc.c
31197@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
31198 IDI_SYNC_REQ req;
31199 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31200
31201+ pax_track_stack();
31202+
31203 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31204
31205 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31206diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
31207index a3bd163..8956575 100644
31208--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31209+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31210@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31211 typedef struct _diva_os_idi_adapter_interface {
31212 diva_init_card_proc_t cleanup_adapter_proc;
31213 diva_cmd_card_proc_t cmd_proc;
31214-} diva_os_idi_adapter_interface_t;
31215+} __no_const diva_os_idi_adapter_interface_t;
31216
31217 typedef struct _diva_os_xdi_adapter {
31218 struct list_head link;
31219diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
31220index 6ed82ad..b05ac05 100644
31221--- a/drivers/isdn/i4l/isdn_common.c
31222+++ b/drivers/isdn/i4l/isdn_common.c
31223@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
31224 } iocpar;
31225 void __user *argp = (void __user *)arg;
31226
31227+ pax_track_stack();
31228+
31229 #define name iocpar.name
31230 #define bname iocpar.bname
31231 #define iocts iocpar.iocts
31232diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31233index 1f355bb..43f1fea 100644
31234--- a/drivers/isdn/icn/icn.c
31235+++ b/drivers/isdn/icn/icn.c
31236@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31237 if (count > len)
31238 count = len;
31239 if (user) {
31240- if (copy_from_user(msg, buf, count))
31241+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31242 return -EFAULT;
31243 } else
31244 memcpy(msg, buf, count);
31245diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31246index 2535933..09a8e86 100644
31247--- a/drivers/lguest/core.c
31248+++ b/drivers/lguest/core.c
31249@@ -92,9 +92,17 @@ static __init int map_switcher(void)
31250 * it's worked so far. The end address needs +1 because __get_vm_area
31251 * allocates an extra guard page, so we need space for that.
31252 */
31253+
31254+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31255+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31256+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31257+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31258+#else
31259 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31260 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31261 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31262+#endif
31263+
31264 if (!switcher_vma) {
31265 err = -ENOMEM;
31266 printk("lguest: could not map switcher pages high\n");
31267@@ -119,7 +127,7 @@ static __init int map_switcher(void)
31268 * Now the Switcher is mapped at the right address, we can't fail!
31269 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31270 */
31271- memcpy(switcher_vma->addr, start_switcher_text,
31272+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31273 end_switcher_text - start_switcher_text);
31274
31275 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31276diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31277index 65af42f..530c87a 100644
31278--- a/drivers/lguest/x86/core.c
31279+++ b/drivers/lguest/x86/core.c
31280@@ -59,7 +59,7 @@ static struct {
31281 /* Offset from where switcher.S was compiled to where we've copied it */
31282 static unsigned long switcher_offset(void)
31283 {
31284- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31285+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31286 }
31287
31288 /* This cpu's struct lguest_pages. */
31289@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31290 * These copies are pretty cheap, so we do them unconditionally: */
31291 /* Save the current Host top-level page directory.
31292 */
31293+
31294+#ifdef CONFIG_PAX_PER_CPU_PGD
31295+ pages->state.host_cr3 = read_cr3();
31296+#else
31297 pages->state.host_cr3 = __pa(current->mm->pgd);
31298+#endif
31299+
31300 /*
31301 * Set up the Guest's page tables to see this CPU's pages (and no
31302 * other CPU's pages).
31303@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31304 * compiled-in switcher code and the high-mapped copy we just made.
31305 */
31306 for (i = 0; i < IDT_ENTRIES; i++)
31307- default_idt_entries[i] += switcher_offset();
31308+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31309
31310 /*
31311 * Set up the Switcher's per-cpu areas.
31312@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31313 * it will be undisturbed when we switch. To change %cs and jump we
31314 * need this structure to feed to Intel's "lcall" instruction.
31315 */
31316- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31317+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31318 lguest_entry.segment = LGUEST_CS;
31319
31320 /*
31321diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31322index 40634b0..4f5855e 100644
31323--- a/drivers/lguest/x86/switcher_32.S
31324+++ b/drivers/lguest/x86/switcher_32.S
31325@@ -87,6 +87,7 @@
31326 #include <asm/page.h>
31327 #include <asm/segment.h>
31328 #include <asm/lguest.h>
31329+#include <asm/processor-flags.h>
31330
31331 // We mark the start of the code to copy
31332 // It's placed in .text tho it's never run here
31333@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31334 // Changes type when we load it: damn Intel!
31335 // For after we switch over our page tables
31336 // That entry will be read-only: we'd crash.
31337+
31338+#ifdef CONFIG_PAX_KERNEXEC
31339+ mov %cr0, %edx
31340+ xor $X86_CR0_WP, %edx
31341+ mov %edx, %cr0
31342+#endif
31343+
31344 movl $(GDT_ENTRY_TSS*8), %edx
31345 ltr %dx
31346
31347@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31348 // Let's clear it again for our return.
31349 // The GDT descriptor of the Host
31350 // Points to the table after two "size" bytes
31351- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31352+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31353 // Clear "used" from type field (byte 5, bit 2)
31354- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31355+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31356+
31357+#ifdef CONFIG_PAX_KERNEXEC
31358+ mov %cr0, %eax
31359+ xor $X86_CR0_WP, %eax
31360+ mov %eax, %cr0
31361+#endif
31362
31363 // Once our page table's switched, the Guest is live!
31364 // The Host fades as we run this final step.
31365@@ -295,13 +309,12 @@ deliver_to_host:
31366 // I consulted gcc, and it gave
31367 // These instructions, which I gladly credit:
31368 leal (%edx,%ebx,8), %eax
31369- movzwl (%eax),%edx
31370- movl 4(%eax), %eax
31371- xorw %ax, %ax
31372- orl %eax, %edx
31373+ movl 4(%eax), %edx
31374+ movw (%eax), %dx
31375 // Now the address of the handler's in %edx
31376 // We call it now: its "iret" drops us home.
31377- jmp *%edx
31378+ ljmp $__KERNEL_CS, $1f
31379+1: jmp *%edx
31380
31381 // Every interrupt can come to us here
31382 // But we must truly tell each apart.
31383diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31384index 4daf9e5..b8d1d0f 100644
31385--- a/drivers/macintosh/macio_asic.c
31386+++ b/drivers/macintosh/macio_asic.c
31387@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31388 * MacIO is matched against any Apple ID, it's probe() function
31389 * will then decide wether it applies or not
31390 */
31391-static const struct pci_device_id __devinitdata pci_ids [] = { {
31392+static const struct pci_device_id __devinitconst pci_ids [] = { {
31393 .vendor = PCI_VENDOR_ID_APPLE,
31394 .device = PCI_ANY_ID,
31395 .subvendor = PCI_ANY_ID,
31396diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31397index 2e9a3ca..c2fb229 100644
31398--- a/drivers/md/dm-ioctl.c
31399+++ b/drivers/md/dm-ioctl.c
31400@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31401 cmd == DM_LIST_VERSIONS_CMD)
31402 return 0;
31403
31404- if ((cmd == DM_DEV_CREATE_CMD)) {
31405+ if (cmd == DM_DEV_CREATE_CMD) {
31406 if (!*param->name) {
31407 DMWARN("name not supplied when creating device");
31408 return -EINVAL;
31409diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31410index 9bfd057..01180bc 100644
31411--- a/drivers/md/dm-raid1.c
31412+++ b/drivers/md/dm-raid1.c
31413@@ -40,7 +40,7 @@ enum dm_raid1_error {
31414
31415 struct mirror {
31416 struct mirror_set *ms;
31417- atomic_t error_count;
31418+ atomic_unchecked_t error_count;
31419 unsigned long error_type;
31420 struct dm_dev *dev;
31421 sector_t offset;
31422@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31423 struct mirror *m;
31424
31425 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31426- if (!atomic_read(&m->error_count))
31427+ if (!atomic_read_unchecked(&m->error_count))
31428 return m;
31429
31430 return NULL;
31431@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31432 * simple way to tell if a device has encountered
31433 * errors.
31434 */
31435- atomic_inc(&m->error_count);
31436+ atomic_inc_unchecked(&m->error_count);
31437
31438 if (test_and_set_bit(error_type, &m->error_type))
31439 return;
31440@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31441 struct mirror *m = get_default_mirror(ms);
31442
31443 do {
31444- if (likely(!atomic_read(&m->error_count)))
31445+ if (likely(!atomic_read_unchecked(&m->error_count)))
31446 return m;
31447
31448 if (m-- == ms->mirror)
31449@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31450 {
31451 struct mirror *default_mirror = get_default_mirror(m->ms);
31452
31453- return !atomic_read(&default_mirror->error_count);
31454+ return !atomic_read_unchecked(&default_mirror->error_count);
31455 }
31456
31457 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31458@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31459 */
31460 if (likely(region_in_sync(ms, region, 1)))
31461 m = choose_mirror(ms, bio->bi_sector);
31462- else if (m && atomic_read(&m->error_count))
31463+ else if (m && atomic_read_unchecked(&m->error_count))
31464 m = NULL;
31465
31466 if (likely(m))
31467@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31468 }
31469
31470 ms->mirror[mirror].ms = ms;
31471- atomic_set(&(ms->mirror[mirror].error_count), 0);
31472+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31473 ms->mirror[mirror].error_type = 0;
31474 ms->mirror[mirror].offset = offset;
31475
31476@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31477 */
31478 static char device_status_char(struct mirror *m)
31479 {
31480- if (!atomic_read(&(m->error_count)))
31481+ if (!atomic_read_unchecked(&(m->error_count)))
31482 return 'A';
31483
31484 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31485diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31486index 3d80cf0..b77cc47 100644
31487--- a/drivers/md/dm-stripe.c
31488+++ b/drivers/md/dm-stripe.c
31489@@ -20,7 +20,7 @@ struct stripe {
31490 struct dm_dev *dev;
31491 sector_t physical_start;
31492
31493- atomic_t error_count;
31494+ atomic_unchecked_t error_count;
31495 };
31496
31497 struct stripe_c {
31498@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31499 kfree(sc);
31500 return r;
31501 }
31502- atomic_set(&(sc->stripe[i].error_count), 0);
31503+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31504 }
31505
31506 ti->private = sc;
31507@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31508 DMEMIT("%d ", sc->stripes);
31509 for (i = 0; i < sc->stripes; i++) {
31510 DMEMIT("%s ", sc->stripe[i].dev->name);
31511- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31512+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31513 'D' : 'A';
31514 }
31515 buffer[i] = '\0';
31516@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31517 */
31518 for (i = 0; i < sc->stripes; i++)
31519 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31520- atomic_inc(&(sc->stripe[i].error_count));
31521- if (atomic_read(&(sc->stripe[i].error_count)) <
31522+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31523+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31524 DM_IO_ERROR_THRESHOLD)
31525 schedule_work(&sc->trigger_event);
31526 }
31527diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31528index bc04518..7a83b81 100644
31529--- a/drivers/md/dm-table.c
31530+++ b/drivers/md/dm-table.c
31531@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31532 if (!dev_size)
31533 return 0;
31534
31535- if ((start >= dev_size) || (start + len > dev_size)) {
31536+ if ((start >= dev_size) || (len > dev_size - start)) {
31537 DMWARN("%s: %s too small for target: "
31538 "start=%llu, len=%llu, dev_size=%llu",
31539 dm_device_name(ti->table->md), bdevname(bdev, b),
31540diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31541index 52b39f3..83a8b6b 100644
31542--- a/drivers/md/dm.c
31543+++ b/drivers/md/dm.c
31544@@ -165,9 +165,9 @@ struct mapped_device {
31545 /*
31546 * Event handling.
31547 */
31548- atomic_t event_nr;
31549+ atomic_unchecked_t event_nr;
31550 wait_queue_head_t eventq;
31551- atomic_t uevent_seq;
31552+ atomic_unchecked_t uevent_seq;
31553 struct list_head uevent_list;
31554 spinlock_t uevent_lock; /* Protect access to uevent_list */
31555
31556@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor)
31557 rwlock_init(&md->map_lock);
31558 atomic_set(&md->holders, 1);
31559 atomic_set(&md->open_count, 0);
31560- atomic_set(&md->event_nr, 0);
31561- atomic_set(&md->uevent_seq, 0);
31562+ atomic_set_unchecked(&md->event_nr, 0);
31563+ atomic_set_unchecked(&md->uevent_seq, 0);
31564 INIT_LIST_HEAD(&md->uevent_list);
31565 spin_lock_init(&md->uevent_lock);
31566
31567@@ -1978,7 +1978,7 @@ static void event_callback(void *context)
31568
31569 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31570
31571- atomic_inc(&md->event_nr);
31572+ atomic_inc_unchecked(&md->event_nr);
31573 wake_up(&md->eventq);
31574 }
31575
31576@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31577
31578 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31579 {
31580- return atomic_add_return(1, &md->uevent_seq);
31581+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31582 }
31583
31584 uint32_t dm_get_event_nr(struct mapped_device *md)
31585 {
31586- return atomic_read(&md->event_nr);
31587+ return atomic_read_unchecked(&md->event_nr);
31588 }
31589
31590 int dm_wait_event(struct mapped_device *md, int event_nr)
31591 {
31592 return wait_event_interruptible(md->eventq,
31593- (event_nr != atomic_read(&md->event_nr)));
31594+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31595 }
31596
31597 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31598diff --git a/drivers/md/md.c b/drivers/md/md.c
31599index 5c95ccb..217fa57 100644
31600--- a/drivers/md/md.c
31601+++ b/drivers/md/md.c
31602@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31603 * start build, activate spare
31604 */
31605 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31606-static atomic_t md_event_count;
31607+static atomic_unchecked_t md_event_count;
31608 void md_new_event(mddev_t *mddev)
31609 {
31610- atomic_inc(&md_event_count);
31611+ atomic_inc_unchecked(&md_event_count);
31612 wake_up(&md_event_waiters);
31613 }
31614 EXPORT_SYMBOL_GPL(md_new_event);
31615@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31616 */
31617 static void md_new_event_inintr(mddev_t *mddev)
31618 {
31619- atomic_inc(&md_event_count);
31620+ atomic_inc_unchecked(&md_event_count);
31621 wake_up(&md_event_waiters);
31622 }
31623
31624@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
31625
31626 rdev->preferred_minor = 0xffff;
31627 rdev->data_offset = le64_to_cpu(sb->data_offset);
31628- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31629+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31630
31631 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31632 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31633@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
31634 else
31635 sb->resync_offset = cpu_to_le64(0);
31636
31637- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31638+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31639
31640 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31641 sb->size = cpu_to_le64(mddev->dev_sectors);
31642@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31643 static ssize_t
31644 errors_show(mdk_rdev_t *rdev, char *page)
31645 {
31646- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31647+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31648 }
31649
31650 static ssize_t
31651@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
31652 char *e;
31653 unsigned long n = simple_strtoul(buf, &e, 10);
31654 if (*buf && (*e == 0 || *e == '\n')) {
31655- atomic_set(&rdev->corrected_errors, n);
31656+ atomic_set_unchecked(&rdev->corrected_errors, n);
31657 return len;
31658 }
31659 return -EINVAL;
31660@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
31661 rdev->sb_loaded = 0;
31662 rdev->bb_page = NULL;
31663 atomic_set(&rdev->nr_pending, 0);
31664- atomic_set(&rdev->read_errors, 0);
31665- atomic_set(&rdev->corrected_errors, 0);
31666+ atomic_set_unchecked(&rdev->read_errors, 0);
31667+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31668
31669 INIT_LIST_HEAD(&rdev->same_set);
31670 init_waitqueue_head(&rdev->blocked_wait);
31671@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31672
31673 spin_unlock(&pers_lock);
31674 seq_printf(seq, "\n");
31675- seq->poll_event = atomic_read(&md_event_count);
31676+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31677 return 0;
31678 }
31679 if (v == (void*)2) {
31680@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31681 chunk_kb ? "KB" : "B");
31682 if (bitmap->file) {
31683 seq_printf(seq, ", file: ");
31684- seq_path(seq, &bitmap->file->f_path, " \t\n");
31685+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31686 }
31687
31688 seq_printf(seq, "\n");
31689@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31690 return error;
31691
31692 seq = file->private_data;
31693- seq->poll_event = atomic_read(&md_event_count);
31694+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31695 return error;
31696 }
31697
31698@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31699 /* always allow read */
31700 mask = POLLIN | POLLRDNORM;
31701
31702- if (seq->poll_event != atomic_read(&md_event_count))
31703+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31704 mask |= POLLERR | POLLPRI;
31705 return mask;
31706 }
31707@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
31708 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31709 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31710 (int)part_stat_read(&disk->part0, sectors[1]) -
31711- atomic_read(&disk->sync_io);
31712+ atomic_read_unchecked(&disk->sync_io);
31713 /* sync IO will cause sync_io to increase before the disk_stats
31714 * as sync_io is counted when a request starts, and
31715 * disk_stats is counted when it completes.
31716diff --git a/drivers/md/md.h b/drivers/md/md.h
31717index 0a309dc..7e01d7f 100644
31718--- a/drivers/md/md.h
31719+++ b/drivers/md/md.h
31720@@ -124,13 +124,13 @@ struct mdk_rdev_s
31721 * only maintained for arrays that
31722 * support hot removal
31723 */
31724- atomic_t read_errors; /* number of consecutive read errors that
31725+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31726 * we have tried to ignore.
31727 */
31728 struct timespec last_read_error; /* monotonic time since our
31729 * last read error
31730 */
31731- atomic_t corrected_errors; /* number of corrected read errors,
31732+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31733 * for reporting to userspace and storing
31734 * in superblock.
31735 */
31736@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
31737
31738 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31739 {
31740- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31741+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31742 }
31743
31744 struct mdk_personality
31745diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31746index 606fc04..f1ff8dc 100644
31747--- a/drivers/md/raid1.c
31748+++ b/drivers/md/raid1.c
31749@@ -1550,7 +1550,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
31750 if (r1_sync_page_io(rdev, sect, s,
31751 bio->bi_io_vec[idx].bv_page,
31752 READ) != 0)
31753- atomic_add(s, &rdev->corrected_errors);
31754+ atomic_add_unchecked(s, &rdev->corrected_errors);
31755 }
31756 sectors -= s;
31757 sect += s;
31758@@ -1763,7 +1763,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
31759 test_bit(In_sync, &rdev->flags)) {
31760 if (r1_sync_page_io(rdev, sect, s,
31761 conf->tmppage, READ)) {
31762- atomic_add(s, &rdev->corrected_errors);
31763+ atomic_add_unchecked(s, &rdev->corrected_errors);
31764 printk(KERN_INFO
31765 "md/raid1:%s: read error corrected "
31766 "(%d sectors at %llu on %s)\n",
31767diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31768index 1d44228..98db57d 100644
31769--- a/drivers/md/raid10.c
31770+++ b/drivers/md/raid10.c
31771@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error)
31772 /* The write handler will notice the lack of
31773 * R10BIO_Uptodate and record any errors etc
31774 */
31775- atomic_add(r10_bio->sectors,
31776+ atomic_add_unchecked(r10_bio->sectors,
31777 &conf->mirrors[d].rdev->corrected_errors);
31778
31779 /* for reconstruct, we always reschedule after a read.
31780@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31781 {
31782 struct timespec cur_time_mon;
31783 unsigned long hours_since_last;
31784- unsigned int read_errors = atomic_read(&rdev->read_errors);
31785+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31786
31787 ktime_get_ts(&cur_time_mon);
31788
31789@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31790 * overflowing the shift of read_errors by hours_since_last.
31791 */
31792 if (hours_since_last >= 8 * sizeof(read_errors))
31793- atomic_set(&rdev->read_errors, 0);
31794+ atomic_set_unchecked(&rdev->read_errors, 0);
31795 else
31796- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31797+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31798 }
31799
31800 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
31801@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31802 return;
31803
31804 check_decay_read_errors(mddev, rdev);
31805- atomic_inc(&rdev->read_errors);
31806- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31807+ atomic_inc_unchecked(&rdev->read_errors);
31808+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31809 char b[BDEVNAME_SIZE];
31810 bdevname(rdev->bdev, b);
31811
31812@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31813 "md/raid10:%s: %s: Raid device exceeded "
31814 "read_error threshold [cur %d:max %d]\n",
31815 mdname(mddev), b,
31816- atomic_read(&rdev->read_errors), max_read_errors);
31817+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31818 printk(KERN_NOTICE
31819 "md/raid10:%s: %s: Failing raid device\n",
31820 mdname(mddev), b);
31821@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31822 (unsigned long long)(
31823 sect + rdev->data_offset),
31824 bdevname(rdev->bdev, b));
31825- atomic_add(s, &rdev->corrected_errors);
31826+ atomic_add_unchecked(s, &rdev->corrected_errors);
31827 }
31828
31829 rdev_dec_pending(rdev, mddev);
31830diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31831index b6200c3..02e8702 100644
31832--- a/drivers/md/raid5.c
31833+++ b/drivers/md/raid5.c
31834@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31835 (unsigned long long)(sh->sector
31836 + rdev->data_offset),
31837 bdevname(rdev->bdev, b));
31838- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31839+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31840 clear_bit(R5_ReadError, &sh->dev[i].flags);
31841 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31842 }
31843- if (atomic_read(&conf->disks[i].rdev->read_errors))
31844- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31845+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31846+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31847 } else {
31848 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31849 int retry = 0;
31850 rdev = conf->disks[i].rdev;
31851
31852 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31853- atomic_inc(&rdev->read_errors);
31854+ atomic_inc_unchecked(&rdev->read_errors);
31855 if (conf->mddev->degraded >= conf->max_degraded)
31856 printk_ratelimited(
31857 KERN_WARNING
31858@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31859 (unsigned long long)(sh->sector
31860 + rdev->data_offset),
31861 bdn);
31862- else if (atomic_read(&rdev->read_errors)
31863+ else if (atomic_read_unchecked(&rdev->read_errors)
31864 > conf->max_nr_stripes)
31865 printk(KERN_WARNING
31866 "md/raid:%s: Too many read errors, failing device %s.\n",
31867@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
31868 sector_t r_sector;
31869 struct stripe_head sh2;
31870
31871+ pax_track_stack();
31872
31873 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31874 stripe = new_sector;
31875diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
31876index 1d1d8d2..6c6837a 100644
31877--- a/drivers/media/common/saa7146_hlp.c
31878+++ b/drivers/media/common/saa7146_hlp.c
31879@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
31880
31881 int x[32], y[32], w[32], h[32];
31882
31883+ pax_track_stack();
31884+
31885 /* clear out memory */
31886 memset(&line_list[0], 0x00, sizeof(u32)*32);
31887 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31888diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31889index 573d540..16f78f3 100644
31890--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31891+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31892@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
31893 .subvendor = _subvend, .subdevice = _subdev, \
31894 .driver_data = (unsigned long)&_driverdata }
31895
31896-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31897+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31898 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31899 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31900 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31901diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31902index 7ea517b..252fe54 100644
31903--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31904+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31905@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
31906 u8 buf[HOST_LINK_BUF_SIZE];
31907 int i;
31908
31909+ pax_track_stack();
31910+
31911 dprintk("%s\n", __func__);
31912
31913 /* check if we have space for a link buf in the rx_buffer */
31914@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
31915 unsigned long timeout;
31916 int written;
31917
31918+ pax_track_stack();
31919+
31920 dprintk("%s\n", __func__);
31921
31922 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31923diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31924index a7d876f..8c21b61 100644
31925--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31926+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31927@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31928 union {
31929 dmx_ts_cb ts;
31930 dmx_section_cb sec;
31931- } cb;
31932+ } __no_const cb;
31933
31934 struct dvb_demux *demux;
31935 void *priv;
31936diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31937index f732877..d38c35a 100644
31938--- a/drivers/media/dvb/dvb-core/dvbdev.c
31939+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31940@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31941 const struct dvb_device *template, void *priv, int type)
31942 {
31943 struct dvb_device *dvbdev;
31944- struct file_operations *dvbdevfops;
31945+ file_operations_no_const *dvbdevfops;
31946 struct device *clsdev;
31947 int minor;
31948 int id;
31949diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31950index acb5fb2..2413f1d 100644
31951--- a/drivers/media/dvb/dvb-usb/cxusb.c
31952+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31953@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31954 struct dib0700_adapter_state {
31955 int (*set_param_save) (struct dvb_frontend *,
31956 struct dvb_frontend_parameters *);
31957-};
31958+} __no_const;
31959
31960 static int dib7070_set_param_override(struct dvb_frontend *fe,
31961 struct dvb_frontend_parameters *fep)
31962diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
31963index a224e94..503b76a 100644
31964--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
31965+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
31966@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
31967 if (!buf)
31968 return -ENOMEM;
31969
31970+ pax_track_stack();
31971+
31972 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31973 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
31974 hx.addr, hx.len, hx.chk);
31975diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31976index 058b231..183d2b3 100644
31977--- a/drivers/media/dvb/dvb-usb/dw2102.c
31978+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31979@@ -95,7 +95,7 @@ struct su3000_state {
31980
31981 struct s6x0_state {
31982 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31983-};
31984+} __no_const;
31985
31986 /* debug */
31987 static int dvb_usb_dw2102_debug;
31988diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
31989index 37b1469..28a6f6f 100644
31990--- a/drivers/media/dvb/dvb-usb/lmedm04.c
31991+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
31992@@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
31993 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
31994 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
31995
31996+ pax_track_stack();
31997
31998 data[0] = 0x8a;
31999 len_in = 1;
32000@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev)
32001 int ret = 0, len_in;
32002 u8 data[512] = {0};
32003
32004+ pax_track_stack();
32005+
32006 data[0] = 0x0a;
32007 len_in = 1;
32008 info("FRM Firmware Cold Reset");
32009diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
32010index ba91735..4261d84 100644
32011--- a/drivers/media/dvb/frontends/dib3000.h
32012+++ b/drivers/media/dvb/frontends/dib3000.h
32013@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32014 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32015 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32016 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32017-};
32018+} __no_const;
32019
32020 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32021 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32022diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
32023index 90bf573..e8463da 100644
32024--- a/drivers/media/dvb/frontends/ds3000.c
32025+++ b/drivers/media/dvb/frontends/ds3000.c
32026@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
32027
32028 for (i = 0; i < 30 ; i++) {
32029 ds3000_read_status(fe, &status);
32030- if (status && FE_HAS_LOCK)
32031+ if (status & FE_HAS_LOCK)
32032 break;
32033
32034 msleep(10);
32035diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
32036index c283112..7f367a7 100644
32037--- a/drivers/media/dvb/frontends/mb86a16.c
32038+++ b/drivers/media/dvb/frontends/mb86a16.c
32039@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
32040 int ret = -1;
32041 int sync;
32042
32043+ pax_track_stack();
32044+
32045 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
32046
32047 fcp = 3000;
32048diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
32049index c709ce6..b3fe620 100644
32050--- a/drivers/media/dvb/frontends/or51211.c
32051+++ b/drivers/media/dvb/frontends/or51211.c
32052@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
32053 u8 tudata[585];
32054 int i;
32055
32056+ pax_track_stack();
32057+
32058 dprintk("Firmware is %zd bytes\n",fw->size);
32059
32060 /* Get eprom data */
32061diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
32062index 0564192..75b16f5 100644
32063--- a/drivers/media/dvb/ngene/ngene-cards.c
32064+++ b/drivers/media/dvb/ngene/ngene-cards.c
32065@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
32066
32067 /****************************************************************************/
32068
32069-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
32070+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
32071 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
32072 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
32073 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
32074diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
32075index 16a089f..ab1667d 100644
32076--- a/drivers/media/radio/radio-cadet.c
32077+++ b/drivers/media/radio/radio-cadet.c
32078@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
32079 unsigned char readbuf[RDS_BUFFER];
32080 int i = 0;
32081
32082+ if (count > RDS_BUFFER)
32083+ return -EFAULT;
32084 mutex_lock(&dev->lock);
32085 if (dev->rdsstat == 0) {
32086 dev->rdsstat = 1;
32087diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
32088index 9cde353..8c6a1c3 100644
32089--- a/drivers/media/video/au0828/au0828.h
32090+++ b/drivers/media/video/au0828/au0828.h
32091@@ -191,7 +191,7 @@ struct au0828_dev {
32092
32093 /* I2C */
32094 struct i2c_adapter i2c_adap;
32095- struct i2c_algorithm i2c_algo;
32096+ i2c_algorithm_no_const i2c_algo;
32097 struct i2c_client i2c_client;
32098 u32 i2c_rc;
32099
32100diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
32101index 9e2f870..22e3a08 100644
32102--- a/drivers/media/video/cx18/cx18-driver.c
32103+++ b/drivers/media/video/cx18/cx18-driver.c
32104@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
32105 struct i2c_client c;
32106 u8 eedata[256];
32107
32108+ pax_track_stack();
32109+
32110 memset(&c, 0, sizeof(c));
32111 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32112 c.adapter = &cx->i2c_adap[0];
32113diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
32114index ce765e3..f9e1b04 100644
32115--- a/drivers/media/video/cx23885/cx23885-input.c
32116+++ b/drivers/media/video/cx23885/cx23885-input.c
32117@@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
32118 bool handle = false;
32119 struct ir_raw_event ir_core_event[64];
32120
32121+ pax_track_stack();
32122+
32123 do {
32124 num = 0;
32125 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
32126diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
32127index 68d1240..46b32eb 100644
32128--- a/drivers/media/video/cx88/cx88-alsa.c
32129+++ b/drivers/media/video/cx88/cx88-alsa.c
32130@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
32131 * Only boards with eeprom and byte 1 at eeprom=1 have it
32132 */
32133
32134-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
32135+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
32136 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
32137 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
32138 {0, }
32139diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32140index 9515f3a..c9ecb85 100644
32141--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32142+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32143@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
32144 u8 *eeprom;
32145 struct tveeprom tvdata;
32146
32147+ pax_track_stack();
32148+
32149 memset(&tvdata,0,sizeof(tvdata));
32150
32151 eeprom = pvr2_eeprom_fetch(hdw);
32152diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32153index 305e6aa..0143317 100644
32154--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32155+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32156@@ -196,7 +196,7 @@ struct pvr2_hdw {
32157
32158 /* I2C stuff */
32159 struct i2c_adapter i2c_adap;
32160- struct i2c_algorithm i2c_algo;
32161+ i2c_algorithm_no_const i2c_algo;
32162 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32163 int i2c_cx25840_hack_state;
32164 int i2c_linked;
32165diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
32166index f9f29cc..5a2e330 100644
32167--- a/drivers/media/video/saa7134/saa6752hs.c
32168+++ b/drivers/media/video/saa7134/saa6752hs.c
32169@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
32170 unsigned char localPAT[256];
32171 unsigned char localPMT[256];
32172
32173+ pax_track_stack();
32174+
32175 /* Set video format - must be done first as it resets other settings */
32176 set_reg8(client, 0x41, h->video_format);
32177
32178diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
32179index 62fac7f..f29e0b9 100644
32180--- a/drivers/media/video/saa7164/saa7164-cmd.c
32181+++ b/drivers/media/video/saa7164/saa7164-cmd.c
32182@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
32183 u8 tmp[512];
32184 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32185
32186+ pax_track_stack();
32187+
32188 /* While any outstand message on the bus exists... */
32189 do {
32190
32191@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
32192 u8 tmp[512];
32193 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32194
32195+ pax_track_stack();
32196+
32197 while (loop) {
32198
32199 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
32200diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
32201index 84cd1b6..f741e07 100644
32202--- a/drivers/media/video/timblogiw.c
32203+++ b/drivers/media/video/timblogiw.c
32204@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
32205
32206 /* Platform device functions */
32207
32208-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
32209+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
32210 .vidioc_querycap = timblogiw_querycap,
32211 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
32212 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
32213@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
32214 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
32215 };
32216
32217-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
32218+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
32219 .owner = THIS_MODULE,
32220 .open = timblogiw_open,
32221 .release = timblogiw_close,
32222diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
32223index f344411..6ae9974 100644
32224--- a/drivers/media/video/usbvision/usbvision-core.c
32225+++ b/drivers/media/video/usbvision/usbvision-core.c
32226@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
32227 unsigned char rv, gv, bv;
32228 static unsigned char *Y, *U, *V;
32229
32230+ pax_track_stack();
32231+
32232 frame = usbvision->cur_frame;
32233 image_size = frame->frmwidth * frame->frmheight;
32234 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32235diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
32236index f300dea..04834ba 100644
32237--- a/drivers/media/video/videobuf-dma-sg.c
32238+++ b/drivers/media/video/videobuf-dma-sg.c
32239@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
32240 {
32241 struct videobuf_queue q;
32242
32243+ pax_track_stack();
32244+
32245 /* Required to make generic handler to call __videobuf_alloc */
32246 q.int_ops = &sg_ops;
32247
32248diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
32249index 7956a10..f39232f 100644
32250--- a/drivers/message/fusion/mptbase.c
32251+++ b/drivers/message/fusion/mptbase.c
32252@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
32253 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32254 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32255
32256+#ifdef CONFIG_GRKERNSEC_HIDESYM
32257+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
32258+#else
32259 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32260 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32261+#endif
32262+
32263 /*
32264 * Rounding UP to nearest 4-kB boundary here...
32265 */
32266diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
32267index 7596aec..f7ae9aa 100644
32268--- a/drivers/message/fusion/mptsas.c
32269+++ b/drivers/message/fusion/mptsas.c
32270@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
32271 return 0;
32272 }
32273
32274+static inline void
32275+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32276+{
32277+ if (phy_info->port_details) {
32278+ phy_info->port_details->rphy = rphy;
32279+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32280+ ioc->name, rphy));
32281+ }
32282+
32283+ if (rphy) {
32284+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32285+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32286+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32287+ ioc->name, rphy, rphy->dev.release));
32288+ }
32289+}
32290+
32291 /* no mutex */
32292 static void
32293 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32294@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
32295 return NULL;
32296 }
32297
32298-static inline void
32299-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32300-{
32301- if (phy_info->port_details) {
32302- phy_info->port_details->rphy = rphy;
32303- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32304- ioc->name, rphy));
32305- }
32306-
32307- if (rphy) {
32308- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32309- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32310- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32311- ioc->name, rphy, rphy->dev.release));
32312- }
32313-}
32314-
32315 static inline struct sas_port *
32316 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32317 {
32318diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32319index ce61a57..3da8862 100644
32320--- a/drivers/message/fusion/mptscsih.c
32321+++ b/drivers/message/fusion/mptscsih.c
32322@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32323
32324 h = shost_priv(SChost);
32325
32326- if (h) {
32327- if (h->info_kbuf == NULL)
32328- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32329- return h->info_kbuf;
32330- h->info_kbuf[0] = '\0';
32331+ if (!h)
32332+ return NULL;
32333
32334- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32335- h->info_kbuf[size-1] = '\0';
32336- }
32337+ if (h->info_kbuf == NULL)
32338+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32339+ return h->info_kbuf;
32340+ h->info_kbuf[0] = '\0';
32341+
32342+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32343+ h->info_kbuf[size-1] = '\0';
32344
32345 return h->info_kbuf;
32346 }
32347diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
32348index 098de2b..fbb922c 100644
32349--- a/drivers/message/i2o/i2o_config.c
32350+++ b/drivers/message/i2o/i2o_config.c
32351@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg)
32352 struct i2o_message *msg;
32353 unsigned int iop;
32354
32355+ pax_track_stack();
32356+
32357 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32358 return -EFAULT;
32359
32360diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32361index 07dbeaf..5533142 100644
32362--- a/drivers/message/i2o/i2o_proc.c
32363+++ b/drivers/message/i2o/i2o_proc.c
32364@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32365 "Array Controller Device"
32366 };
32367
32368-static char *chtostr(u8 * chars, int n)
32369-{
32370- char tmp[256];
32371- tmp[0] = 0;
32372- return strncat(tmp, (char *)chars, n);
32373-}
32374-
32375 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32376 char *group)
32377 {
32378@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32379
32380 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32381 seq_printf(seq, "%-#8x", ddm_table.module_id);
32382- seq_printf(seq, "%-29s",
32383- chtostr(ddm_table.module_name_version, 28));
32384+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32385 seq_printf(seq, "%9d ", ddm_table.data_size);
32386 seq_printf(seq, "%8d", ddm_table.code_size);
32387
32388@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32389
32390 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32391 seq_printf(seq, "%-#8x", dst->module_id);
32392- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32393- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32394+ seq_printf(seq, "%-.28s", dst->module_name_version);
32395+ seq_printf(seq, "%-.8s", dst->date);
32396 seq_printf(seq, "%8d ", dst->module_size);
32397 seq_printf(seq, "%8d ", dst->mpb_size);
32398 seq_printf(seq, "0x%04x", dst->module_flags);
32399@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32400 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32401 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32402 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32403- seq_printf(seq, "Vendor info : %s\n",
32404- chtostr((u8 *) (work32 + 2), 16));
32405- seq_printf(seq, "Product info : %s\n",
32406- chtostr((u8 *) (work32 + 6), 16));
32407- seq_printf(seq, "Description : %s\n",
32408- chtostr((u8 *) (work32 + 10), 16));
32409- seq_printf(seq, "Product rev. : %s\n",
32410- chtostr((u8 *) (work32 + 14), 8));
32411+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32412+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32413+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32414+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32415
32416 seq_printf(seq, "Serial number : ");
32417 print_serial_number(seq, (u8 *) (work32 + 16),
32418@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32419 }
32420
32421 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32422- seq_printf(seq, "Module name : %s\n",
32423- chtostr(result.module_name, 24));
32424- seq_printf(seq, "Module revision : %s\n",
32425- chtostr(result.module_rev, 8));
32426+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32427+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32428
32429 seq_printf(seq, "Serial number : ");
32430 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32431@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32432 return 0;
32433 }
32434
32435- seq_printf(seq, "Device name : %s\n",
32436- chtostr(result.device_name, 64));
32437- seq_printf(seq, "Service name : %s\n",
32438- chtostr(result.service_name, 64));
32439- seq_printf(seq, "Physical name : %s\n",
32440- chtostr(result.physical_location, 64));
32441- seq_printf(seq, "Instance number : %s\n",
32442- chtostr(result.instance_number, 4));
32443+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32444+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32445+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32446+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32447
32448 return 0;
32449 }
32450diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32451index a8c08f3..155fe3d 100644
32452--- a/drivers/message/i2o/iop.c
32453+++ b/drivers/message/i2o/iop.c
32454@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32455
32456 spin_lock_irqsave(&c->context_list_lock, flags);
32457
32458- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32459- atomic_inc(&c->context_list_counter);
32460+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32461+ atomic_inc_unchecked(&c->context_list_counter);
32462
32463- entry->context = atomic_read(&c->context_list_counter);
32464+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32465
32466 list_add(&entry->list, &c->context_list);
32467
32468@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32469
32470 #if BITS_PER_LONG == 64
32471 spin_lock_init(&c->context_list_lock);
32472- atomic_set(&c->context_list_counter, 0);
32473+ atomic_set_unchecked(&c->context_list_counter, 0);
32474 INIT_LIST_HEAD(&c->context_list);
32475 #endif
32476
32477diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
32478index a20e1c4..4f57255 100644
32479--- a/drivers/mfd/ab3100-core.c
32480+++ b/drivers/mfd/ab3100-core.c
32481@@ -809,7 +809,7 @@ struct ab_family_id {
32482 char *name;
32483 };
32484
32485-static const struct ab_family_id ids[] __devinitdata = {
32486+static const struct ab_family_id ids[] __devinitconst = {
32487 /* AB3100 */
32488 {
32489 .id = 0xc0,
32490diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32491index f12720d..3c251fd 100644
32492--- a/drivers/mfd/abx500-core.c
32493+++ b/drivers/mfd/abx500-core.c
32494@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
32495
32496 struct abx500_device_entry {
32497 struct list_head list;
32498- struct abx500_ops ops;
32499+ abx500_ops_no_const ops;
32500 struct device *dev;
32501 };
32502
32503diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32504index 5c2a06a..8fa077c 100644
32505--- a/drivers/mfd/janz-cmodio.c
32506+++ b/drivers/mfd/janz-cmodio.c
32507@@ -13,6 +13,7 @@
32508
32509 #include <linux/kernel.h>
32510 #include <linux/module.h>
32511+#include <linux/slab.h>
32512 #include <linux/init.h>
32513 #include <linux/pci.h>
32514 #include <linux/interrupt.h>
32515diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
32516index 5fe5de1..af64f53 100644
32517--- a/drivers/mfd/wm8350-i2c.c
32518+++ b/drivers/mfd/wm8350-i2c.c
32519@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
32520 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32521 int ret;
32522
32523+ pax_track_stack();
32524+
32525 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32526 return -EINVAL;
32527
32528diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32529index 8b51cd6..f628f8d 100644
32530--- a/drivers/misc/lis3lv02d/lis3lv02d.c
32531+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32532@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
32533 * the lid is closed. This leads to interrupts as soon as a little move
32534 * is done.
32535 */
32536- atomic_inc(&lis3_dev.count);
32537+ atomic_inc_unchecked(&lis3_dev.count);
32538
32539 wake_up_interruptible(&lis3_dev.misc_wait);
32540 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
32541@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32542 if (lis3_dev.pm_dev)
32543 pm_runtime_get_sync(lis3_dev.pm_dev);
32544
32545- atomic_set(&lis3_dev.count, 0);
32546+ atomic_set_unchecked(&lis3_dev.count, 0);
32547 return 0;
32548 }
32549
32550@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32551 add_wait_queue(&lis3_dev.misc_wait, &wait);
32552 while (true) {
32553 set_current_state(TASK_INTERRUPTIBLE);
32554- data = atomic_xchg(&lis3_dev.count, 0);
32555+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
32556 if (data)
32557 break;
32558
32559@@ -585,7 +585,7 @@ out:
32560 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32561 {
32562 poll_wait(file, &lis3_dev.misc_wait, wait);
32563- if (atomic_read(&lis3_dev.count))
32564+ if (atomic_read_unchecked(&lis3_dev.count))
32565 return POLLIN | POLLRDNORM;
32566 return 0;
32567 }
32568diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32569index a193958..4d7ecd2 100644
32570--- a/drivers/misc/lis3lv02d/lis3lv02d.h
32571+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32572@@ -265,7 +265,7 @@ struct lis3lv02d {
32573 struct input_polled_dev *idev; /* input device */
32574 struct platform_device *pdev; /* platform device */
32575 struct regulator_bulk_data regulators[2];
32576- atomic_t count; /* interrupt count after last read */
32577+ atomic_unchecked_t count; /* interrupt count after last read */
32578 union axis_conversion ac; /* hw -> logical axis */
32579 int mapped_btns[3];
32580
32581diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32582index 2f30bad..c4c13d0 100644
32583--- a/drivers/misc/sgi-gru/gruhandles.c
32584+++ b/drivers/misc/sgi-gru/gruhandles.c
32585@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32586 unsigned long nsec;
32587
32588 nsec = CLKS2NSEC(clks);
32589- atomic_long_inc(&mcs_op_statistics[op].count);
32590- atomic_long_add(nsec, &mcs_op_statistics[op].total);
32591+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32592+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32593 if (mcs_op_statistics[op].max < nsec)
32594 mcs_op_statistics[op].max = nsec;
32595 }
32596diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32597index 7768b87..f8aac38 100644
32598--- a/drivers/misc/sgi-gru/gruprocfs.c
32599+++ b/drivers/misc/sgi-gru/gruprocfs.c
32600@@ -32,9 +32,9 @@
32601
32602 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32603
32604-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32605+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32606 {
32607- unsigned long val = atomic_long_read(v);
32608+ unsigned long val = atomic_long_read_unchecked(v);
32609
32610 seq_printf(s, "%16lu %s\n", val, id);
32611 }
32612@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32613
32614 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32615 for (op = 0; op < mcsop_last; op++) {
32616- count = atomic_long_read(&mcs_op_statistics[op].count);
32617- total = atomic_long_read(&mcs_op_statistics[op].total);
32618+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32619+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32620 max = mcs_op_statistics[op].max;
32621 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32622 count ? total / count : 0, max);
32623diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32624index 5c3ce24..4915ccb 100644
32625--- a/drivers/misc/sgi-gru/grutables.h
32626+++ b/drivers/misc/sgi-gru/grutables.h
32627@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32628 * GRU statistics.
32629 */
32630 struct gru_stats_s {
32631- atomic_long_t vdata_alloc;
32632- atomic_long_t vdata_free;
32633- atomic_long_t gts_alloc;
32634- atomic_long_t gts_free;
32635- atomic_long_t gms_alloc;
32636- atomic_long_t gms_free;
32637- atomic_long_t gts_double_allocate;
32638- atomic_long_t assign_context;
32639- atomic_long_t assign_context_failed;
32640- atomic_long_t free_context;
32641- atomic_long_t load_user_context;
32642- atomic_long_t load_kernel_context;
32643- atomic_long_t lock_kernel_context;
32644- atomic_long_t unlock_kernel_context;
32645- atomic_long_t steal_user_context;
32646- atomic_long_t steal_kernel_context;
32647- atomic_long_t steal_context_failed;
32648- atomic_long_t nopfn;
32649- atomic_long_t asid_new;
32650- atomic_long_t asid_next;
32651- atomic_long_t asid_wrap;
32652- atomic_long_t asid_reuse;
32653- atomic_long_t intr;
32654- atomic_long_t intr_cbr;
32655- atomic_long_t intr_tfh;
32656- atomic_long_t intr_spurious;
32657- atomic_long_t intr_mm_lock_failed;
32658- atomic_long_t call_os;
32659- atomic_long_t call_os_wait_queue;
32660- atomic_long_t user_flush_tlb;
32661- atomic_long_t user_unload_context;
32662- atomic_long_t user_exception;
32663- atomic_long_t set_context_option;
32664- atomic_long_t check_context_retarget_intr;
32665- atomic_long_t check_context_unload;
32666- atomic_long_t tlb_dropin;
32667- atomic_long_t tlb_preload_page;
32668- atomic_long_t tlb_dropin_fail_no_asid;
32669- atomic_long_t tlb_dropin_fail_upm;
32670- atomic_long_t tlb_dropin_fail_invalid;
32671- atomic_long_t tlb_dropin_fail_range_active;
32672- atomic_long_t tlb_dropin_fail_idle;
32673- atomic_long_t tlb_dropin_fail_fmm;
32674- atomic_long_t tlb_dropin_fail_no_exception;
32675- atomic_long_t tfh_stale_on_fault;
32676- atomic_long_t mmu_invalidate_range;
32677- atomic_long_t mmu_invalidate_page;
32678- atomic_long_t flush_tlb;
32679- atomic_long_t flush_tlb_gru;
32680- atomic_long_t flush_tlb_gru_tgh;
32681- atomic_long_t flush_tlb_gru_zero_asid;
32682+ atomic_long_unchecked_t vdata_alloc;
32683+ atomic_long_unchecked_t vdata_free;
32684+ atomic_long_unchecked_t gts_alloc;
32685+ atomic_long_unchecked_t gts_free;
32686+ atomic_long_unchecked_t gms_alloc;
32687+ atomic_long_unchecked_t gms_free;
32688+ atomic_long_unchecked_t gts_double_allocate;
32689+ atomic_long_unchecked_t assign_context;
32690+ atomic_long_unchecked_t assign_context_failed;
32691+ atomic_long_unchecked_t free_context;
32692+ atomic_long_unchecked_t load_user_context;
32693+ atomic_long_unchecked_t load_kernel_context;
32694+ atomic_long_unchecked_t lock_kernel_context;
32695+ atomic_long_unchecked_t unlock_kernel_context;
32696+ atomic_long_unchecked_t steal_user_context;
32697+ atomic_long_unchecked_t steal_kernel_context;
32698+ atomic_long_unchecked_t steal_context_failed;
32699+ atomic_long_unchecked_t nopfn;
32700+ atomic_long_unchecked_t asid_new;
32701+ atomic_long_unchecked_t asid_next;
32702+ atomic_long_unchecked_t asid_wrap;
32703+ atomic_long_unchecked_t asid_reuse;
32704+ atomic_long_unchecked_t intr;
32705+ atomic_long_unchecked_t intr_cbr;
32706+ atomic_long_unchecked_t intr_tfh;
32707+ atomic_long_unchecked_t intr_spurious;
32708+ atomic_long_unchecked_t intr_mm_lock_failed;
32709+ atomic_long_unchecked_t call_os;
32710+ atomic_long_unchecked_t call_os_wait_queue;
32711+ atomic_long_unchecked_t user_flush_tlb;
32712+ atomic_long_unchecked_t user_unload_context;
32713+ atomic_long_unchecked_t user_exception;
32714+ atomic_long_unchecked_t set_context_option;
32715+ atomic_long_unchecked_t check_context_retarget_intr;
32716+ atomic_long_unchecked_t check_context_unload;
32717+ atomic_long_unchecked_t tlb_dropin;
32718+ atomic_long_unchecked_t tlb_preload_page;
32719+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32720+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32721+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32722+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32723+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32724+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32725+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32726+ atomic_long_unchecked_t tfh_stale_on_fault;
32727+ atomic_long_unchecked_t mmu_invalidate_range;
32728+ atomic_long_unchecked_t mmu_invalidate_page;
32729+ atomic_long_unchecked_t flush_tlb;
32730+ atomic_long_unchecked_t flush_tlb_gru;
32731+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32732+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32733
32734- atomic_long_t copy_gpa;
32735- atomic_long_t read_gpa;
32736+ atomic_long_unchecked_t copy_gpa;
32737+ atomic_long_unchecked_t read_gpa;
32738
32739- atomic_long_t mesq_receive;
32740- atomic_long_t mesq_receive_none;
32741- atomic_long_t mesq_send;
32742- atomic_long_t mesq_send_failed;
32743- atomic_long_t mesq_noop;
32744- atomic_long_t mesq_send_unexpected_error;
32745- atomic_long_t mesq_send_lb_overflow;
32746- atomic_long_t mesq_send_qlimit_reached;
32747- atomic_long_t mesq_send_amo_nacked;
32748- atomic_long_t mesq_send_put_nacked;
32749- atomic_long_t mesq_page_overflow;
32750- atomic_long_t mesq_qf_locked;
32751- atomic_long_t mesq_qf_noop_not_full;
32752- atomic_long_t mesq_qf_switch_head_failed;
32753- atomic_long_t mesq_qf_unexpected_error;
32754- atomic_long_t mesq_noop_unexpected_error;
32755- atomic_long_t mesq_noop_lb_overflow;
32756- atomic_long_t mesq_noop_qlimit_reached;
32757- atomic_long_t mesq_noop_amo_nacked;
32758- atomic_long_t mesq_noop_put_nacked;
32759- atomic_long_t mesq_noop_page_overflow;
32760+ atomic_long_unchecked_t mesq_receive;
32761+ atomic_long_unchecked_t mesq_receive_none;
32762+ atomic_long_unchecked_t mesq_send;
32763+ atomic_long_unchecked_t mesq_send_failed;
32764+ atomic_long_unchecked_t mesq_noop;
32765+ atomic_long_unchecked_t mesq_send_unexpected_error;
32766+ atomic_long_unchecked_t mesq_send_lb_overflow;
32767+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32768+ atomic_long_unchecked_t mesq_send_amo_nacked;
32769+ atomic_long_unchecked_t mesq_send_put_nacked;
32770+ atomic_long_unchecked_t mesq_page_overflow;
32771+ atomic_long_unchecked_t mesq_qf_locked;
32772+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32773+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32774+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32775+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32776+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32777+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32778+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32779+ atomic_long_unchecked_t mesq_noop_put_nacked;
32780+ atomic_long_unchecked_t mesq_noop_page_overflow;
32781
32782 };
32783
32784@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32785 tghop_invalidate, mcsop_last};
32786
32787 struct mcs_op_statistic {
32788- atomic_long_t count;
32789- atomic_long_t total;
32790+ atomic_long_unchecked_t count;
32791+ atomic_long_unchecked_t total;
32792 unsigned long max;
32793 };
32794
32795@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32796
32797 #define STAT(id) do { \
32798 if (gru_options & OPT_STATS) \
32799- atomic_long_inc(&gru_stats.id); \
32800+ atomic_long_inc_unchecked(&gru_stats.id); \
32801 } while (0)
32802
32803 #ifdef CONFIG_SGI_GRU_DEBUG
32804diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32805index 851b2f2..a4ec097 100644
32806--- a/drivers/misc/sgi-xp/xp.h
32807+++ b/drivers/misc/sgi-xp/xp.h
32808@@ -289,7 +289,7 @@ struct xpc_interface {
32809 xpc_notify_func, void *);
32810 void (*received) (short, int, void *);
32811 enum xp_retval (*partid_to_nasids) (short, void *);
32812-};
32813+} __no_const;
32814
32815 extern struct xpc_interface xpc_interface;
32816
32817diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32818index b94d5f7..7f494c5 100644
32819--- a/drivers/misc/sgi-xp/xpc.h
32820+++ b/drivers/misc/sgi-xp/xpc.h
32821@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32822 void (*received_payload) (struct xpc_channel *, void *);
32823 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32824 };
32825+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32826
32827 /* struct xpc_partition act_state values (for XPC HB) */
32828
32829@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32830 /* found in xpc_main.c */
32831 extern struct device *xpc_part;
32832 extern struct device *xpc_chan;
32833-extern struct xpc_arch_operations xpc_arch_ops;
32834+extern xpc_arch_operations_no_const xpc_arch_ops;
32835 extern int xpc_disengage_timelimit;
32836 extern int xpc_disengage_timedout;
32837 extern int xpc_activate_IRQ_rcvd;
32838diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32839index 8d082b4..aa749ae 100644
32840--- a/drivers/misc/sgi-xp/xpc_main.c
32841+++ b/drivers/misc/sgi-xp/xpc_main.c
32842@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32843 .notifier_call = xpc_system_die,
32844 };
32845
32846-struct xpc_arch_operations xpc_arch_ops;
32847+xpc_arch_operations_no_const xpc_arch_ops;
32848
32849 /*
32850 * Timer function to enforce the timelimit on the partition disengage.
32851diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32852index 26c5286..292d261 100644
32853--- a/drivers/mmc/host/sdhci-pci.c
32854+++ b/drivers/mmc/host/sdhci-pci.c
32855@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32856 .probe = via_probe,
32857 };
32858
32859-static const struct pci_device_id pci_ids[] __devinitdata = {
32860+static const struct pci_device_id pci_ids[] __devinitconst = {
32861 {
32862 .vendor = PCI_VENDOR_ID_RICOH,
32863 .device = PCI_DEVICE_ID_RICOH_R5C822,
32864diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
32865index e1e122f..d99a6ea 100644
32866--- a/drivers/mtd/chips/cfi_cmdset_0001.c
32867+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
32868@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
32869 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32870 unsigned long timeo = jiffies + HZ;
32871
32872+ pax_track_stack();
32873+
32874 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32875 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32876 goto sleep;
32877@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
32878 unsigned long initial_adr;
32879 int initial_len = len;
32880
32881+ pax_track_stack();
32882+
32883 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32884 adr += chip->start;
32885 initial_adr = adr;
32886@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
32887 int retries = 3;
32888 int ret;
32889
32890+ pax_track_stack();
32891+
32892 adr += chip->start;
32893
32894 retry:
32895diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
32896index 179814a..abe9d60 100644
32897--- a/drivers/mtd/chips/cfi_cmdset_0020.c
32898+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
32899@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
32900 unsigned long cmd_addr;
32901 struct cfi_private *cfi = map->fldrv_priv;
32902
32903+ pax_track_stack();
32904+
32905 adr += chip->start;
32906
32907 /* Ensure cmd read/writes are aligned. */
32908@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
32909 DECLARE_WAITQUEUE(wait, current);
32910 int wbufsize, z;
32911
32912+ pax_track_stack();
32913+
32914 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32915 if (adr & (map_bankwidth(map)-1))
32916 return -EINVAL;
32917@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
32918 DECLARE_WAITQUEUE(wait, current);
32919 int ret = 0;
32920
32921+ pax_track_stack();
32922+
32923 adr += chip->start;
32924
32925 /* Let's determine this according to the interleave only once */
32926@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
32927 unsigned long timeo = jiffies + HZ;
32928 DECLARE_WAITQUEUE(wait, current);
32929
32930+ pax_track_stack();
32931+
32932 adr += chip->start;
32933
32934 /* Let's determine this according to the interleave only once */
32935@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
32936 unsigned long timeo = jiffies + HZ;
32937 DECLARE_WAITQUEUE(wait, current);
32938
32939+ pax_track_stack();
32940+
32941 adr += chip->start;
32942
32943 /* Let's determine this according to the interleave only once */
32944diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32945index f7fbf60..9866457 100644
32946--- a/drivers/mtd/devices/doc2000.c
32947+++ b/drivers/mtd/devices/doc2000.c
32948@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32949
32950 /* The ECC will not be calculated correctly if less than 512 is written */
32951 /* DBB-
32952- if (len != 0x200 && eccbuf)
32953+ if (len != 0x200)
32954 printk(KERN_WARNING
32955 "ECC needs a full sector write (adr: %lx size %lx)\n",
32956 (long) to, (long) len);
32957diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32958index 241192f..d0c35a3 100644
32959--- a/drivers/mtd/devices/doc2001.c
32960+++ b/drivers/mtd/devices/doc2001.c
32961@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32962 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32963
32964 /* Don't allow read past end of device */
32965- if (from >= this->totlen)
32966+ if (from >= this->totlen || !len)
32967 return -EINVAL;
32968
32969 /* Don't allow a single read to cross a 512-byte block boundary */
32970diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
32971index 037b399..225a71d 100644
32972--- a/drivers/mtd/ftl.c
32973+++ b/drivers/mtd/ftl.c
32974@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
32975 loff_t offset;
32976 uint16_t srcunitswap = cpu_to_le16(srcunit);
32977
32978+ pax_track_stack();
32979+
32980 eun = &part->EUNInfo[srcunit];
32981 xfer = &part->XferInfo[xferunit];
32982 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32983diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
32984index d7592e6..31c505c 100644
32985--- a/drivers/mtd/inftlcore.c
32986+++ b/drivers/mtd/inftlcore.c
32987@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
32988 struct inftl_oob oob;
32989 size_t retlen;
32990
32991+ pax_track_stack();
32992+
32993 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32994 "pending=%d)\n", inftl, thisVUC, pendingblock);
32995
32996diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
32997index 104052e..6232be5 100644
32998--- a/drivers/mtd/inftlmount.c
32999+++ b/drivers/mtd/inftlmount.c
33000@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
33001 struct INFTLPartition *ip;
33002 size_t retlen;
33003
33004+ pax_track_stack();
33005+
33006 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33007
33008 /*
33009diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
33010index dbfe17b..c7b0918 100644
33011--- a/drivers/mtd/lpddr/qinfo_probe.c
33012+++ b/drivers/mtd/lpddr/qinfo_probe.c
33013@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
33014 {
33015 map_word pfow_val[4];
33016
33017+ pax_track_stack();
33018+
33019 /* Check identification string */
33020 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33021 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33022diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
33023index 49e20a4..60fbfa5 100644
33024--- a/drivers/mtd/mtdchar.c
33025+++ b/drivers/mtd/mtdchar.c
33026@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
33027 u_long size;
33028 struct mtd_info_user info;
33029
33030+ pax_track_stack();
33031+
33032 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33033
33034 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33035diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33036index d527621..2491fab 100644
33037--- a/drivers/mtd/nand/denali.c
33038+++ b/drivers/mtd/nand/denali.c
33039@@ -26,6 +26,7 @@
33040 #include <linux/pci.h>
33041 #include <linux/mtd/mtd.h>
33042 #include <linux/module.h>
33043+#include <linux/slab.h>
33044
33045 #include "denali.h"
33046
33047diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
33048index b155666..611b801 100644
33049--- a/drivers/mtd/nftlcore.c
33050+++ b/drivers/mtd/nftlcore.c
33051@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
33052 int inplace = 1;
33053 size_t retlen;
33054
33055+ pax_track_stack();
33056+
33057 memset(BlockMap, 0xff, sizeof(BlockMap));
33058 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33059
33060diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33061index e3cd1ff..0ea79a3 100644
33062--- a/drivers/mtd/nftlmount.c
33063+++ b/drivers/mtd/nftlmount.c
33064@@ -24,6 +24,7 @@
33065 #include <asm/errno.h>
33066 #include <linux/delay.h>
33067 #include <linux/slab.h>
33068+#include <linux/sched.h>
33069 #include <linux/mtd/mtd.h>
33070 #include <linux/mtd/nand.h>
33071 #include <linux/mtd/nftl.h>
33072@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
33073 struct mtd_info *mtd = nftl->mbd.mtd;
33074 unsigned int i;
33075
33076+ pax_track_stack();
33077+
33078 /* Assume logical EraseSize == physical erasesize for starting the scan.
33079 We'll sort it out later if we find a MediaHeader which says otherwise */
33080 /* Actually, we won't. The new DiskOnChip driver has already scanned
33081diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33082index 6c3fb5a..c542a81 100644
33083--- a/drivers/mtd/ubi/build.c
33084+++ b/drivers/mtd/ubi/build.c
33085@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33086 static int __init bytes_str_to_int(const char *str)
33087 {
33088 char *endp;
33089- unsigned long result;
33090+ unsigned long result, scale = 1;
33091
33092 result = simple_strtoul(str, &endp, 0);
33093 if (str == endp || result >= INT_MAX) {
33094@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33095
33096 switch (*endp) {
33097 case 'G':
33098- result *= 1024;
33099+ scale *= 1024;
33100 case 'M':
33101- result *= 1024;
33102+ scale *= 1024;
33103 case 'K':
33104- result *= 1024;
33105+ scale *= 1024;
33106 if (endp[1] == 'i' && endp[2] == 'B')
33107 endp += 2;
33108 case '\0':
33109@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33110 return -EINVAL;
33111 }
33112
33113- return result;
33114+ if ((intoverflow_t)result*scale >= INT_MAX) {
33115+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33116+ str);
33117+ return -EINVAL;
33118+ }
33119+
33120+ return result*scale;
33121 }
33122
33123 /**
33124diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
33125index d4f7dda..d627d46 100644
33126--- a/drivers/net/atlx/atl2.c
33127+++ b/drivers/net/atlx/atl2.c
33128@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33129 */
33130
33131 #define ATL2_PARAM(X, desc) \
33132- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33133+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33134 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33135 MODULE_PARM_DESC(X, desc);
33136 #else
33137diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
33138index 87aecdf..ec23470 100644
33139--- a/drivers/net/bna/bfa_ioc_ct.c
33140+++ b/drivers/net/bna/bfa_ioc_ct.c
33141@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
33142 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
33143 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
33144
33145-static struct bfa_ioc_hwif nw_hwif_ct;
33146+static struct bfa_ioc_hwif nw_hwif_ct = {
33147+ .ioc_pll_init = bfa_ioc_ct_pll_init,
33148+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
33149+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
33150+ .ioc_reg_init = bfa_ioc_ct_reg_init,
33151+ .ioc_map_port = bfa_ioc_ct_map_port,
33152+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
33153+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
33154+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
33155+ .ioc_sync_start = bfa_ioc_ct_sync_start,
33156+ .ioc_sync_join = bfa_ioc_ct_sync_join,
33157+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
33158+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
33159+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
33160+};
33161
33162 /**
33163 * Called from bfa_ioc_attach() to map asic specific calls.
33164@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
33165 void
33166 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
33167 {
33168- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
33169- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
33170- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
33171- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
33172- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
33173- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
33174- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
33175- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
33176- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
33177- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
33178- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
33179- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
33180- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
33181-
33182 ioc->ioc_hwif = &nw_hwif_ct;
33183 }
33184
33185diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
33186index 8e35b25..c39f205 100644
33187--- a/drivers/net/bna/bnad.c
33188+++ b/drivers/net/bna/bnad.c
33189@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
33190 struct bna_intr_info *intr_info =
33191 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
33192 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
33193- struct bna_tx_event_cbfn tx_cbfn;
33194+ static struct bna_tx_event_cbfn tx_cbfn = {
33195+ /* Initialize the tx event handlers */
33196+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
33197+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
33198+ .tx_stall_cbfn = bnad_cb_tx_stall,
33199+ .tx_resume_cbfn = bnad_cb_tx_resume,
33200+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
33201+ };
33202 struct bna_tx *tx;
33203 unsigned long flags;
33204
33205@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
33206 tx_config->txq_depth = bnad->txq_depth;
33207 tx_config->tx_type = BNA_TX_T_REGULAR;
33208
33209- /* Initialize the tx event handlers */
33210- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
33211- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
33212- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
33213- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
33214- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
33215-
33216 /* Get BNA's resource requirement for one tx object */
33217 spin_lock_irqsave(&bnad->bna_lock, flags);
33218 bna_tx_res_req(bnad->num_txq_per_tx,
33219@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
33220 struct bna_intr_info *intr_info =
33221 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
33222 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
33223- struct bna_rx_event_cbfn rx_cbfn;
33224+ static struct bna_rx_event_cbfn rx_cbfn = {
33225+ /* Initialize the Rx event handlers */
33226+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
33227+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
33228+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
33229+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
33230+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
33231+ .rx_post_cbfn = bnad_cb_rx_post
33232+ };
33233 struct bna_rx *rx;
33234 unsigned long flags;
33235
33236 /* Initialize the Rx object configuration */
33237 bnad_init_rx_config(bnad, rx_config);
33238
33239- /* Initialize the Rx event handlers */
33240- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
33241- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
33242- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
33243- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
33244- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
33245- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
33246-
33247 /* Get BNA's resource requirement for one Rx object */
33248 spin_lock_irqsave(&bnad->bna_lock, flags);
33249 bna_rx_res_req(rx_config, res_info);
33250diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
33251index 4b2b570..31033f4 100644
33252--- a/drivers/net/bnx2.c
33253+++ b/drivers/net/bnx2.c
33254@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33255 int rc = 0;
33256 u32 magic, csum;
33257
33258+ pax_track_stack();
33259+
33260 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33261 goto test_nvram_done;
33262
33263diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
33264index cf3e479..5dc0ecc 100644
33265--- a/drivers/net/bnx2x/bnx2x_ethtool.c
33266+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
33267@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
33268 int i, rc;
33269 u32 magic, crc;
33270
33271+ pax_track_stack();
33272+
33273 if (BP_NOMCP(bp))
33274 return 0;
33275
33276diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
33277index 9a517c2..a50cfcb 100644
33278--- a/drivers/net/bnx2x/bnx2x_sp.h
33279+++ b/drivers/net/bnx2x/bnx2x_sp.h
33280@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
33281
33282 int (*wait_comp)(struct bnx2x *bp,
33283 struct bnx2x_rx_mode_ramrod_params *p);
33284-};
33285+} __no_const;
33286
33287 /********************** Set multicast group ***********************************/
33288
33289diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
33290index c5f5479..2e8c260 100644
33291--- a/drivers/net/cxgb3/l2t.h
33292+++ b/drivers/net/cxgb3/l2t.h
33293@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33294 */
33295 struct l2t_skb_cb {
33296 arp_failure_handler_func arp_failure_handler;
33297-};
33298+} __no_const;
33299
33300 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33301
33302diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
33303index b4efa29..c5f2703 100644
33304--- a/drivers/net/cxgb4/cxgb4_main.c
33305+++ b/drivers/net/cxgb4/cxgb4_main.c
33306@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap)
33307 unsigned int nchan = adap->params.nports;
33308 struct msix_entry entries[MAX_INGQ + 1];
33309
33310+ pax_track_stack();
33311+
33312 for (i = 0; i < ARRAY_SIZE(entries); ++i)
33313 entries[i].entry = i;
33314
33315diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
33316index d1ec111..12735bc 100644
33317--- a/drivers/net/cxgb4/t4_hw.c
33318+++ b/drivers/net/cxgb4/t4_hw.c
33319@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
33320 u8 vpd[VPD_LEN], csum;
33321 unsigned int vpdr_len, kw_offset, id_len;
33322
33323+ pax_track_stack();
33324+
33325 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
33326 if (ret < 0)
33327 return ret;
33328diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
33329index 536b3a5..e6f8dcc 100644
33330--- a/drivers/net/e1000e/82571.c
33331+++ b/drivers/net/e1000e/82571.c
33332@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
33333 {
33334 struct e1000_hw *hw = &adapter->hw;
33335 struct e1000_mac_info *mac = &hw->mac;
33336- struct e1000_mac_operations *func = &mac->ops;
33337+ e1000_mac_operations_no_const *func = &mac->ops;
33338 u32 swsm = 0;
33339 u32 swsm2 = 0;
33340 bool force_clear_smbi = false;
33341diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
33342index e4f4225..24da2ea 100644
33343--- a/drivers/net/e1000e/es2lan.c
33344+++ b/drivers/net/e1000e/es2lan.c
33345@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
33346 {
33347 struct e1000_hw *hw = &adapter->hw;
33348 struct e1000_mac_info *mac = &hw->mac;
33349- struct e1000_mac_operations *func = &mac->ops;
33350+ e1000_mac_operations_no_const *func = &mac->ops;
33351
33352 /* Set media type */
33353 switch (adapter->pdev->device) {
33354diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
33355index 2967039..ca8c40c 100644
33356--- a/drivers/net/e1000e/hw.h
33357+++ b/drivers/net/e1000e/hw.h
33358@@ -778,6 +778,7 @@ struct e1000_mac_operations {
33359 void (*write_vfta)(struct e1000_hw *, u32, u32);
33360 s32 (*read_mac_addr)(struct e1000_hw *);
33361 };
33362+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33363
33364 /*
33365 * When to use various PHY register access functions:
33366@@ -818,6 +819,7 @@ struct e1000_phy_operations {
33367 void (*power_up)(struct e1000_hw *);
33368 void (*power_down)(struct e1000_hw *);
33369 };
33370+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33371
33372 /* Function pointers for the NVM. */
33373 struct e1000_nvm_operations {
33374@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
33375 s32 (*validate)(struct e1000_hw *);
33376 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33377 };
33378+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33379
33380 struct e1000_mac_info {
33381- struct e1000_mac_operations ops;
33382+ e1000_mac_operations_no_const ops;
33383 u8 addr[ETH_ALEN];
33384 u8 perm_addr[ETH_ALEN];
33385
33386@@ -872,7 +875,7 @@ struct e1000_mac_info {
33387 };
33388
33389 struct e1000_phy_info {
33390- struct e1000_phy_operations ops;
33391+ e1000_phy_operations_no_const ops;
33392
33393 enum e1000_phy_type type;
33394
33395@@ -906,7 +909,7 @@ struct e1000_phy_info {
33396 };
33397
33398 struct e1000_nvm_info {
33399- struct e1000_nvm_operations ops;
33400+ e1000_nvm_operations_no_const ops;
33401
33402 enum e1000_nvm_type type;
33403 enum e1000_nvm_override override;
33404diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
33405index fa8677c..196356f 100644
33406--- a/drivers/net/fealnx.c
33407+++ b/drivers/net/fealnx.c
33408@@ -150,7 +150,7 @@ struct chip_info {
33409 int flags;
33410 };
33411
33412-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33413+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33414 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33415 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33416 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33417diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
33418index 2a5a34d..be871cc 100644
33419--- a/drivers/net/hamradio/6pack.c
33420+++ b/drivers/net/hamradio/6pack.c
33421@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
33422 unsigned char buf[512];
33423 int count1;
33424
33425+ pax_track_stack();
33426+
33427 if (!count)
33428 return;
33429
33430diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
33431index 4519a13..f97fcd0 100644
33432--- a/drivers/net/igb/e1000_hw.h
33433+++ b/drivers/net/igb/e1000_hw.h
33434@@ -314,6 +314,7 @@ struct e1000_mac_operations {
33435 s32 (*read_mac_addr)(struct e1000_hw *);
33436 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33437 };
33438+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33439
33440 struct e1000_phy_operations {
33441 s32 (*acquire)(struct e1000_hw *);
33442@@ -330,6 +331,7 @@ struct e1000_phy_operations {
33443 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33444 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33445 };
33446+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33447
33448 struct e1000_nvm_operations {
33449 s32 (*acquire)(struct e1000_hw *);
33450@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
33451 s32 (*update)(struct e1000_hw *);
33452 s32 (*validate)(struct e1000_hw *);
33453 };
33454+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33455
33456 struct e1000_info {
33457 s32 (*get_invariants)(struct e1000_hw *);
33458@@ -350,7 +353,7 @@ struct e1000_info {
33459 extern const struct e1000_info e1000_82575_info;
33460
33461 struct e1000_mac_info {
33462- struct e1000_mac_operations ops;
33463+ e1000_mac_operations_no_const ops;
33464
33465 u8 addr[6];
33466 u8 perm_addr[6];
33467@@ -388,7 +391,7 @@ struct e1000_mac_info {
33468 };
33469
33470 struct e1000_phy_info {
33471- struct e1000_phy_operations ops;
33472+ e1000_phy_operations_no_const ops;
33473
33474 enum e1000_phy_type type;
33475
33476@@ -423,7 +426,7 @@ struct e1000_phy_info {
33477 };
33478
33479 struct e1000_nvm_info {
33480- struct e1000_nvm_operations ops;
33481+ e1000_nvm_operations_no_const ops;
33482 enum e1000_nvm_type type;
33483 enum e1000_nvm_override override;
33484
33485@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
33486 s32 (*check_for_ack)(struct e1000_hw *, u16);
33487 s32 (*check_for_rst)(struct e1000_hw *, u16);
33488 };
33489+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33490
33491 struct e1000_mbx_stats {
33492 u32 msgs_tx;
33493@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
33494 };
33495
33496 struct e1000_mbx_info {
33497- struct e1000_mbx_operations ops;
33498+ e1000_mbx_operations_no_const ops;
33499 struct e1000_mbx_stats stats;
33500 u32 timeout;
33501 u32 usec_delay;
33502diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
33503index d7ed58f..64cde36 100644
33504--- a/drivers/net/igbvf/vf.h
33505+++ b/drivers/net/igbvf/vf.h
33506@@ -189,9 +189,10 @@ struct e1000_mac_operations {
33507 s32 (*read_mac_addr)(struct e1000_hw *);
33508 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33509 };
33510+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33511
33512 struct e1000_mac_info {
33513- struct e1000_mac_operations ops;
33514+ e1000_mac_operations_no_const ops;
33515 u8 addr[6];
33516 u8 perm_addr[6];
33517
33518@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
33519 s32 (*check_for_ack)(struct e1000_hw *);
33520 s32 (*check_for_rst)(struct e1000_hw *);
33521 };
33522+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33523
33524 struct e1000_mbx_stats {
33525 u32 msgs_tx;
33526@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
33527 };
33528
33529 struct e1000_mbx_info {
33530- struct e1000_mbx_operations ops;
33531+ e1000_mbx_operations_no_const ops;
33532 struct e1000_mbx_stats stats;
33533 u32 timeout;
33534 u32 usec_delay;
33535diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
33536index 6a130eb..1aeb9e4 100644
33537--- a/drivers/net/ixgb/ixgb_main.c
33538+++ b/drivers/net/ixgb/ixgb_main.c
33539@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev)
33540 u32 rctl;
33541 int i;
33542
33543+ pax_track_stack();
33544+
33545 /* Check for Promiscuous and All Multicast modes */
33546
33547 rctl = IXGB_READ_REG(hw, RCTL);
33548diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
33549index dd7fbeb..44b9bbf 100644
33550--- a/drivers/net/ixgb/ixgb_param.c
33551+++ b/drivers/net/ixgb/ixgb_param.c
33552@@ -261,6 +261,9 @@ void __devinit
33553 ixgb_check_options(struct ixgb_adapter *adapter)
33554 {
33555 int bd = adapter->bd_number;
33556+
33557+ pax_track_stack();
33558+
33559 if (bd >= IXGB_MAX_NIC) {
33560 pr_notice("Warning: no configuration for board #%i\n", bd);
33561 pr_notice("Using defaults for all values\n");
33562diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
33563index e0d970e..1cfdea5 100644
33564--- a/drivers/net/ixgbe/ixgbe_type.h
33565+++ b/drivers/net/ixgbe/ixgbe_type.h
33566@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
33567 s32 (*update_checksum)(struct ixgbe_hw *);
33568 u16 (*calc_checksum)(struct ixgbe_hw *);
33569 };
33570+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33571
33572 struct ixgbe_mac_operations {
33573 s32 (*init_hw)(struct ixgbe_hw *);
33574@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
33575 /* Manageability interface */
33576 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
33577 };
33578+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33579
33580 struct ixgbe_phy_operations {
33581 s32 (*identify)(struct ixgbe_hw *);
33582@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
33583 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33584 s32 (*check_overtemp)(struct ixgbe_hw *);
33585 };
33586+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33587
33588 struct ixgbe_eeprom_info {
33589- struct ixgbe_eeprom_operations ops;
33590+ ixgbe_eeprom_operations_no_const ops;
33591 enum ixgbe_eeprom_type type;
33592 u32 semaphore_delay;
33593 u16 word_size;
33594@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
33595
33596 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
33597 struct ixgbe_mac_info {
33598- struct ixgbe_mac_operations ops;
33599+ ixgbe_mac_operations_no_const ops;
33600 enum ixgbe_mac_type type;
33601 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33602 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33603@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
33604 };
33605
33606 struct ixgbe_phy_info {
33607- struct ixgbe_phy_operations ops;
33608+ ixgbe_phy_operations_no_const ops;
33609 struct mdio_if_info mdio;
33610 enum ixgbe_phy_type type;
33611 u32 id;
33612@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
33613 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
33614 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
33615 };
33616+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33617
33618 struct ixgbe_mbx_stats {
33619 u32 msgs_tx;
33620@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
33621 };
33622
33623 struct ixgbe_mbx_info {
33624- struct ixgbe_mbx_operations ops;
33625+ ixgbe_mbx_operations_no_const ops;
33626 struct ixgbe_mbx_stats stats;
33627 u32 timeout;
33628 u32 usec_delay;
33629diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
33630index 10306b4..28df758 100644
33631--- a/drivers/net/ixgbevf/vf.h
33632+++ b/drivers/net/ixgbevf/vf.h
33633@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33634 s32 (*clear_vfta)(struct ixgbe_hw *);
33635 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33636 };
33637+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33638
33639 enum ixgbe_mac_type {
33640 ixgbe_mac_unknown = 0,
33641@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33642 };
33643
33644 struct ixgbe_mac_info {
33645- struct ixgbe_mac_operations ops;
33646+ ixgbe_mac_operations_no_const ops;
33647 u8 addr[6];
33648 u8 perm_addr[6];
33649
33650@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33651 s32 (*check_for_ack)(struct ixgbe_hw *);
33652 s32 (*check_for_rst)(struct ixgbe_hw *);
33653 };
33654+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33655
33656 struct ixgbe_mbx_stats {
33657 u32 msgs_tx;
33658@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33659 };
33660
33661 struct ixgbe_mbx_info {
33662- struct ixgbe_mbx_operations ops;
33663+ ixgbe_mbx_operations_no_const ops;
33664 struct ixgbe_mbx_stats stats;
33665 u32 timeout;
33666 u32 udelay;
33667diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
33668index 27418d3..adf15bb 100644
33669--- a/drivers/net/ksz884x.c
33670+++ b/drivers/net/ksz884x.c
33671@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
33672 int rc;
33673 u64 counter[TOTAL_PORT_COUNTER_NUM];
33674
33675+ pax_track_stack();
33676+
33677 mutex_lock(&hw_priv->lock);
33678 n = SWITCH_PORT_NUM;
33679 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
33680diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
33681index f0ee35d..3831c8a 100644
33682--- a/drivers/net/mlx4/main.c
33683+++ b/drivers/net/mlx4/main.c
33684@@ -40,6 +40,7 @@
33685 #include <linux/dma-mapping.h>
33686 #include <linux/slab.h>
33687 #include <linux/io-mapping.h>
33688+#include <linux/sched.h>
33689
33690 #include <linux/mlx4/device.h>
33691 #include <linux/mlx4/doorbell.h>
33692@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
33693 u64 icm_size;
33694 int err;
33695
33696+ pax_track_stack();
33697+
33698 err = mlx4_QUERY_FW(dev);
33699 if (err) {
33700 if (err == -EACCES)
33701diff --git a/drivers/net/niu.c b/drivers/net/niu.c
33702index ed47585..5e5be8f 100644
33703--- a/drivers/net/niu.c
33704+++ b/drivers/net/niu.c
33705@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
33706 int i, num_irqs, err;
33707 u8 first_ldg;
33708
33709+ pax_track_stack();
33710+
33711 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33712 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33713 ldg_num_map[i] = first_ldg + i;
33714diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
33715index 80b6f36..5cd8938 100644
33716--- a/drivers/net/pcnet32.c
33717+++ b/drivers/net/pcnet32.c
33718@@ -270,7 +270,7 @@ struct pcnet32_private {
33719 struct sk_buff **rx_skbuff;
33720 dma_addr_t *tx_dma_addr;
33721 dma_addr_t *rx_dma_addr;
33722- struct pcnet32_access a;
33723+ struct pcnet32_access *a;
33724 spinlock_t lock; /* Guard lock */
33725 unsigned int cur_rx, cur_tx; /* The next free ring entry */
33726 unsigned int rx_ring_size; /* current rx ring size */
33727@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
33728 u16 val;
33729
33730 netif_wake_queue(dev);
33731- val = lp->a.read_csr(ioaddr, CSR3);
33732+ val = lp->a->read_csr(ioaddr, CSR3);
33733 val &= 0x00ff;
33734- lp->a.write_csr(ioaddr, CSR3, val);
33735+ lp->a->write_csr(ioaddr, CSR3, val);
33736 napi_enable(&lp->napi);
33737 }
33738
33739@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
33740 r = mii_link_ok(&lp->mii_if);
33741 } else if (lp->chip_version >= PCNET32_79C970A) {
33742 ulong ioaddr = dev->base_addr; /* card base I/O address */
33743- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33744+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33745 } else { /* can not detect link on really old chips */
33746 r = 1;
33747 }
33748@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
33749 pcnet32_netif_stop(dev);
33750
33751 spin_lock_irqsave(&lp->lock, flags);
33752- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33753+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33754
33755 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
33756
33757@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
33758 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33759 {
33760 struct pcnet32_private *lp = netdev_priv(dev);
33761- struct pcnet32_access *a = &lp->a; /* access to registers */
33762+ struct pcnet32_access *a = lp->a; /* access to registers */
33763 ulong ioaddr = dev->base_addr; /* card base I/O address */
33764 struct sk_buff *skb; /* sk buff */
33765 int x, i; /* counters */
33766@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33767 pcnet32_netif_stop(dev);
33768
33769 spin_lock_irqsave(&lp->lock, flags);
33770- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33771+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33772
33773 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
33774
33775 /* Reset the PCNET32 */
33776- lp->a.reset(ioaddr);
33777- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33778+ lp->a->reset(ioaddr);
33779+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33780
33781 /* switch pcnet32 to 32bit mode */
33782- lp->a.write_bcr(ioaddr, 20, 2);
33783+ lp->a->write_bcr(ioaddr, 20, 2);
33784
33785 /* purge & init rings but don't actually restart */
33786 pcnet32_restart(dev, 0x0000);
33787
33788- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33789+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33790
33791 /* Initialize Transmit buffers. */
33792 size = data_len + 15;
33793@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33794
33795 /* set int loopback in CSR15 */
33796 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
33797- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
33798+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
33799
33800 teststatus = cpu_to_le16(0x8000);
33801- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33802+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33803
33804 /* Check status of descriptors */
33805 for (x = 0; x < numbuffs; x++) {
33806@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33807 }
33808 }
33809
33810- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33811+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33812 wmb();
33813 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
33814 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
33815@@ -1015,7 +1015,7 @@ clean_up:
33816 pcnet32_restart(dev, CSR0_NORMAL);
33817 } else {
33818 pcnet32_purge_rx_ring(dev);
33819- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33820+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33821 }
33822 spin_unlock_irqrestore(&lp->lock, flags);
33823
33824@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
33825 enum ethtool_phys_id_state state)
33826 {
33827 struct pcnet32_private *lp = netdev_priv(dev);
33828- struct pcnet32_access *a = &lp->a;
33829+ struct pcnet32_access *a = lp->a;
33830 ulong ioaddr = dev->base_addr;
33831 unsigned long flags;
33832 int i;
33833@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
33834 {
33835 int csr5;
33836 struct pcnet32_private *lp = netdev_priv(dev);
33837- struct pcnet32_access *a = &lp->a;
33838+ struct pcnet32_access *a = lp->a;
33839 ulong ioaddr = dev->base_addr;
33840 int ticks;
33841
33842@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33843 spin_lock_irqsave(&lp->lock, flags);
33844 if (pcnet32_tx(dev)) {
33845 /* reset the chip to clear the error condition, then restart */
33846- lp->a.reset(ioaddr);
33847- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33848+ lp->a->reset(ioaddr);
33849+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33850 pcnet32_restart(dev, CSR0_START);
33851 netif_wake_queue(dev);
33852 }
33853@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33854 __napi_complete(napi);
33855
33856 /* clear interrupt masks */
33857- val = lp->a.read_csr(ioaddr, CSR3);
33858+ val = lp->a->read_csr(ioaddr, CSR3);
33859 val &= 0x00ff;
33860- lp->a.write_csr(ioaddr, CSR3, val);
33861+ lp->a->write_csr(ioaddr, CSR3, val);
33862
33863 /* Set interrupt enable. */
33864- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
33865+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
33866
33867 spin_unlock_irqrestore(&lp->lock, flags);
33868 }
33869@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33870 int i, csr0;
33871 u16 *buff = ptr;
33872 struct pcnet32_private *lp = netdev_priv(dev);
33873- struct pcnet32_access *a = &lp->a;
33874+ struct pcnet32_access *a = lp->a;
33875 ulong ioaddr = dev->base_addr;
33876 unsigned long flags;
33877
33878@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33879 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
33880 if (lp->phymask & (1 << j)) {
33881 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
33882- lp->a.write_bcr(ioaddr, 33,
33883+ lp->a->write_bcr(ioaddr, 33,
33884 (j << 5) | i);
33885- *buff++ = lp->a.read_bcr(ioaddr, 34);
33886+ *buff++ = lp->a->read_bcr(ioaddr, 34);
33887 }
33888 }
33889 }
33890@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33891 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
33892 lp->options |= PCNET32_PORT_FD;
33893
33894- lp->a = *a;
33895+ lp->a = a;
33896
33897 /* prior to register_netdev, dev->name is not yet correct */
33898 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
33899@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33900 if (lp->mii) {
33901 /* lp->phycount and lp->phymask are set to 0 by memset above */
33902
33903- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33904+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33905 /* scan for PHYs */
33906 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33907 unsigned short id1, id2;
33908@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33909 pr_info("Found PHY %04x:%04x at address %d\n",
33910 id1, id2, i);
33911 }
33912- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33913+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33914 if (lp->phycount > 1)
33915 lp->options |= PCNET32_PORT_MII;
33916 }
33917@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
33918 }
33919
33920 /* Reset the PCNET32 */
33921- lp->a.reset(ioaddr);
33922+ lp->a->reset(ioaddr);
33923
33924 /* switch pcnet32 to 32bit mode */
33925- lp->a.write_bcr(ioaddr, 20, 2);
33926+ lp->a->write_bcr(ioaddr, 20, 2);
33927
33928 netif_printk(lp, ifup, KERN_DEBUG, dev,
33929 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
33930@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
33931 (u32) (lp->init_dma_addr));
33932
33933 /* set/reset autoselect bit */
33934- val = lp->a.read_bcr(ioaddr, 2) & ~2;
33935+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
33936 if (lp->options & PCNET32_PORT_ASEL)
33937 val |= 2;
33938- lp->a.write_bcr(ioaddr, 2, val);
33939+ lp->a->write_bcr(ioaddr, 2, val);
33940
33941 /* handle full duplex setting */
33942 if (lp->mii_if.full_duplex) {
33943- val = lp->a.read_bcr(ioaddr, 9) & ~3;
33944+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
33945 if (lp->options & PCNET32_PORT_FD) {
33946 val |= 1;
33947 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
33948@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
33949 if (lp->chip_version == 0x2627)
33950 val |= 3;
33951 }
33952- lp->a.write_bcr(ioaddr, 9, val);
33953+ lp->a->write_bcr(ioaddr, 9, val);
33954 }
33955
33956 /* set/reset GPSI bit in test register */
33957- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
33958+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
33959 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
33960 val |= 0x10;
33961- lp->a.write_csr(ioaddr, 124, val);
33962+ lp->a->write_csr(ioaddr, 124, val);
33963
33964 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
33965 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
33966@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
33967 * duplex, and/or enable auto negotiation, and clear DANAS
33968 */
33969 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
33970- lp->a.write_bcr(ioaddr, 32,
33971- lp->a.read_bcr(ioaddr, 32) | 0x0080);
33972+ lp->a->write_bcr(ioaddr, 32,
33973+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
33974 /* disable Auto Negotiation, set 10Mpbs, HD */
33975- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
33976+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
33977 if (lp->options & PCNET32_PORT_FD)
33978 val |= 0x10;
33979 if (lp->options & PCNET32_PORT_100)
33980 val |= 0x08;
33981- lp->a.write_bcr(ioaddr, 32, val);
33982+ lp->a->write_bcr(ioaddr, 32, val);
33983 } else {
33984 if (lp->options & PCNET32_PORT_ASEL) {
33985- lp->a.write_bcr(ioaddr, 32,
33986- lp->a.read_bcr(ioaddr,
33987+ lp->a->write_bcr(ioaddr, 32,
33988+ lp->a->read_bcr(ioaddr,
33989 32) | 0x0080);
33990 /* enable auto negotiate, setup, disable fd */
33991- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
33992+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
33993 val |= 0x20;
33994- lp->a.write_bcr(ioaddr, 32, val);
33995+ lp->a->write_bcr(ioaddr, 32, val);
33996 }
33997 }
33998 } else {
33999@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
34000 * There is really no good other way to handle multiple PHYs
34001 * other than turning off all automatics
34002 */
34003- val = lp->a.read_bcr(ioaddr, 2);
34004- lp->a.write_bcr(ioaddr, 2, val & ~2);
34005- val = lp->a.read_bcr(ioaddr, 32);
34006- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34007+ val = lp->a->read_bcr(ioaddr, 2);
34008+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34009+ val = lp->a->read_bcr(ioaddr, 32);
34010+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34011
34012 if (!(lp->options & PCNET32_PORT_ASEL)) {
34013 /* setup ecmd */
34014@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
34015 ethtool_cmd_speed_set(&ecmd,
34016 (lp->options & PCNET32_PORT_100) ?
34017 SPEED_100 : SPEED_10);
34018- bcr9 = lp->a.read_bcr(ioaddr, 9);
34019+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34020
34021 if (lp->options & PCNET32_PORT_FD) {
34022 ecmd.duplex = DUPLEX_FULL;
34023@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
34024 ecmd.duplex = DUPLEX_HALF;
34025 bcr9 |= ~(1 << 0);
34026 }
34027- lp->a.write_bcr(ioaddr, 9, bcr9);
34028+ lp->a->write_bcr(ioaddr, 9, bcr9);
34029 }
34030
34031 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34032@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
34033
34034 #ifdef DO_DXSUFLO
34035 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34036- val = lp->a.read_csr(ioaddr, CSR3);
34037+ val = lp->a->read_csr(ioaddr, CSR3);
34038 val |= 0x40;
34039- lp->a.write_csr(ioaddr, CSR3, val);
34040+ lp->a->write_csr(ioaddr, CSR3, val);
34041 }
34042 #endif
34043
34044@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
34045 napi_enable(&lp->napi);
34046
34047 /* Re-initialize the PCNET32, and start it when done. */
34048- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34049- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34050+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34051+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34052
34053- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34054- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34055+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34056+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34057
34058 netif_start_queue(dev);
34059
34060@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
34061
34062 i = 0;
34063 while (i++ < 100)
34064- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34065+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34066 break;
34067 /*
34068 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34069 * reports that doing so triggers a bug in the '974.
34070 */
34071- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34072+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34073
34074 netif_printk(lp, ifup, KERN_DEBUG, dev,
34075 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
34076 i,
34077 (u32) (lp->init_dma_addr),
34078- lp->a.read_csr(ioaddr, CSR0));
34079+ lp->a->read_csr(ioaddr, CSR0));
34080
34081 spin_unlock_irqrestore(&lp->lock, flags);
34082
34083@@ -2218,7 +2218,7 @@ err_free_ring:
34084 * Switch back to 16bit mode to avoid problems with dumb
34085 * DOS packet driver after a warm reboot
34086 */
34087- lp->a.write_bcr(ioaddr, 20, 4);
34088+ lp->a->write_bcr(ioaddr, 20, 4);
34089
34090 err_free_irq:
34091 spin_unlock_irqrestore(&lp->lock, flags);
34092@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
34093
34094 /* wait for stop */
34095 for (i = 0; i < 100; i++)
34096- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34097+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34098 break;
34099
34100 if (i >= 100)
34101@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
34102 return;
34103
34104 /* ReInit Ring */
34105- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34106+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34107 i = 0;
34108 while (i++ < 1000)
34109- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34110+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34111 break;
34112
34113- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34114+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34115 }
34116
34117 static void pcnet32_tx_timeout(struct net_device *dev)
34118@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
34119 /* Transmitter timeout, serious problems. */
34120 if (pcnet32_debug & NETIF_MSG_DRV)
34121 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
34122- dev->name, lp->a.read_csr(ioaddr, CSR0));
34123- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34124+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34125+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34126 dev->stats.tx_errors++;
34127 if (netif_msg_tx_err(lp)) {
34128 int i;
34129@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
34130
34131 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
34132 "%s() called, csr0 %4.4x\n",
34133- __func__, lp->a.read_csr(ioaddr, CSR0));
34134+ __func__, lp->a->read_csr(ioaddr, CSR0));
34135
34136 /* Default status -- will not enable Successful-TxDone
34137 * interrupt when that option is available to us.
34138@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
34139 dev->stats.tx_bytes += skb->len;
34140
34141 /* Trigger an immediate send poll. */
34142- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34143+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34144
34145 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
34146 lp->tx_full = 1;
34147@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
34148
34149 spin_lock(&lp->lock);
34150
34151- csr0 = lp->a.read_csr(ioaddr, CSR0);
34152+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34153 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34154 if (csr0 == 0xffff)
34155 break; /* PCMCIA remove happened */
34156 /* Acknowledge all of the current interrupt sources ASAP. */
34157- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34158+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34159
34160 netif_printk(lp, intr, KERN_DEBUG, dev,
34161 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
34162- csr0, lp->a.read_csr(ioaddr, CSR0));
34163+ csr0, lp->a->read_csr(ioaddr, CSR0));
34164
34165 /* Log misc errors. */
34166 if (csr0 & 0x4000)
34167@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34168 if (napi_schedule_prep(&lp->napi)) {
34169 u16 val;
34170 /* set interrupt masks */
34171- val = lp->a.read_csr(ioaddr, CSR3);
34172+ val = lp->a->read_csr(ioaddr, CSR3);
34173 val |= 0x5f00;
34174- lp->a.write_csr(ioaddr, CSR3, val);
34175+ lp->a->write_csr(ioaddr, CSR3, val);
34176
34177 __napi_schedule(&lp->napi);
34178 break;
34179 }
34180- csr0 = lp->a.read_csr(ioaddr, CSR0);
34181+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34182 }
34183
34184 netif_printk(lp, intr, KERN_DEBUG, dev,
34185 "exiting interrupt, csr0=%#4.4x\n",
34186- lp->a.read_csr(ioaddr, CSR0));
34187+ lp->a->read_csr(ioaddr, CSR0));
34188
34189 spin_unlock(&lp->lock);
34190
34191@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
34192
34193 spin_lock_irqsave(&lp->lock, flags);
34194
34195- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34196+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34197
34198 netif_printk(lp, ifdown, KERN_DEBUG, dev,
34199 "Shutting down ethercard, status was %2.2x\n",
34200- lp->a.read_csr(ioaddr, CSR0));
34201+ lp->a->read_csr(ioaddr, CSR0));
34202
34203 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34204- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34205+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34206
34207 /*
34208 * Switch back to 16bit mode to avoid problems with dumb
34209 * DOS packet driver after a warm reboot
34210 */
34211- lp->a.write_bcr(ioaddr, 20, 4);
34212+ lp->a->write_bcr(ioaddr, 20, 4);
34213
34214 spin_unlock_irqrestore(&lp->lock, flags);
34215
34216@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
34217 unsigned long flags;
34218
34219 spin_lock_irqsave(&lp->lock, flags);
34220- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34221+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34222 spin_unlock_irqrestore(&lp->lock, flags);
34223
34224 return &dev->stats;
34225@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
34226 if (dev->flags & IFF_ALLMULTI) {
34227 ib->filter[0] = cpu_to_le32(~0U);
34228 ib->filter[1] = cpu_to_le32(~0U);
34229- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34230- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34231- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34232- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34233+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34234+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34235+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34236+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34237 return;
34238 }
34239 /* clear the multicast filter */
34240@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
34241 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34242 }
34243 for (i = 0; i < 4; i++)
34244- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34245+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34246 le16_to_cpu(mcast_table[i]));
34247 }
34248
34249@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
34250
34251 spin_lock_irqsave(&lp->lock, flags);
34252 suspended = pcnet32_suspend(dev, &flags, 0);
34253- csr15 = lp->a.read_csr(ioaddr, CSR15);
34254+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34255 if (dev->flags & IFF_PROMISC) {
34256 /* Log any net taps. */
34257 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
34258 lp->init_block->mode =
34259 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34260 7);
34261- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34262+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34263 } else {
34264 lp->init_block->mode =
34265 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34266- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34267+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34268 pcnet32_load_multicast(dev);
34269 }
34270
34271 if (suspended) {
34272 int csr5;
34273 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34274- csr5 = lp->a.read_csr(ioaddr, CSR5);
34275- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34276+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34277+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34278 } else {
34279- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34280+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34281 pcnet32_restart(dev, CSR0_NORMAL);
34282 netif_wake_queue(dev);
34283 }
34284@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
34285 if (!lp->mii)
34286 return 0;
34287
34288- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34289- val_out = lp->a.read_bcr(ioaddr, 34);
34290+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34291+ val_out = lp->a->read_bcr(ioaddr, 34);
34292
34293 return val_out;
34294 }
34295@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
34296 if (!lp->mii)
34297 return;
34298
34299- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34300- lp->a.write_bcr(ioaddr, 34, val);
34301+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34302+ lp->a->write_bcr(ioaddr, 34, val);
34303 }
34304
34305 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34306@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34307 curr_link = mii_link_ok(&lp->mii_if);
34308 } else {
34309 ulong ioaddr = dev->base_addr; /* card base I/O address */
34310- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34311+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34312 }
34313 if (!curr_link) {
34314 if (prev_link || verbose) {
34315@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34316 (ecmd.duplex == DUPLEX_FULL)
34317 ? "full" : "half");
34318 }
34319- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34320+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34321 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34322 if (lp->mii_if.full_duplex)
34323 bcr9 |= (1 << 0);
34324 else
34325 bcr9 &= ~(1 << 0);
34326- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34327+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34328 }
34329 } else {
34330 netif_info(lp, link, dev, "link up\n");
34331diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
34332index edfa15d..002bfa9 100644
34333--- a/drivers/net/ppp_generic.c
34334+++ b/drivers/net/ppp_generic.c
34335@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34336 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34337 struct ppp_stats stats;
34338 struct ppp_comp_stats cstats;
34339- char *vers;
34340
34341 switch (cmd) {
34342 case SIOCGPPPSTATS:
34343@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34344 break;
34345
34346 case SIOCGPPPVER:
34347- vers = PPP_VERSION;
34348- if (copy_to_user(addr, vers, strlen(vers) + 1))
34349+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34350 break;
34351 err = 0;
34352 break;
34353diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
34354index 6d657ca..d1be94b 100644
34355--- a/drivers/net/r8169.c
34356+++ b/drivers/net/r8169.c
34357@@ -663,12 +663,12 @@ struct rtl8169_private {
34358 struct mdio_ops {
34359 void (*write)(void __iomem *, int, int);
34360 int (*read)(void __iomem *, int);
34361- } mdio_ops;
34362+ } __no_const mdio_ops;
34363
34364 struct pll_power_ops {
34365 void (*down)(struct rtl8169_private *);
34366 void (*up)(struct rtl8169_private *);
34367- } pll_power_ops;
34368+ } __no_const pll_power_ops;
34369
34370 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34371 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34372diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
34373index 3c0f131..17f8b02 100644
34374--- a/drivers/net/sis190.c
34375+++ b/drivers/net/sis190.c
34376@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34377 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34378 struct net_device *dev)
34379 {
34380- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34381+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34382 struct sis190_private *tp = netdev_priv(dev);
34383 struct pci_dev *isa_bridge;
34384 u8 reg, tmp8;
34385diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
34386index 4793df8..44c9849 100644
34387--- a/drivers/net/sundance.c
34388+++ b/drivers/net/sundance.c
34389@@ -218,7 +218,7 @@ enum {
34390 struct pci_id_info {
34391 const char *name;
34392 };
34393-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34394+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34395 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34396 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34397 {"D-Link DFE-580TX 4 port Server Adapter"},
34398diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
34399index 2ea456d..3ad9523 100644
34400--- a/drivers/net/tg3.h
34401+++ b/drivers/net/tg3.h
34402@@ -134,6 +134,7 @@
34403 #define CHIPREV_ID_5750_A0 0x4000
34404 #define CHIPREV_ID_5750_A1 0x4001
34405 #define CHIPREV_ID_5750_A3 0x4003
34406+#define CHIPREV_ID_5750_C1 0x4201
34407 #define CHIPREV_ID_5750_C2 0x4202
34408 #define CHIPREV_ID_5752_A0_HW 0x5000
34409 #define CHIPREV_ID_5752_A0 0x6000
34410diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34411index 515f122..41dd273 100644
34412--- a/drivers/net/tokenring/abyss.c
34413+++ b/drivers/net/tokenring/abyss.c
34414@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34415
34416 static int __init abyss_init (void)
34417 {
34418- abyss_netdev_ops = tms380tr_netdev_ops;
34419+ pax_open_kernel();
34420+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34421
34422- abyss_netdev_ops.ndo_open = abyss_open;
34423- abyss_netdev_ops.ndo_stop = abyss_close;
34424+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34425+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34426+ pax_close_kernel();
34427
34428 return pci_register_driver(&abyss_driver);
34429 }
34430diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34431index 6153cfd..cf69c1c 100644
34432--- a/drivers/net/tokenring/madgemc.c
34433+++ b/drivers/net/tokenring/madgemc.c
34434@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34435
34436 static int __init madgemc_init (void)
34437 {
34438- madgemc_netdev_ops = tms380tr_netdev_ops;
34439- madgemc_netdev_ops.ndo_open = madgemc_open;
34440- madgemc_netdev_ops.ndo_stop = madgemc_close;
34441+ pax_open_kernel();
34442+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34443+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34444+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34445+ pax_close_kernel();
34446
34447 return mca_register_driver (&madgemc_driver);
34448 }
34449diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34450index 8d362e6..f91cc52 100644
34451--- a/drivers/net/tokenring/proteon.c
34452+++ b/drivers/net/tokenring/proteon.c
34453@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34454 struct platform_device *pdev;
34455 int i, num = 0, err = 0;
34456
34457- proteon_netdev_ops = tms380tr_netdev_ops;
34458- proteon_netdev_ops.ndo_open = proteon_open;
34459- proteon_netdev_ops.ndo_stop = tms380tr_close;
34460+ pax_open_kernel();
34461+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34462+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34463+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34464+ pax_close_kernel();
34465
34466 err = platform_driver_register(&proteon_driver);
34467 if (err)
34468diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34469index 46db5c5..37c1536 100644
34470--- a/drivers/net/tokenring/skisa.c
34471+++ b/drivers/net/tokenring/skisa.c
34472@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34473 struct platform_device *pdev;
34474 int i, num = 0, err = 0;
34475
34476- sk_isa_netdev_ops = tms380tr_netdev_ops;
34477- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34478- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34479+ pax_open_kernel();
34480+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34481+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34482+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34483+ pax_close_kernel();
34484
34485 err = platform_driver_register(&sk_isa_driver);
34486 if (err)
34487diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
34488index ce90efc..2676f89 100644
34489--- a/drivers/net/tulip/de2104x.c
34490+++ b/drivers/net/tulip/de2104x.c
34491@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
34492 struct de_srom_info_leaf *il;
34493 void *bufp;
34494
34495+ pax_track_stack();
34496+
34497 /* download entire eeprom */
34498 for (i = 0; i < DE_EEPROM_WORDS; i++)
34499 ((__le16 *)ee_data)[i] =
34500diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
34501index 959b410..c97fac2 100644
34502--- a/drivers/net/tulip/de4x5.c
34503+++ b/drivers/net/tulip/de4x5.c
34504@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34505 for (i=0; i<ETH_ALEN; i++) {
34506 tmp.addr[i] = dev->dev_addr[i];
34507 }
34508- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34509+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34510 break;
34511
34512 case DE4X5_SET_HWADDR: /* Set the hardware address */
34513@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34514 spin_lock_irqsave(&lp->lock, flags);
34515 memcpy(&statbuf, &lp->pktStats, ioc->len);
34516 spin_unlock_irqrestore(&lp->lock, flags);
34517- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34518+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34519 return -EFAULT;
34520 break;
34521 }
34522diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
34523index fa5eee9..e074432 100644
34524--- a/drivers/net/tulip/eeprom.c
34525+++ b/drivers/net/tulip/eeprom.c
34526@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34527 {NULL}};
34528
34529
34530-static const char *block_name[] __devinitdata = {
34531+static const char *block_name[] __devinitconst = {
34532 "21140 non-MII",
34533 "21140 MII PHY",
34534 "21142 Serial PHY",
34535diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
34536index 862eadf..3eee1e6 100644
34537--- a/drivers/net/tulip/winbond-840.c
34538+++ b/drivers/net/tulip/winbond-840.c
34539@@ -236,7 +236,7 @@ struct pci_id_info {
34540 int drv_flags; /* Driver use, intended as capability flags. */
34541 };
34542
34543-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34544+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34545 { /* Sometime a Level-One switch card. */
34546 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34547 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34548diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34549index 304fe78..db112fa 100644
34550--- a/drivers/net/usb/hso.c
34551+++ b/drivers/net/usb/hso.c
34552@@ -71,7 +71,7 @@
34553 #include <asm/byteorder.h>
34554 #include <linux/serial_core.h>
34555 #include <linux/serial.h>
34556-
34557+#include <asm/local.h>
34558
34559 #define MOD_AUTHOR "Option Wireless"
34560 #define MOD_DESCRIPTION "USB High Speed Option driver"
34561@@ -257,7 +257,7 @@ struct hso_serial {
34562
34563 /* from usb_serial_port */
34564 struct tty_struct *tty;
34565- int open_count;
34566+ local_t open_count;
34567 spinlock_t serial_lock;
34568
34569 int (*write_data) (struct hso_serial *serial);
34570@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34571 struct urb *urb;
34572
34573 urb = serial->rx_urb[0];
34574- if (serial->open_count > 0) {
34575+ if (local_read(&serial->open_count) > 0) {
34576 count = put_rxbuf_data(urb, serial);
34577 if (count == -1)
34578 return;
34579@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34580 DUMP1(urb->transfer_buffer, urb->actual_length);
34581
34582 /* Anyone listening? */
34583- if (serial->open_count == 0)
34584+ if (local_read(&serial->open_count) == 0)
34585 return;
34586
34587 if (status == 0) {
34588@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34589 spin_unlock_irq(&serial->serial_lock);
34590
34591 /* check for port already opened, if not set the termios */
34592- serial->open_count++;
34593- if (serial->open_count == 1) {
34594+ if (local_inc_return(&serial->open_count) == 1) {
34595 serial->rx_state = RX_IDLE;
34596 /* Force default termio settings */
34597 _hso_serial_set_termios(tty, NULL);
34598@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34599 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34600 if (result) {
34601 hso_stop_serial_device(serial->parent);
34602- serial->open_count--;
34603+ local_dec(&serial->open_count);
34604 kref_put(&serial->parent->ref, hso_serial_ref_free);
34605 }
34606 } else {
34607@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34608
34609 /* reset the rts and dtr */
34610 /* do the actual close */
34611- serial->open_count--;
34612+ local_dec(&serial->open_count);
34613
34614- if (serial->open_count <= 0) {
34615- serial->open_count = 0;
34616+ if (local_read(&serial->open_count) <= 0) {
34617+ local_set(&serial->open_count, 0);
34618 spin_lock_irq(&serial->serial_lock);
34619 if (serial->tty == tty) {
34620 serial->tty->driver_data = NULL;
34621@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34622
34623 /* the actual setup */
34624 spin_lock_irqsave(&serial->serial_lock, flags);
34625- if (serial->open_count)
34626+ if (local_read(&serial->open_count))
34627 _hso_serial_set_termios(tty, old);
34628 else
34629 tty->termios = old;
34630@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34631 D1("Pending read interrupt on port %d\n", i);
34632 spin_lock(&serial->serial_lock);
34633 if (serial->rx_state == RX_IDLE &&
34634- serial->open_count > 0) {
34635+ local_read(&serial->open_count) > 0) {
34636 /* Setup and send a ctrl req read on
34637 * port i */
34638 if (!serial->rx_urb_filled[0]) {
34639@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34640 /* Start all serial ports */
34641 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34642 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34643- if (dev2ser(serial_table[i])->open_count) {
34644+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
34645 result =
34646 hso_start_serial_device(serial_table[i], GFP_NOIO);
34647 hso_kick_transmit(dev2ser(serial_table[i]));
34648diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34649index 27400ed..c796e05 100644
34650--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34651+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34652@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34653 * Return with error code if any of the queue indices
34654 * is out of range
34655 */
34656- if (p->ring_index[i] < 0 ||
34657- p->ring_index[i] >= adapter->num_rx_queues)
34658+ if (p->ring_index[i] >= adapter->num_rx_queues)
34659 return -EINVAL;
34660 }
34661
34662diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
34663index dd36258..e47fd31 100644
34664--- a/drivers/net/vxge/vxge-config.h
34665+++ b/drivers/net/vxge/vxge-config.h
34666@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34667 void (*link_down)(struct __vxge_hw_device *devh);
34668 void (*crit_err)(struct __vxge_hw_device *devh,
34669 enum vxge_hw_event type, u64 ext_data);
34670-};
34671+} __no_const;
34672
34673 /*
34674 * struct __vxge_hw_blockpool_entry - Block private data structure
34675diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
34676index 178348a2..18bb433 100644
34677--- a/drivers/net/vxge/vxge-main.c
34678+++ b/drivers/net/vxge/vxge-main.c
34679@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
34680 struct sk_buff *completed[NR_SKB_COMPLETED];
34681 int more;
34682
34683+ pax_track_stack();
34684+
34685 do {
34686 more = 0;
34687 skb_ptr = completed;
34688@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
34689 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34690 int index;
34691
34692+ pax_track_stack();
34693+
34694 /*
34695 * Filling
34696 * - itable with bucket numbers
34697diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
34698index 4a518a3..936b334 100644
34699--- a/drivers/net/vxge/vxge-traffic.h
34700+++ b/drivers/net/vxge/vxge-traffic.h
34701@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34702 struct vxge_hw_mempool_dma *dma_object,
34703 u32 index,
34704 u32 is_last);
34705-};
34706+} __no_const;
34707
34708 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34709 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34710diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
34711index 56aeb01..547f71f 100644
34712--- a/drivers/net/wan/hdlc_x25.c
34713+++ b/drivers/net/wan/hdlc_x25.c
34714@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
34715
34716 static int x25_open(struct net_device *dev)
34717 {
34718- struct lapb_register_struct cb;
34719+ static struct lapb_register_struct cb = {
34720+ .connect_confirmation = x25_connected,
34721+ .connect_indication = x25_connected,
34722+ .disconnect_confirmation = x25_disconnected,
34723+ .disconnect_indication = x25_disconnected,
34724+ .data_indication = x25_data_indication,
34725+ .data_transmit = x25_data_transmit
34726+ };
34727 int result;
34728
34729- cb.connect_confirmation = x25_connected;
34730- cb.connect_indication = x25_connected;
34731- cb.disconnect_confirmation = x25_disconnected;
34732- cb.disconnect_indication = x25_disconnected;
34733- cb.data_indication = x25_data_indication;
34734- cb.data_transmit = x25_data_transmit;
34735-
34736 result = lapb_register(dev, &cb);
34737 if (result != LAPB_OK)
34738 return result;
34739diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
34740index 1fda46c..f2858f2 100644
34741--- a/drivers/net/wimax/i2400m/usb-fw.c
34742+++ b/drivers/net/wimax/i2400m/usb-fw.c
34743@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
34744 int do_autopm = 1;
34745 DECLARE_COMPLETION_ONSTACK(notif_completion);
34746
34747+ pax_track_stack();
34748+
34749 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34750 i2400m, ack, ack_size);
34751 BUG_ON(_ack == i2400m->bm_ack_buf);
34752diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
34753index e1b3e3c..e413f18 100644
34754--- a/drivers/net/wireless/airo.c
34755+++ b/drivers/net/wireless/airo.c
34756@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
34757 BSSListElement * loop_net;
34758 BSSListElement * tmp_net;
34759
34760+ pax_track_stack();
34761+
34762 /* Blow away current list of scan results */
34763 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34764 list_move_tail (&loop_net->list, &ai->network_free_list);
34765@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
34766 WepKeyRid wkr;
34767 int rc;
34768
34769+ pax_track_stack();
34770+
34771 memset( &mySsid, 0, sizeof( mySsid ) );
34772 kfree (ai->flash);
34773 ai->flash = NULL;
34774@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode,
34775 __le32 *vals = stats.vals;
34776 int len;
34777
34778+ pax_track_stack();
34779+
34780 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34781 return -ENOMEM;
34782 data = file->private_data;
34783@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
34784 /* If doLoseSync is not 1, we won't do a Lose Sync */
34785 int doLoseSync = -1;
34786
34787+ pax_track_stack();
34788+
34789 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34790 return -ENOMEM;
34791 data = file->private_data;
34792@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev,
34793 int i;
34794 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34795
34796+ pax_track_stack();
34797+
34798 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34799 if (!qual)
34800 return -ENOMEM;
34801@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
34802 CapabilityRid cap_rid;
34803 __le32 *vals = stats_rid.vals;
34804
34805+ pax_track_stack();
34806+
34807 /* Get stats out of the card */
34808 clear_bit(JOB_WSTATS, &local->jobs);
34809 if (local->power.event) {
34810diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34811index 17c4b56..00d836f 100644
34812--- a/drivers/net/wireless/ath/ath.h
34813+++ b/drivers/net/wireless/ath/ath.h
34814@@ -121,6 +121,7 @@ struct ath_ops {
34815 void (*write_flush) (void *);
34816 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34817 };
34818+typedef struct ath_ops __no_const ath_ops_no_const;
34819
34820 struct ath_common;
34821 struct ath_bus_ops;
34822diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
34823index ccca724..7afbadc 100644
34824--- a/drivers/net/wireless/ath/ath5k/debug.c
34825+++ b/drivers/net/wireless/ath/ath5k/debug.c
34826@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
34827 unsigned int v;
34828 u64 tsf;
34829
34830+ pax_track_stack();
34831+
34832 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
34833 len += snprintf(buf + len, sizeof(buf) - len,
34834 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
34835@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
34836 unsigned int len = 0;
34837 unsigned int i;
34838
34839+ pax_track_stack();
34840+
34841 len += snprintf(buf + len, sizeof(buf) - len,
34842 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
34843
34844@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
34845 unsigned int len = 0;
34846 u32 filt = ath5k_hw_get_rx_filter(ah);
34847
34848+ pax_track_stack();
34849+
34850 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
34851 ah->bssidmask);
34852 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
34853@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
34854 unsigned int len = 0;
34855 int i;
34856
34857+ pax_track_stack();
34858+
34859 len += snprintf(buf + len, sizeof(buf) - len,
34860 "RX\n---------------------\n");
34861 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
34862@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
34863 char buf[700];
34864 unsigned int len = 0;
34865
34866+ pax_track_stack();
34867+
34868 len += snprintf(buf + len, sizeof(buf) - len,
34869 "HW has PHY error counters:\t%s\n",
34870 ah->ah_capabilities.cap_has_phyerr_counters ?
34871@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34872 struct ath5k_buf *bf, *bf0;
34873 int i, n;
34874
34875+ pax_track_stack();
34876+
34877 len += snprintf(buf + len, sizeof(buf) - len,
34878 "available txbuffers: %d\n", ah->txbuf_len);
34879
34880diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34881index 7c2aaad..ad14dee 100644
34882--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34883+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34884@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
34885 int i, im, j;
34886 int nmeasurement;
34887
34888+ pax_track_stack();
34889+
34890 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
34891 if (ah->txchainmask & (1 << i))
34892 num_chains++;
34893diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34894index f80d1d6..08b773d 100644
34895--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34896+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34897@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
34898 int theta_low_bin = 0;
34899 int i;
34900
34901+ pax_track_stack();
34902+
34903 /* disregard any bin that contains <= 16 samples */
34904 thresh_accum_cnt = 16;
34905 scale_factor = 5;
34906diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
34907index d1eb896..8b67cd4 100644
34908--- a/drivers/net/wireless/ath/ath9k/debug.c
34909+++ b/drivers/net/wireless/ath/ath9k/debug.c
34910@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
34911 char buf[512];
34912 unsigned int len = 0;
34913
34914+ pax_track_stack();
34915+
34916 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
34917 len += snprintf(buf + len, sizeof(buf) - len,
34918 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
34919@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
34920 u8 addr[ETH_ALEN];
34921 u32 tmp;
34922
34923+ pax_track_stack();
34924+
34925 len += snprintf(buf + len, sizeof(buf) - len,
34926 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
34927 wiphy_name(sc->hw->wiphy),
34928diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34929index d3ff33c..309398e 100644
34930--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34931+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34932@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
34933 unsigned int len = 0;
34934 int ret = 0;
34935
34936+ pax_track_stack();
34937+
34938 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34939
34940 ath9k_htc_ps_wakeup(priv);
34941@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
34942 unsigned int len = 0;
34943 int ret = 0;
34944
34945+ pax_track_stack();
34946+
34947 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34948
34949 ath9k_htc_ps_wakeup(priv);
34950@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
34951 unsigned int len = 0;
34952 int ret = 0;
34953
34954+ pax_track_stack();
34955+
34956 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34957
34958 ath9k_htc_ps_wakeup(priv);
34959@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
34960 char buf[512];
34961 unsigned int len = 0;
34962
34963+ pax_track_stack();
34964+
34965 len += snprintf(buf + len, sizeof(buf) - len,
34966 "%20s : %10u\n", "Buffers queued",
34967 priv->debug.tx_stats.buf_queued);
34968@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
34969 char buf[512];
34970 unsigned int len = 0;
34971
34972+ pax_track_stack();
34973+
34974 spin_lock_bh(&priv->tx.tx_lock);
34975
34976 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
34977@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34978 char buf[512];
34979 unsigned int len = 0;
34980
34981+ pax_track_stack();
34982+
34983 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
34984 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
34985
34986diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34987index c798890..c19a8fb 100644
34988--- a/drivers/net/wireless/ath/ath9k/hw.h
34989+++ b/drivers/net/wireless/ath/ath9k/hw.h
34990@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
34991
34992 /* ANI */
34993 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34994-};
34995+} __no_const;
34996
34997 /**
34998 * struct ath_hw_ops - callbacks used by hardware code and driver code
34999@@ -639,7 +639,7 @@ struct ath_hw_ops {
35000 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35001 struct ath_hw_antcomb_conf *antconf);
35002
35003-};
35004+} __no_const;
35005
35006 struct ath_nf_limits {
35007 s16 max;
35008@@ -652,7 +652,7 @@ struct ath_nf_limits {
35009 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
35010
35011 struct ath_hw {
35012- struct ath_ops reg_ops;
35013+ ath_ops_no_const reg_ops;
35014
35015 struct ieee80211_hw *hw;
35016 struct ath_common common;
35017diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
35018index ef9ad79..f5f8d80 100644
35019--- a/drivers/net/wireless/ipw2x00/ipw2100.c
35020+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
35021@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
35022 int err;
35023 DECLARE_SSID_BUF(ssid);
35024
35025+ pax_track_stack();
35026+
35027 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35028
35029 if (ssid_len)
35030@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
35031 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35032 int err;
35033
35034+ pax_track_stack();
35035+
35036 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35037 idx, keylen, len);
35038
35039diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
35040index 32a9966..de69787 100644
35041--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
35042+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
35043@@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device
35044 unsigned long flags;
35045 DECLARE_SSID_BUF(ssid);
35046
35047+ pax_track_stack();
35048+
35049 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35050 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35051 print_ssid(ssid, info_element->data, info_element->len),
35052diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
35053index 66ee1562..b90412b 100644
35054--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
35055+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
35056@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
35057 */
35058 if (iwl3945_mod_params.disable_hw_scan) {
35059 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35060- iwl3945_hw_ops.hw_scan = NULL;
35061+ pax_open_kernel();
35062+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35063+ pax_close_kernel();
35064 }
35065
35066 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
35067diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35068index 3789ff4..22ab151 100644
35069--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35070+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35071@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
35072 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
35073 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
35074
35075+ pax_track_stack();
35076+
35077 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35078
35079 /* Treat uninitialized rate scaling data same as non-existing. */
35080@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
35081 container_of(lq_sta, struct iwl_station_priv, lq_sta);
35082 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35083
35084+ pax_track_stack();
35085+
35086 /* Override starting rate (index 0) if needed for debug purposes */
35087 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35088
35089diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
35090index f9a407e..a6f2bb7 100644
35091--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
35092+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
35093@@ -68,8 +68,8 @@ do { \
35094 } while (0)
35095
35096 #else
35097-#define IWL_DEBUG(__priv, level, fmt, args...)
35098-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35099+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35100+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35101 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35102 const void *p, u32 len)
35103 {}
35104diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35105index ec1485b..900c3bd 100644
35106--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35107+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35108@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
35109 int pos = 0;
35110 const size_t bufsz = sizeof(buf);
35111
35112+ pax_track_stack();
35113+
35114 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35115 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35116 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
35117@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
35118 char buf[256 * NUM_IWL_RXON_CTX];
35119 const size_t bufsz = sizeof(buf);
35120
35121+ pax_track_stack();
35122+
35123 for_each_context(priv, ctx) {
35124 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
35125 ctx->ctxid);
35126diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
35127index 0a0cc96..fd49ad8 100644
35128--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
35129+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
35130@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
35131 int buf_len = 512;
35132 size_t len = 0;
35133
35134+ pax_track_stack();
35135+
35136 if (*ppos != 0)
35137 return 0;
35138 if (count < sizeof(buf))
35139diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35140index 031cd89..bdc8435 100644
35141--- a/drivers/net/wireless/mac80211_hwsim.c
35142+++ b/drivers/net/wireless/mac80211_hwsim.c
35143@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void)
35144 return -EINVAL;
35145
35146 if (fake_hw_scan) {
35147- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35148- mac80211_hwsim_ops.sw_scan_start = NULL;
35149- mac80211_hwsim_ops.sw_scan_complete = NULL;
35150+ pax_open_kernel();
35151+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35152+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35153+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35154+ pax_close_kernel();
35155 }
35156
35157 spin_lock_init(&hwsim_radio_lock);
35158diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35159index 2215c3c..64e6a47 100644
35160--- a/drivers/net/wireless/mwifiex/main.h
35161+++ b/drivers/net/wireless/mwifiex/main.h
35162@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
35163
35164 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
35165 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35166-};
35167+} __no_const;
35168
35169 struct mwifiex_adapter {
35170 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
35171diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35172index 29f9389..f6d2ce0 100644
35173--- a/drivers/net/wireless/rndis_wlan.c
35174+++ b/drivers/net/wireless/rndis_wlan.c
35175@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35176
35177 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35178
35179- if (rts_threshold < 0 || rts_threshold > 2347)
35180+ if (rts_threshold > 2347)
35181 rts_threshold = 2347;
35182
35183 tmp = cpu_to_le32(rts_threshold);
35184diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
35185index 3b11642..d6bb049 100644
35186--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
35187+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
35188@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
35189 u8 rfpath;
35190 u8 num_total_rfpath = rtlphy->num_total_rfpath;
35191
35192+ pax_track_stack();
35193+
35194 precommoncmdcnt = 0;
35195 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
35196 MAX_PRECMD_CNT,
35197diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35198index a77f1bb..c608b2b 100644
35199--- a/drivers/net/wireless/wl1251/wl1251.h
35200+++ b/drivers/net/wireless/wl1251/wl1251.h
35201@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35202 void (*reset)(struct wl1251 *wl);
35203 void (*enable_irq)(struct wl1251 *wl);
35204 void (*disable_irq)(struct wl1251 *wl);
35205-};
35206+} __no_const;
35207
35208 struct wl1251 {
35209 struct ieee80211_hw *hw;
35210diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
35211index e0b3736..4b466e6 100644
35212--- a/drivers/net/wireless/wl12xx/spi.c
35213+++ b/drivers/net/wireless/wl12xx/spi.c
35214@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
35215 u32 chunk_len;
35216 int i;
35217
35218+ pax_track_stack();
35219+
35220 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
35221
35222 spi_message_init(&m);
35223diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35224index f34b5b2..b5abb9f 100644
35225--- a/drivers/oprofile/buffer_sync.c
35226+++ b/drivers/oprofile/buffer_sync.c
35227@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35228 if (cookie == NO_COOKIE)
35229 offset = pc;
35230 if (cookie == INVALID_COOKIE) {
35231- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35232+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35233 offset = pc;
35234 }
35235 if (cookie != last_cookie) {
35236@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35237 /* add userspace sample */
35238
35239 if (!mm) {
35240- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35241+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35242 return 0;
35243 }
35244
35245 cookie = lookup_dcookie(mm, s->eip, &offset);
35246
35247 if (cookie == INVALID_COOKIE) {
35248- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35249+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35250 return 0;
35251 }
35252
35253@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35254 /* ignore backtraces if failed to add a sample */
35255 if (state == sb_bt_start) {
35256 state = sb_bt_ignore;
35257- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35258+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35259 }
35260 }
35261 release_mm(mm);
35262diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35263index dd87e86..bc0148c 100644
35264--- a/drivers/oprofile/event_buffer.c
35265+++ b/drivers/oprofile/event_buffer.c
35266@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35267 }
35268
35269 if (buffer_pos == buffer_size) {
35270- atomic_inc(&oprofile_stats.event_lost_overflow);
35271+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35272 return;
35273 }
35274
35275diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35276index f8c752e..28bf4fc 100644
35277--- a/drivers/oprofile/oprof.c
35278+++ b/drivers/oprofile/oprof.c
35279@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35280 if (oprofile_ops.switch_events())
35281 return;
35282
35283- atomic_inc(&oprofile_stats.multiplex_counter);
35284+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35285 start_switch_worker();
35286 }
35287
35288diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35289index 917d28e..d62d981 100644
35290--- a/drivers/oprofile/oprofile_stats.c
35291+++ b/drivers/oprofile/oprofile_stats.c
35292@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35293 cpu_buf->sample_invalid_eip = 0;
35294 }
35295
35296- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35297- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35298- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35299- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35300- atomic_set(&oprofile_stats.multiplex_counter, 0);
35301+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35302+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35303+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35304+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35305+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35306 }
35307
35308
35309diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35310index 38b6fc0..b5cbfce 100644
35311--- a/drivers/oprofile/oprofile_stats.h
35312+++ b/drivers/oprofile/oprofile_stats.h
35313@@ -13,11 +13,11 @@
35314 #include <linux/atomic.h>
35315
35316 struct oprofile_stat_struct {
35317- atomic_t sample_lost_no_mm;
35318- atomic_t sample_lost_no_mapping;
35319- atomic_t bt_lost_no_mapping;
35320- atomic_t event_lost_overflow;
35321- atomic_t multiplex_counter;
35322+ atomic_unchecked_t sample_lost_no_mm;
35323+ atomic_unchecked_t sample_lost_no_mapping;
35324+ atomic_unchecked_t bt_lost_no_mapping;
35325+ atomic_unchecked_t event_lost_overflow;
35326+ atomic_unchecked_t multiplex_counter;
35327 };
35328
35329 extern struct oprofile_stat_struct oprofile_stats;
35330diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35331index 1c0b799..c11b2d2 100644
35332--- a/drivers/oprofile/oprofilefs.c
35333+++ b/drivers/oprofile/oprofilefs.c
35334@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35335
35336
35337 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35338- char const *name, atomic_t *val)
35339+ char const *name, atomic_unchecked_t *val)
35340 {
35341 return __oprofilefs_create_file(sb, root, name,
35342 &atomic_ro_fops, 0444, val);
35343diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35344index 3f56bc0..707d642 100644
35345--- a/drivers/parport/procfs.c
35346+++ b/drivers/parport/procfs.c
35347@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35348
35349 *ppos += len;
35350
35351- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35352+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35353 }
35354
35355 #ifdef CONFIG_PARPORT_1284
35356@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35357
35358 *ppos += len;
35359
35360- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35361+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35362 }
35363 #endif /* IEEE1284.3 support. */
35364
35365diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35366index 9fff878..ad0ad53 100644
35367--- a/drivers/pci/hotplug/cpci_hotplug.h
35368+++ b/drivers/pci/hotplug/cpci_hotplug.h
35369@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35370 int (*hardware_test) (struct slot* slot, u32 value);
35371 u8 (*get_power) (struct slot* slot);
35372 int (*set_power) (struct slot* slot, int value);
35373-};
35374+} __no_const;
35375
35376 struct cpci_hp_controller {
35377 unsigned int irq;
35378diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35379index 76ba8a1..20ca857 100644
35380--- a/drivers/pci/hotplug/cpqphp_nvram.c
35381+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35382@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35383
35384 void compaq_nvram_init (void __iomem *rom_start)
35385 {
35386+
35387+#ifndef CONFIG_PAX_KERNEXEC
35388 if (rom_start) {
35389 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35390 }
35391+#endif
35392+
35393 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35394
35395 /* initialize our int15 lock */
35396diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35397index cbfbab1..6a9fced 100644
35398--- a/drivers/pci/pcie/aspm.c
35399+++ b/drivers/pci/pcie/aspm.c
35400@@ -27,9 +27,9 @@
35401 #define MODULE_PARAM_PREFIX "pcie_aspm."
35402
35403 /* Note: those are not register definitions */
35404-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35405-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35406-#define ASPM_STATE_L1 (4) /* L1 state */
35407+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35408+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35409+#define ASPM_STATE_L1 (4U) /* L1 state */
35410 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35411 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35412
35413diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35414index 6ab6bd3..72bdc69 100644
35415--- a/drivers/pci/probe.c
35416+++ b/drivers/pci/probe.c
35417@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35418 u32 l, sz, mask;
35419 u16 orig_cmd;
35420
35421- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35422+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35423
35424 if (!dev->mmio_always_on) {
35425 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35426diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35427index 27911b5..5b6db88 100644
35428--- a/drivers/pci/proc.c
35429+++ b/drivers/pci/proc.c
35430@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35431 static int __init pci_proc_init(void)
35432 {
35433 struct pci_dev *dev = NULL;
35434+
35435+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35436+#ifdef CONFIG_GRKERNSEC_PROC_USER
35437+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35438+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35439+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35440+#endif
35441+#else
35442 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35443+#endif
35444 proc_create("devices", 0, proc_bus_pci_dir,
35445 &proc_bus_pci_dev_operations);
35446 proc_initialized = 1;
35447diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
35448index 90832a9..419089a 100644
35449--- a/drivers/pci/xen-pcifront.c
35450+++ b/drivers/pci/xen-pcifront.c
35451@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
35452 struct pcifront_sd *sd = bus->sysdata;
35453 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35454
35455+ pax_track_stack();
35456+
35457 if (verbose_request)
35458 dev_info(&pdev->xdev->dev,
35459 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
35460@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
35461 struct pcifront_sd *sd = bus->sysdata;
35462 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35463
35464+ pax_track_stack();
35465+
35466 if (verbose_request)
35467 dev_info(&pdev->xdev->dev,
35468 "write dev=%04x:%02x:%02x.%01x - "
35469@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
35470 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35471 struct msi_desc *entry;
35472
35473+ pax_track_stack();
35474+
35475 if (nvec > SH_INFO_MAX_VEC) {
35476 dev_err(&dev->dev, "too much vector for pci frontend: %x."
35477 " Increase SH_INFO_MAX_VEC.\n", nvec);
35478@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
35479 struct pcifront_sd *sd = dev->bus->sysdata;
35480 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35481
35482+ pax_track_stack();
35483+
35484 err = do_pci_op(pdev, &op);
35485
35486 /* What should do for error ? */
35487@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
35488 struct pcifront_sd *sd = dev->bus->sysdata;
35489 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35490
35491+ pax_track_stack();
35492+
35493 err = do_pci_op(pdev, &op);
35494 if (likely(!err)) {
35495 vector[0] = op.value;
35496diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35497index 7bd829f..a3237ad 100644
35498--- a/drivers/platform/x86/thinkpad_acpi.c
35499+++ b/drivers/platform/x86/thinkpad_acpi.c
35500@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35501 return 0;
35502 }
35503
35504-void static hotkey_mask_warn_incomplete_mask(void)
35505+static void hotkey_mask_warn_incomplete_mask(void)
35506 {
35507 /* log only what the user can fix... */
35508 const u32 wantedmask = hotkey_driver_mask &
35509@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35510 }
35511 }
35512
35513-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35514- struct tp_nvram_state *newn,
35515- const u32 event_mask)
35516-{
35517-
35518 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35519 do { \
35520 if ((event_mask & (1 << __scancode)) && \
35521@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35522 tpacpi_hotkey_send_key(__scancode); \
35523 } while (0)
35524
35525- void issue_volchange(const unsigned int oldvol,
35526- const unsigned int newvol)
35527- {
35528- unsigned int i = oldvol;
35529+static void issue_volchange(const unsigned int oldvol,
35530+ const unsigned int newvol,
35531+ const u32 event_mask)
35532+{
35533+ unsigned int i = oldvol;
35534
35535- while (i > newvol) {
35536- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35537- i--;
35538- }
35539- while (i < newvol) {
35540- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35541- i++;
35542- }
35543+ while (i > newvol) {
35544+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35545+ i--;
35546 }
35547+ while (i < newvol) {
35548+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35549+ i++;
35550+ }
35551+}
35552
35553- void issue_brightnesschange(const unsigned int oldbrt,
35554- const unsigned int newbrt)
35555- {
35556- unsigned int i = oldbrt;
35557+static void issue_brightnesschange(const unsigned int oldbrt,
35558+ const unsigned int newbrt,
35559+ const u32 event_mask)
35560+{
35561+ unsigned int i = oldbrt;
35562
35563- while (i > newbrt) {
35564- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35565- i--;
35566- }
35567- while (i < newbrt) {
35568- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35569- i++;
35570- }
35571+ while (i > newbrt) {
35572+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35573+ i--;
35574+ }
35575+ while (i < newbrt) {
35576+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35577+ i++;
35578 }
35579+}
35580
35581+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35582+ struct tp_nvram_state *newn,
35583+ const u32 event_mask)
35584+{
35585 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35586 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35587 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35588@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35589 oldn->volume_level != newn->volume_level) {
35590 /* recently muted, or repeated mute keypress, or
35591 * multiple presses ending in mute */
35592- issue_volchange(oldn->volume_level, newn->volume_level);
35593+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35594 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35595 }
35596 } else {
35597@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35598 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35599 }
35600 if (oldn->volume_level != newn->volume_level) {
35601- issue_volchange(oldn->volume_level, newn->volume_level);
35602+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35603 } else if (oldn->volume_toggle != newn->volume_toggle) {
35604 /* repeated vol up/down keypress at end of scale ? */
35605 if (newn->volume_level == 0)
35606@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35607 /* handle brightness */
35608 if (oldn->brightness_level != newn->brightness_level) {
35609 issue_brightnesschange(oldn->brightness_level,
35610- newn->brightness_level);
35611+ newn->brightness_level,
35612+ event_mask);
35613 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35614 /* repeated key presses that didn't change state */
35615 if (newn->brightness_level == 0)
35616@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35617 && !tp_features.bright_unkfw)
35618 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35619 }
35620+}
35621
35622 #undef TPACPI_COMPARE_KEY
35623 #undef TPACPI_MAY_SEND_KEY
35624-}
35625
35626 /*
35627 * Polling driver
35628diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35629index b859d16..5cc6b1a 100644
35630--- a/drivers/pnp/pnpbios/bioscalls.c
35631+++ b/drivers/pnp/pnpbios/bioscalls.c
35632@@ -59,7 +59,7 @@ do { \
35633 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35634 } while(0)
35635
35636-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35637+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35638 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35639
35640 /*
35641@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35642
35643 cpu = get_cpu();
35644 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35645+
35646+ pax_open_kernel();
35647 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35648+ pax_close_kernel();
35649
35650 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35651 spin_lock_irqsave(&pnp_bios_lock, flags);
35652@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35653 :"memory");
35654 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35655
35656+ pax_open_kernel();
35657 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35658+ pax_close_kernel();
35659+
35660 put_cpu();
35661
35662 /* If we get here and this is set then the PnP BIOS faulted on us. */
35663@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35664 return status;
35665 }
35666
35667-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35668+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35669 {
35670 int i;
35671
35672@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35673 pnp_bios_callpoint.offset = header->fields.pm16offset;
35674 pnp_bios_callpoint.segment = PNP_CS16;
35675
35676+ pax_open_kernel();
35677+
35678 for_each_possible_cpu(i) {
35679 struct desc_struct *gdt = get_cpu_gdt_table(i);
35680 if (!gdt)
35681@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35682 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35683 (unsigned long)__va(header->fields.pm16dseg));
35684 }
35685+
35686+ pax_close_kernel();
35687 }
35688diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35689index b0ecacb..7c9da2e 100644
35690--- a/drivers/pnp/resource.c
35691+++ b/drivers/pnp/resource.c
35692@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35693 return 1;
35694
35695 /* check if the resource is valid */
35696- if (*irq < 0 || *irq > 15)
35697+ if (*irq > 15)
35698 return 0;
35699
35700 /* check if the resource is reserved */
35701@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35702 return 1;
35703
35704 /* check if the resource is valid */
35705- if (*dma < 0 || *dma == 4 || *dma > 7)
35706+ if (*dma == 4 || *dma > 7)
35707 return 0;
35708
35709 /* check if the resource is reserved */
35710diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35711index bb16f5b..c751eef 100644
35712--- a/drivers/power/bq27x00_battery.c
35713+++ b/drivers/power/bq27x00_battery.c
35714@@ -67,7 +67,7 @@
35715 struct bq27x00_device_info;
35716 struct bq27x00_access_methods {
35717 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35718-};
35719+} __no_const;
35720
35721 enum bq27x00_chip { BQ27000, BQ27500 };
35722
35723diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35724index 33f5d9a..d957d3f 100644
35725--- a/drivers/regulator/max8660.c
35726+++ b/drivers/regulator/max8660.c
35727@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35728 max8660->shadow_regs[MAX8660_OVER1] = 5;
35729 } else {
35730 /* Otherwise devices can be toggled via software */
35731- max8660_dcdc_ops.enable = max8660_dcdc_enable;
35732- max8660_dcdc_ops.disable = max8660_dcdc_disable;
35733+ pax_open_kernel();
35734+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35735+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35736+ pax_close_kernel();
35737 }
35738
35739 /*
35740diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35741index 3285d41..ab7c22a 100644
35742--- a/drivers/regulator/mc13892-regulator.c
35743+++ b/drivers/regulator/mc13892-regulator.c
35744@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35745 }
35746 mc13xxx_unlock(mc13892);
35747
35748- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35749+ pax_open_kernel();
35750+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35751 = mc13892_vcam_set_mode;
35752- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35753+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35754 = mc13892_vcam_get_mode;
35755+ pax_close_kernel();
35756 for (i = 0; i < pdata->num_regulators; i++) {
35757 init_data = &pdata->regulators[i];
35758 priv->regulators[i] = regulator_register(
35759diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35760index cace6d3..f623fda 100644
35761--- a/drivers/rtc/rtc-dev.c
35762+++ b/drivers/rtc/rtc-dev.c
35763@@ -14,6 +14,7 @@
35764 #include <linux/module.h>
35765 #include <linux/rtc.h>
35766 #include <linux/sched.h>
35767+#include <linux/grsecurity.h>
35768 #include "rtc-core.h"
35769
35770 static dev_t rtc_devt;
35771@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35772 if (copy_from_user(&tm, uarg, sizeof(tm)))
35773 return -EFAULT;
35774
35775+ gr_log_timechange();
35776+
35777 return rtc_set_time(rtc, &tm);
35778
35779 case RTC_PIE_ON:
35780diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
35781index f66c33b..7ae5823 100644
35782--- a/drivers/scsi/BusLogic.c
35783+++ b/drivers/scsi/BusLogic.c
35784@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
35785 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
35786 *PrototypeHostAdapter)
35787 {
35788+ pax_track_stack();
35789+
35790 /*
35791 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
35792 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
35793diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35794index ffb5878..e6d785c 100644
35795--- a/drivers/scsi/aacraid/aacraid.h
35796+++ b/drivers/scsi/aacraid/aacraid.h
35797@@ -492,7 +492,7 @@ struct adapter_ops
35798 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35799 /* Administrative operations */
35800 int (*adapter_comm)(struct aac_dev * dev, int comm);
35801-};
35802+} __no_const;
35803
35804 /*
35805 * Define which interrupt handler needs to be installed
35806diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
35807index 8a0b330..b4286de 100644
35808--- a/drivers/scsi/aacraid/commctrl.c
35809+++ b/drivers/scsi/aacraid/commctrl.c
35810@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
35811 u32 actual_fibsize64, actual_fibsize = 0;
35812 int i;
35813
35814+ pax_track_stack();
35815
35816 if (dev->in_reset) {
35817 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
35818diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35819index c7b6fed..4db0569 100644
35820--- a/drivers/scsi/aacraid/linit.c
35821+++ b/drivers/scsi/aacraid/linit.c
35822@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35823 #elif defined(__devinitconst)
35824 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35825 #else
35826-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35827+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35828 #endif
35829 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35830 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35831diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35832index d5ff142..49c0ebb 100644
35833--- a/drivers/scsi/aic94xx/aic94xx_init.c
35834+++ b/drivers/scsi/aic94xx/aic94xx_init.c
35835@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35836 .lldd_control_phy = asd_control_phy,
35837 };
35838
35839-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35840+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35841 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35842 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35843 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35844diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35845index a796de9..1ef20e1 100644
35846--- a/drivers/scsi/bfa/bfa.h
35847+++ b/drivers/scsi/bfa/bfa.h
35848@@ -196,7 +196,7 @@ struct bfa_hwif_s {
35849 u32 *end);
35850 int cpe_vec_q0;
35851 int rme_vec_q0;
35852-};
35853+} __no_const;
35854 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35855
35856 struct bfa_faa_cbfn_s {
35857diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35858index e07bd47..dbd260a 100644
35859--- a/drivers/scsi/bfa/bfa_fcpim.c
35860+++ b/drivers/scsi/bfa/bfa_fcpim.c
35861@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35862 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35863 {
35864 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35865- struct bfa_itn_s *itn;
35866+ bfa_itn_s_no_const *itn;
35867
35868 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35869 itn->isr = isr;
35870diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35871index 1080bcb..a3b39e3 100644
35872--- a/drivers/scsi/bfa/bfa_fcpim.h
35873+++ b/drivers/scsi/bfa/bfa_fcpim.h
35874@@ -37,6 +37,7 @@ struct bfa_iotag_s {
35875 struct bfa_itn_s {
35876 bfa_isr_func_t isr;
35877 };
35878+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35879
35880 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35881 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35882@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35883 struct list_head iotag_tio_free_q; /* free IO resources */
35884 struct list_head iotag_unused_q; /* unused IO resources*/
35885 struct bfa_iotag_s *iotag_arr;
35886- struct bfa_itn_s *itn_arr;
35887+ bfa_itn_s_no_const *itn_arr;
35888 int num_ioim_reqs;
35889 int num_fwtio_reqs;
35890 int num_itns;
35891diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
35892index d4f951f..197c350 100644
35893--- a/drivers/scsi/bfa/bfa_fcs_lport.c
35894+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
35895@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
35896 u16 len, count;
35897 u16 templen;
35898
35899+ pax_track_stack();
35900+
35901 /*
35902 * get hba attributes
35903 */
35904@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
35905 u8 count = 0;
35906 u16 templen;
35907
35908+ pax_track_stack();
35909+
35910 /*
35911 * get port attributes
35912 */
35913diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
35914index 52628d5..f89d033 100644
35915--- a/drivers/scsi/bfa/bfa_fcs_rport.c
35916+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
35917@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
35918 struct fc_rpsc_speed_info_s speeds;
35919 struct bfa_port_attr_s pport_attr;
35920
35921+ pax_track_stack();
35922+
35923 bfa_trc(port->fcs, rx_fchs->s_id);
35924 bfa_trc(port->fcs, rx_fchs->d_id);
35925
35926diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35927index 546d46b..642fa5b 100644
35928--- a/drivers/scsi/bfa/bfa_ioc.h
35929+++ b/drivers/scsi/bfa/bfa_ioc.h
35930@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35931 bfa_ioc_disable_cbfn_t disable_cbfn;
35932 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35933 bfa_ioc_reset_cbfn_t reset_cbfn;
35934-};
35935+} __no_const;
35936
35937 /*
35938 * IOC event notification mechanism.
35939@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35940 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35941 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35942 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35943-};
35944+} __no_const;
35945
35946 /*
35947 * Queue element to wait for room in request queue. FIFO order is
35948diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
35949index 66fb725..0fe05ab 100644
35950--- a/drivers/scsi/bfa/bfad.c
35951+++ b/drivers/scsi/bfa/bfad.c
35952@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
35953 struct bfad_vport_s *vport, *vport_new;
35954 struct bfa_fcs_driver_info_s driver_info;
35955
35956+ pax_track_stack();
35957+
35958 /* Limit min/max. xfer size to [64k-32MB] */
35959 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
35960 max_xfer_size = BFAD_MIN_SECTORS >> 1;
35961diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
35962index b4f6c9a..0eb1938 100644
35963--- a/drivers/scsi/dpt_i2o.c
35964+++ b/drivers/scsi/dpt_i2o.c
35965@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
35966 dma_addr_t addr;
35967 ulong flags = 0;
35968
35969+ pax_track_stack();
35970+
35971 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
35972 // get user msg size in u32s
35973 if(get_user(size, &user_msg[0])){
35974@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
35975 s32 rcode;
35976 dma_addr_t addr;
35977
35978+ pax_track_stack();
35979+
35980 memset(msg, 0 , sizeof(msg));
35981 len = scsi_bufflen(cmd);
35982 direction = 0x00000000;
35983diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
35984index 94de889..ca4f0cf 100644
35985--- a/drivers/scsi/eata.c
35986+++ b/drivers/scsi/eata.c
35987@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
35988 struct hostdata *ha;
35989 char name[16];
35990
35991+ pax_track_stack();
35992+
35993 sprintf(name, "%s%d", driver_name, j);
35994
35995 if (!request_region(port_base, REGION_SIZE, driver_name)) {
35996diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
35997index c74c4b8..c41ca3f 100644
35998--- a/drivers/scsi/fcoe/fcoe_ctlr.c
35999+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
36000@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
36001 } buf;
36002 int rc;
36003
36004+ pax_track_stack();
36005+
36006 fiph = (struct fip_header *)skb->data;
36007 sub = fiph->fip_subcode;
36008
36009diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
36010index 3242bca..45a83e7 100644
36011--- a/drivers/scsi/gdth.c
36012+++ b/drivers/scsi/gdth.c
36013@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
36014 unsigned long flags;
36015 gdth_ha_str *ha;
36016
36017+ pax_track_stack();
36018+
36019 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36020 return -EFAULT;
36021 ha = gdth_find_ha(ldrv.ionode);
36022@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
36023 gdth_ha_str *ha;
36024 int rval;
36025
36026+ pax_track_stack();
36027+
36028 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36029 res.number >= MAX_HDRIVES)
36030 return -EFAULT;
36031@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd)
36032 gdth_ha_str *ha;
36033 int rval;
36034
36035+ pax_track_stack();
36036+
36037 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36038 return -EFAULT;
36039 ha = gdth_find_ha(gen.ionode);
36040@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
36041 int i;
36042 gdth_cmd_str gdtcmd;
36043 char cmnd[MAX_COMMAND_SIZE];
36044+
36045+ pax_track_stack();
36046+
36047 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36048
36049 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36050diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
36051index 6527543..81e4fe2 100644
36052--- a/drivers/scsi/gdth_proc.c
36053+++ b/drivers/scsi/gdth_proc.c
36054@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
36055 u64 paddr;
36056
36057 char cmnd[MAX_COMMAND_SIZE];
36058+
36059+ pax_track_stack();
36060+
36061 memset(cmnd, 0xff, 12);
36062 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36063
36064@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
36065 gdth_hget_str *phg;
36066 char cmnd[MAX_COMMAND_SIZE];
36067
36068+ pax_track_stack();
36069+
36070 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36071 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36072 if (!gdtcmd || !estr)
36073diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36074index 351dc0b..951dc32 100644
36075--- a/drivers/scsi/hosts.c
36076+++ b/drivers/scsi/hosts.c
36077@@ -42,7 +42,7 @@
36078 #include "scsi_logging.h"
36079
36080
36081-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36082+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36083
36084
36085 static void scsi_host_cls_release(struct device *dev)
36086@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36087 * subtract one because we increment first then return, but we need to
36088 * know what the next host number was before increment
36089 */
36090- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36091+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36092 shost->dma_channel = 0xff;
36093
36094 /* These three are default values which can be overridden */
36095diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36096index 418ce83..7ee1225 100644
36097--- a/drivers/scsi/hpsa.c
36098+++ b/drivers/scsi/hpsa.c
36099@@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h)
36100 u32 a;
36101
36102 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36103- return h->access.command_completed(h);
36104+ return h->access->command_completed(h);
36105
36106 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36107 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36108@@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h)
36109 while (!list_empty(&h->reqQ)) {
36110 c = list_entry(h->reqQ.next, struct CommandList, list);
36111 /* can't do anything if fifo is full */
36112- if ((h->access.fifo_full(h))) {
36113+ if ((h->access->fifo_full(h))) {
36114 dev_warn(&h->pdev->dev, "fifo full\n");
36115 break;
36116 }
36117@@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h)
36118 h->Qdepth--;
36119
36120 /* Tell the controller execute command */
36121- h->access.submit_command(h, c);
36122+ h->access->submit_command(h, c);
36123
36124 /* Put job onto the completed Q */
36125 addQ(&h->cmpQ, c);
36126@@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h)
36127
36128 static inline unsigned long get_next_completion(struct ctlr_info *h)
36129 {
36130- return h->access.command_completed(h);
36131+ return h->access->command_completed(h);
36132 }
36133
36134 static inline bool interrupt_pending(struct ctlr_info *h)
36135 {
36136- return h->access.intr_pending(h);
36137+ return h->access->intr_pending(h);
36138 }
36139
36140 static inline long interrupt_not_for_us(struct ctlr_info *h)
36141 {
36142- return (h->access.intr_pending(h) == 0) ||
36143+ return (h->access->intr_pending(h) == 0) ||
36144 (h->interrupts_enabled == 0);
36145 }
36146
36147@@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36148 if (prod_index < 0)
36149 return -ENODEV;
36150 h->product_name = products[prod_index].product_name;
36151- h->access = *(products[prod_index].access);
36152+ h->access = products[prod_index].access;
36153
36154 if (hpsa_board_disabled(h->pdev)) {
36155 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36156@@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
36157 }
36158
36159 /* make sure the board interrupts are off */
36160- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36161+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36162
36163 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36164 goto clean2;
36165@@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
36166 * fake ones to scoop up any residual completions.
36167 */
36168 spin_lock_irqsave(&h->lock, flags);
36169- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36170+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36171 spin_unlock_irqrestore(&h->lock, flags);
36172 free_irq(h->intr[h->intr_mode], h);
36173 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36174@@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
36175 dev_info(&h->pdev->dev, "Board READY.\n");
36176 dev_info(&h->pdev->dev,
36177 "Waiting for stale completions to drain.\n");
36178- h->access.set_intr_mask(h, HPSA_INTR_ON);
36179+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36180 msleep(10000);
36181- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36182+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36183
36184 rc = controller_reset_failed(h->cfgtable);
36185 if (rc)
36186@@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
36187 }
36188
36189 /* Turn the interrupts on so we can service requests */
36190- h->access.set_intr_mask(h, HPSA_INTR_ON);
36191+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36192
36193 hpsa_hba_inquiry(h);
36194 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36195@@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36196 * To write all data in the battery backed cache to disks
36197 */
36198 hpsa_flush_cache(h);
36199- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36200+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36201 free_irq(h->intr[h->intr_mode], h);
36202 #ifdef CONFIG_PCI_MSI
36203 if (h->msix_vector)
36204@@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36205 return;
36206 }
36207 /* Change the access methods to the performant access methods */
36208- h->access = SA5_performant_access;
36209+ h->access = &SA5_performant_access;
36210 h->transMethod = CFGTBL_Trans_Performant;
36211 }
36212
36213diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36214index 7f53cea..a8c7188 100644
36215--- a/drivers/scsi/hpsa.h
36216+++ b/drivers/scsi/hpsa.h
36217@@ -73,7 +73,7 @@ struct ctlr_info {
36218 unsigned int msix_vector;
36219 unsigned int msi_vector;
36220 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36221- struct access_method access;
36222+ struct access_method *access;
36223
36224 /* queue and queue Info */
36225 struct list_head reqQ;
36226diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36227index f2df059..a3a9930 100644
36228--- a/drivers/scsi/ips.h
36229+++ b/drivers/scsi/ips.h
36230@@ -1027,7 +1027,7 @@ typedef struct {
36231 int (*intr)(struct ips_ha *);
36232 void (*enableint)(struct ips_ha *);
36233 uint32_t (*statupd)(struct ips_ha *);
36234-} ips_hw_func_t;
36235+} __no_const ips_hw_func_t;
36236
36237 typedef struct ips_ha {
36238 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36239diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36240index d261e98..1e00f35 100644
36241--- a/drivers/scsi/libfc/fc_exch.c
36242+++ b/drivers/scsi/libfc/fc_exch.c
36243@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36244 * all together if not used XXX
36245 */
36246 struct {
36247- atomic_t no_free_exch;
36248- atomic_t no_free_exch_xid;
36249- atomic_t xid_not_found;
36250- atomic_t xid_busy;
36251- atomic_t seq_not_found;
36252- atomic_t non_bls_resp;
36253+ atomic_unchecked_t no_free_exch;
36254+ atomic_unchecked_t no_free_exch_xid;
36255+ atomic_unchecked_t xid_not_found;
36256+ atomic_unchecked_t xid_busy;
36257+ atomic_unchecked_t seq_not_found;
36258+ atomic_unchecked_t non_bls_resp;
36259 } stats;
36260 };
36261
36262@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36263 /* allocate memory for exchange */
36264 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36265 if (!ep) {
36266- atomic_inc(&mp->stats.no_free_exch);
36267+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36268 goto out;
36269 }
36270 memset(ep, 0, sizeof(*ep));
36271@@ -779,7 +779,7 @@ out:
36272 return ep;
36273 err:
36274 spin_unlock_bh(&pool->lock);
36275- atomic_inc(&mp->stats.no_free_exch_xid);
36276+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36277 mempool_free(ep, mp->ep_pool);
36278 return NULL;
36279 }
36280@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36281 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36282 ep = fc_exch_find(mp, xid);
36283 if (!ep) {
36284- atomic_inc(&mp->stats.xid_not_found);
36285+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36286 reject = FC_RJT_OX_ID;
36287 goto out;
36288 }
36289@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36290 ep = fc_exch_find(mp, xid);
36291 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36292 if (ep) {
36293- atomic_inc(&mp->stats.xid_busy);
36294+ atomic_inc_unchecked(&mp->stats.xid_busy);
36295 reject = FC_RJT_RX_ID;
36296 goto rel;
36297 }
36298@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36299 }
36300 xid = ep->xid; /* get our XID */
36301 } else if (!ep) {
36302- atomic_inc(&mp->stats.xid_not_found);
36303+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36304 reject = FC_RJT_RX_ID; /* XID not found */
36305 goto out;
36306 }
36307@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36308 } else {
36309 sp = &ep->seq;
36310 if (sp->id != fh->fh_seq_id) {
36311- atomic_inc(&mp->stats.seq_not_found);
36312+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36313 if (f_ctl & FC_FC_END_SEQ) {
36314 /*
36315 * Update sequence_id based on incoming last
36316@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36317
36318 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36319 if (!ep) {
36320- atomic_inc(&mp->stats.xid_not_found);
36321+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36322 goto out;
36323 }
36324 if (ep->esb_stat & ESB_ST_COMPLETE) {
36325- atomic_inc(&mp->stats.xid_not_found);
36326+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36327 goto rel;
36328 }
36329 if (ep->rxid == FC_XID_UNKNOWN)
36330 ep->rxid = ntohs(fh->fh_rx_id);
36331 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36332- atomic_inc(&mp->stats.xid_not_found);
36333+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36334 goto rel;
36335 }
36336 if (ep->did != ntoh24(fh->fh_s_id) &&
36337 ep->did != FC_FID_FLOGI) {
36338- atomic_inc(&mp->stats.xid_not_found);
36339+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36340 goto rel;
36341 }
36342 sof = fr_sof(fp);
36343@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36344 sp->ssb_stat |= SSB_ST_RESP;
36345 sp->id = fh->fh_seq_id;
36346 } else if (sp->id != fh->fh_seq_id) {
36347- atomic_inc(&mp->stats.seq_not_found);
36348+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36349 goto rel;
36350 }
36351
36352@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36353 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36354
36355 if (!sp)
36356- atomic_inc(&mp->stats.xid_not_found);
36357+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36358 else
36359- atomic_inc(&mp->stats.non_bls_resp);
36360+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36361
36362 fc_frame_free(fp);
36363 }
36364diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36365index db9238f..4378ed2 100644
36366--- a/drivers/scsi/libsas/sas_ata.c
36367+++ b/drivers/scsi/libsas/sas_ata.c
36368@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36369 .postreset = ata_std_postreset,
36370 .error_handler = ata_std_error_handler,
36371 .post_internal_cmd = sas_ata_post_internal,
36372- .qc_defer = ata_std_qc_defer,
36373+ .qc_defer = ata_std_qc_defer,
36374 .qc_prep = ata_noop_qc_prep,
36375 .qc_issue = sas_ata_qc_issue,
36376 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36377diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36378index c088a36..01c73b0 100644
36379--- a/drivers/scsi/lpfc/lpfc.h
36380+++ b/drivers/scsi/lpfc/lpfc.h
36381@@ -425,7 +425,7 @@ struct lpfc_vport {
36382 struct dentry *debug_nodelist;
36383 struct dentry *vport_debugfs_root;
36384 struct lpfc_debugfs_trc *disc_trc;
36385- atomic_t disc_trc_cnt;
36386+ atomic_unchecked_t disc_trc_cnt;
36387 #endif
36388 uint8_t stat_data_enabled;
36389 uint8_t stat_data_blocked;
36390@@ -835,8 +835,8 @@ struct lpfc_hba {
36391 struct timer_list fabric_block_timer;
36392 unsigned long bit_flags;
36393 #define FABRIC_COMANDS_BLOCKED 0
36394- atomic_t num_rsrc_err;
36395- atomic_t num_cmd_success;
36396+ atomic_unchecked_t num_rsrc_err;
36397+ atomic_unchecked_t num_cmd_success;
36398 unsigned long last_rsrc_error_time;
36399 unsigned long last_ramp_down_time;
36400 unsigned long last_ramp_up_time;
36401@@ -850,7 +850,7 @@ struct lpfc_hba {
36402 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36403 struct dentry *debug_slow_ring_trc;
36404 struct lpfc_debugfs_trc *slow_ring_trc;
36405- atomic_t slow_ring_trc_cnt;
36406+ atomic_unchecked_t slow_ring_trc_cnt;
36407 /* iDiag debugfs sub-directory */
36408 struct dentry *idiag_root;
36409 struct dentry *idiag_pci_cfg;
36410diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36411index a0424dd..2499b6b 100644
36412--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36413+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36414@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36415
36416 #include <linux/debugfs.h>
36417
36418-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36419+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36420 static unsigned long lpfc_debugfs_start_time = 0L;
36421
36422 /* iDiag */
36423@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36424 lpfc_debugfs_enable = 0;
36425
36426 len = 0;
36427- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36428+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36429 (lpfc_debugfs_max_disc_trc - 1);
36430 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36431 dtp = vport->disc_trc + i;
36432@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36433 lpfc_debugfs_enable = 0;
36434
36435 len = 0;
36436- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36437+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36438 (lpfc_debugfs_max_slow_ring_trc - 1);
36439 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36440 dtp = phba->slow_ring_trc + i;
36441@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36442 !vport || !vport->disc_trc)
36443 return;
36444
36445- index = atomic_inc_return(&vport->disc_trc_cnt) &
36446+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36447 (lpfc_debugfs_max_disc_trc - 1);
36448 dtp = vport->disc_trc + index;
36449 dtp->fmt = fmt;
36450 dtp->data1 = data1;
36451 dtp->data2 = data2;
36452 dtp->data3 = data3;
36453- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36454+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36455 dtp->jif = jiffies;
36456 #endif
36457 return;
36458@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36459 !phba || !phba->slow_ring_trc)
36460 return;
36461
36462- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36463+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36464 (lpfc_debugfs_max_slow_ring_trc - 1);
36465 dtp = phba->slow_ring_trc + index;
36466 dtp->fmt = fmt;
36467 dtp->data1 = data1;
36468 dtp->data2 = data2;
36469 dtp->data3 = data3;
36470- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36471+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36472 dtp->jif = jiffies;
36473 #endif
36474 return;
36475@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36476 "slow_ring buffer\n");
36477 goto debug_failed;
36478 }
36479- atomic_set(&phba->slow_ring_trc_cnt, 0);
36480+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36481 memset(phba->slow_ring_trc, 0,
36482 (sizeof(struct lpfc_debugfs_trc) *
36483 lpfc_debugfs_max_slow_ring_trc));
36484@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36485 "buffer\n");
36486 goto debug_failed;
36487 }
36488- atomic_set(&vport->disc_trc_cnt, 0);
36489+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36490
36491 snprintf(name, sizeof(name), "discovery_trace");
36492 vport->debug_disc_trc =
36493diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36494index a3c8200..31e562e 100644
36495--- a/drivers/scsi/lpfc/lpfc_init.c
36496+++ b/drivers/scsi/lpfc/lpfc_init.c
36497@@ -9969,8 +9969,10 @@ lpfc_init(void)
36498 printk(LPFC_COPYRIGHT "\n");
36499
36500 if (lpfc_enable_npiv) {
36501- lpfc_transport_functions.vport_create = lpfc_vport_create;
36502- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36503+ pax_open_kernel();
36504+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36505+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36506+ pax_close_kernel();
36507 }
36508 lpfc_transport_template =
36509 fc_attach_transport(&lpfc_transport_functions);
36510diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36511index eadd241..26c8e0f 100644
36512--- a/drivers/scsi/lpfc/lpfc_scsi.c
36513+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36514@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36515 uint32_t evt_posted;
36516
36517 spin_lock_irqsave(&phba->hbalock, flags);
36518- atomic_inc(&phba->num_rsrc_err);
36519+ atomic_inc_unchecked(&phba->num_rsrc_err);
36520 phba->last_rsrc_error_time = jiffies;
36521
36522 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36523@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36524 unsigned long flags;
36525 struct lpfc_hba *phba = vport->phba;
36526 uint32_t evt_posted;
36527- atomic_inc(&phba->num_cmd_success);
36528+ atomic_inc_unchecked(&phba->num_cmd_success);
36529
36530 if (vport->cfg_lun_queue_depth <= queue_depth)
36531 return;
36532@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36533 unsigned long num_rsrc_err, num_cmd_success;
36534 int i;
36535
36536- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36537- num_cmd_success = atomic_read(&phba->num_cmd_success);
36538+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36539+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36540
36541 vports = lpfc_create_vport_work_array(phba);
36542 if (vports != NULL)
36543@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36544 }
36545 }
36546 lpfc_destroy_vport_work_array(phba, vports);
36547- atomic_set(&phba->num_rsrc_err, 0);
36548- atomic_set(&phba->num_cmd_success, 0);
36549+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36550+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36551 }
36552
36553 /**
36554@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36555 }
36556 }
36557 lpfc_destroy_vport_work_array(phba, vports);
36558- atomic_set(&phba->num_rsrc_err, 0);
36559- atomic_set(&phba->num_cmd_success, 0);
36560+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36561+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36562 }
36563
36564 /**
36565diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
36566index 2e6619e..fa64494 100644
36567--- a/drivers/scsi/megaraid/megaraid_mbox.c
36568+++ b/drivers/scsi/megaraid/megaraid_mbox.c
36569@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
36570 int rval;
36571 int i;
36572
36573+ pax_track_stack();
36574+
36575 // Allocate memory for the base list of scb for management module.
36576 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36577
36578diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
36579index 86afb13f..c912398 100644
36580--- a/drivers/scsi/osd/osd_initiator.c
36581+++ b/drivers/scsi/osd/osd_initiator.c
36582@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od,
36583 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36584 int ret;
36585
36586+ pax_track_stack();
36587+
36588 or = osd_start_request(od, GFP_KERNEL);
36589 if (!or)
36590 return -ENOMEM;
36591diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36592index d079f9a..d26072c 100644
36593--- a/drivers/scsi/pmcraid.c
36594+++ b/drivers/scsi/pmcraid.c
36595@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36596 res->scsi_dev = scsi_dev;
36597 scsi_dev->hostdata = res;
36598 res->change_detected = 0;
36599- atomic_set(&res->read_failures, 0);
36600- atomic_set(&res->write_failures, 0);
36601+ atomic_set_unchecked(&res->read_failures, 0);
36602+ atomic_set_unchecked(&res->write_failures, 0);
36603 rc = 0;
36604 }
36605 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36606@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36607
36608 /* If this was a SCSI read/write command keep count of errors */
36609 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36610- atomic_inc(&res->read_failures);
36611+ atomic_inc_unchecked(&res->read_failures);
36612 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36613- atomic_inc(&res->write_failures);
36614+ atomic_inc_unchecked(&res->write_failures);
36615
36616 if (!RES_IS_GSCSI(res->cfg_entry) &&
36617 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36618@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
36619 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36620 * hrrq_id assigned here in queuecommand
36621 */
36622- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36623+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36624 pinstance->num_hrrq;
36625 cmd->cmd_done = pmcraid_io_done;
36626
36627@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
36628 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36629 * hrrq_id assigned here in queuecommand
36630 */
36631- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36632+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36633 pinstance->num_hrrq;
36634
36635 if (request_size) {
36636@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36637
36638 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36639 /* add resources only after host is added into system */
36640- if (!atomic_read(&pinstance->expose_resources))
36641+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36642 return;
36643
36644 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36645@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance(
36646 init_waitqueue_head(&pinstance->reset_wait_q);
36647
36648 atomic_set(&pinstance->outstanding_cmds, 0);
36649- atomic_set(&pinstance->last_message_id, 0);
36650- atomic_set(&pinstance->expose_resources, 0);
36651+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36652+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36653
36654 INIT_LIST_HEAD(&pinstance->free_res_q);
36655 INIT_LIST_HEAD(&pinstance->used_res_q);
36656@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
36657 /* Schedule worker thread to handle CCN and take care of adding and
36658 * removing devices to OS
36659 */
36660- atomic_set(&pinstance->expose_resources, 1);
36661+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36662 schedule_work(&pinstance->worker_q);
36663 return rc;
36664
36665diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36666index f920baf..4417389 100644
36667--- a/drivers/scsi/pmcraid.h
36668+++ b/drivers/scsi/pmcraid.h
36669@@ -749,7 +749,7 @@ struct pmcraid_instance {
36670 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36671
36672 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36673- atomic_t last_message_id;
36674+ atomic_unchecked_t last_message_id;
36675
36676 /* configuration table */
36677 struct pmcraid_config_table *cfg_table;
36678@@ -778,7 +778,7 @@ struct pmcraid_instance {
36679 atomic_t outstanding_cmds;
36680
36681 /* should add/delete resources to mid-layer now ?*/
36682- atomic_t expose_resources;
36683+ atomic_unchecked_t expose_resources;
36684
36685
36686
36687@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
36688 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36689 };
36690 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36691- atomic_t read_failures; /* count of failed READ commands */
36692- atomic_t write_failures; /* count of failed WRITE commands */
36693+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36694+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36695
36696 /* To indicate add/delete/modify during CCN */
36697 u8 change_detected;
36698diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36699index a03eaf4..a6b3fd9 100644
36700--- a/drivers/scsi/qla2xxx/qla_def.h
36701+++ b/drivers/scsi/qla2xxx/qla_def.h
36702@@ -2244,7 +2244,7 @@ struct isp_operations {
36703 int (*get_flash_version) (struct scsi_qla_host *, void *);
36704 int (*start_scsi) (srb_t *);
36705 int (*abort_isp) (struct scsi_qla_host *);
36706-};
36707+} __no_const;
36708
36709 /* MSI-X Support *************************************************************/
36710
36711diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36712index 473c5c8..4e2f24a 100644
36713--- a/drivers/scsi/qla4xxx/ql4_def.h
36714+++ b/drivers/scsi/qla4xxx/ql4_def.h
36715@@ -256,7 +256,7 @@ struct ddb_entry {
36716 atomic_t retry_relogin_timer; /* Min Time between relogins
36717 * (4000 only) */
36718 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36719- atomic_t relogin_retry_count; /* Num of times relogin has been
36720+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36721 * retried */
36722
36723 uint16_t port;
36724diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
36725index 42ed5db..0262f9e 100644
36726--- a/drivers/scsi/qla4xxx/ql4_init.c
36727+++ b/drivers/scsi/qla4xxx/ql4_init.c
36728@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
36729 ddb_entry->fw_ddb_index = fw_ddb_index;
36730 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36731 atomic_set(&ddb_entry->relogin_timer, 0);
36732- atomic_set(&ddb_entry->relogin_retry_count, 0);
36733+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36734 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36735 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36736 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36737@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
36738 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
36739 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
36740 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36741- atomic_set(&ddb_entry->relogin_retry_count, 0);
36742+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36743 atomic_set(&ddb_entry->relogin_timer, 0);
36744 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36745 iscsi_unblock_session(ddb_entry->sess);
36746diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36747index f2364ec..44c42b1 100644
36748--- a/drivers/scsi/qla4xxx/ql4_os.c
36749+++ b/drivers/scsi/qla4xxx/ql4_os.c
36750@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
36751 ddb_entry->fw_ddb_device_state ==
36752 DDB_DS_SESSION_FAILED) {
36753 /* Reset retry relogin timer */
36754- atomic_inc(&ddb_entry->relogin_retry_count);
36755+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36756 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
36757 " timed out-retrying"
36758 " relogin (%d)\n",
36759 ha->host_no,
36760 ddb_entry->fw_ddb_index,
36761- atomic_read(&ddb_entry->
36762+ atomic_read_unchecked(&ddb_entry->
36763 relogin_retry_count))
36764 );
36765 start_dpc++;
36766diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36767index 2aeb2e9..46e3925 100644
36768--- a/drivers/scsi/scsi.c
36769+++ b/drivers/scsi/scsi.c
36770@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36771 unsigned long timeout;
36772 int rtn = 0;
36773
36774- atomic_inc(&cmd->device->iorequest_cnt);
36775+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36776
36777 /* check if the device is still usable */
36778 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36779diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
36780index 6888b2c..45befa1 100644
36781--- a/drivers/scsi/scsi_debug.c
36782+++ b/drivers/scsi/scsi_debug.c
36783@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
36784 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36785 unsigned char *cmd = (unsigned char *)scp->cmnd;
36786
36787+ pax_track_stack();
36788+
36789 if ((errsts = check_readiness(scp, 1, devip)))
36790 return errsts;
36791 memset(arr, 0, sizeof(arr));
36792@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
36793 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36794 unsigned char *cmd = (unsigned char *)scp->cmnd;
36795
36796+ pax_track_stack();
36797+
36798 if ((errsts = check_readiness(scp, 1, devip)))
36799 return errsts;
36800 memset(arr, 0, sizeof(arr));
36801diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36802index 6d219e4..eb3ded3 100644
36803--- a/drivers/scsi/scsi_lib.c
36804+++ b/drivers/scsi/scsi_lib.c
36805@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36806 shost = sdev->host;
36807 scsi_init_cmd_errh(cmd);
36808 cmd->result = DID_NO_CONNECT << 16;
36809- atomic_inc(&cmd->device->iorequest_cnt);
36810+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36811
36812 /*
36813 * SCSI request completion path will do scsi_device_unbusy(),
36814@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36815
36816 INIT_LIST_HEAD(&cmd->eh_entry);
36817
36818- atomic_inc(&cmd->device->iodone_cnt);
36819+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36820 if (cmd->result)
36821- atomic_inc(&cmd->device->ioerr_cnt);
36822+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36823
36824 disposition = scsi_decide_disposition(cmd);
36825 if (disposition != SUCCESS &&
36826diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36827index e0bd3f7..816b8a6 100644
36828--- a/drivers/scsi/scsi_sysfs.c
36829+++ b/drivers/scsi/scsi_sysfs.c
36830@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36831 char *buf) \
36832 { \
36833 struct scsi_device *sdev = to_scsi_device(dev); \
36834- unsigned long long count = atomic_read(&sdev->field); \
36835+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36836 return snprintf(buf, 20, "0x%llx\n", count); \
36837 } \
36838 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36839diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36840index 84a1fdf..693b0d6 100644
36841--- a/drivers/scsi/scsi_tgt_lib.c
36842+++ b/drivers/scsi/scsi_tgt_lib.c
36843@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36844 int err;
36845
36846 dprintk("%lx %u\n", uaddr, len);
36847- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36848+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36849 if (err) {
36850 /*
36851 * TODO: need to fixup sg_tablesize, max_segment_size,
36852diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36853index 1b21491..1b7f60e 100644
36854--- a/drivers/scsi/scsi_transport_fc.c
36855+++ b/drivers/scsi/scsi_transport_fc.c
36856@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36857 * Netlink Infrastructure
36858 */
36859
36860-static atomic_t fc_event_seq;
36861+static atomic_unchecked_t fc_event_seq;
36862
36863 /**
36864 * fc_get_event_number - Obtain the next sequential FC event number
36865@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36866 u32
36867 fc_get_event_number(void)
36868 {
36869- return atomic_add_return(1, &fc_event_seq);
36870+ return atomic_add_return_unchecked(1, &fc_event_seq);
36871 }
36872 EXPORT_SYMBOL(fc_get_event_number);
36873
36874@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36875 {
36876 int error;
36877
36878- atomic_set(&fc_event_seq, 0);
36879+ atomic_set_unchecked(&fc_event_seq, 0);
36880
36881 error = transport_class_register(&fc_host_class);
36882 if (error)
36883@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36884 char *cp;
36885
36886 *val = simple_strtoul(buf, &cp, 0);
36887- if ((*cp && (*cp != '\n')) || (*val < 0))
36888+ if (*cp && (*cp != '\n'))
36889 return -EINVAL;
36890 /*
36891 * Check for overflow; dev_loss_tmo is u32
36892diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36893index 3fd16d7..ba0871f 100644
36894--- a/drivers/scsi/scsi_transport_iscsi.c
36895+++ b/drivers/scsi/scsi_transport_iscsi.c
36896@@ -83,7 +83,7 @@ struct iscsi_internal {
36897 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36898 };
36899
36900-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36901+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36902 static struct workqueue_struct *iscsi_eh_timer_workq;
36903
36904 /*
36905@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36906 int err;
36907
36908 ihost = shost->shost_data;
36909- session->sid = atomic_add_return(1, &iscsi_session_nr);
36910+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36911
36912 if (id == ISCSI_MAX_TARGET) {
36913 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36914@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void)
36915 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36916 ISCSI_TRANSPORT_VERSION);
36917
36918- atomic_set(&iscsi_session_nr, 0);
36919+ atomic_set_unchecked(&iscsi_session_nr, 0);
36920
36921 err = class_register(&iscsi_transport_class);
36922 if (err)
36923diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36924index 21a045e..ec89e03 100644
36925--- a/drivers/scsi/scsi_transport_srp.c
36926+++ b/drivers/scsi/scsi_transport_srp.c
36927@@ -33,7 +33,7 @@
36928 #include "scsi_transport_srp_internal.h"
36929
36930 struct srp_host_attrs {
36931- atomic_t next_port_id;
36932+ atomic_unchecked_t next_port_id;
36933 };
36934 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36935
36936@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36937 struct Scsi_Host *shost = dev_to_shost(dev);
36938 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36939
36940- atomic_set(&srp_host->next_port_id, 0);
36941+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36942 return 0;
36943 }
36944
36945@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36946 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36947 rport->roles = ids->roles;
36948
36949- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36950+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36951 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36952
36953 transport_setup_device(&rport->dev);
36954diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
36955index 953773c..c7f29de 100644
36956--- a/drivers/scsi/sd.c
36957+++ b/drivers/scsi/sd.c
36958@@ -1073,6 +1073,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
36959 SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
36960 disk->disk_name, cmd));
36961
36962+ error = scsi_verify_blk_ioctl(bdev, cmd);
36963+ if (error < 0)
36964+ return error;
36965+
36966 /*
36967 * If we are in the middle of error recovery, don't let anyone
36968 * else try and use this device. Also, if error recovery fails, it
36969@@ -1095,7 +1099,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
36970 error = scsi_ioctl(sdp, cmd, p);
36971 break;
36972 default:
36973- error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
36974+ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
36975 if (error != -ENOTTY)
36976 break;
36977 error = scsi_ioctl(sdp, cmd, p);
36978@@ -1265,6 +1269,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
36979 unsigned int cmd, unsigned long arg)
36980 {
36981 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
36982+ int ret;
36983+
36984+ ret = scsi_verify_blk_ioctl(bdev, cmd);
36985+ if (ret < 0)
36986+ return ret;
36987
36988 /*
36989 * If we are in the middle of error recovery, don't let anyone
36990@@ -1276,8 +1285,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
36991 return -ENODEV;
36992
36993 if (sdev->host->hostt->compat_ioctl) {
36994- int ret;
36995-
36996 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
36997
36998 return ret;
36999diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37000index 909ed9e..1ae290a 100644
37001--- a/drivers/scsi/sg.c
37002+++ b/drivers/scsi/sg.c
37003@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37004 sdp->disk->disk_name,
37005 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37006 NULL,
37007- (char *)arg);
37008+ (char __user *)arg);
37009 case BLKTRACESTART:
37010 return blk_trace_startstop(sdp->device->request_queue, 1);
37011 case BLKTRACESTOP:
37012@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
37013 const struct file_operations * fops;
37014 };
37015
37016-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37017+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37018 {"allow_dio", &adio_fops},
37019 {"debug", &debug_fops},
37020 {"def_reserved_size", &dressz_fops},
37021@@ -2325,7 +2325,7 @@ sg_proc_init(void)
37022 {
37023 int k, mask;
37024 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37025- struct sg_proc_leaf * leaf;
37026+ const struct sg_proc_leaf * leaf;
37027
37028 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37029 if (!sg_proc_sgp)
37030diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
37031index b4543f5..e1b34b8 100644
37032--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
37033+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
37034@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
37035 int do_iounmap = 0;
37036 int do_disable_device = 1;
37037
37038+ pax_track_stack();
37039+
37040 memset(&sym_dev, 0, sizeof(sym_dev));
37041 memset(&nvram, 0, sizeof(nvram));
37042 sym_dev.pdev = pdev;
37043diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
37044index a18996d..fe993cb 100644
37045--- a/drivers/scsi/vmw_pvscsi.c
37046+++ b/drivers/scsi/vmw_pvscsi.c
37047@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
37048 dma_addr_t base;
37049 unsigned i;
37050
37051+ pax_track_stack();
37052+
37053 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
37054 cmd.reqRingNumPages = adapter->req_pages;
37055 cmd.cmpRingNumPages = adapter->cmp_pages;
37056diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
37057index c5f37f0..898d202 100644
37058--- a/drivers/spi/spi-dw-pci.c
37059+++ b/drivers/spi/spi-dw-pci.c
37060@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
37061 #define spi_resume NULL
37062 #endif
37063
37064-static const struct pci_device_id pci_ids[] __devinitdata = {
37065+static const struct pci_device_id pci_ids[] __devinitconst = {
37066 /* Intel MID platform SPI controller 0 */
37067 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
37068 {},
37069diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37070index 4d1b9f5..8408fe3 100644
37071--- a/drivers/spi/spi.c
37072+++ b/drivers/spi/spi.c
37073@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master)
37074 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37075
37076 /* portable code must never pass more than 32 bytes */
37077-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37078+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37079
37080 static u8 *buf;
37081
37082diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
37083index 32ee39a..3004c3d 100644
37084--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
37085+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
37086@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
37087 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
37088
37089
37090-static struct net_device_ops ar6000_netdev_ops = {
37091+static net_device_ops_no_const ar6000_netdev_ops = {
37092 .ndo_init = NULL,
37093 .ndo_open = ar6000_open,
37094 .ndo_stop = ar6000_close,
37095diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
37096index 39e0873..0925710 100644
37097--- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
37098+++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
37099@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
37100 typedef struct ar6k_pal_config_s
37101 {
37102 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
37103-}ar6k_pal_config_t;
37104+} __no_const ar6k_pal_config_t;
37105
37106 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
37107 #endif /* _AR6K_PAL_H_ */
37108diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
37109index 05dada9..96171c6 100644
37110--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
37111+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
37112@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp)
37113 free_netdev(ifp->net);
37114 }
37115 /* Allocate etherdev, including space for private structure */
37116- ifp->net = alloc_etherdev(sizeof(drvr_priv));
37117+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
37118 if (!ifp->net) {
37119 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
37120 ret = -ENOMEM;
37121 }
37122 if (ret == 0) {
37123 strcpy(ifp->net->name, ifp->name);
37124- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
37125+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
37126 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
37127 if (err != 0) {
37128 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
37129@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
37130 BRCMF_TRACE(("%s: Enter\n", __func__));
37131
37132 /* Allocate etherdev, including space for private structure */
37133- net = alloc_etherdev(sizeof(drvr_priv));
37134+ net = alloc_etherdev(sizeof(*drvr_priv));
37135 if (!net) {
37136 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
37137 goto fail;
37138@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
37139 /*
37140 * Save the brcmf_info into the priv
37141 */
37142- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
37143+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
37144
37145 /* Set network interface name if it was provided as module parameter */
37146 if (iface_name[0]) {
37147@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
37148 /*
37149 * Save the brcmf_info into the priv
37150 */
37151- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
37152+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
37153
37154 #if defined(CONFIG_PM_SLEEP)
37155 atomic_set(&brcmf_mmc_suspend, false);
37156diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
37157index d345472..cedb19e 100644
37158--- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
37159+++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
37160@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
37161 u16 func, uint bustype, u32 regsva, void *param);
37162 /* detach from device */
37163 void (*detach) (void *ch);
37164-};
37165+} __no_const;
37166
37167 struct sdioh_info;
37168
37169diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
37170index a01b01c..b3f721c 100644
37171--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
37172+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
37173@@ -591,7 +591,7 @@ struct phy_func_ptr {
37174 initfn_t carrsuppr;
37175 rxsigpwrfn_t rxsigpwr;
37176 detachfn_t detach;
37177-};
37178+} __no_const;
37179
37180 struct brcms_phy {
37181 struct brcms_phy_pub pubpi_ro;
37182diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
37183index 8fb3051..a8b6c67 100644
37184--- a/drivers/staging/et131x/et1310_tx.c
37185+++ b/drivers/staging/et131x/et1310_tx.c
37186@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
37187 struct net_device_stats *stats = &etdev->net_stats;
37188
37189 if (tcb->flags & fMP_DEST_BROAD)
37190- atomic_inc(&etdev->stats.brdcstxmt);
37191+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
37192 else if (tcb->flags & fMP_DEST_MULTI)
37193- atomic_inc(&etdev->stats.multixmt);
37194+ atomic_inc_unchecked(&etdev->stats.multixmt);
37195 else
37196- atomic_inc(&etdev->stats.unixmt);
37197+ atomic_inc_unchecked(&etdev->stats.unixmt);
37198
37199 if (tcb->skb) {
37200 stats->tx_bytes += tcb->skb->len;
37201diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
37202index 408c50b..fd65e9f 100644
37203--- a/drivers/staging/et131x/et131x_adapter.h
37204+++ b/drivers/staging/et131x/et131x_adapter.h
37205@@ -106,11 +106,11 @@ struct ce_stats {
37206 * operations
37207 */
37208 u32 unircv; /* # multicast packets received */
37209- atomic_t unixmt; /* # multicast packets for Tx */
37210+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37211 u32 multircv; /* # multicast packets received */
37212- atomic_t multixmt; /* # multicast packets for Tx */
37213+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37214 u32 brdcstrcv; /* # broadcast packets received */
37215- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37216+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37217 u32 norcvbuf; /* # Rx packets discarded */
37218 u32 noxmtbuf; /* # Tx packets discarded */
37219
37220diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
37221index 455f47a..86205ff 100644
37222--- a/drivers/staging/hv/channel.c
37223+++ b/drivers/staging/hv/channel.c
37224@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
37225 int ret = 0;
37226 int t;
37227
37228- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
37229- atomic_inc(&vmbus_connection.next_gpadl_handle);
37230+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
37231+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
37232
37233 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
37234 if (ret)
37235diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
37236index 824f816..a800af7 100644
37237--- a/drivers/staging/hv/hv.c
37238+++ b/drivers/staging/hv/hv.c
37239@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
37240 u64 output_address = (output) ? virt_to_phys(output) : 0;
37241 u32 output_address_hi = output_address >> 32;
37242 u32 output_address_lo = output_address & 0xFFFFFFFF;
37243- volatile void *hypercall_page = hv_context.hypercall_page;
37244+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
37245
37246 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
37247 "=a"(hv_status_lo) : "d" (control_hi),
37248diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
37249index d957fc2..43cedd9 100644
37250--- a/drivers/staging/hv/hv_mouse.c
37251+++ b/drivers/staging/hv/hv_mouse.c
37252@@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
37253 if (hid_dev) {
37254 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
37255
37256- hid_dev->ll_driver->open = mousevsc_hid_open;
37257- hid_dev->ll_driver->close = mousevsc_hid_close;
37258+ pax_open_kernel();
37259+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
37260+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
37261+ pax_close_kernel();
37262
37263 hid_dev->bus = BUS_VIRTUAL;
37264 hid_dev->vendor = input_device_ctx->device_info.vendor;
37265diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
37266index 349ad80..3f75719 100644
37267--- a/drivers/staging/hv/hyperv_vmbus.h
37268+++ b/drivers/staging/hv/hyperv_vmbus.h
37269@@ -559,7 +559,7 @@ enum vmbus_connect_state {
37270 struct vmbus_connection {
37271 enum vmbus_connect_state conn_state;
37272
37273- atomic_t next_gpadl_handle;
37274+ atomic_unchecked_t next_gpadl_handle;
37275
37276 /*
37277 * Represents channel interrupts. Each bit position represents a
37278diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
37279index dbb5201..d6047c6 100644
37280--- a/drivers/staging/hv/rndis_filter.c
37281+++ b/drivers/staging/hv/rndis_filter.c
37282@@ -43,7 +43,7 @@ struct rndis_device {
37283
37284 enum rndis_device_state state;
37285 u32 link_stat;
37286- atomic_t new_req_id;
37287+ atomic_unchecked_t new_req_id;
37288
37289 spinlock_t request_lock;
37290 struct list_head req_list;
37291@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
37292 * template
37293 */
37294 set = &rndis_msg->msg.set_req;
37295- set->req_id = atomic_inc_return(&dev->new_req_id);
37296+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37297
37298 /* Add to the request list */
37299 spin_lock_irqsave(&dev->request_lock, flags);
37300@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
37301
37302 /* Setup the rndis set */
37303 halt = &request->request_msg.msg.halt_req;
37304- halt->req_id = atomic_inc_return(&dev->new_req_id);
37305+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37306
37307 /* Ignore return since this msg is optional. */
37308 rndis_filter_send_request(dev, request);
37309diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
37310index 1c949f5..7a8b104 100644
37311--- a/drivers/staging/hv/vmbus_drv.c
37312+++ b/drivers/staging/hv/vmbus_drv.c
37313@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
37314 {
37315 int ret = 0;
37316
37317- static atomic_t device_num = ATOMIC_INIT(0);
37318+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37319
37320 /* Set the device name. Otherwise, device_register() will fail. */
37321 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
37322- atomic_inc_return(&device_num));
37323+ atomic_inc_return_unchecked(&device_num));
37324
37325 /* The new device belongs to this bus */
37326 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
37327diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
37328index 3f26f71..fb5c787 100644
37329--- a/drivers/staging/iio/ring_generic.h
37330+++ b/drivers/staging/iio/ring_generic.h
37331@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
37332
37333 int (*is_enabled)(struct iio_ring_buffer *ring);
37334 int (*enable)(struct iio_ring_buffer *ring);
37335-};
37336+} __no_const;
37337
37338 struct iio_ring_setup_ops {
37339 int (*preenable)(struct iio_dev *);
37340diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
37341index cfec92d..a65dacf 100644
37342--- a/drivers/staging/mei/interface.c
37343+++ b/drivers/staging/mei/interface.c
37344@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
37345 mei_hdr->reserved = 0;
37346
37347 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
37348- memset(mei_flow_control, 0, sizeof(mei_flow_control));
37349+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
37350 mei_flow_control->host_addr = cl->host_client_id;
37351 mei_flow_control->me_addr = cl->me_client_id;
37352 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
37353@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
37354
37355 mei_cli_disconnect =
37356 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
37357- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
37358+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
37359 mei_cli_disconnect->host_addr = cl->host_client_id;
37360 mei_cli_disconnect->me_addr = cl->me_client_id;
37361 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
37362diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37363index 8b307b4..a97ac91 100644
37364--- a/drivers/staging/octeon/ethernet-rx.c
37365+++ b/drivers/staging/octeon/ethernet-rx.c
37366@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37367 /* Increment RX stats for virtual ports */
37368 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37369 #ifdef CONFIG_64BIT
37370- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37371- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37372+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37373+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37374 #else
37375- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37376- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37377+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37378+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37379 #endif
37380 }
37381 netif_receive_skb(skb);
37382@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37383 dev->name);
37384 */
37385 #ifdef CONFIG_64BIT
37386- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37387+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37388 #else
37389- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37390+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37391 #endif
37392 dev_kfree_skb_irq(skb);
37393 }
37394diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37395index a8f780e..aef1098 100644
37396--- a/drivers/staging/octeon/ethernet.c
37397+++ b/drivers/staging/octeon/ethernet.c
37398@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37399 * since the RX tasklet also increments it.
37400 */
37401 #ifdef CONFIG_64BIT
37402- atomic64_add(rx_status.dropped_packets,
37403- (atomic64_t *)&priv->stats.rx_dropped);
37404+ atomic64_add_unchecked(rx_status.dropped_packets,
37405+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37406 #else
37407- atomic_add(rx_status.dropped_packets,
37408- (atomic_t *)&priv->stats.rx_dropped);
37409+ atomic_add_unchecked(rx_status.dropped_packets,
37410+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37411 #endif
37412 }
37413
37414diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
37415index f3c6060..56bf826 100644
37416--- a/drivers/staging/pohmelfs/inode.c
37417+++ b/drivers/staging/pohmelfs/inode.c
37418@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37419 mutex_init(&psb->mcache_lock);
37420 psb->mcache_root = RB_ROOT;
37421 psb->mcache_timeout = msecs_to_jiffies(5000);
37422- atomic_long_set(&psb->mcache_gen, 0);
37423+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37424
37425 psb->trans_max_pages = 100;
37426
37427@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37428 INIT_LIST_HEAD(&psb->crypto_ready_list);
37429 INIT_LIST_HEAD(&psb->crypto_active_list);
37430
37431- atomic_set(&psb->trans_gen, 1);
37432+ atomic_set_unchecked(&psb->trans_gen, 1);
37433 atomic_long_set(&psb->total_inodes, 0);
37434
37435 mutex_init(&psb->state_lock);
37436diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
37437index e22665c..a2a9390 100644
37438--- a/drivers/staging/pohmelfs/mcache.c
37439+++ b/drivers/staging/pohmelfs/mcache.c
37440@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
37441 m->data = data;
37442 m->start = start;
37443 m->size = size;
37444- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37445+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37446
37447 mutex_lock(&psb->mcache_lock);
37448 err = pohmelfs_mcache_insert(psb, m);
37449diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
37450index 985b6b7..7699e05 100644
37451--- a/drivers/staging/pohmelfs/netfs.h
37452+++ b/drivers/staging/pohmelfs/netfs.h
37453@@ -571,14 +571,14 @@ struct pohmelfs_config;
37454 struct pohmelfs_sb {
37455 struct rb_root mcache_root;
37456 struct mutex mcache_lock;
37457- atomic_long_t mcache_gen;
37458+ atomic_long_unchecked_t mcache_gen;
37459 unsigned long mcache_timeout;
37460
37461 unsigned int idx;
37462
37463 unsigned int trans_retries;
37464
37465- atomic_t trans_gen;
37466+ atomic_unchecked_t trans_gen;
37467
37468 unsigned int crypto_attached_size;
37469 unsigned int crypto_align_size;
37470diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
37471index 36a2535..0591bf4 100644
37472--- a/drivers/staging/pohmelfs/trans.c
37473+++ b/drivers/staging/pohmelfs/trans.c
37474@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
37475 int err;
37476 struct netfs_cmd *cmd = t->iovec.iov_base;
37477
37478- t->gen = atomic_inc_return(&psb->trans_gen);
37479+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37480
37481 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37482 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37483diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37484index b70cb2b..4db41a7 100644
37485--- a/drivers/staging/rtl8712/rtl871x_io.h
37486+++ b/drivers/staging/rtl8712/rtl871x_io.h
37487@@ -83,7 +83,7 @@ struct _io_ops {
37488 u8 *pmem);
37489 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37490 u8 *pmem);
37491-};
37492+} __no_const;
37493
37494 struct io_req {
37495 struct list_head list;
37496diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37497index c7b5e8b..783d6cb 100644
37498--- a/drivers/staging/sbe-2t3e3/netdev.c
37499+++ b/drivers/staging/sbe-2t3e3/netdev.c
37500@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37501 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37502
37503 if (rlen)
37504- if (copy_to_user(data, &resp, rlen))
37505+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37506 return -EFAULT;
37507
37508 return 0;
37509diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37510index be21617..0954e45 100644
37511--- a/drivers/staging/usbip/usbip_common.h
37512+++ b/drivers/staging/usbip/usbip_common.h
37513@@ -289,7 +289,7 @@ struct usbip_device {
37514 void (*shutdown)(struct usbip_device *);
37515 void (*reset)(struct usbip_device *);
37516 void (*unusable)(struct usbip_device *);
37517- } eh_ops;
37518+ } __no_const eh_ops;
37519 };
37520
37521 #if 0
37522diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37523index 71a586e..4d8a91a 100644
37524--- a/drivers/staging/usbip/vhci.h
37525+++ b/drivers/staging/usbip/vhci.h
37526@@ -85,7 +85,7 @@ struct vhci_hcd {
37527 unsigned resuming:1;
37528 unsigned long re_timeout;
37529
37530- atomic_t seqnum;
37531+ atomic_unchecked_t seqnum;
37532
37533 /*
37534 * NOTE:
37535diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37536index 2ee97e2..0420b86 100644
37537--- a/drivers/staging/usbip/vhci_hcd.c
37538+++ b/drivers/staging/usbip/vhci_hcd.c
37539@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37540 return;
37541 }
37542
37543- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37544+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37545 if (priv->seqnum == 0xffff)
37546 dev_info(&urb->dev->dev, "seqnum max\n");
37547
37548@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37549 return -ENOMEM;
37550 }
37551
37552- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37553+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37554 if (unlink->seqnum == 0xffff)
37555 pr_info("seqnum max\n");
37556
37557@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37558 vdev->rhport = rhport;
37559 }
37560
37561- atomic_set(&vhci->seqnum, 0);
37562+ atomic_set_unchecked(&vhci->seqnum, 0);
37563 spin_lock_init(&vhci->lock);
37564
37565 hcd->power_budget = 0; /* no limit */
37566diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37567index 3872b8c..fe6d2f4 100644
37568--- a/drivers/staging/usbip/vhci_rx.c
37569+++ b/drivers/staging/usbip/vhci_rx.c
37570@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37571 if (!urb) {
37572 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37573 pr_info("max seqnum %d\n",
37574- atomic_read(&the_controller->seqnum));
37575+ atomic_read_unchecked(&the_controller->seqnum));
37576 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37577 return;
37578 }
37579diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37580index 7735027..30eed13 100644
37581--- a/drivers/staging/vt6655/hostap.c
37582+++ b/drivers/staging/vt6655/hostap.c
37583@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37584 *
37585 */
37586
37587+static net_device_ops_no_const apdev_netdev_ops;
37588+
37589 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37590 {
37591 PSDevice apdev_priv;
37592 struct net_device *dev = pDevice->dev;
37593 int ret;
37594- const struct net_device_ops apdev_netdev_ops = {
37595- .ndo_start_xmit = pDevice->tx_80211,
37596- };
37597
37598 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37599
37600@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37601 *apdev_priv = *pDevice;
37602 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37603
37604+ /* only half broken now */
37605+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37606 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37607
37608 pDevice->apdev->type = ARPHRD_IEEE80211;
37609diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37610index 51b5adf..098e320 100644
37611--- a/drivers/staging/vt6656/hostap.c
37612+++ b/drivers/staging/vt6656/hostap.c
37613@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37614 *
37615 */
37616
37617+static net_device_ops_no_const apdev_netdev_ops;
37618+
37619 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37620 {
37621 PSDevice apdev_priv;
37622 struct net_device *dev = pDevice->dev;
37623 int ret;
37624- const struct net_device_ops apdev_netdev_ops = {
37625- .ndo_start_xmit = pDevice->tx_80211,
37626- };
37627
37628 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37629
37630@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37631 *apdev_priv = *pDevice;
37632 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37633
37634+ /* only half broken now */
37635+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37636 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37637
37638 pDevice->apdev->type = ARPHRD_IEEE80211;
37639diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37640index 7843dfd..3db105f 100644
37641--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37642+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37643@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37644
37645 struct usbctlx_completor {
37646 int (*complete) (struct usbctlx_completor *);
37647-};
37648+} __no_const;
37649
37650 static int
37651 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37652diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37653index 1ca66ea..76f1343 100644
37654--- a/drivers/staging/zcache/tmem.c
37655+++ b/drivers/staging/zcache/tmem.c
37656@@ -39,7 +39,7 @@
37657 * A tmem host implementation must use this function to register callbacks
37658 * for memory allocation.
37659 */
37660-static struct tmem_hostops tmem_hostops;
37661+static tmem_hostops_no_const tmem_hostops;
37662
37663 static void tmem_objnode_tree_init(void);
37664
37665@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37666 * A tmem host implementation must use this function to register
37667 * callbacks for a page-accessible memory (PAM) implementation
37668 */
37669-static struct tmem_pamops tmem_pamops;
37670+static tmem_pamops_no_const tmem_pamops;
37671
37672 void tmem_register_pamops(struct tmem_pamops *m)
37673 {
37674diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37675index ed147c4..94fc3c6 100644
37676--- a/drivers/staging/zcache/tmem.h
37677+++ b/drivers/staging/zcache/tmem.h
37678@@ -180,6 +180,7 @@ struct tmem_pamops {
37679 void (*new_obj)(struct tmem_obj *);
37680 int (*replace_in_obj)(void *, struct tmem_obj *);
37681 };
37682+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37683 extern void tmem_register_pamops(struct tmem_pamops *m);
37684
37685 /* memory allocation methods provided by the host implementation */
37686@@ -189,6 +190,7 @@ struct tmem_hostops {
37687 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37688 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37689 };
37690+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37691 extern void tmem_register_hostops(struct tmem_hostops *m);
37692
37693 /* core tmem accessor functions */
37694diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37695index c4ac6f6..4f90f53 100644
37696--- a/drivers/target/iscsi/iscsi_target.c
37697+++ b/drivers/target/iscsi/iscsi_target.c
37698@@ -1370,7 +1370,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37699 * outstanding_r2ts reaches zero, go ahead and send the delayed
37700 * TASK_ABORTED status.
37701 */
37702- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37703+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37704 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37705 if (--cmd->outstanding_r2ts < 1) {
37706 iscsit_stop_dataout_timer(cmd);
37707diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
37708index 8badcb4..94c9ac6 100644
37709--- a/drivers/target/target_core_alua.c
37710+++ b/drivers/target/target_core_alua.c
37711@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata(
37712 char path[ALUA_METADATA_PATH_LEN];
37713 int len;
37714
37715+ pax_track_stack();
37716+
37717 memset(path, 0, ALUA_METADATA_PATH_LEN);
37718
37719 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
37720@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata(
37721 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
37722 int len;
37723
37724+ pax_track_stack();
37725+
37726 memset(path, 0, ALUA_METADATA_PATH_LEN);
37727 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
37728
37729diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37730index 5f91397..dcc2d25 100644
37731--- a/drivers/target/target_core_cdb.c
37732+++ b/drivers/target/target_core_cdb.c
37733@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
37734 int length = 0;
37735 unsigned char buf[SE_MODE_PAGE_BUF];
37736
37737+ pax_track_stack();
37738+
37739 memset(buf, 0, SE_MODE_PAGE_BUF);
37740
37741 switch (cdb[2] & 0x3f) {
37742diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
37743index b2575d8..b6b28fd 100644
37744--- a/drivers/target/target_core_configfs.c
37745+++ b/drivers/target/target_core_configfs.c
37746@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
37747 ssize_t len = 0;
37748 int reg_count = 0, prf_isid;
37749
37750+ pax_track_stack();
37751+
37752 if (!su_dev->se_dev_ptr)
37753 return -ENODEV;
37754
37755diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37756index 7fd3a16..bc2fb3e 100644
37757--- a/drivers/target/target_core_pr.c
37758+++ b/drivers/target/target_core_pr.c
37759@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration(
37760 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
37761 u16 tpgt;
37762
37763+ pax_track_stack();
37764+
37765 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
37766 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
37767 /*
37768@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf(
37769 ssize_t len = 0;
37770 int reg_count = 0;
37771
37772+ pax_track_stack();
37773+
37774 memset(buf, 0, pr_aptpl_buf_len);
37775 /*
37776 * Called to clear metadata once APTPL has been deactivated.
37777@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file(
37778 char path[512];
37779 int ret;
37780
37781+ pax_track_stack();
37782+
37783 memset(iov, 0, sizeof(struct iovec));
37784 memset(path, 0, 512);
37785
37786diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37787index 5c1b8c5..0cb7d0e 100644
37788--- a/drivers/target/target_core_tmr.c
37789+++ b/drivers/target/target_core_tmr.c
37790@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
37791 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37792 cmd->t_task_list_num,
37793 atomic_read(&cmd->t_task_cdbs_left),
37794- atomic_read(&cmd->t_task_cdbs_sent),
37795+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37796 atomic_read(&cmd->t_transport_active),
37797 atomic_read(&cmd->t_transport_stop),
37798 atomic_read(&cmd->t_transport_sent));
37799@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37800 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37801 " task: %p, t_fe_count: %d dev: %p\n", task,
37802 fe_count, dev);
37803- atomic_set(&cmd->t_transport_aborted, 1);
37804+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37805 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37806
37807 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37808@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37809 }
37810 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37811 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37812- atomic_set(&cmd->t_transport_aborted, 1);
37813+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37814 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37815
37816 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37817diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37818index e2added..ccb5251 100644
37819--- a/drivers/target/target_core_transport.c
37820+++ b/drivers/target/target_core_transport.c
37821@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba(
37822
37823 dev->queue_depth = dev_limits->queue_depth;
37824 atomic_set(&dev->depth_left, dev->queue_depth);
37825- atomic_set(&dev->dev_ordered_id, 0);
37826+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37827
37828 se_dev_set_default_attribs(dev, dev_limits);
37829
37830@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37831 * Used to determine when ORDERED commands should go from
37832 * Dormant to Active status.
37833 */
37834- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37835+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37836 smp_mb__after_atomic_inc();
37837 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37838 cmd->se_ordered_id, cmd->sam_task_attr,
37839@@ -1960,7 +1960,7 @@ static void transport_generic_request_failure(
37840 " t_transport_active: %d t_transport_stop: %d"
37841 " t_transport_sent: %d\n", cmd->t_task_list_num,
37842 atomic_read(&cmd->t_task_cdbs_left),
37843- atomic_read(&cmd->t_task_cdbs_sent),
37844+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37845 atomic_read(&cmd->t_task_cdbs_ex_left),
37846 atomic_read(&cmd->t_transport_active),
37847 atomic_read(&cmd->t_transport_stop),
37848@@ -2460,9 +2460,9 @@ check_depth:
37849 spin_lock_irqsave(&cmd->t_state_lock, flags);
37850 atomic_set(&task->task_active, 1);
37851 atomic_set(&task->task_sent, 1);
37852- atomic_inc(&cmd->t_task_cdbs_sent);
37853+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37854
37855- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37856+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37857 cmd->t_task_list_num)
37858 atomic_set(&cmd->transport_sent, 1);
37859
37860@@ -4682,7 +4682,7 @@ static void transport_generic_wait_for_tasks(
37861 atomic_set(&cmd->transport_lun_stop, 0);
37862 }
37863 if (!atomic_read(&cmd->t_transport_active) ||
37864- atomic_read(&cmd->t_transport_aborted))
37865+ atomic_read_unchecked(&cmd->t_transport_aborted))
37866 goto remove;
37867
37868 atomic_set(&cmd->t_transport_stop, 1);
37869@@ -4917,7 +4917,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37870 {
37871 int ret = 0;
37872
37873- if (atomic_read(&cmd->t_transport_aborted) != 0) {
37874+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37875 if (!send_status ||
37876 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37877 return 1;
37878@@ -4954,7 +4954,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37879 */
37880 if (cmd->data_direction == DMA_TO_DEVICE) {
37881 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37882- atomic_inc(&cmd->t_transport_aborted);
37883+ atomic_inc_unchecked(&cmd->t_transport_aborted);
37884 smp_mb__after_atomic_inc();
37885 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
37886 transport_new_cmd_failure(cmd);
37887@@ -5068,7 +5068,7 @@ static void transport_processing_shutdown(struct se_device *dev)
37888 cmd->se_tfo->get_task_tag(cmd),
37889 cmd->t_task_list_num,
37890 atomic_read(&cmd->t_task_cdbs_left),
37891- atomic_read(&cmd->t_task_cdbs_sent),
37892+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37893 atomic_read(&cmd->t_transport_active),
37894 atomic_read(&cmd->t_transport_stop),
37895 atomic_read(&cmd->t_transport_sent));
37896diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
37897index d5f923b..9c78228 100644
37898--- a/drivers/telephony/ixj.c
37899+++ b/drivers/telephony/ixj.c
37900@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37901 bool mContinue;
37902 char *pIn, *pOut;
37903
37904+ pax_track_stack();
37905+
37906 if (!SCI_Prepare(j))
37907 return 0;
37908
37909diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37910index 4c8b665..1d931eb 100644
37911--- a/drivers/tty/hvc/hvcs.c
37912+++ b/drivers/tty/hvc/hvcs.c
37913@@ -83,6 +83,7 @@
37914 #include <asm/hvcserver.h>
37915 #include <asm/uaccess.h>
37916 #include <asm/vio.h>
37917+#include <asm/local.h>
37918
37919 /*
37920 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37921@@ -270,7 +271,7 @@ struct hvcs_struct {
37922 unsigned int index;
37923
37924 struct tty_struct *tty;
37925- int open_count;
37926+ local_t open_count;
37927
37928 /*
37929 * Used to tell the driver kernel_thread what operations need to take
37930@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37931
37932 spin_lock_irqsave(&hvcsd->lock, flags);
37933
37934- if (hvcsd->open_count > 0) {
37935+ if (local_read(&hvcsd->open_count) > 0) {
37936 spin_unlock_irqrestore(&hvcsd->lock, flags);
37937 printk(KERN_INFO "HVCS: vterm state unchanged. "
37938 "The hvcs device node is still in use.\n");
37939@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37940 if ((retval = hvcs_partner_connect(hvcsd)))
37941 goto error_release;
37942
37943- hvcsd->open_count = 1;
37944+ local_set(&hvcsd->open_count, 1);
37945 hvcsd->tty = tty;
37946 tty->driver_data = hvcsd;
37947
37948@@ -1179,7 +1180,7 @@ fast_open:
37949
37950 spin_lock_irqsave(&hvcsd->lock, flags);
37951 kref_get(&hvcsd->kref);
37952- hvcsd->open_count++;
37953+ local_inc(&hvcsd->open_count);
37954 hvcsd->todo_mask |= HVCS_SCHED_READ;
37955 spin_unlock_irqrestore(&hvcsd->lock, flags);
37956
37957@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37958 hvcsd = tty->driver_data;
37959
37960 spin_lock_irqsave(&hvcsd->lock, flags);
37961- if (--hvcsd->open_count == 0) {
37962+ if (local_dec_and_test(&hvcsd->open_count)) {
37963
37964 vio_disable_interrupts(hvcsd->vdev);
37965
37966@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37967 free_irq(irq, hvcsd);
37968 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37969 return;
37970- } else if (hvcsd->open_count < 0) {
37971+ } else if (local_read(&hvcsd->open_count) < 0) {
37972 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37973 " is missmanaged.\n",
37974- hvcsd->vdev->unit_address, hvcsd->open_count);
37975+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37976 }
37977
37978 spin_unlock_irqrestore(&hvcsd->lock, flags);
37979@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37980
37981 spin_lock_irqsave(&hvcsd->lock, flags);
37982 /* Preserve this so that we know how many kref refs to put */
37983- temp_open_count = hvcsd->open_count;
37984+ temp_open_count = local_read(&hvcsd->open_count);
37985
37986 /*
37987 * Don't kref put inside the spinlock because the destruction
37988@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37989 hvcsd->tty->driver_data = NULL;
37990 hvcsd->tty = NULL;
37991
37992- hvcsd->open_count = 0;
37993+ local_set(&hvcsd->open_count, 0);
37994
37995 /* This will drop any buffered data on the floor which is OK in a hangup
37996 * scenario. */
37997@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37998 * the middle of a write operation? This is a crummy place to do this
37999 * but we want to keep it all in the spinlock.
38000 */
38001- if (hvcsd->open_count <= 0) {
38002+ if (local_read(&hvcsd->open_count) <= 0) {
38003 spin_unlock_irqrestore(&hvcsd->lock, flags);
38004 return -ENODEV;
38005 }
38006@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
38007 {
38008 struct hvcs_struct *hvcsd = tty->driver_data;
38009
38010- if (!hvcsd || hvcsd->open_count <= 0)
38011+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
38012 return 0;
38013
38014 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
38015diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
38016index ef92869..f4ebd88 100644
38017--- a/drivers/tty/ipwireless/tty.c
38018+++ b/drivers/tty/ipwireless/tty.c
38019@@ -29,6 +29,7 @@
38020 #include <linux/tty_driver.h>
38021 #include <linux/tty_flip.h>
38022 #include <linux/uaccess.h>
38023+#include <asm/local.h>
38024
38025 #include "tty.h"
38026 #include "network.h"
38027@@ -51,7 +52,7 @@ struct ipw_tty {
38028 int tty_type;
38029 struct ipw_network *network;
38030 struct tty_struct *linux_tty;
38031- int open_count;
38032+ local_t open_count;
38033 unsigned int control_lines;
38034 struct mutex ipw_tty_mutex;
38035 int tx_bytes_queued;
38036@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38037 mutex_unlock(&tty->ipw_tty_mutex);
38038 return -ENODEV;
38039 }
38040- if (tty->open_count == 0)
38041+ if (local_read(&tty->open_count) == 0)
38042 tty->tx_bytes_queued = 0;
38043
38044- tty->open_count++;
38045+ local_inc(&tty->open_count);
38046
38047 tty->linux_tty = linux_tty;
38048 linux_tty->driver_data = tty;
38049@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38050
38051 static void do_ipw_close(struct ipw_tty *tty)
38052 {
38053- tty->open_count--;
38054-
38055- if (tty->open_count == 0) {
38056+ if (local_dec_return(&tty->open_count) == 0) {
38057 struct tty_struct *linux_tty = tty->linux_tty;
38058
38059 if (linux_tty != NULL) {
38060@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
38061 return;
38062
38063 mutex_lock(&tty->ipw_tty_mutex);
38064- if (tty->open_count == 0) {
38065+ if (local_read(&tty->open_count) == 0) {
38066 mutex_unlock(&tty->ipw_tty_mutex);
38067 return;
38068 }
38069@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38070 return;
38071 }
38072
38073- if (!tty->open_count) {
38074+ if (!local_read(&tty->open_count)) {
38075 mutex_unlock(&tty->ipw_tty_mutex);
38076 return;
38077 }
38078@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38079 return -ENODEV;
38080
38081 mutex_lock(&tty->ipw_tty_mutex);
38082- if (!tty->open_count) {
38083+ if (!local_read(&tty->open_count)) {
38084 mutex_unlock(&tty->ipw_tty_mutex);
38085 return -EINVAL;
38086 }
38087@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38088 if (!tty)
38089 return -ENODEV;
38090
38091- if (!tty->open_count)
38092+ if (!local_read(&tty->open_count))
38093 return -EINVAL;
38094
38095 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38096@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38097 if (!tty)
38098 return 0;
38099
38100- if (!tty->open_count)
38101+ if (!local_read(&tty->open_count))
38102 return 0;
38103
38104 return tty->tx_bytes_queued;
38105@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38106 if (!tty)
38107 return -ENODEV;
38108
38109- if (!tty->open_count)
38110+ if (!local_read(&tty->open_count))
38111 return -EINVAL;
38112
38113 return get_control_lines(tty);
38114@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38115 if (!tty)
38116 return -ENODEV;
38117
38118- if (!tty->open_count)
38119+ if (!local_read(&tty->open_count))
38120 return -EINVAL;
38121
38122 return set_control_lines(tty, set, clear);
38123@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38124 if (!tty)
38125 return -ENODEV;
38126
38127- if (!tty->open_count)
38128+ if (!local_read(&tty->open_count))
38129 return -EINVAL;
38130
38131 /* FIXME: Exactly how is the tty object locked here .. */
38132@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38133 against a parallel ioctl etc */
38134 mutex_lock(&ttyj->ipw_tty_mutex);
38135 }
38136- while (ttyj->open_count)
38137+ while (local_read(&ttyj->open_count))
38138 do_ipw_close(ttyj);
38139 ipwireless_disassociate_network_ttys(network,
38140 ttyj->channel_idx);
38141diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38142index 8a50e4e..7d9ca3d 100644
38143--- a/drivers/tty/n_gsm.c
38144+++ b/drivers/tty/n_gsm.c
38145@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38146 kref_init(&dlci->ref);
38147 mutex_init(&dlci->mutex);
38148 dlci->fifo = &dlci->_fifo;
38149- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38150+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38151 kfree(dlci);
38152 return NULL;
38153 }
38154diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38155index 39d6ab6..eb97f41 100644
38156--- a/drivers/tty/n_tty.c
38157+++ b/drivers/tty/n_tty.c
38158@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38159 {
38160 *ops = tty_ldisc_N_TTY;
38161 ops->owner = NULL;
38162- ops->refcount = ops->flags = 0;
38163+ atomic_set(&ops->refcount, 0);
38164+ ops->flags = 0;
38165 }
38166 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38167diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38168index e18604b..a7d5a11 100644
38169--- a/drivers/tty/pty.c
38170+++ b/drivers/tty/pty.c
38171@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
38172 register_sysctl_table(pty_root_table);
38173
38174 /* Now create the /dev/ptmx special device */
38175+ pax_open_kernel();
38176 tty_default_fops(&ptmx_fops);
38177- ptmx_fops.open = ptmx_open;
38178+ *(void **)&ptmx_fops.open = ptmx_open;
38179+ pax_close_kernel();
38180
38181 cdev_init(&ptmx_cdev, &ptmx_fops);
38182 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38183diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
38184index 6a1241c..d04ab0d 100644
38185--- a/drivers/tty/rocket.c
38186+++ b/drivers/tty/rocket.c
38187@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
38188 struct rocket_ports tmp;
38189 int board;
38190
38191+ pax_track_stack();
38192+
38193 if (!retports)
38194 return -EFAULT;
38195 memset(&tmp, 0, sizeof (tmp));
38196diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38197index 87e7e6c..89744e0 100644
38198--- a/drivers/tty/serial/kgdboc.c
38199+++ b/drivers/tty/serial/kgdboc.c
38200@@ -23,8 +23,9 @@
38201 #define MAX_CONFIG_LEN 40
38202
38203 static struct kgdb_io kgdboc_io_ops;
38204+static struct kgdb_io kgdboc_io_ops_console;
38205
38206-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38207+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38208 static int configured = -1;
38209
38210 static char config[MAX_CONFIG_LEN];
38211@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
38212 kgdboc_unregister_kbd();
38213 if (configured == 1)
38214 kgdb_unregister_io_module(&kgdboc_io_ops);
38215+ else if (configured == 2)
38216+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
38217 }
38218
38219 static int configure_kgdboc(void)
38220@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
38221 int err;
38222 char *cptr = config;
38223 struct console *cons;
38224+ int is_console = 0;
38225
38226 err = kgdboc_option_setup(config);
38227 if (err || !strlen(config) || isspace(config[0]))
38228 goto noconfig;
38229
38230 err = -ENODEV;
38231- kgdboc_io_ops.is_console = 0;
38232 kgdb_tty_driver = NULL;
38233
38234 kgdboc_use_kms = 0;
38235@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
38236 int idx;
38237 if (cons->device && cons->device(cons, &idx) == p &&
38238 idx == tty_line) {
38239- kgdboc_io_ops.is_console = 1;
38240+ is_console = 1;
38241 break;
38242 }
38243 cons = cons->next;
38244@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
38245 kgdb_tty_line = tty_line;
38246
38247 do_register:
38248- err = kgdb_register_io_module(&kgdboc_io_ops);
38249+ if (is_console) {
38250+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38251+ configured = 2;
38252+ } else {
38253+ err = kgdb_register_io_module(&kgdboc_io_ops);
38254+ configured = 1;
38255+ }
38256 if (err)
38257 goto noconfig;
38258
38259- configured = 1;
38260-
38261 return 0;
38262
38263 noconfig:
38264@@ -212,7 +219,7 @@ noconfig:
38265 static int __init init_kgdboc(void)
38266 {
38267 /* Already configured? */
38268- if (configured == 1)
38269+ if (configured >= 1)
38270 return 0;
38271
38272 return configure_kgdboc();
38273@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38274 if (config[len - 1] == '\n')
38275 config[len - 1] = '\0';
38276
38277- if (configured == 1)
38278+ if (configured >= 1)
38279 cleanup_kgdboc();
38280
38281 /* Go and configure with the new params. */
38282@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
38283 .post_exception = kgdboc_post_exp_handler,
38284 };
38285
38286+static struct kgdb_io kgdboc_io_ops_console = {
38287+ .name = "kgdboc",
38288+ .read_char = kgdboc_get_char,
38289+ .write_char = kgdboc_put_char,
38290+ .pre_exception = kgdboc_pre_exp_handler,
38291+ .post_exception = kgdboc_post_exp_handler,
38292+ .is_console = 1
38293+};
38294+
38295 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38296 /* This is only available if kgdboc is a built in for early debugging */
38297 static int __init kgdboc_early_init(char *opt)
38298diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
38299index cab52f4..29fc6aa 100644
38300--- a/drivers/tty/serial/mfd.c
38301+++ b/drivers/tty/serial/mfd.c
38302@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
38303 }
38304
38305 /* First 3 are UART ports, and the 4th is the DMA */
38306-static const struct pci_device_id pci_ids[] __devinitdata = {
38307+static const struct pci_device_id pci_ids[] __devinitconst = {
38308 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
38309 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
38310 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
38311diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
38312index 23bc743..d425c07 100644
38313--- a/drivers/tty/serial/mrst_max3110.c
38314+++ b/drivers/tty/serial/mrst_max3110.c
38315@@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max)
38316 int loop = 1, num, total = 0;
38317 u8 recv_buf[512], *pbuf;
38318
38319+ pax_track_stack();
38320+
38321 pbuf = recv_buf;
38322 do {
38323 num = max3110_read_multi(max, pbuf);
38324diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38325index 1a890e2..1d8139c 100644
38326--- a/drivers/tty/tty_io.c
38327+++ b/drivers/tty/tty_io.c
38328@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38329
38330 void tty_default_fops(struct file_operations *fops)
38331 {
38332- *fops = tty_fops;
38333+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38334 }
38335
38336 /*
38337diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38338index a76c808..ecbc743 100644
38339--- a/drivers/tty/tty_ldisc.c
38340+++ b/drivers/tty/tty_ldisc.c
38341@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38342 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38343 struct tty_ldisc_ops *ldo = ld->ops;
38344
38345- ldo->refcount--;
38346+ atomic_dec(&ldo->refcount);
38347 module_put(ldo->owner);
38348 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38349
38350@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38351 spin_lock_irqsave(&tty_ldisc_lock, flags);
38352 tty_ldiscs[disc] = new_ldisc;
38353 new_ldisc->num = disc;
38354- new_ldisc->refcount = 0;
38355+ atomic_set(&new_ldisc->refcount, 0);
38356 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38357
38358 return ret;
38359@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
38360 return -EINVAL;
38361
38362 spin_lock_irqsave(&tty_ldisc_lock, flags);
38363- if (tty_ldiscs[disc]->refcount)
38364+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38365 ret = -EBUSY;
38366 else
38367 tty_ldiscs[disc] = NULL;
38368@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38369 if (ldops) {
38370 ret = ERR_PTR(-EAGAIN);
38371 if (try_module_get(ldops->owner)) {
38372- ldops->refcount++;
38373+ atomic_inc(&ldops->refcount);
38374 ret = ldops;
38375 }
38376 }
38377@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38378 unsigned long flags;
38379
38380 spin_lock_irqsave(&tty_ldisc_lock, flags);
38381- ldops->refcount--;
38382+ atomic_dec(&ldops->refcount);
38383 module_put(ldops->owner);
38384 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38385 }
38386diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38387index 3761ccf..2c613b3 100644
38388--- a/drivers/tty/vt/keyboard.c
38389+++ b/drivers/tty/vt/keyboard.c
38390@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38391 kbd->kbdmode == VC_OFF) &&
38392 value != KVAL(K_SAK))
38393 return; /* SAK is allowed even in raw mode */
38394+
38395+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38396+ {
38397+ void *func = fn_handler[value];
38398+ if (func == fn_show_state || func == fn_show_ptregs ||
38399+ func == fn_show_mem)
38400+ return;
38401+ }
38402+#endif
38403+
38404 fn_handler[value](vc);
38405 }
38406
38407diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
38408index b3915b7..e716839 100644
38409--- a/drivers/tty/vt/vt.c
38410+++ b/drivers/tty/vt/vt.c
38411@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
38412
38413 static void notify_write(struct vc_data *vc, unsigned int unicode)
38414 {
38415- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
38416+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
38417 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
38418 }
38419
38420diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38421index 5e096f4..0da1363 100644
38422--- a/drivers/tty/vt/vt_ioctl.c
38423+++ b/drivers/tty/vt/vt_ioctl.c
38424@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38425 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38426 return -EFAULT;
38427
38428- if (!capable(CAP_SYS_TTY_CONFIG))
38429- perm = 0;
38430-
38431 switch (cmd) {
38432 case KDGKBENT:
38433 key_map = key_maps[s];
38434@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38435 val = (i ? K_HOLE : K_NOSUCHMAP);
38436 return put_user(val, &user_kbe->kb_value);
38437 case KDSKBENT:
38438+ if (!capable(CAP_SYS_TTY_CONFIG))
38439+ perm = 0;
38440+
38441 if (!perm)
38442 return -EPERM;
38443 if (!i && v == K_NOSUCHMAP) {
38444@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38445 int i, j, k;
38446 int ret;
38447
38448- if (!capable(CAP_SYS_TTY_CONFIG))
38449- perm = 0;
38450-
38451 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38452 if (!kbs) {
38453 ret = -ENOMEM;
38454@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38455 kfree(kbs);
38456 return ((p && *p) ? -EOVERFLOW : 0);
38457 case KDSKBSENT:
38458+ if (!capable(CAP_SYS_TTY_CONFIG))
38459+ perm = 0;
38460+
38461 if (!perm) {
38462 ret = -EPERM;
38463 goto reterr;
38464diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38465index d2efe82..9440ab6 100644
38466--- a/drivers/uio/uio.c
38467+++ b/drivers/uio/uio.c
38468@@ -25,6 +25,7 @@
38469 #include <linux/kobject.h>
38470 #include <linux/cdev.h>
38471 #include <linux/uio_driver.h>
38472+#include <asm/local.h>
38473
38474 #define UIO_MAX_DEVICES (1U << MINORBITS)
38475
38476@@ -32,10 +33,10 @@ struct uio_device {
38477 struct module *owner;
38478 struct device *dev;
38479 int minor;
38480- atomic_t event;
38481+ atomic_unchecked_t event;
38482 struct fasync_struct *async_queue;
38483 wait_queue_head_t wait;
38484- int vma_count;
38485+ local_t vma_count;
38486 struct uio_info *info;
38487 struct kobject *map_dir;
38488 struct kobject *portio_dir;
38489@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38490 struct device_attribute *attr, char *buf)
38491 {
38492 struct uio_device *idev = dev_get_drvdata(dev);
38493- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38494+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38495 }
38496
38497 static struct device_attribute uio_class_attributes[] = {
38498@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38499 {
38500 struct uio_device *idev = info->uio_dev;
38501
38502- atomic_inc(&idev->event);
38503+ atomic_inc_unchecked(&idev->event);
38504 wake_up_interruptible(&idev->wait);
38505 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38506 }
38507@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38508 }
38509
38510 listener->dev = idev;
38511- listener->event_count = atomic_read(&idev->event);
38512+ listener->event_count = atomic_read_unchecked(&idev->event);
38513 filep->private_data = listener;
38514
38515 if (idev->info->open) {
38516@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38517 return -EIO;
38518
38519 poll_wait(filep, &idev->wait, wait);
38520- if (listener->event_count != atomic_read(&idev->event))
38521+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38522 return POLLIN | POLLRDNORM;
38523 return 0;
38524 }
38525@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38526 do {
38527 set_current_state(TASK_INTERRUPTIBLE);
38528
38529- event_count = atomic_read(&idev->event);
38530+ event_count = atomic_read_unchecked(&idev->event);
38531 if (event_count != listener->event_count) {
38532 if (copy_to_user(buf, &event_count, count))
38533 retval = -EFAULT;
38534@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38535 static void uio_vma_open(struct vm_area_struct *vma)
38536 {
38537 struct uio_device *idev = vma->vm_private_data;
38538- idev->vma_count++;
38539+ local_inc(&idev->vma_count);
38540 }
38541
38542 static void uio_vma_close(struct vm_area_struct *vma)
38543 {
38544 struct uio_device *idev = vma->vm_private_data;
38545- idev->vma_count--;
38546+ local_dec(&idev->vma_count);
38547 }
38548
38549 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38550@@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner,
38551 idev->owner = owner;
38552 idev->info = info;
38553 init_waitqueue_head(&idev->wait);
38554- atomic_set(&idev->event, 0);
38555+ atomic_set_unchecked(&idev->event, 0);
38556
38557 ret = uio_get_minor(idev);
38558 if (ret)
38559diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38560index a845f8b..4f54072 100644
38561--- a/drivers/usb/atm/cxacru.c
38562+++ b/drivers/usb/atm/cxacru.c
38563@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38564 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38565 if (ret < 2)
38566 return -EINVAL;
38567- if (index < 0 || index > 0x7f)
38568+ if (index > 0x7f)
38569 return -EINVAL;
38570 pos += tmp;
38571
38572diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38573index d3448ca..d2864ca 100644
38574--- a/drivers/usb/atm/usbatm.c
38575+++ b/drivers/usb/atm/usbatm.c
38576@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38577 if (printk_ratelimit())
38578 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38579 __func__, vpi, vci);
38580- atomic_inc(&vcc->stats->rx_err);
38581+ atomic_inc_unchecked(&vcc->stats->rx_err);
38582 return;
38583 }
38584
38585@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38586 if (length > ATM_MAX_AAL5_PDU) {
38587 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38588 __func__, length, vcc);
38589- atomic_inc(&vcc->stats->rx_err);
38590+ atomic_inc_unchecked(&vcc->stats->rx_err);
38591 goto out;
38592 }
38593
38594@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38595 if (sarb->len < pdu_length) {
38596 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38597 __func__, pdu_length, sarb->len, vcc);
38598- atomic_inc(&vcc->stats->rx_err);
38599+ atomic_inc_unchecked(&vcc->stats->rx_err);
38600 goto out;
38601 }
38602
38603 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38604 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38605 __func__, vcc);
38606- atomic_inc(&vcc->stats->rx_err);
38607+ atomic_inc_unchecked(&vcc->stats->rx_err);
38608 goto out;
38609 }
38610
38611@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38612 if (printk_ratelimit())
38613 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38614 __func__, length);
38615- atomic_inc(&vcc->stats->rx_drop);
38616+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38617 goto out;
38618 }
38619
38620@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38621
38622 vcc->push(vcc, skb);
38623
38624- atomic_inc(&vcc->stats->rx);
38625+ atomic_inc_unchecked(&vcc->stats->rx);
38626 out:
38627 skb_trim(sarb, 0);
38628 }
38629@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38630 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38631
38632 usbatm_pop(vcc, skb);
38633- atomic_inc(&vcc->stats->tx);
38634+ atomic_inc_unchecked(&vcc->stats->tx);
38635
38636 skb = skb_dequeue(&instance->sndqueue);
38637 }
38638@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38639 if (!left--)
38640 return sprintf(page,
38641 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38642- atomic_read(&atm_dev->stats.aal5.tx),
38643- atomic_read(&atm_dev->stats.aal5.tx_err),
38644- atomic_read(&atm_dev->stats.aal5.rx),
38645- atomic_read(&atm_dev->stats.aal5.rx_err),
38646- atomic_read(&atm_dev->stats.aal5.rx_drop));
38647+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38648+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38649+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38650+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38651+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38652
38653 if (!left--) {
38654 if (instance->disconnected)
38655diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38656index 0149c09..f108812 100644
38657--- a/drivers/usb/core/devices.c
38658+++ b/drivers/usb/core/devices.c
38659@@ -126,7 +126,7 @@ static const char format_endpt[] =
38660 * time it gets called.
38661 */
38662 static struct device_connect_event {
38663- atomic_t count;
38664+ atomic_unchecked_t count;
38665 wait_queue_head_t wait;
38666 } device_event = {
38667 .count = ATOMIC_INIT(1),
38668@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38669
38670 void usbfs_conn_disc_event(void)
38671 {
38672- atomic_add(2, &device_event.count);
38673+ atomic_add_unchecked(2, &device_event.count);
38674 wake_up(&device_event.wait);
38675 }
38676
38677@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38678
38679 poll_wait(file, &device_event.wait, wait);
38680
38681- event_count = atomic_read(&device_event.count);
38682+ event_count = atomic_read_unchecked(&device_event.count);
38683 if (file->f_version != event_count) {
38684 file->f_version = event_count;
38685 return POLLIN | POLLRDNORM;
38686diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
38687index 0b5ec23..0da3d76 100644
38688--- a/drivers/usb/core/message.c
38689+++ b/drivers/usb/core/message.c
38690@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
38691 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38692 if (buf) {
38693 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38694- if (len > 0) {
38695- smallbuf = kmalloc(++len, GFP_NOIO);
38696+ if (len++ > 0) {
38697+ smallbuf = kmalloc(len, GFP_NOIO);
38698 if (!smallbuf)
38699 return buf;
38700 memcpy(smallbuf, buf, len);
38701diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38702index 1fc8f12..20647c1 100644
38703--- a/drivers/usb/early/ehci-dbgp.c
38704+++ b/drivers/usb/early/ehci-dbgp.c
38705@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38706
38707 #ifdef CONFIG_KGDB
38708 static struct kgdb_io kgdbdbgp_io_ops;
38709-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38710+static struct kgdb_io kgdbdbgp_io_ops_console;
38711+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38712 #else
38713 #define dbgp_kgdb_mode (0)
38714 #endif
38715@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38716 .write_char = kgdbdbgp_write_char,
38717 };
38718
38719+static struct kgdb_io kgdbdbgp_io_ops_console = {
38720+ .name = "kgdbdbgp",
38721+ .read_char = kgdbdbgp_read_char,
38722+ .write_char = kgdbdbgp_write_char,
38723+ .is_console = 1
38724+};
38725+
38726 static int kgdbdbgp_wait_time;
38727
38728 static int __init kgdbdbgp_parse_config(char *str)
38729@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38730 ptr++;
38731 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38732 }
38733- kgdb_register_io_module(&kgdbdbgp_io_ops);
38734- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38735+ if (early_dbgp_console.index != -1)
38736+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38737+ else
38738+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38739
38740 return 0;
38741 }
38742diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
38743index d718033..6075579 100644
38744--- a/drivers/usb/host/xhci-mem.c
38745+++ b/drivers/usb/host/xhci-mem.c
38746@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
38747 unsigned int num_tests;
38748 int i, ret;
38749
38750+ pax_track_stack();
38751+
38752 num_tests = ARRAY_SIZE(simple_test_vector);
38753 for (i = 0; i < num_tests; i++) {
38754 ret = xhci_test_trb_in_td(xhci,
38755diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38756index d6bea3e..60b250e 100644
38757--- a/drivers/usb/wusbcore/wa-hc.h
38758+++ b/drivers/usb/wusbcore/wa-hc.h
38759@@ -192,7 +192,7 @@ struct wahc {
38760 struct list_head xfer_delayed_list;
38761 spinlock_t xfer_list_lock;
38762 struct work_struct xfer_work;
38763- atomic_t xfer_id_count;
38764+ atomic_unchecked_t xfer_id_count;
38765 };
38766
38767
38768@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38769 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38770 spin_lock_init(&wa->xfer_list_lock);
38771 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38772- atomic_set(&wa->xfer_id_count, 1);
38773+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38774 }
38775
38776 /**
38777diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38778index 4193345..49ae93d 100644
38779--- a/drivers/usb/wusbcore/wa-xfer.c
38780+++ b/drivers/usb/wusbcore/wa-xfer.c
38781@@ -295,7 +295,7 @@ out:
38782 */
38783 static void wa_xfer_id_init(struct wa_xfer *xfer)
38784 {
38785- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38786+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38787 }
38788
38789 /*
38790diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38791index c14c42b..f955cc2 100644
38792--- a/drivers/vhost/vhost.c
38793+++ b/drivers/vhost/vhost.c
38794@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38795 return 0;
38796 }
38797
38798-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38799+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38800 {
38801 struct file *eventfp, *filep = NULL,
38802 *pollstart = NULL, *pollstop = NULL;
38803diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38804index b0b2ac3..89a4399 100644
38805--- a/drivers/video/aty/aty128fb.c
38806+++ b/drivers/video/aty/aty128fb.c
38807@@ -148,7 +148,7 @@ enum {
38808 };
38809
38810 /* Must match above enum */
38811-static const char *r128_family[] __devinitdata = {
38812+static const char *r128_family[] __devinitconst = {
38813 "AGP",
38814 "PCI",
38815 "PRO AGP",
38816diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38817index 5c3960d..15cf8fc 100644
38818--- a/drivers/video/fbcmap.c
38819+++ b/drivers/video/fbcmap.c
38820@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38821 rc = -ENODEV;
38822 goto out;
38823 }
38824- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38825- !info->fbops->fb_setcmap)) {
38826+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38827 rc = -EINVAL;
38828 goto out1;
38829 }
38830diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38831index ad93629..ca6a218 100644
38832--- a/drivers/video/fbmem.c
38833+++ b/drivers/video/fbmem.c
38834@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38835 image->dx += image->width + 8;
38836 }
38837 } else if (rotate == FB_ROTATE_UD) {
38838- for (x = 0; x < num && image->dx >= 0; x++) {
38839+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38840 info->fbops->fb_imageblit(info, image);
38841 image->dx -= image->width + 8;
38842 }
38843@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38844 image->dy += image->height + 8;
38845 }
38846 } else if (rotate == FB_ROTATE_CCW) {
38847- for (x = 0; x < num && image->dy >= 0; x++) {
38848+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38849 info->fbops->fb_imageblit(info, image);
38850 image->dy -= image->height + 8;
38851 }
38852@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
38853 int flags = info->flags;
38854 int ret = 0;
38855
38856+ pax_track_stack();
38857+
38858 if (var->activate & FB_ACTIVATE_INV_MODE) {
38859 struct fb_videomode mode1, mode2;
38860
38861@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38862 void __user *argp = (void __user *)arg;
38863 long ret = 0;
38864
38865+ pax_track_stack();
38866+
38867 switch (cmd) {
38868 case FBIOGET_VSCREENINFO:
38869 if (!lock_fb_info(info))
38870@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38871 return -EFAULT;
38872 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38873 return -EINVAL;
38874- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38875+ if (con2fb.framebuffer >= FB_MAX)
38876 return -EINVAL;
38877 if (!registered_fb[con2fb.framebuffer])
38878 request_module("fb%d", con2fb.framebuffer);
38879diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38880index 5a5d092..265c5ed 100644
38881--- a/drivers/video/geode/gx1fb_core.c
38882+++ b/drivers/video/geode/gx1fb_core.c
38883@@ -29,7 +29,7 @@ static int crt_option = 1;
38884 static char panel_option[32] = "";
38885
38886 /* Modes relevant to the GX1 (taken from modedb.c) */
38887-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38888+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38889 /* 640x480-60 VESA */
38890 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38891 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38892diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38893index 896e53d..4d87d0b 100644
38894--- a/drivers/video/gxt4500.c
38895+++ b/drivers/video/gxt4500.c
38896@@ -156,7 +156,7 @@ struct gxt4500_par {
38897 static char *mode_option;
38898
38899 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38900-static const struct fb_videomode defaultmode __devinitdata = {
38901+static const struct fb_videomode defaultmode __devinitconst = {
38902 .refresh = 60,
38903 .xres = 1280,
38904 .yres = 1024,
38905@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38906 return 0;
38907 }
38908
38909-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38910+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38911 .id = "IBM GXT4500P",
38912 .type = FB_TYPE_PACKED_PIXELS,
38913 .visual = FB_VISUAL_PSEUDOCOLOR,
38914diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38915index 7672d2e..b56437f 100644
38916--- a/drivers/video/i810/i810_accel.c
38917+++ b/drivers/video/i810/i810_accel.c
38918@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38919 }
38920 }
38921 printk("ringbuffer lockup!!!\n");
38922+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38923 i810_report_error(mmio);
38924 par->dev_flags |= LOCKUP;
38925 info->pixmap.scan_align = 1;
38926diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38927index 318f6fb..9a389c1 100644
38928--- a/drivers/video/i810/i810_main.c
38929+++ b/drivers/video/i810/i810_main.c
38930@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38931 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38932
38933 /* PCI */
38934-static const char *i810_pci_list[] __devinitdata = {
38935+static const char *i810_pci_list[] __devinitconst = {
38936 "Intel(R) 810 Framebuffer Device" ,
38937 "Intel(R) 810-DC100 Framebuffer Device" ,
38938 "Intel(R) 810E Framebuffer Device" ,
38939diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38940index de36693..3c63fc2 100644
38941--- a/drivers/video/jz4740_fb.c
38942+++ b/drivers/video/jz4740_fb.c
38943@@ -136,7 +136,7 @@ struct jzfb {
38944 uint32_t pseudo_palette[16];
38945 };
38946
38947-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38948+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38949 .id = "JZ4740 FB",
38950 .type = FB_TYPE_PACKED_PIXELS,
38951 .visual = FB_VISUAL_TRUECOLOR,
38952diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38953index 3c14e43..eafa544 100644
38954--- a/drivers/video/logo/logo_linux_clut224.ppm
38955+++ b/drivers/video/logo/logo_linux_clut224.ppm
38956@@ -1,1604 +1,1123 @@
38957 P3
38958-# Standard 224-color Linux logo
38959 80 80
38960 255
38961- 0 0 0 0 0 0 0 0 0 0 0 0
38962- 0 0 0 0 0 0 0 0 0 0 0 0
38963- 0 0 0 0 0 0 0 0 0 0 0 0
38964- 0 0 0 0 0 0 0 0 0 0 0 0
38965- 0 0 0 0 0 0 0 0 0 0 0 0
38966- 0 0 0 0 0 0 0 0 0 0 0 0
38967- 0 0 0 0 0 0 0 0 0 0 0 0
38968- 0 0 0 0 0 0 0 0 0 0 0 0
38969- 0 0 0 0 0 0 0 0 0 0 0 0
38970- 6 6 6 6 6 6 10 10 10 10 10 10
38971- 10 10 10 6 6 6 6 6 6 6 6 6
38972- 0 0 0 0 0 0 0 0 0 0 0 0
38973- 0 0 0 0 0 0 0 0 0 0 0 0
38974- 0 0 0 0 0 0 0 0 0 0 0 0
38975- 0 0 0 0 0 0 0 0 0 0 0 0
38976- 0 0 0 0 0 0 0 0 0 0 0 0
38977- 0 0 0 0 0 0 0 0 0 0 0 0
38978- 0 0 0 0 0 0 0 0 0 0 0 0
38979- 0 0 0 0 0 0 0 0 0 0 0 0
38980- 0 0 0 0 0 0 0 0 0 0 0 0
38981- 0 0 0 0 0 0 0 0 0 0 0 0
38982- 0 0 0 0 0 0 0 0 0 0 0 0
38983- 0 0 0 0 0 0 0 0 0 0 0 0
38984- 0 0 0 0 0 0 0 0 0 0 0 0
38985- 0 0 0 0 0 0 0 0 0 0 0 0
38986- 0 0 0 0 0 0 0 0 0 0 0 0
38987- 0 0 0 0 0 0 0 0 0 0 0 0
38988- 0 0 0 0 0 0 0 0 0 0 0 0
38989- 0 0 0 6 6 6 10 10 10 14 14 14
38990- 22 22 22 26 26 26 30 30 30 34 34 34
38991- 30 30 30 30 30 30 26 26 26 18 18 18
38992- 14 14 14 10 10 10 6 6 6 0 0 0
38993- 0 0 0 0 0 0 0 0 0 0 0 0
38994- 0 0 0 0 0 0 0 0 0 0 0 0
38995- 0 0 0 0 0 0 0 0 0 0 0 0
38996- 0 0 0 0 0 0 0 0 0 0 0 0
38997- 0 0 0 0 0 0 0 0 0 0 0 0
38998- 0 0 0 0 0 0 0 0 0 0 0 0
38999- 0 0 0 0 0 0 0 0 0 0 0 0
39000- 0 0 0 0 0 0 0 0 0 0 0 0
39001- 0 0 0 0 0 0 0 0 0 0 0 0
39002- 0 0 0 0 0 1 0 0 1 0 0 0
39003- 0 0 0 0 0 0 0 0 0 0 0 0
39004- 0 0 0 0 0 0 0 0 0 0 0 0
39005- 0 0 0 0 0 0 0 0 0 0 0 0
39006- 0 0 0 0 0 0 0 0 0 0 0 0
39007- 0 0 0 0 0 0 0 0 0 0 0 0
39008- 0 0 0 0 0 0 0 0 0 0 0 0
39009- 6 6 6 14 14 14 26 26 26 42 42 42
39010- 54 54 54 66 66 66 78 78 78 78 78 78
39011- 78 78 78 74 74 74 66 66 66 54 54 54
39012- 42 42 42 26 26 26 18 18 18 10 10 10
39013- 6 6 6 0 0 0 0 0 0 0 0 0
39014- 0 0 0 0 0 0 0 0 0 0 0 0
39015- 0 0 0 0 0 0 0 0 0 0 0 0
39016- 0 0 0 0 0 0 0 0 0 0 0 0
39017- 0 0 0 0 0 0 0 0 0 0 0 0
39018- 0 0 0 0 0 0 0 0 0 0 0 0
39019- 0 0 0 0 0 0 0 0 0 0 0 0
39020- 0 0 0 0 0 0 0 0 0 0 0 0
39021- 0 0 0 0 0 0 0 0 0 0 0 0
39022- 0 0 1 0 0 0 0 0 0 0 0 0
39023- 0 0 0 0 0 0 0 0 0 0 0 0
39024- 0 0 0 0 0 0 0 0 0 0 0 0
39025- 0 0 0 0 0 0 0 0 0 0 0 0
39026- 0 0 0 0 0 0 0 0 0 0 0 0
39027- 0 0 0 0 0 0 0 0 0 0 0 0
39028- 0 0 0 0 0 0 0 0 0 10 10 10
39029- 22 22 22 42 42 42 66 66 66 86 86 86
39030- 66 66 66 38 38 38 38 38 38 22 22 22
39031- 26 26 26 34 34 34 54 54 54 66 66 66
39032- 86 86 86 70 70 70 46 46 46 26 26 26
39033- 14 14 14 6 6 6 0 0 0 0 0 0
39034- 0 0 0 0 0 0 0 0 0 0 0 0
39035- 0 0 0 0 0 0 0 0 0 0 0 0
39036- 0 0 0 0 0 0 0 0 0 0 0 0
39037- 0 0 0 0 0 0 0 0 0 0 0 0
39038- 0 0 0 0 0 0 0 0 0 0 0 0
39039- 0 0 0 0 0 0 0 0 0 0 0 0
39040- 0 0 0 0 0 0 0 0 0 0 0 0
39041- 0 0 0 0 0 0 0 0 0 0 0 0
39042- 0 0 1 0 0 1 0 0 1 0 0 0
39043- 0 0 0 0 0 0 0 0 0 0 0 0
39044- 0 0 0 0 0 0 0 0 0 0 0 0
39045- 0 0 0 0 0 0 0 0 0 0 0 0
39046- 0 0 0 0 0 0 0 0 0 0 0 0
39047- 0 0 0 0 0 0 0 0 0 0 0 0
39048- 0 0 0 0 0 0 10 10 10 26 26 26
39049- 50 50 50 82 82 82 58 58 58 6 6 6
39050- 2 2 6 2 2 6 2 2 6 2 2 6
39051- 2 2 6 2 2 6 2 2 6 2 2 6
39052- 6 6 6 54 54 54 86 86 86 66 66 66
39053- 38 38 38 18 18 18 6 6 6 0 0 0
39054- 0 0 0 0 0 0 0 0 0 0 0 0
39055- 0 0 0 0 0 0 0 0 0 0 0 0
39056- 0 0 0 0 0 0 0 0 0 0 0 0
39057- 0 0 0 0 0 0 0 0 0 0 0 0
39058- 0 0 0 0 0 0 0 0 0 0 0 0
39059- 0 0 0 0 0 0 0 0 0 0 0 0
39060- 0 0 0 0 0 0 0 0 0 0 0 0
39061- 0 0 0 0 0 0 0 0 0 0 0 0
39062- 0 0 0 0 0 0 0 0 0 0 0 0
39063- 0 0 0 0 0 0 0 0 0 0 0 0
39064- 0 0 0 0 0 0 0 0 0 0 0 0
39065- 0 0 0 0 0 0 0 0 0 0 0 0
39066- 0 0 0 0 0 0 0 0 0 0 0 0
39067- 0 0 0 0 0 0 0 0 0 0 0 0
39068- 0 0 0 6 6 6 22 22 22 50 50 50
39069- 78 78 78 34 34 34 2 2 6 2 2 6
39070- 2 2 6 2 2 6 2 2 6 2 2 6
39071- 2 2 6 2 2 6 2 2 6 2 2 6
39072- 2 2 6 2 2 6 6 6 6 70 70 70
39073- 78 78 78 46 46 46 22 22 22 6 6 6
39074- 0 0 0 0 0 0 0 0 0 0 0 0
39075- 0 0 0 0 0 0 0 0 0 0 0 0
39076- 0 0 0 0 0 0 0 0 0 0 0 0
39077- 0 0 0 0 0 0 0 0 0 0 0 0
39078- 0 0 0 0 0 0 0 0 0 0 0 0
39079- 0 0 0 0 0 0 0 0 0 0 0 0
39080- 0 0 0 0 0 0 0 0 0 0 0 0
39081- 0 0 0 0 0 0 0 0 0 0 0 0
39082- 0 0 1 0 0 1 0 0 1 0 0 0
39083- 0 0 0 0 0 0 0 0 0 0 0 0
39084- 0 0 0 0 0 0 0 0 0 0 0 0
39085- 0 0 0 0 0 0 0 0 0 0 0 0
39086- 0 0 0 0 0 0 0 0 0 0 0 0
39087- 0 0 0 0 0 0 0 0 0 0 0 0
39088- 6 6 6 18 18 18 42 42 42 82 82 82
39089- 26 26 26 2 2 6 2 2 6 2 2 6
39090- 2 2 6 2 2 6 2 2 6 2 2 6
39091- 2 2 6 2 2 6 2 2 6 14 14 14
39092- 46 46 46 34 34 34 6 6 6 2 2 6
39093- 42 42 42 78 78 78 42 42 42 18 18 18
39094- 6 6 6 0 0 0 0 0 0 0 0 0
39095- 0 0 0 0 0 0 0 0 0 0 0 0
39096- 0 0 0 0 0 0 0 0 0 0 0 0
39097- 0 0 0 0 0 0 0 0 0 0 0 0
39098- 0 0 0 0 0 0 0 0 0 0 0 0
39099- 0 0 0 0 0 0 0 0 0 0 0 0
39100- 0 0 0 0 0 0 0 0 0 0 0 0
39101- 0 0 0 0 0 0 0 0 0 0 0 0
39102- 0 0 1 0 0 0 0 0 1 0 0 0
39103- 0 0 0 0 0 0 0 0 0 0 0 0
39104- 0 0 0 0 0 0 0 0 0 0 0 0
39105- 0 0 0 0 0 0 0 0 0 0 0 0
39106- 0 0 0 0 0 0 0 0 0 0 0 0
39107- 0 0 0 0 0 0 0 0 0 0 0 0
39108- 10 10 10 30 30 30 66 66 66 58 58 58
39109- 2 2 6 2 2 6 2 2 6 2 2 6
39110- 2 2 6 2 2 6 2 2 6 2 2 6
39111- 2 2 6 2 2 6 2 2 6 26 26 26
39112- 86 86 86 101 101 101 46 46 46 10 10 10
39113- 2 2 6 58 58 58 70 70 70 34 34 34
39114- 10 10 10 0 0 0 0 0 0 0 0 0
39115- 0 0 0 0 0 0 0 0 0 0 0 0
39116- 0 0 0 0 0 0 0 0 0 0 0 0
39117- 0 0 0 0 0 0 0 0 0 0 0 0
39118- 0 0 0 0 0 0 0 0 0 0 0 0
39119- 0 0 0 0 0 0 0 0 0 0 0 0
39120- 0 0 0 0 0 0 0 0 0 0 0 0
39121- 0 0 0 0 0 0 0 0 0 0 0 0
39122- 0 0 1 0 0 1 0 0 1 0 0 0
39123- 0 0 0 0 0 0 0 0 0 0 0 0
39124- 0 0 0 0 0 0 0 0 0 0 0 0
39125- 0 0 0 0 0 0 0 0 0 0 0 0
39126- 0 0 0 0 0 0 0 0 0 0 0 0
39127- 0 0 0 0 0 0 0 0 0 0 0 0
39128- 14 14 14 42 42 42 86 86 86 10 10 10
39129- 2 2 6 2 2 6 2 2 6 2 2 6
39130- 2 2 6 2 2 6 2 2 6 2 2 6
39131- 2 2 6 2 2 6 2 2 6 30 30 30
39132- 94 94 94 94 94 94 58 58 58 26 26 26
39133- 2 2 6 6 6 6 78 78 78 54 54 54
39134- 22 22 22 6 6 6 0 0 0 0 0 0
39135- 0 0 0 0 0 0 0 0 0 0 0 0
39136- 0 0 0 0 0 0 0 0 0 0 0 0
39137- 0 0 0 0 0 0 0 0 0 0 0 0
39138- 0 0 0 0 0 0 0 0 0 0 0 0
39139- 0 0 0 0 0 0 0 0 0 0 0 0
39140- 0 0 0 0 0 0 0 0 0 0 0 0
39141- 0 0 0 0 0 0 0 0 0 0 0 0
39142- 0 0 0 0 0 0 0 0 0 0 0 0
39143- 0 0 0 0 0 0 0 0 0 0 0 0
39144- 0 0 0 0 0 0 0 0 0 0 0 0
39145- 0 0 0 0 0 0 0 0 0 0 0 0
39146- 0 0 0 0 0 0 0 0 0 0 0 0
39147- 0 0 0 0 0 0 0 0 0 6 6 6
39148- 22 22 22 62 62 62 62 62 62 2 2 6
39149- 2 2 6 2 2 6 2 2 6 2 2 6
39150- 2 2 6 2 2 6 2 2 6 2 2 6
39151- 2 2 6 2 2 6 2 2 6 26 26 26
39152- 54 54 54 38 38 38 18 18 18 10 10 10
39153- 2 2 6 2 2 6 34 34 34 82 82 82
39154- 38 38 38 14 14 14 0 0 0 0 0 0
39155- 0 0 0 0 0 0 0 0 0 0 0 0
39156- 0 0 0 0 0 0 0 0 0 0 0 0
39157- 0 0 0 0 0 0 0 0 0 0 0 0
39158- 0 0 0 0 0 0 0 0 0 0 0 0
39159- 0 0 0 0 0 0 0 0 0 0 0 0
39160- 0 0 0 0 0 0 0 0 0 0 0 0
39161- 0 0 0 0 0 0 0 0 0 0 0 0
39162- 0 0 0 0 0 1 0 0 1 0 0 0
39163- 0 0 0 0 0 0 0 0 0 0 0 0
39164- 0 0 0 0 0 0 0 0 0 0 0 0
39165- 0 0 0 0 0 0 0 0 0 0 0 0
39166- 0 0 0 0 0 0 0 0 0 0 0 0
39167- 0 0 0 0 0 0 0 0 0 6 6 6
39168- 30 30 30 78 78 78 30 30 30 2 2 6
39169- 2 2 6 2 2 6 2 2 6 2 2 6
39170- 2 2 6 2 2 6 2 2 6 2 2 6
39171- 2 2 6 2 2 6 2 2 6 10 10 10
39172- 10 10 10 2 2 6 2 2 6 2 2 6
39173- 2 2 6 2 2 6 2 2 6 78 78 78
39174- 50 50 50 18 18 18 6 6 6 0 0 0
39175- 0 0 0 0 0 0 0 0 0 0 0 0
39176- 0 0 0 0 0 0 0 0 0 0 0 0
39177- 0 0 0 0 0 0 0 0 0 0 0 0
39178- 0 0 0 0 0 0 0 0 0 0 0 0
39179- 0 0 0 0 0 0 0 0 0 0 0 0
39180- 0 0 0 0 0 0 0 0 0 0 0 0
39181- 0 0 0 0 0 0 0 0 0 0 0 0
39182- 0 0 1 0 0 0 0 0 0 0 0 0
39183- 0 0 0 0 0 0 0 0 0 0 0 0
39184- 0 0 0 0 0 0 0 0 0 0 0 0
39185- 0 0 0 0 0 0 0 0 0 0 0 0
39186- 0 0 0 0 0 0 0 0 0 0 0 0
39187- 0 0 0 0 0 0 0 0 0 10 10 10
39188- 38 38 38 86 86 86 14 14 14 2 2 6
39189- 2 2 6 2 2 6 2 2 6 2 2 6
39190- 2 2 6 2 2 6 2 2 6 2 2 6
39191- 2 2 6 2 2 6 2 2 6 2 2 6
39192- 2 2 6 2 2 6 2 2 6 2 2 6
39193- 2 2 6 2 2 6 2 2 6 54 54 54
39194- 66 66 66 26 26 26 6 6 6 0 0 0
39195- 0 0 0 0 0 0 0 0 0 0 0 0
39196- 0 0 0 0 0 0 0 0 0 0 0 0
39197- 0 0 0 0 0 0 0 0 0 0 0 0
39198- 0 0 0 0 0 0 0 0 0 0 0 0
39199- 0 0 0 0 0 0 0 0 0 0 0 0
39200- 0 0 0 0 0 0 0 0 0 0 0 0
39201- 0 0 0 0 0 0 0 0 0 0 0 0
39202- 0 0 0 0 0 1 0 0 1 0 0 0
39203- 0 0 0 0 0 0 0 0 0 0 0 0
39204- 0 0 0 0 0 0 0 0 0 0 0 0
39205- 0 0 0 0 0 0 0 0 0 0 0 0
39206- 0 0 0 0 0 0 0 0 0 0 0 0
39207- 0 0 0 0 0 0 0 0 0 14 14 14
39208- 42 42 42 82 82 82 2 2 6 2 2 6
39209- 2 2 6 6 6 6 10 10 10 2 2 6
39210- 2 2 6 2 2 6 2 2 6 2 2 6
39211- 2 2 6 2 2 6 2 2 6 6 6 6
39212- 14 14 14 10 10 10 2 2 6 2 2 6
39213- 2 2 6 2 2 6 2 2 6 18 18 18
39214- 82 82 82 34 34 34 10 10 10 0 0 0
39215- 0 0 0 0 0 0 0 0 0 0 0 0
39216- 0 0 0 0 0 0 0 0 0 0 0 0
39217- 0 0 0 0 0 0 0 0 0 0 0 0
39218- 0 0 0 0 0 0 0 0 0 0 0 0
39219- 0 0 0 0 0 0 0 0 0 0 0 0
39220- 0 0 0 0 0 0 0 0 0 0 0 0
39221- 0 0 0 0 0 0 0 0 0 0 0 0
39222- 0 0 1 0 0 0 0 0 0 0 0 0
39223- 0 0 0 0 0 0 0 0 0 0 0 0
39224- 0 0 0 0 0 0 0 0 0 0 0 0
39225- 0 0 0 0 0 0 0 0 0 0 0 0
39226- 0 0 0 0 0 0 0 0 0 0 0 0
39227- 0 0 0 0 0 0 0 0 0 14 14 14
39228- 46 46 46 86 86 86 2 2 6 2 2 6
39229- 6 6 6 6 6 6 22 22 22 34 34 34
39230- 6 6 6 2 2 6 2 2 6 2 2 6
39231- 2 2 6 2 2 6 18 18 18 34 34 34
39232- 10 10 10 50 50 50 22 22 22 2 2 6
39233- 2 2 6 2 2 6 2 2 6 10 10 10
39234- 86 86 86 42 42 42 14 14 14 0 0 0
39235- 0 0 0 0 0 0 0 0 0 0 0 0
39236- 0 0 0 0 0 0 0 0 0 0 0 0
39237- 0 0 0 0 0 0 0 0 0 0 0 0
39238- 0 0 0 0 0 0 0 0 0 0 0 0
39239- 0 0 0 0 0 0 0 0 0 0 0 0
39240- 0 0 0 0 0 0 0 0 0 0 0 0
39241- 0 0 0 0 0 0 0 0 0 0 0 0
39242- 0 0 1 0 0 1 0 0 1 0 0 0
39243- 0 0 0 0 0 0 0 0 0 0 0 0
39244- 0 0 0 0 0 0 0 0 0 0 0 0
39245- 0 0 0 0 0 0 0 0 0 0 0 0
39246- 0 0 0 0 0 0 0 0 0 0 0 0
39247- 0 0 0 0 0 0 0 0 0 14 14 14
39248- 46 46 46 86 86 86 2 2 6 2 2 6
39249- 38 38 38 116 116 116 94 94 94 22 22 22
39250- 22 22 22 2 2 6 2 2 6 2 2 6
39251- 14 14 14 86 86 86 138 138 138 162 162 162
39252-154 154 154 38 38 38 26 26 26 6 6 6
39253- 2 2 6 2 2 6 2 2 6 2 2 6
39254- 86 86 86 46 46 46 14 14 14 0 0 0
39255- 0 0 0 0 0 0 0 0 0 0 0 0
39256- 0 0 0 0 0 0 0 0 0 0 0 0
39257- 0 0 0 0 0 0 0 0 0 0 0 0
39258- 0 0 0 0 0 0 0 0 0 0 0 0
39259- 0 0 0 0 0 0 0 0 0 0 0 0
39260- 0 0 0 0 0 0 0 0 0 0 0 0
39261- 0 0 0 0 0 0 0 0 0 0 0 0
39262- 0 0 0 0 0 0 0 0 0 0 0 0
39263- 0 0 0 0 0 0 0 0 0 0 0 0
39264- 0 0 0 0 0 0 0 0 0 0 0 0
39265- 0 0 0 0 0 0 0 0 0 0 0 0
39266- 0 0 0 0 0 0 0 0 0 0 0 0
39267- 0 0 0 0 0 0 0 0 0 14 14 14
39268- 46 46 46 86 86 86 2 2 6 14 14 14
39269-134 134 134 198 198 198 195 195 195 116 116 116
39270- 10 10 10 2 2 6 2 2 6 6 6 6
39271-101 98 89 187 187 187 210 210 210 218 218 218
39272-214 214 214 134 134 134 14 14 14 6 6 6
39273- 2 2 6 2 2 6 2 2 6 2 2 6
39274- 86 86 86 50 50 50 18 18 18 6 6 6
39275- 0 0 0 0 0 0 0 0 0 0 0 0
39276- 0 0 0 0 0 0 0 0 0 0 0 0
39277- 0 0 0 0 0 0 0 0 0 0 0 0
39278- 0 0 0 0 0 0 0 0 0 0 0 0
39279- 0 0 0 0 0 0 0 0 0 0 0 0
39280- 0 0 0 0 0 0 0 0 0 0 0 0
39281- 0 0 0 0 0 0 0 0 1 0 0 0
39282- 0 0 1 0 0 1 0 0 1 0 0 0
39283- 0 0 0 0 0 0 0 0 0 0 0 0
39284- 0 0 0 0 0 0 0 0 0 0 0 0
39285- 0 0 0 0 0 0 0 0 0 0 0 0
39286- 0 0 0 0 0 0 0 0 0 0 0 0
39287- 0 0 0 0 0 0 0 0 0 14 14 14
39288- 46 46 46 86 86 86 2 2 6 54 54 54
39289-218 218 218 195 195 195 226 226 226 246 246 246
39290- 58 58 58 2 2 6 2 2 6 30 30 30
39291-210 210 210 253 253 253 174 174 174 123 123 123
39292-221 221 221 234 234 234 74 74 74 2 2 6
39293- 2 2 6 2 2 6 2 2 6 2 2 6
39294- 70 70 70 58 58 58 22 22 22 6 6 6
39295- 0 0 0 0 0 0 0 0 0 0 0 0
39296- 0 0 0 0 0 0 0 0 0 0 0 0
39297- 0 0 0 0 0 0 0 0 0 0 0 0
39298- 0 0 0 0 0 0 0 0 0 0 0 0
39299- 0 0 0 0 0 0 0 0 0 0 0 0
39300- 0 0 0 0 0 0 0 0 0 0 0 0
39301- 0 0 0 0 0 0 0 0 0 0 0 0
39302- 0 0 0 0 0 0 0 0 0 0 0 0
39303- 0 0 0 0 0 0 0 0 0 0 0 0
39304- 0 0 0 0 0 0 0 0 0 0 0 0
39305- 0 0 0 0 0 0 0 0 0 0 0 0
39306- 0 0 0 0 0 0 0 0 0 0 0 0
39307- 0 0 0 0 0 0 0 0 0 14 14 14
39308- 46 46 46 82 82 82 2 2 6 106 106 106
39309-170 170 170 26 26 26 86 86 86 226 226 226
39310-123 123 123 10 10 10 14 14 14 46 46 46
39311-231 231 231 190 190 190 6 6 6 70 70 70
39312- 90 90 90 238 238 238 158 158 158 2 2 6
39313- 2 2 6 2 2 6 2 2 6 2 2 6
39314- 70 70 70 58 58 58 22 22 22 6 6 6
39315- 0 0 0 0 0 0 0 0 0 0 0 0
39316- 0 0 0 0 0 0 0 0 0 0 0 0
39317- 0 0 0 0 0 0 0 0 0 0 0 0
39318- 0 0 0 0 0 0 0 0 0 0 0 0
39319- 0 0 0 0 0 0 0 0 0 0 0 0
39320- 0 0 0 0 0 0 0 0 0 0 0 0
39321- 0 0 0 0 0 0 0 0 1 0 0 0
39322- 0 0 1 0 0 1 0 0 1 0 0 0
39323- 0 0 0 0 0 0 0 0 0 0 0 0
39324- 0 0 0 0 0 0 0 0 0 0 0 0
39325- 0 0 0 0 0 0 0 0 0 0 0 0
39326- 0 0 0 0 0 0 0 0 0 0 0 0
39327- 0 0 0 0 0 0 0 0 0 14 14 14
39328- 42 42 42 86 86 86 6 6 6 116 116 116
39329-106 106 106 6 6 6 70 70 70 149 149 149
39330-128 128 128 18 18 18 38 38 38 54 54 54
39331-221 221 221 106 106 106 2 2 6 14 14 14
39332- 46 46 46 190 190 190 198 198 198 2 2 6
39333- 2 2 6 2 2 6 2 2 6 2 2 6
39334- 74 74 74 62 62 62 22 22 22 6 6 6
39335- 0 0 0 0 0 0 0 0 0 0 0 0
39336- 0 0 0 0 0 0 0 0 0 0 0 0
39337- 0 0 0 0 0 0 0 0 0 0 0 0
39338- 0 0 0 0 0 0 0 0 0 0 0 0
39339- 0 0 0 0 0 0 0 0 0 0 0 0
39340- 0 0 0 0 0 0 0 0 0 0 0 0
39341- 0 0 0 0 0 0 0 0 1 0 0 0
39342- 0 0 1 0 0 0 0 0 1 0 0 0
39343- 0 0 0 0 0 0 0 0 0 0 0 0
39344- 0 0 0 0 0 0 0 0 0 0 0 0
39345- 0 0 0 0 0 0 0 0 0 0 0 0
39346- 0 0 0 0 0 0 0 0 0 0 0 0
39347- 0 0 0 0 0 0 0 0 0 14 14 14
39348- 42 42 42 94 94 94 14 14 14 101 101 101
39349-128 128 128 2 2 6 18 18 18 116 116 116
39350-118 98 46 121 92 8 121 92 8 98 78 10
39351-162 162 162 106 106 106 2 2 6 2 2 6
39352- 2 2 6 195 195 195 195 195 195 6 6 6
39353- 2 2 6 2 2 6 2 2 6 2 2 6
39354- 74 74 74 62 62 62 22 22 22 6 6 6
39355- 0 0 0 0 0 0 0 0 0 0 0 0
39356- 0 0 0 0 0 0 0 0 0 0 0 0
39357- 0 0 0 0 0 0 0 0 0 0 0 0
39358- 0 0 0 0 0 0 0 0 0 0 0 0
39359- 0 0 0 0 0 0 0 0 0 0 0 0
39360- 0 0 0 0 0 0 0 0 0 0 0 0
39361- 0 0 0 0 0 0 0 0 1 0 0 1
39362- 0 0 1 0 0 0 0 0 1 0 0 0
39363- 0 0 0 0 0 0 0 0 0 0 0 0
39364- 0 0 0 0 0 0 0 0 0 0 0 0
39365- 0 0 0 0 0 0 0 0 0 0 0 0
39366- 0 0 0 0 0 0 0 0 0 0 0 0
39367- 0 0 0 0 0 0 0 0 0 10 10 10
39368- 38 38 38 90 90 90 14 14 14 58 58 58
39369-210 210 210 26 26 26 54 38 6 154 114 10
39370-226 170 11 236 186 11 225 175 15 184 144 12
39371-215 174 15 175 146 61 37 26 9 2 2 6
39372- 70 70 70 246 246 246 138 138 138 2 2 6
39373- 2 2 6 2 2 6 2 2 6 2 2 6
39374- 70 70 70 66 66 66 26 26 26 6 6 6
39375- 0 0 0 0 0 0 0 0 0 0 0 0
39376- 0 0 0 0 0 0 0 0 0 0 0 0
39377- 0 0 0 0 0 0 0 0 0 0 0 0
39378- 0 0 0 0 0 0 0 0 0 0 0 0
39379- 0 0 0 0 0 0 0 0 0 0 0 0
39380- 0 0 0 0 0 0 0 0 0 0 0 0
39381- 0 0 0 0 0 0 0 0 0 0 0 0
39382- 0 0 0 0 0 0 0 0 0 0 0 0
39383- 0 0 0 0 0 0 0 0 0 0 0 0
39384- 0 0 0 0 0 0 0 0 0 0 0 0
39385- 0 0 0 0 0 0 0 0 0 0 0 0
39386- 0 0 0 0 0 0 0 0 0 0 0 0
39387- 0 0 0 0 0 0 0 0 0 10 10 10
39388- 38 38 38 86 86 86 14 14 14 10 10 10
39389-195 195 195 188 164 115 192 133 9 225 175 15
39390-239 182 13 234 190 10 232 195 16 232 200 30
39391-245 207 45 241 208 19 232 195 16 184 144 12
39392-218 194 134 211 206 186 42 42 42 2 2 6
39393- 2 2 6 2 2 6 2 2 6 2 2 6
39394- 50 50 50 74 74 74 30 30 30 6 6 6
39395- 0 0 0 0 0 0 0 0 0 0 0 0
39396- 0 0 0 0 0 0 0 0 0 0 0 0
39397- 0 0 0 0 0 0 0 0 0 0 0 0
39398- 0 0 0 0 0 0 0 0 0 0 0 0
39399- 0 0 0 0 0 0 0 0 0 0 0 0
39400- 0 0 0 0 0 0 0 0 0 0 0 0
39401- 0 0 0 0 0 0 0 0 0 0 0 0
39402- 0 0 0 0 0 0 0 0 0 0 0 0
39403- 0 0 0 0 0 0 0 0 0 0 0 0
39404- 0 0 0 0 0 0 0 0 0 0 0 0
39405- 0 0 0 0 0 0 0 0 0 0 0 0
39406- 0 0 0 0 0 0 0 0 0 0 0 0
39407- 0 0 0 0 0 0 0 0 0 10 10 10
39408- 34 34 34 86 86 86 14 14 14 2 2 6
39409-121 87 25 192 133 9 219 162 10 239 182 13
39410-236 186 11 232 195 16 241 208 19 244 214 54
39411-246 218 60 246 218 38 246 215 20 241 208 19
39412-241 208 19 226 184 13 121 87 25 2 2 6
39413- 2 2 6 2 2 6 2 2 6 2 2 6
39414- 50 50 50 82 82 82 34 34 34 10 10 10
39415- 0 0 0 0 0 0 0 0 0 0 0 0
39416- 0 0 0 0 0 0 0 0 0 0 0 0
39417- 0 0 0 0 0 0 0 0 0 0 0 0
39418- 0 0 0 0 0 0 0 0 0 0 0 0
39419- 0 0 0 0 0 0 0 0 0 0 0 0
39420- 0 0 0 0 0 0 0 0 0 0 0 0
39421- 0 0 0 0 0 0 0 0 0 0 0 0
39422- 0 0 0 0 0 0 0 0 0 0 0 0
39423- 0 0 0 0 0 0 0 0 0 0 0 0
39424- 0 0 0 0 0 0 0 0 0 0 0 0
39425- 0 0 0 0 0 0 0 0 0 0 0 0
39426- 0 0 0 0 0 0 0 0 0 0 0 0
39427- 0 0 0 0 0 0 0 0 0 10 10 10
39428- 34 34 34 82 82 82 30 30 30 61 42 6
39429-180 123 7 206 145 10 230 174 11 239 182 13
39430-234 190 10 238 202 15 241 208 19 246 218 74
39431-246 218 38 246 215 20 246 215 20 246 215 20
39432-226 184 13 215 174 15 184 144 12 6 6 6
39433- 2 2 6 2 2 6 2 2 6 2 2 6
39434- 26 26 26 94 94 94 42 42 42 14 14 14
39435- 0 0 0 0 0 0 0 0 0 0 0 0
39436- 0 0 0 0 0 0 0 0 0 0 0 0
39437- 0 0 0 0 0 0 0 0 0 0 0 0
39438- 0 0 0 0 0 0 0 0 0 0 0 0
39439- 0 0 0 0 0 0 0 0 0 0 0 0
39440- 0 0 0 0 0 0 0 0 0 0 0 0
39441- 0 0 0 0 0 0 0 0 0 0 0 0
39442- 0 0 0 0 0 0 0 0 0 0 0 0
39443- 0 0 0 0 0 0 0 0 0 0 0 0
39444- 0 0 0 0 0 0 0 0 0 0 0 0
39445- 0 0 0 0 0 0 0 0 0 0 0 0
39446- 0 0 0 0 0 0 0 0 0 0 0 0
39447- 0 0 0 0 0 0 0 0 0 10 10 10
39448- 30 30 30 78 78 78 50 50 50 104 69 6
39449-192 133 9 216 158 10 236 178 12 236 186 11
39450-232 195 16 241 208 19 244 214 54 245 215 43
39451-246 215 20 246 215 20 241 208 19 198 155 10
39452-200 144 11 216 158 10 156 118 10 2 2 6
39453- 2 2 6 2 2 6 2 2 6 2 2 6
39454- 6 6 6 90 90 90 54 54 54 18 18 18
39455- 6 6 6 0 0 0 0 0 0 0 0 0
39456- 0 0 0 0 0 0 0 0 0 0 0 0
39457- 0 0 0 0 0 0 0 0 0 0 0 0
39458- 0 0 0 0 0 0 0 0 0 0 0 0
39459- 0 0 0 0 0 0 0 0 0 0 0 0
39460- 0 0 0 0 0 0 0 0 0 0 0 0
39461- 0 0 0 0 0 0 0 0 0 0 0 0
39462- 0 0 0 0 0 0 0 0 0 0 0 0
39463- 0 0 0 0 0 0 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 0 0 0 0 0 0 0 0 0 0 0 0
39466- 0 0 0 0 0 0 0 0 0 0 0 0
39467- 0 0 0 0 0 0 0 0 0 10 10 10
39468- 30 30 30 78 78 78 46 46 46 22 22 22
39469-137 92 6 210 162 10 239 182 13 238 190 10
39470-238 202 15 241 208 19 246 215 20 246 215 20
39471-241 208 19 203 166 17 185 133 11 210 150 10
39472-216 158 10 210 150 10 102 78 10 2 2 6
39473- 6 6 6 54 54 54 14 14 14 2 2 6
39474- 2 2 6 62 62 62 74 74 74 30 30 30
39475- 10 10 10 0 0 0 0 0 0 0 0 0
39476- 0 0 0 0 0 0 0 0 0 0 0 0
39477- 0 0 0 0 0 0 0 0 0 0 0 0
39478- 0 0 0 0 0 0 0 0 0 0 0 0
39479- 0 0 0 0 0 0 0 0 0 0 0 0
39480- 0 0 0 0 0 0 0 0 0 0 0 0
39481- 0 0 0 0 0 0 0 0 0 0 0 0
39482- 0 0 0 0 0 0 0 0 0 0 0 0
39483- 0 0 0 0 0 0 0 0 0 0 0 0
39484- 0 0 0 0 0 0 0 0 0 0 0 0
39485- 0 0 0 0 0 0 0 0 0 0 0 0
39486- 0 0 0 0 0 0 0 0 0 0 0 0
39487- 0 0 0 0 0 0 0 0 0 10 10 10
39488- 34 34 34 78 78 78 50 50 50 6 6 6
39489- 94 70 30 139 102 15 190 146 13 226 184 13
39490-232 200 30 232 195 16 215 174 15 190 146 13
39491-168 122 10 192 133 9 210 150 10 213 154 11
39492-202 150 34 182 157 106 101 98 89 2 2 6
39493- 2 2 6 78 78 78 116 116 116 58 58 58
39494- 2 2 6 22 22 22 90 90 90 46 46 46
39495- 18 18 18 6 6 6 0 0 0 0 0 0
39496- 0 0 0 0 0 0 0 0 0 0 0 0
39497- 0 0 0 0 0 0 0 0 0 0 0 0
39498- 0 0 0 0 0 0 0 0 0 0 0 0
39499- 0 0 0 0 0 0 0 0 0 0 0 0
39500- 0 0 0 0 0 0 0 0 0 0 0 0
39501- 0 0 0 0 0 0 0 0 0 0 0 0
39502- 0 0 0 0 0 0 0 0 0 0 0 0
39503- 0 0 0 0 0 0 0 0 0 0 0 0
39504- 0 0 0 0 0 0 0 0 0 0 0 0
39505- 0 0 0 0 0 0 0 0 0 0 0 0
39506- 0 0 0 0 0 0 0 0 0 0 0 0
39507- 0 0 0 0 0 0 0 0 0 10 10 10
39508- 38 38 38 86 86 86 50 50 50 6 6 6
39509-128 128 128 174 154 114 156 107 11 168 122 10
39510-198 155 10 184 144 12 197 138 11 200 144 11
39511-206 145 10 206 145 10 197 138 11 188 164 115
39512-195 195 195 198 198 198 174 174 174 14 14 14
39513- 2 2 6 22 22 22 116 116 116 116 116 116
39514- 22 22 22 2 2 6 74 74 74 70 70 70
39515- 30 30 30 10 10 10 0 0 0 0 0 0
39516- 0 0 0 0 0 0 0 0 0 0 0 0
39517- 0 0 0 0 0 0 0 0 0 0 0 0
39518- 0 0 0 0 0 0 0 0 0 0 0 0
39519- 0 0 0 0 0 0 0 0 0 0 0 0
39520- 0 0 0 0 0 0 0 0 0 0 0 0
39521- 0 0 0 0 0 0 0 0 0 0 0 0
39522- 0 0 0 0 0 0 0 0 0 0 0 0
39523- 0 0 0 0 0 0 0 0 0 0 0 0
39524- 0 0 0 0 0 0 0 0 0 0 0 0
39525- 0 0 0 0 0 0 0 0 0 0 0 0
39526- 0 0 0 0 0 0 0 0 0 0 0 0
39527- 0 0 0 0 0 0 6 6 6 18 18 18
39528- 50 50 50 101 101 101 26 26 26 10 10 10
39529-138 138 138 190 190 190 174 154 114 156 107 11
39530-197 138 11 200 144 11 197 138 11 192 133 9
39531-180 123 7 190 142 34 190 178 144 187 187 187
39532-202 202 202 221 221 221 214 214 214 66 66 66
39533- 2 2 6 2 2 6 50 50 50 62 62 62
39534- 6 6 6 2 2 6 10 10 10 90 90 90
39535- 50 50 50 18 18 18 6 6 6 0 0 0
39536- 0 0 0 0 0 0 0 0 0 0 0 0
39537- 0 0 0 0 0 0 0 0 0 0 0 0
39538- 0 0 0 0 0 0 0 0 0 0 0 0
39539- 0 0 0 0 0 0 0 0 0 0 0 0
39540- 0 0 0 0 0 0 0 0 0 0 0 0
39541- 0 0 0 0 0 0 0 0 0 0 0 0
39542- 0 0 0 0 0 0 0 0 0 0 0 0
39543- 0 0 0 0 0 0 0 0 0 0 0 0
39544- 0 0 0 0 0 0 0 0 0 0 0 0
39545- 0 0 0 0 0 0 0 0 0 0 0 0
39546- 0 0 0 0 0 0 0 0 0 0 0 0
39547- 0 0 0 0 0 0 10 10 10 34 34 34
39548- 74 74 74 74 74 74 2 2 6 6 6 6
39549-144 144 144 198 198 198 190 190 190 178 166 146
39550-154 121 60 156 107 11 156 107 11 168 124 44
39551-174 154 114 187 187 187 190 190 190 210 210 210
39552-246 246 246 253 253 253 253 253 253 182 182 182
39553- 6 6 6 2 2 6 2 2 6 2 2 6
39554- 2 2 6 2 2 6 2 2 6 62 62 62
39555- 74 74 74 34 34 34 14 14 14 0 0 0
39556- 0 0 0 0 0 0 0 0 0 0 0 0
39557- 0 0 0 0 0 0 0 0 0 0 0 0
39558- 0 0 0 0 0 0 0 0 0 0 0 0
39559- 0 0 0 0 0 0 0 0 0 0 0 0
39560- 0 0 0 0 0 0 0 0 0 0 0 0
39561- 0 0 0 0 0 0 0 0 0 0 0 0
39562- 0 0 0 0 0 0 0 0 0 0 0 0
39563- 0 0 0 0 0 0 0 0 0 0 0 0
39564- 0 0 0 0 0 0 0 0 0 0 0 0
39565- 0 0 0 0 0 0 0 0 0 0 0 0
39566- 0 0 0 0 0 0 0 0 0 0 0 0
39567- 0 0 0 10 10 10 22 22 22 54 54 54
39568- 94 94 94 18 18 18 2 2 6 46 46 46
39569-234 234 234 221 221 221 190 190 190 190 190 190
39570-190 190 190 187 187 187 187 187 187 190 190 190
39571-190 190 190 195 195 195 214 214 214 242 242 242
39572-253 253 253 253 253 253 253 253 253 253 253 253
39573- 82 82 82 2 2 6 2 2 6 2 2 6
39574- 2 2 6 2 2 6 2 2 6 14 14 14
39575- 86 86 86 54 54 54 22 22 22 6 6 6
39576- 0 0 0 0 0 0 0 0 0 0 0 0
39577- 0 0 0 0 0 0 0 0 0 0 0 0
39578- 0 0 0 0 0 0 0 0 0 0 0 0
39579- 0 0 0 0 0 0 0 0 0 0 0 0
39580- 0 0 0 0 0 0 0 0 0 0 0 0
39581- 0 0 0 0 0 0 0 0 0 0 0 0
39582- 0 0 0 0 0 0 0 0 0 0 0 0
39583- 0 0 0 0 0 0 0 0 0 0 0 0
39584- 0 0 0 0 0 0 0 0 0 0 0 0
39585- 0 0 0 0 0 0 0 0 0 0 0 0
39586- 0 0 0 0 0 0 0 0 0 0 0 0
39587- 6 6 6 18 18 18 46 46 46 90 90 90
39588- 46 46 46 18 18 18 6 6 6 182 182 182
39589-253 253 253 246 246 246 206 206 206 190 190 190
39590-190 190 190 190 190 190 190 190 190 190 190 190
39591-206 206 206 231 231 231 250 250 250 253 253 253
39592-253 253 253 253 253 253 253 253 253 253 253 253
39593-202 202 202 14 14 14 2 2 6 2 2 6
39594- 2 2 6 2 2 6 2 2 6 2 2 6
39595- 42 42 42 86 86 86 42 42 42 18 18 18
39596- 6 6 6 0 0 0 0 0 0 0 0 0
39597- 0 0 0 0 0 0 0 0 0 0 0 0
39598- 0 0 0 0 0 0 0 0 0 0 0 0
39599- 0 0 0 0 0 0 0 0 0 0 0 0
39600- 0 0 0 0 0 0 0 0 0 0 0 0
39601- 0 0 0 0 0 0 0 0 0 0 0 0
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 0 0 0 0 0 0 0 0 0 6 6 6
39607- 14 14 14 38 38 38 74 74 74 66 66 66
39608- 2 2 6 6 6 6 90 90 90 250 250 250
39609-253 253 253 253 253 253 238 238 238 198 198 198
39610-190 190 190 190 190 190 195 195 195 221 221 221
39611-246 246 246 253 253 253 253 253 253 253 253 253
39612-253 253 253 253 253 253 253 253 253 253 253 253
39613-253 253 253 82 82 82 2 2 6 2 2 6
39614- 2 2 6 2 2 6 2 2 6 2 2 6
39615- 2 2 6 78 78 78 70 70 70 34 34 34
39616- 14 14 14 6 6 6 0 0 0 0 0 0
39617- 0 0 0 0 0 0 0 0 0 0 0 0
39618- 0 0 0 0 0 0 0 0 0 0 0 0
39619- 0 0 0 0 0 0 0 0 0 0 0 0
39620- 0 0 0 0 0 0 0 0 0 0 0 0
39621- 0 0 0 0 0 0 0 0 0 0 0 0
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 0 0 0
39626- 0 0 0 0 0 0 0 0 0 14 14 14
39627- 34 34 34 66 66 66 78 78 78 6 6 6
39628- 2 2 6 18 18 18 218 218 218 253 253 253
39629-253 253 253 253 253 253 253 253 253 246 246 246
39630-226 226 226 231 231 231 246 246 246 253 253 253
39631-253 253 253 253 253 253 253 253 253 253 253 253
39632-253 253 253 253 253 253 253 253 253 253 253 253
39633-253 253 253 178 178 178 2 2 6 2 2 6
39634- 2 2 6 2 2 6 2 2 6 2 2 6
39635- 2 2 6 18 18 18 90 90 90 62 62 62
39636- 30 30 30 10 10 10 0 0 0 0 0 0
39637- 0 0 0 0 0 0 0 0 0 0 0 0
39638- 0 0 0 0 0 0 0 0 0 0 0 0
39639- 0 0 0 0 0 0 0 0 0 0 0 0
39640- 0 0 0 0 0 0 0 0 0 0 0 0
39641- 0 0 0 0 0 0 0 0 0 0 0 0
39642- 0 0 0 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 10 10 10 26 26 26
39647- 58 58 58 90 90 90 18 18 18 2 2 6
39648- 2 2 6 110 110 110 253 253 253 253 253 253
39649-253 253 253 253 253 253 253 253 253 253 253 253
39650-250 250 250 253 253 253 253 253 253 253 253 253
39651-253 253 253 253 253 253 253 253 253 253 253 253
39652-253 253 253 253 253 253 253 253 253 253 253 253
39653-253 253 253 231 231 231 18 18 18 2 2 6
39654- 2 2 6 2 2 6 2 2 6 2 2 6
39655- 2 2 6 2 2 6 18 18 18 94 94 94
39656- 54 54 54 26 26 26 10 10 10 0 0 0
39657- 0 0 0 0 0 0 0 0 0 0 0 0
39658- 0 0 0 0 0 0 0 0 0 0 0 0
39659- 0 0 0 0 0 0 0 0 0 0 0 0
39660- 0 0 0 0 0 0 0 0 0 0 0 0
39661- 0 0 0 0 0 0 0 0 0 0 0 0
39662- 0 0 0 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 6 6 6 22 22 22 50 50 50
39667- 90 90 90 26 26 26 2 2 6 2 2 6
39668- 14 14 14 195 195 195 250 250 250 253 253 253
39669-253 253 253 253 253 253 253 253 253 253 253 253
39670-253 253 253 253 253 253 253 253 253 253 253 253
39671-253 253 253 253 253 253 253 253 253 253 253 253
39672-253 253 253 253 253 253 253 253 253 253 253 253
39673-250 250 250 242 242 242 54 54 54 2 2 6
39674- 2 2 6 2 2 6 2 2 6 2 2 6
39675- 2 2 6 2 2 6 2 2 6 38 38 38
39676- 86 86 86 50 50 50 22 22 22 6 6 6
39677- 0 0 0 0 0 0 0 0 0 0 0 0
39678- 0 0 0 0 0 0 0 0 0 0 0 0
39679- 0 0 0 0 0 0 0 0 0 0 0 0
39680- 0 0 0 0 0 0 0 0 0 0 0 0
39681- 0 0 0 0 0 0 0 0 0 0 0 0
39682- 0 0 0 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 6 6 6 14 14 14 38 38 38 82 82 82
39687- 34 34 34 2 2 6 2 2 6 2 2 6
39688- 42 42 42 195 195 195 246 246 246 253 253 253
39689-253 253 253 253 253 253 253 253 253 250 250 250
39690-242 242 242 242 242 242 250 250 250 253 253 253
39691-253 253 253 253 253 253 253 253 253 253 253 253
39692-253 253 253 250 250 250 246 246 246 238 238 238
39693-226 226 226 231 231 231 101 101 101 6 6 6
39694- 2 2 6 2 2 6 2 2 6 2 2 6
39695- 2 2 6 2 2 6 2 2 6 2 2 6
39696- 38 38 38 82 82 82 42 42 42 14 14 14
39697- 6 6 6 0 0 0 0 0 0 0 0 0
39698- 0 0 0 0 0 0 0 0 0 0 0 0
39699- 0 0 0 0 0 0 0 0 0 0 0 0
39700- 0 0 0 0 0 0 0 0 0 0 0 0
39701- 0 0 0 0 0 0 0 0 0 0 0 0
39702- 0 0 0 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 10 10 10 26 26 26 62 62 62 66 66 66
39707- 2 2 6 2 2 6 2 2 6 6 6 6
39708- 70 70 70 170 170 170 206 206 206 234 234 234
39709-246 246 246 250 250 250 250 250 250 238 238 238
39710-226 226 226 231 231 231 238 238 238 250 250 250
39711-250 250 250 250 250 250 246 246 246 231 231 231
39712-214 214 214 206 206 206 202 202 202 202 202 202
39713-198 198 198 202 202 202 182 182 182 18 18 18
39714- 2 2 6 2 2 6 2 2 6 2 2 6
39715- 2 2 6 2 2 6 2 2 6 2 2 6
39716- 2 2 6 62 62 62 66 66 66 30 30 30
39717- 10 10 10 0 0 0 0 0 0 0 0 0
39718- 0 0 0 0 0 0 0 0 0 0 0 0
39719- 0 0 0 0 0 0 0 0 0 0 0 0
39720- 0 0 0 0 0 0 0 0 0 0 0 0
39721- 0 0 0 0 0 0 0 0 0 0 0 0
39722- 0 0 0 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 14 14 14 42 42 42 82 82 82 18 18 18
39727- 2 2 6 2 2 6 2 2 6 10 10 10
39728- 94 94 94 182 182 182 218 218 218 242 242 242
39729-250 250 250 253 253 253 253 253 253 250 250 250
39730-234 234 234 253 253 253 253 253 253 253 253 253
39731-253 253 253 253 253 253 253 253 253 246 246 246
39732-238 238 238 226 226 226 210 210 210 202 202 202
39733-195 195 195 195 195 195 210 210 210 158 158 158
39734- 6 6 6 14 14 14 50 50 50 14 14 14
39735- 2 2 6 2 2 6 2 2 6 2 2 6
39736- 2 2 6 6 6 6 86 86 86 46 46 46
39737- 18 18 18 6 6 6 0 0 0 0 0 0
39738- 0 0 0 0 0 0 0 0 0 0 0 0
39739- 0 0 0 0 0 0 0 0 0 0 0 0
39740- 0 0 0 0 0 0 0 0 0 0 0 0
39741- 0 0 0 0 0 0 0 0 0 0 0 0
39742- 0 0 0 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 6 6 6
39746- 22 22 22 54 54 54 70 70 70 2 2 6
39747- 2 2 6 10 10 10 2 2 6 22 22 22
39748-166 166 166 231 231 231 250 250 250 253 253 253
39749-253 253 253 253 253 253 253 253 253 250 250 250
39750-242 242 242 253 253 253 253 253 253 253 253 253
39751-253 253 253 253 253 253 253 253 253 253 253 253
39752-253 253 253 253 253 253 253 253 253 246 246 246
39753-231 231 231 206 206 206 198 198 198 226 226 226
39754- 94 94 94 2 2 6 6 6 6 38 38 38
39755- 30 30 30 2 2 6 2 2 6 2 2 6
39756- 2 2 6 2 2 6 62 62 62 66 66 66
39757- 26 26 26 10 10 10 0 0 0 0 0 0
39758- 0 0 0 0 0 0 0 0 0 0 0 0
39759- 0 0 0 0 0 0 0 0 0 0 0 0
39760- 0 0 0 0 0 0 0 0 0 0 0 0
39761- 0 0 0 0 0 0 0 0 0 0 0 0
39762- 0 0 0 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 10 10 10
39766- 30 30 30 74 74 74 50 50 50 2 2 6
39767- 26 26 26 26 26 26 2 2 6 106 106 106
39768-238 238 238 253 253 253 253 253 253 253 253 253
39769-253 253 253 253 253 253 253 253 253 253 253 253
39770-253 253 253 253 253 253 253 253 253 253 253 253
39771-253 253 253 253 253 253 253 253 253 253 253 253
39772-253 253 253 253 253 253 253 253 253 253 253 253
39773-253 253 253 246 246 246 218 218 218 202 202 202
39774-210 210 210 14 14 14 2 2 6 2 2 6
39775- 30 30 30 22 22 22 2 2 6 2 2 6
39776- 2 2 6 2 2 6 18 18 18 86 86 86
39777- 42 42 42 14 14 14 0 0 0 0 0 0
39778- 0 0 0 0 0 0 0 0 0 0 0 0
39779- 0 0 0 0 0 0 0 0 0 0 0 0
39780- 0 0 0 0 0 0 0 0 0 0 0 0
39781- 0 0 0 0 0 0 0 0 0 0 0 0
39782- 0 0 0 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 14 14 14
39786- 42 42 42 90 90 90 22 22 22 2 2 6
39787- 42 42 42 2 2 6 18 18 18 218 218 218
39788-253 253 253 253 253 253 253 253 253 253 253 253
39789-253 253 253 253 253 253 253 253 253 253 253 253
39790-253 253 253 253 253 253 253 253 253 253 253 253
39791-253 253 253 253 253 253 253 253 253 253 253 253
39792-253 253 253 253 253 253 253 253 253 253 253 253
39793-253 253 253 253 253 253 250 250 250 221 221 221
39794-218 218 218 101 101 101 2 2 6 14 14 14
39795- 18 18 18 38 38 38 10 10 10 2 2 6
39796- 2 2 6 2 2 6 2 2 6 78 78 78
39797- 58 58 58 22 22 22 6 6 6 0 0 0
39798- 0 0 0 0 0 0 0 0 0 0 0 0
39799- 0 0 0 0 0 0 0 0 0 0 0 0
39800- 0 0 0 0 0 0 0 0 0 0 0 0
39801- 0 0 0 0 0 0 0 0 0 0 0 0
39802- 0 0 0 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 6 6 6 18 18 18
39806- 54 54 54 82 82 82 2 2 6 26 26 26
39807- 22 22 22 2 2 6 123 123 123 253 253 253
39808-253 253 253 253 253 253 253 253 253 253 253 253
39809-253 253 253 253 253 253 253 253 253 253 253 253
39810-253 253 253 253 253 253 253 253 253 253 253 253
39811-253 253 253 253 253 253 253 253 253 253 253 253
39812-253 253 253 253 253 253 253 253 253 253 253 253
39813-253 253 253 253 253 253 253 253 253 250 250 250
39814-238 238 238 198 198 198 6 6 6 38 38 38
39815- 58 58 58 26 26 26 38 38 38 2 2 6
39816- 2 2 6 2 2 6 2 2 6 46 46 46
39817- 78 78 78 30 30 30 10 10 10 0 0 0
39818- 0 0 0 0 0 0 0 0 0 0 0 0
39819- 0 0 0 0 0 0 0 0 0 0 0 0
39820- 0 0 0 0 0 0 0 0 0 0 0 0
39821- 0 0 0 0 0 0 0 0 0 0 0 0
39822- 0 0 0 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 10 10 10 30 30 30
39826- 74 74 74 58 58 58 2 2 6 42 42 42
39827- 2 2 6 22 22 22 231 231 231 253 253 253
39828-253 253 253 253 253 253 253 253 253 253 253 253
39829-253 253 253 253 253 253 253 253 253 250 250 250
39830-253 253 253 253 253 253 253 253 253 253 253 253
39831-253 253 253 253 253 253 253 253 253 253 253 253
39832-253 253 253 253 253 253 253 253 253 253 253 253
39833-253 253 253 253 253 253 253 253 253 253 253 253
39834-253 253 253 246 246 246 46 46 46 38 38 38
39835- 42 42 42 14 14 14 38 38 38 14 14 14
39836- 2 2 6 2 2 6 2 2 6 6 6 6
39837- 86 86 86 46 46 46 14 14 14 0 0 0
39838- 0 0 0 0 0 0 0 0 0 0 0 0
39839- 0 0 0 0 0 0 0 0 0 0 0 0
39840- 0 0 0 0 0 0 0 0 0 0 0 0
39841- 0 0 0 0 0 0 0 0 0 0 0 0
39842- 0 0 0 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 6 6 6 14 14 14 42 42 42
39846- 90 90 90 18 18 18 18 18 18 26 26 26
39847- 2 2 6 116 116 116 253 253 253 253 253 253
39848-253 253 253 253 253 253 253 253 253 253 253 253
39849-253 253 253 253 253 253 250 250 250 238 238 238
39850-253 253 253 253 253 253 253 253 253 253 253 253
39851-253 253 253 253 253 253 253 253 253 253 253 253
39852-253 253 253 253 253 253 253 253 253 253 253 253
39853-253 253 253 253 253 253 253 253 253 253 253 253
39854-253 253 253 253 253 253 94 94 94 6 6 6
39855- 2 2 6 2 2 6 10 10 10 34 34 34
39856- 2 2 6 2 2 6 2 2 6 2 2 6
39857- 74 74 74 58 58 58 22 22 22 6 6 6
39858- 0 0 0 0 0 0 0 0 0 0 0 0
39859- 0 0 0 0 0 0 0 0 0 0 0 0
39860- 0 0 0 0 0 0 0 0 0 0 0 0
39861- 0 0 0 0 0 0 0 0 0 0 0 0
39862- 0 0 0 0 0 0 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 10 10 10 26 26 26 66 66 66
39866- 82 82 82 2 2 6 38 38 38 6 6 6
39867- 14 14 14 210 210 210 253 253 253 253 253 253
39868-253 253 253 253 253 253 253 253 253 253 253 253
39869-253 253 253 253 253 253 246 246 246 242 242 242
39870-253 253 253 253 253 253 253 253 253 253 253 253
39871-253 253 253 253 253 253 253 253 253 253 253 253
39872-253 253 253 253 253 253 253 253 253 253 253 253
39873-253 253 253 253 253 253 253 253 253 253 253 253
39874-253 253 253 253 253 253 144 144 144 2 2 6
39875- 2 2 6 2 2 6 2 2 6 46 46 46
39876- 2 2 6 2 2 6 2 2 6 2 2 6
39877- 42 42 42 74 74 74 30 30 30 10 10 10
39878- 0 0 0 0 0 0 0 0 0 0 0 0
39879- 0 0 0 0 0 0 0 0 0 0 0 0
39880- 0 0 0 0 0 0 0 0 0 0 0 0
39881- 0 0 0 0 0 0 0 0 0 0 0 0
39882- 0 0 0 0 0 0 0 0 0 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 6 6 6 14 14 14 42 42 42 90 90 90
39886- 26 26 26 6 6 6 42 42 42 2 2 6
39887- 74 74 74 250 250 250 253 253 253 253 253 253
39888-253 253 253 253 253 253 253 253 253 253 253 253
39889-253 253 253 253 253 253 242 242 242 242 242 242
39890-253 253 253 253 253 253 253 253 253 253 253 253
39891-253 253 253 253 253 253 253 253 253 253 253 253
39892-253 253 253 253 253 253 253 253 253 253 253 253
39893-253 253 253 253 253 253 253 253 253 253 253 253
39894-253 253 253 253 253 253 182 182 182 2 2 6
39895- 2 2 6 2 2 6 2 2 6 46 46 46
39896- 2 2 6 2 2 6 2 2 6 2 2 6
39897- 10 10 10 86 86 86 38 38 38 10 10 10
39898- 0 0 0 0 0 0 0 0 0 0 0 0
39899- 0 0 0 0 0 0 0 0 0 0 0 0
39900- 0 0 0 0 0 0 0 0 0 0 0 0
39901- 0 0 0 0 0 0 0 0 0 0 0 0
39902- 0 0 0 0 0 0 0 0 0 0 0 0
39903- 0 0 0 0 0 0 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 10 10 10 26 26 26 66 66 66 82 82 82
39906- 2 2 6 22 22 22 18 18 18 2 2 6
39907-149 149 149 253 253 253 253 253 253 253 253 253
39908-253 253 253 253 253 253 253 253 253 253 253 253
39909-253 253 253 253 253 253 234 234 234 242 242 242
39910-253 253 253 253 253 253 253 253 253 253 253 253
39911-253 253 253 253 253 253 253 253 253 253 253 253
39912-253 253 253 253 253 253 253 253 253 253 253 253
39913-253 253 253 253 253 253 253 253 253 253 253 253
39914-253 253 253 253 253 253 206 206 206 2 2 6
39915- 2 2 6 2 2 6 2 2 6 38 38 38
39916- 2 2 6 2 2 6 2 2 6 2 2 6
39917- 6 6 6 86 86 86 46 46 46 14 14 14
39918- 0 0 0 0 0 0 0 0 0 0 0 0
39919- 0 0 0 0 0 0 0 0 0 0 0 0
39920- 0 0 0 0 0 0 0 0 0 0 0 0
39921- 0 0 0 0 0 0 0 0 0 0 0 0
39922- 0 0 0 0 0 0 0 0 0 0 0 0
39923- 0 0 0 0 0 0 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 6 6 6
39925- 18 18 18 46 46 46 86 86 86 18 18 18
39926- 2 2 6 34 34 34 10 10 10 6 6 6
39927-210 210 210 253 253 253 253 253 253 253 253 253
39928-253 253 253 253 253 253 253 253 253 253 253 253
39929-253 253 253 253 253 253 234 234 234 242 242 242
39930-253 253 253 253 253 253 253 253 253 253 253 253
39931-253 253 253 253 253 253 253 253 253 253 253 253
39932-253 253 253 253 253 253 253 253 253 253 253 253
39933-253 253 253 253 253 253 253 253 253 253 253 253
39934-253 253 253 253 253 253 221 221 221 6 6 6
39935- 2 2 6 2 2 6 6 6 6 30 30 30
39936- 2 2 6 2 2 6 2 2 6 2 2 6
39937- 2 2 6 82 82 82 54 54 54 18 18 18
39938- 6 6 6 0 0 0 0 0 0 0 0 0
39939- 0 0 0 0 0 0 0 0 0 0 0 0
39940- 0 0 0 0 0 0 0 0 0 0 0 0
39941- 0 0 0 0 0 0 0 0 0 0 0 0
39942- 0 0 0 0 0 0 0 0 0 0 0 0
39943- 0 0 0 0 0 0 0 0 0 0 0 0
39944- 0 0 0 0 0 0 0 0 0 10 10 10
39945- 26 26 26 66 66 66 62 62 62 2 2 6
39946- 2 2 6 38 38 38 10 10 10 26 26 26
39947-238 238 238 253 253 253 253 253 253 253 253 253
39948-253 253 253 253 253 253 253 253 253 253 253 253
39949-253 253 253 253 253 253 231 231 231 238 238 238
39950-253 253 253 253 253 253 253 253 253 253 253 253
39951-253 253 253 253 253 253 253 253 253 253 253 253
39952-253 253 253 253 253 253 253 253 253 253 253 253
39953-253 253 253 253 253 253 253 253 253 253 253 253
39954-253 253 253 253 253 253 231 231 231 6 6 6
39955- 2 2 6 2 2 6 10 10 10 30 30 30
39956- 2 2 6 2 2 6 2 2 6 2 2 6
39957- 2 2 6 66 66 66 58 58 58 22 22 22
39958- 6 6 6 0 0 0 0 0 0 0 0 0
39959- 0 0 0 0 0 0 0 0 0 0 0 0
39960- 0 0 0 0 0 0 0 0 0 0 0 0
39961- 0 0 0 0 0 0 0 0 0 0 0 0
39962- 0 0 0 0 0 0 0 0 0 0 0 0
39963- 0 0 0 0 0 0 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 10 10 10
39965- 38 38 38 78 78 78 6 6 6 2 2 6
39966- 2 2 6 46 46 46 14 14 14 42 42 42
39967-246 246 246 253 253 253 253 253 253 253 253 253
39968-253 253 253 253 253 253 253 253 253 253 253 253
39969-253 253 253 253 253 253 231 231 231 242 242 242
39970-253 253 253 253 253 253 253 253 253 253 253 253
39971-253 253 253 253 253 253 253 253 253 253 253 253
39972-253 253 253 253 253 253 253 253 253 253 253 253
39973-253 253 253 253 253 253 253 253 253 253 253 253
39974-253 253 253 253 253 253 234 234 234 10 10 10
39975- 2 2 6 2 2 6 22 22 22 14 14 14
39976- 2 2 6 2 2 6 2 2 6 2 2 6
39977- 2 2 6 66 66 66 62 62 62 22 22 22
39978- 6 6 6 0 0 0 0 0 0 0 0 0
39979- 0 0 0 0 0 0 0 0 0 0 0 0
39980- 0 0 0 0 0 0 0 0 0 0 0 0
39981- 0 0 0 0 0 0 0 0 0 0 0 0
39982- 0 0 0 0 0 0 0 0 0 0 0 0
39983- 0 0 0 0 0 0 0 0 0 0 0 0
39984- 0 0 0 0 0 0 6 6 6 18 18 18
39985- 50 50 50 74 74 74 2 2 6 2 2 6
39986- 14 14 14 70 70 70 34 34 34 62 62 62
39987-250 250 250 253 253 253 253 253 253 253 253 253
39988-253 253 253 253 253 253 253 253 253 253 253 253
39989-253 253 253 253 253 253 231 231 231 246 246 246
39990-253 253 253 253 253 253 253 253 253 253 253 253
39991-253 253 253 253 253 253 253 253 253 253 253 253
39992-253 253 253 253 253 253 253 253 253 253 253 253
39993-253 253 253 253 253 253 253 253 253 253 253 253
39994-253 253 253 253 253 253 234 234 234 14 14 14
39995- 2 2 6 2 2 6 30 30 30 2 2 6
39996- 2 2 6 2 2 6 2 2 6 2 2 6
39997- 2 2 6 66 66 66 62 62 62 22 22 22
39998- 6 6 6 0 0 0 0 0 0 0 0 0
39999- 0 0 0 0 0 0 0 0 0 0 0 0
40000- 0 0 0 0 0 0 0 0 0 0 0 0
40001- 0 0 0 0 0 0 0 0 0 0 0 0
40002- 0 0 0 0 0 0 0 0 0 0 0 0
40003- 0 0 0 0 0 0 0 0 0 0 0 0
40004- 0 0 0 0 0 0 6 6 6 18 18 18
40005- 54 54 54 62 62 62 2 2 6 2 2 6
40006- 2 2 6 30 30 30 46 46 46 70 70 70
40007-250 250 250 253 253 253 253 253 253 253 253 253
40008-253 253 253 253 253 253 253 253 253 253 253 253
40009-253 253 253 253 253 253 231 231 231 246 246 246
40010-253 253 253 253 253 253 253 253 253 253 253 253
40011-253 253 253 253 253 253 253 253 253 253 253 253
40012-253 253 253 253 253 253 253 253 253 253 253 253
40013-253 253 253 253 253 253 253 253 253 253 253 253
40014-253 253 253 253 253 253 226 226 226 10 10 10
40015- 2 2 6 6 6 6 30 30 30 2 2 6
40016- 2 2 6 2 2 6 2 2 6 2 2 6
40017- 2 2 6 66 66 66 58 58 58 22 22 22
40018- 6 6 6 0 0 0 0 0 0 0 0 0
40019- 0 0 0 0 0 0 0 0 0 0 0 0
40020- 0 0 0 0 0 0 0 0 0 0 0 0
40021- 0 0 0 0 0 0 0 0 0 0 0 0
40022- 0 0 0 0 0 0 0 0 0 0 0 0
40023- 0 0 0 0 0 0 0 0 0 0 0 0
40024- 0 0 0 0 0 0 6 6 6 22 22 22
40025- 58 58 58 62 62 62 2 2 6 2 2 6
40026- 2 2 6 2 2 6 30 30 30 78 78 78
40027-250 250 250 253 253 253 253 253 253 253 253 253
40028-253 253 253 253 253 253 253 253 253 253 253 253
40029-253 253 253 253 253 253 231 231 231 246 246 246
40030-253 253 253 253 253 253 253 253 253 253 253 253
40031-253 253 253 253 253 253 253 253 253 253 253 253
40032-253 253 253 253 253 253 253 253 253 253 253 253
40033-253 253 253 253 253 253 253 253 253 253 253 253
40034-253 253 253 253 253 253 206 206 206 2 2 6
40035- 22 22 22 34 34 34 18 14 6 22 22 22
40036- 26 26 26 18 18 18 6 6 6 2 2 6
40037- 2 2 6 82 82 82 54 54 54 18 18 18
40038- 6 6 6 0 0 0 0 0 0 0 0 0
40039- 0 0 0 0 0 0 0 0 0 0 0 0
40040- 0 0 0 0 0 0 0 0 0 0 0 0
40041- 0 0 0 0 0 0 0 0 0 0 0 0
40042- 0 0 0 0 0 0 0 0 0 0 0 0
40043- 0 0 0 0 0 0 0 0 0 0 0 0
40044- 0 0 0 0 0 0 6 6 6 26 26 26
40045- 62 62 62 106 106 106 74 54 14 185 133 11
40046-210 162 10 121 92 8 6 6 6 62 62 62
40047-238 238 238 253 253 253 253 253 253 253 253 253
40048-253 253 253 253 253 253 253 253 253 253 253 253
40049-253 253 253 253 253 253 231 231 231 246 246 246
40050-253 253 253 253 253 253 253 253 253 253 253 253
40051-253 253 253 253 253 253 253 253 253 253 253 253
40052-253 253 253 253 253 253 253 253 253 253 253 253
40053-253 253 253 253 253 253 253 253 253 253 253 253
40054-253 253 253 253 253 253 158 158 158 18 18 18
40055- 14 14 14 2 2 6 2 2 6 2 2 6
40056- 6 6 6 18 18 18 66 66 66 38 38 38
40057- 6 6 6 94 94 94 50 50 50 18 18 18
40058- 6 6 6 0 0 0 0 0 0 0 0 0
40059- 0 0 0 0 0 0 0 0 0 0 0 0
40060- 0 0 0 0 0 0 0 0 0 0 0 0
40061- 0 0 0 0 0 0 0 0 0 0 0 0
40062- 0 0 0 0 0 0 0 0 0 0 0 0
40063- 0 0 0 0 0 0 0 0 0 6 6 6
40064- 10 10 10 10 10 10 18 18 18 38 38 38
40065- 78 78 78 142 134 106 216 158 10 242 186 14
40066-246 190 14 246 190 14 156 118 10 10 10 10
40067- 90 90 90 238 238 238 253 253 253 253 253 253
40068-253 253 253 253 253 253 253 253 253 253 253 253
40069-253 253 253 253 253 253 231 231 231 250 250 250
40070-253 253 253 253 253 253 253 253 253 253 253 253
40071-253 253 253 253 253 253 253 253 253 253 253 253
40072-253 253 253 253 253 253 253 253 253 253 253 253
40073-253 253 253 253 253 253 253 253 253 246 230 190
40074-238 204 91 238 204 91 181 142 44 37 26 9
40075- 2 2 6 2 2 6 2 2 6 2 2 6
40076- 2 2 6 2 2 6 38 38 38 46 46 46
40077- 26 26 26 106 106 106 54 54 54 18 18 18
40078- 6 6 6 0 0 0 0 0 0 0 0 0
40079- 0 0 0 0 0 0 0 0 0 0 0 0
40080- 0 0 0 0 0 0 0 0 0 0 0 0
40081- 0 0 0 0 0 0 0 0 0 0 0 0
40082- 0 0 0 0 0 0 0 0 0 0 0 0
40083- 0 0 0 6 6 6 14 14 14 22 22 22
40084- 30 30 30 38 38 38 50 50 50 70 70 70
40085-106 106 106 190 142 34 226 170 11 242 186 14
40086-246 190 14 246 190 14 246 190 14 154 114 10
40087- 6 6 6 74 74 74 226 226 226 253 253 253
40088-253 253 253 253 253 253 253 253 253 253 253 253
40089-253 253 253 253 253 253 231 231 231 250 250 250
40090-253 253 253 253 253 253 253 253 253 253 253 253
40091-253 253 253 253 253 253 253 253 253 253 253 253
40092-253 253 253 253 253 253 253 253 253 253 253 253
40093-253 253 253 253 253 253 253 253 253 228 184 62
40094-241 196 14 241 208 19 232 195 16 38 30 10
40095- 2 2 6 2 2 6 2 2 6 2 2 6
40096- 2 2 6 6 6 6 30 30 30 26 26 26
40097-203 166 17 154 142 90 66 66 66 26 26 26
40098- 6 6 6 0 0 0 0 0 0 0 0 0
40099- 0 0 0 0 0 0 0 0 0 0 0 0
40100- 0 0 0 0 0 0 0 0 0 0 0 0
40101- 0 0 0 0 0 0 0 0 0 0 0 0
40102- 0 0 0 0 0 0 0 0 0 0 0 0
40103- 6 6 6 18 18 18 38 38 38 58 58 58
40104- 78 78 78 86 86 86 101 101 101 123 123 123
40105-175 146 61 210 150 10 234 174 13 246 186 14
40106-246 190 14 246 190 14 246 190 14 238 190 10
40107-102 78 10 2 2 6 46 46 46 198 198 198
40108-253 253 253 253 253 253 253 253 253 253 253 253
40109-253 253 253 253 253 253 234 234 234 242 242 242
40110-253 253 253 253 253 253 253 253 253 253 253 253
40111-253 253 253 253 253 253 253 253 253 253 253 253
40112-253 253 253 253 253 253 253 253 253 253 253 253
40113-253 253 253 253 253 253 253 253 253 224 178 62
40114-242 186 14 241 196 14 210 166 10 22 18 6
40115- 2 2 6 2 2 6 2 2 6 2 2 6
40116- 2 2 6 2 2 6 6 6 6 121 92 8
40117-238 202 15 232 195 16 82 82 82 34 34 34
40118- 10 10 10 0 0 0 0 0 0 0 0 0
40119- 0 0 0 0 0 0 0 0 0 0 0 0
40120- 0 0 0 0 0 0 0 0 0 0 0 0
40121- 0 0 0 0 0 0 0 0 0 0 0 0
40122- 0 0 0 0 0 0 0 0 0 0 0 0
40123- 14 14 14 38 38 38 70 70 70 154 122 46
40124-190 142 34 200 144 11 197 138 11 197 138 11
40125-213 154 11 226 170 11 242 186 14 246 190 14
40126-246 190 14 246 190 14 246 190 14 246 190 14
40127-225 175 15 46 32 6 2 2 6 22 22 22
40128-158 158 158 250 250 250 253 253 253 253 253 253
40129-253 253 253 253 253 253 253 253 253 253 253 253
40130-253 253 253 253 253 253 253 253 253 253 253 253
40131-253 253 253 253 253 253 253 253 253 253 253 253
40132-253 253 253 253 253 253 253 253 253 253 253 253
40133-253 253 253 250 250 250 242 242 242 224 178 62
40134-239 182 13 236 186 11 213 154 11 46 32 6
40135- 2 2 6 2 2 6 2 2 6 2 2 6
40136- 2 2 6 2 2 6 61 42 6 225 175 15
40137-238 190 10 236 186 11 112 100 78 42 42 42
40138- 14 14 14 0 0 0 0 0 0 0 0 0
40139- 0 0 0 0 0 0 0 0 0 0 0 0
40140- 0 0 0 0 0 0 0 0 0 0 0 0
40141- 0 0 0 0 0 0 0 0 0 0 0 0
40142- 0 0 0 0 0 0 0 0 0 6 6 6
40143- 22 22 22 54 54 54 154 122 46 213 154 11
40144-226 170 11 230 174 11 226 170 11 226 170 11
40145-236 178 12 242 186 14 246 190 14 246 190 14
40146-246 190 14 246 190 14 246 190 14 246 190 14
40147-241 196 14 184 144 12 10 10 10 2 2 6
40148- 6 6 6 116 116 116 242 242 242 253 253 253
40149-253 253 253 253 253 253 253 253 253 253 253 253
40150-253 253 253 253 253 253 253 253 253 253 253 253
40151-253 253 253 253 253 253 253 253 253 253 253 253
40152-253 253 253 253 253 253 253 253 253 253 253 253
40153-253 253 253 231 231 231 198 198 198 214 170 54
40154-236 178 12 236 178 12 210 150 10 137 92 6
40155- 18 14 6 2 2 6 2 2 6 2 2 6
40156- 6 6 6 70 47 6 200 144 11 236 178 12
40157-239 182 13 239 182 13 124 112 88 58 58 58
40158- 22 22 22 6 6 6 0 0 0 0 0 0
40159- 0 0 0 0 0 0 0 0 0 0 0 0
40160- 0 0 0 0 0 0 0 0 0 0 0 0
40161- 0 0 0 0 0 0 0 0 0 0 0 0
40162- 0 0 0 0 0 0 0 0 0 10 10 10
40163- 30 30 30 70 70 70 180 133 36 226 170 11
40164-239 182 13 242 186 14 242 186 14 246 186 14
40165-246 190 14 246 190 14 246 190 14 246 190 14
40166-246 190 14 246 190 14 246 190 14 246 190 14
40167-246 190 14 232 195 16 98 70 6 2 2 6
40168- 2 2 6 2 2 6 66 66 66 221 221 221
40169-253 253 253 253 253 253 253 253 253 253 253 253
40170-253 253 253 253 253 253 253 253 253 253 253 253
40171-253 253 253 253 253 253 253 253 253 253 253 253
40172-253 253 253 253 253 253 253 253 253 253 253 253
40173-253 253 253 206 206 206 198 198 198 214 166 58
40174-230 174 11 230 174 11 216 158 10 192 133 9
40175-163 110 8 116 81 8 102 78 10 116 81 8
40176-167 114 7 197 138 11 226 170 11 239 182 13
40177-242 186 14 242 186 14 162 146 94 78 78 78
40178- 34 34 34 14 14 14 6 6 6 0 0 0
40179- 0 0 0 0 0 0 0 0 0 0 0 0
40180- 0 0 0 0 0 0 0 0 0 0 0 0
40181- 0 0 0 0 0 0 0 0 0 0 0 0
40182- 0 0 0 0 0 0 0 0 0 6 6 6
40183- 30 30 30 78 78 78 190 142 34 226 170 11
40184-239 182 13 246 190 14 246 190 14 246 190 14
40185-246 190 14 246 190 14 246 190 14 246 190 14
40186-246 190 14 246 190 14 246 190 14 246 190 14
40187-246 190 14 241 196 14 203 166 17 22 18 6
40188- 2 2 6 2 2 6 2 2 6 38 38 38
40189-218 218 218 253 253 253 253 253 253 253 253 253
40190-253 253 253 253 253 253 253 253 253 253 253 253
40191-253 253 253 253 253 253 253 253 253 253 253 253
40192-253 253 253 253 253 253 253 253 253 253 253 253
40193-250 250 250 206 206 206 198 198 198 202 162 69
40194-226 170 11 236 178 12 224 166 10 210 150 10
40195-200 144 11 197 138 11 192 133 9 197 138 11
40196-210 150 10 226 170 11 242 186 14 246 190 14
40197-246 190 14 246 186 14 225 175 15 124 112 88
40198- 62 62 62 30 30 30 14 14 14 6 6 6
40199- 0 0 0 0 0 0 0 0 0 0 0 0
40200- 0 0 0 0 0 0 0 0 0 0 0 0
40201- 0 0 0 0 0 0 0 0 0 0 0 0
40202- 0 0 0 0 0 0 0 0 0 10 10 10
40203- 30 30 30 78 78 78 174 135 50 224 166 10
40204-239 182 13 246 190 14 246 190 14 246 190 14
40205-246 190 14 246 190 14 246 190 14 246 190 14
40206-246 190 14 246 190 14 246 190 14 246 190 14
40207-246 190 14 246 190 14 241 196 14 139 102 15
40208- 2 2 6 2 2 6 2 2 6 2 2 6
40209- 78 78 78 250 250 250 253 253 253 253 253 253
40210-253 253 253 253 253 253 253 253 253 253 253 253
40211-253 253 253 253 253 253 253 253 253 253 253 253
40212-253 253 253 253 253 253 253 253 253 253 253 253
40213-250 250 250 214 214 214 198 198 198 190 150 46
40214-219 162 10 236 178 12 234 174 13 224 166 10
40215-216 158 10 213 154 11 213 154 11 216 158 10
40216-226 170 11 239 182 13 246 190 14 246 190 14
40217-246 190 14 246 190 14 242 186 14 206 162 42
40218-101 101 101 58 58 58 30 30 30 14 14 14
40219- 6 6 6 0 0 0 0 0 0 0 0 0
40220- 0 0 0 0 0 0 0 0 0 0 0 0
40221- 0 0 0 0 0 0 0 0 0 0 0 0
40222- 0 0 0 0 0 0 0 0 0 10 10 10
40223- 30 30 30 74 74 74 174 135 50 216 158 10
40224-236 178 12 246 190 14 246 190 14 246 190 14
40225-246 190 14 246 190 14 246 190 14 246 190 14
40226-246 190 14 246 190 14 246 190 14 246 190 14
40227-246 190 14 246 190 14 241 196 14 226 184 13
40228- 61 42 6 2 2 6 2 2 6 2 2 6
40229- 22 22 22 238 238 238 253 253 253 253 253 253
40230-253 253 253 253 253 253 253 253 253 253 253 253
40231-253 253 253 253 253 253 253 253 253 253 253 253
40232-253 253 253 253 253 253 253 253 253 253 253 253
40233-253 253 253 226 226 226 187 187 187 180 133 36
40234-216 158 10 236 178 12 239 182 13 236 178 12
40235-230 174 11 226 170 11 226 170 11 230 174 11
40236-236 178 12 242 186 14 246 190 14 246 190 14
40237-246 190 14 246 190 14 246 186 14 239 182 13
40238-206 162 42 106 106 106 66 66 66 34 34 34
40239- 14 14 14 6 6 6 0 0 0 0 0 0
40240- 0 0 0 0 0 0 0 0 0 0 0 0
40241- 0 0 0 0 0 0 0 0 0 0 0 0
40242- 0 0 0 0 0 0 0 0 0 6 6 6
40243- 26 26 26 70 70 70 163 133 67 213 154 11
40244-236 178 12 246 190 14 246 190 14 246 190 14
40245-246 190 14 246 190 14 246 190 14 246 190 14
40246-246 190 14 246 190 14 246 190 14 246 190 14
40247-246 190 14 246 190 14 246 190 14 241 196 14
40248-190 146 13 18 14 6 2 2 6 2 2 6
40249- 46 46 46 246 246 246 253 253 253 253 253 253
40250-253 253 253 253 253 253 253 253 253 253 253 253
40251-253 253 253 253 253 253 253 253 253 253 253 253
40252-253 253 253 253 253 253 253 253 253 253 253 253
40253-253 253 253 221 221 221 86 86 86 156 107 11
40254-216 158 10 236 178 12 242 186 14 246 186 14
40255-242 186 14 239 182 13 239 182 13 242 186 14
40256-242 186 14 246 186 14 246 190 14 246 190 14
40257-246 190 14 246 190 14 246 190 14 246 190 14
40258-242 186 14 225 175 15 142 122 72 66 66 66
40259- 30 30 30 10 10 10 0 0 0 0 0 0
40260- 0 0 0 0 0 0 0 0 0 0 0 0
40261- 0 0 0 0 0 0 0 0 0 0 0 0
40262- 0 0 0 0 0 0 0 0 0 6 6 6
40263- 26 26 26 70 70 70 163 133 67 210 150 10
40264-236 178 12 246 190 14 246 190 14 246 190 14
40265-246 190 14 246 190 14 246 190 14 246 190 14
40266-246 190 14 246 190 14 246 190 14 246 190 14
40267-246 190 14 246 190 14 246 190 14 246 190 14
40268-232 195 16 121 92 8 34 34 34 106 106 106
40269-221 221 221 253 253 253 253 253 253 253 253 253
40270-253 253 253 253 253 253 253 253 253 253 253 253
40271-253 253 253 253 253 253 253 253 253 253 253 253
40272-253 253 253 253 253 253 253 253 253 253 253 253
40273-242 242 242 82 82 82 18 14 6 163 110 8
40274-216 158 10 236 178 12 242 186 14 246 190 14
40275-246 190 14 246 190 14 246 190 14 246 190 14
40276-246 190 14 246 190 14 246 190 14 246 190 14
40277-246 190 14 246 190 14 246 190 14 246 190 14
40278-246 190 14 246 190 14 242 186 14 163 133 67
40279- 46 46 46 18 18 18 6 6 6 0 0 0
40280- 0 0 0 0 0 0 0 0 0 0 0 0
40281- 0 0 0 0 0 0 0 0 0 0 0 0
40282- 0 0 0 0 0 0 0 0 0 10 10 10
40283- 30 30 30 78 78 78 163 133 67 210 150 10
40284-236 178 12 246 186 14 246 190 14 246 190 14
40285-246 190 14 246 190 14 246 190 14 246 190 14
40286-246 190 14 246 190 14 246 190 14 246 190 14
40287-246 190 14 246 190 14 246 190 14 246 190 14
40288-241 196 14 215 174 15 190 178 144 253 253 253
40289-253 253 253 253 253 253 253 253 253 253 253 253
40290-253 253 253 253 253 253 253 253 253 253 253 253
40291-253 253 253 253 253 253 253 253 253 253 253 253
40292-253 253 253 253 253 253 253 253 253 218 218 218
40293- 58 58 58 2 2 6 22 18 6 167 114 7
40294-216 158 10 236 178 12 246 186 14 246 190 14
40295-246 190 14 246 190 14 246 190 14 246 190 14
40296-246 190 14 246 190 14 246 190 14 246 190 14
40297-246 190 14 246 190 14 246 190 14 246 190 14
40298-246 190 14 246 186 14 242 186 14 190 150 46
40299- 54 54 54 22 22 22 6 6 6 0 0 0
40300- 0 0 0 0 0 0 0 0 0 0 0 0
40301- 0 0 0 0 0 0 0 0 0 0 0 0
40302- 0 0 0 0 0 0 0 0 0 14 14 14
40303- 38 38 38 86 86 86 180 133 36 213 154 11
40304-236 178 12 246 186 14 246 190 14 246 190 14
40305-246 190 14 246 190 14 246 190 14 246 190 14
40306-246 190 14 246 190 14 246 190 14 246 190 14
40307-246 190 14 246 190 14 246 190 14 246 190 14
40308-246 190 14 232 195 16 190 146 13 214 214 214
40309-253 253 253 253 253 253 253 253 253 253 253 253
40310-253 253 253 253 253 253 253 253 253 253 253 253
40311-253 253 253 253 253 253 253 253 253 253 253 253
40312-253 253 253 250 250 250 170 170 170 26 26 26
40313- 2 2 6 2 2 6 37 26 9 163 110 8
40314-219 162 10 239 182 13 246 186 14 246 190 14
40315-246 190 14 246 190 14 246 190 14 246 190 14
40316-246 190 14 246 190 14 246 190 14 246 190 14
40317-246 190 14 246 190 14 246 190 14 246 190 14
40318-246 186 14 236 178 12 224 166 10 142 122 72
40319- 46 46 46 18 18 18 6 6 6 0 0 0
40320- 0 0 0 0 0 0 0 0 0 0 0 0
40321- 0 0 0 0 0 0 0 0 0 0 0 0
40322- 0 0 0 0 0 0 6 6 6 18 18 18
40323- 50 50 50 109 106 95 192 133 9 224 166 10
40324-242 186 14 246 190 14 246 190 14 246 190 14
40325-246 190 14 246 190 14 246 190 14 246 190 14
40326-246 190 14 246 190 14 246 190 14 246 190 14
40327-246 190 14 246 190 14 246 190 14 246 190 14
40328-242 186 14 226 184 13 210 162 10 142 110 46
40329-226 226 226 253 253 253 253 253 253 253 253 253
40330-253 253 253 253 253 253 253 253 253 253 253 253
40331-253 253 253 253 253 253 253 253 253 253 253 253
40332-198 198 198 66 66 66 2 2 6 2 2 6
40333- 2 2 6 2 2 6 50 34 6 156 107 11
40334-219 162 10 239 182 13 246 186 14 246 190 14
40335-246 190 14 246 190 14 246 190 14 246 190 14
40336-246 190 14 246 190 14 246 190 14 246 190 14
40337-246 190 14 246 190 14 246 190 14 242 186 14
40338-234 174 13 213 154 11 154 122 46 66 66 66
40339- 30 30 30 10 10 10 0 0 0 0 0 0
40340- 0 0 0 0 0 0 0 0 0 0 0 0
40341- 0 0 0 0 0 0 0 0 0 0 0 0
40342- 0 0 0 0 0 0 6 6 6 22 22 22
40343- 58 58 58 154 121 60 206 145 10 234 174 13
40344-242 186 14 246 186 14 246 190 14 246 190 14
40345-246 190 14 246 190 14 246 190 14 246 190 14
40346-246 190 14 246 190 14 246 190 14 246 190 14
40347-246 190 14 246 190 14 246 190 14 246 190 14
40348-246 186 14 236 178 12 210 162 10 163 110 8
40349- 61 42 6 138 138 138 218 218 218 250 250 250
40350-253 253 253 253 253 253 253 253 253 250 250 250
40351-242 242 242 210 210 210 144 144 144 66 66 66
40352- 6 6 6 2 2 6 2 2 6 2 2 6
40353- 2 2 6 2 2 6 61 42 6 163 110 8
40354-216 158 10 236 178 12 246 190 14 246 190 14
40355-246 190 14 246 190 14 246 190 14 246 190 14
40356-246 190 14 246 190 14 246 190 14 246 190 14
40357-246 190 14 239 182 13 230 174 11 216 158 10
40358-190 142 34 124 112 88 70 70 70 38 38 38
40359- 18 18 18 6 6 6 0 0 0 0 0 0
40360- 0 0 0 0 0 0 0 0 0 0 0 0
40361- 0 0 0 0 0 0 0 0 0 0 0 0
40362- 0 0 0 0 0 0 6 6 6 22 22 22
40363- 62 62 62 168 124 44 206 145 10 224 166 10
40364-236 178 12 239 182 13 242 186 14 242 186 14
40365-246 186 14 246 190 14 246 190 14 246 190 14
40366-246 190 14 246 190 14 246 190 14 246 190 14
40367-246 190 14 246 190 14 246 190 14 246 190 14
40368-246 190 14 236 178 12 216 158 10 175 118 6
40369- 80 54 7 2 2 6 6 6 6 30 30 30
40370- 54 54 54 62 62 62 50 50 50 38 38 38
40371- 14 14 14 2 2 6 2 2 6 2 2 6
40372- 2 2 6 2 2 6 2 2 6 2 2 6
40373- 2 2 6 6 6 6 80 54 7 167 114 7
40374-213 154 11 236 178 12 246 190 14 246 190 14
40375-246 190 14 246 190 14 246 190 14 246 190 14
40376-246 190 14 242 186 14 239 182 13 239 182 13
40377-230 174 11 210 150 10 174 135 50 124 112 88
40378- 82 82 82 54 54 54 34 34 34 18 18 18
40379- 6 6 6 0 0 0 0 0 0 0 0 0
40380- 0 0 0 0 0 0 0 0 0 0 0 0
40381- 0 0 0 0 0 0 0 0 0 0 0 0
40382- 0 0 0 0 0 0 6 6 6 18 18 18
40383- 50 50 50 158 118 36 192 133 9 200 144 11
40384-216 158 10 219 162 10 224 166 10 226 170 11
40385-230 174 11 236 178 12 239 182 13 239 182 13
40386-242 186 14 246 186 14 246 190 14 246 190 14
40387-246 190 14 246 190 14 246 190 14 246 190 14
40388-246 186 14 230 174 11 210 150 10 163 110 8
40389-104 69 6 10 10 10 2 2 6 2 2 6
40390- 2 2 6 2 2 6 2 2 6 2 2 6
40391- 2 2 6 2 2 6 2 2 6 2 2 6
40392- 2 2 6 2 2 6 2 2 6 2 2 6
40393- 2 2 6 6 6 6 91 60 6 167 114 7
40394-206 145 10 230 174 11 242 186 14 246 190 14
40395-246 190 14 246 190 14 246 186 14 242 186 14
40396-239 182 13 230 174 11 224 166 10 213 154 11
40397-180 133 36 124 112 88 86 86 86 58 58 58
40398- 38 38 38 22 22 22 10 10 10 6 6 6
40399- 0 0 0 0 0 0 0 0 0 0 0 0
40400- 0 0 0 0 0 0 0 0 0 0 0 0
40401- 0 0 0 0 0 0 0 0 0 0 0 0
40402- 0 0 0 0 0 0 0 0 0 14 14 14
40403- 34 34 34 70 70 70 138 110 50 158 118 36
40404-167 114 7 180 123 7 192 133 9 197 138 11
40405-200 144 11 206 145 10 213 154 11 219 162 10
40406-224 166 10 230 174 11 239 182 13 242 186 14
40407-246 186 14 246 186 14 246 186 14 246 186 14
40408-239 182 13 216 158 10 185 133 11 152 99 6
40409-104 69 6 18 14 6 2 2 6 2 2 6
40410- 2 2 6 2 2 6 2 2 6 2 2 6
40411- 2 2 6 2 2 6 2 2 6 2 2 6
40412- 2 2 6 2 2 6 2 2 6 2 2 6
40413- 2 2 6 6 6 6 80 54 7 152 99 6
40414-192 133 9 219 162 10 236 178 12 239 182 13
40415-246 186 14 242 186 14 239 182 13 236 178 12
40416-224 166 10 206 145 10 192 133 9 154 121 60
40417- 94 94 94 62 62 62 42 42 42 22 22 22
40418- 14 14 14 6 6 6 0 0 0 0 0 0
40419- 0 0 0 0 0 0 0 0 0 0 0 0
40420- 0 0 0 0 0 0 0 0 0 0 0 0
40421- 0 0 0 0 0 0 0 0 0 0 0 0
40422- 0 0 0 0 0 0 0 0 0 6 6 6
40423- 18 18 18 34 34 34 58 58 58 78 78 78
40424-101 98 89 124 112 88 142 110 46 156 107 11
40425-163 110 8 167 114 7 175 118 6 180 123 7
40426-185 133 11 197 138 11 210 150 10 219 162 10
40427-226 170 11 236 178 12 236 178 12 234 174 13
40428-219 162 10 197 138 11 163 110 8 130 83 6
40429- 91 60 6 10 10 10 2 2 6 2 2 6
40430- 18 18 18 38 38 38 38 38 38 38 38 38
40431- 38 38 38 38 38 38 38 38 38 38 38 38
40432- 38 38 38 38 38 38 26 26 26 2 2 6
40433- 2 2 6 6 6 6 70 47 6 137 92 6
40434-175 118 6 200 144 11 219 162 10 230 174 11
40435-234 174 13 230 174 11 219 162 10 210 150 10
40436-192 133 9 163 110 8 124 112 88 82 82 82
40437- 50 50 50 30 30 30 14 14 14 6 6 6
40438- 0 0 0 0 0 0 0 0 0 0 0 0
40439- 0 0 0 0 0 0 0 0 0 0 0 0
40440- 0 0 0 0 0 0 0 0 0 0 0 0
40441- 0 0 0 0 0 0 0 0 0 0 0 0
40442- 0 0 0 0 0 0 0 0 0 0 0 0
40443- 6 6 6 14 14 14 22 22 22 34 34 34
40444- 42 42 42 58 58 58 74 74 74 86 86 86
40445-101 98 89 122 102 70 130 98 46 121 87 25
40446-137 92 6 152 99 6 163 110 8 180 123 7
40447-185 133 11 197 138 11 206 145 10 200 144 11
40448-180 123 7 156 107 11 130 83 6 104 69 6
40449- 50 34 6 54 54 54 110 110 110 101 98 89
40450- 86 86 86 82 82 82 78 78 78 78 78 78
40451- 78 78 78 78 78 78 78 78 78 78 78 78
40452- 78 78 78 82 82 82 86 86 86 94 94 94
40453-106 106 106 101 101 101 86 66 34 124 80 6
40454-156 107 11 180 123 7 192 133 9 200 144 11
40455-206 145 10 200 144 11 192 133 9 175 118 6
40456-139 102 15 109 106 95 70 70 70 42 42 42
40457- 22 22 22 10 10 10 0 0 0 0 0 0
40458- 0 0 0 0 0 0 0 0 0 0 0 0
40459- 0 0 0 0 0 0 0 0 0 0 0 0
40460- 0 0 0 0 0 0 0 0 0 0 0 0
40461- 0 0 0 0 0 0 0 0 0 0 0 0
40462- 0 0 0 0 0 0 0 0 0 0 0 0
40463- 0 0 0 0 0 0 6 6 6 10 10 10
40464- 14 14 14 22 22 22 30 30 30 38 38 38
40465- 50 50 50 62 62 62 74 74 74 90 90 90
40466-101 98 89 112 100 78 121 87 25 124 80 6
40467-137 92 6 152 99 6 152 99 6 152 99 6
40468-138 86 6 124 80 6 98 70 6 86 66 30
40469-101 98 89 82 82 82 58 58 58 46 46 46
40470- 38 38 38 34 34 34 34 34 34 34 34 34
40471- 34 34 34 34 34 34 34 34 34 34 34 34
40472- 34 34 34 34 34 34 38 38 38 42 42 42
40473- 54 54 54 82 82 82 94 86 76 91 60 6
40474-134 86 6 156 107 11 167 114 7 175 118 6
40475-175 118 6 167 114 7 152 99 6 121 87 25
40476-101 98 89 62 62 62 34 34 34 18 18 18
40477- 6 6 6 0 0 0 0 0 0 0 0 0
40478- 0 0 0 0 0 0 0 0 0 0 0 0
40479- 0 0 0 0 0 0 0 0 0 0 0 0
40480- 0 0 0 0 0 0 0 0 0 0 0 0
40481- 0 0 0 0 0 0 0 0 0 0 0 0
40482- 0 0 0 0 0 0 0 0 0 0 0 0
40483- 0 0 0 0 0 0 0 0 0 0 0 0
40484- 0 0 0 6 6 6 6 6 6 10 10 10
40485- 18 18 18 22 22 22 30 30 30 42 42 42
40486- 50 50 50 66 66 66 86 86 86 101 98 89
40487-106 86 58 98 70 6 104 69 6 104 69 6
40488-104 69 6 91 60 6 82 62 34 90 90 90
40489- 62 62 62 38 38 38 22 22 22 14 14 14
40490- 10 10 10 10 10 10 10 10 10 10 10 10
40491- 10 10 10 10 10 10 6 6 6 10 10 10
40492- 10 10 10 10 10 10 10 10 10 14 14 14
40493- 22 22 22 42 42 42 70 70 70 89 81 66
40494- 80 54 7 104 69 6 124 80 6 137 92 6
40495-134 86 6 116 81 8 100 82 52 86 86 86
40496- 58 58 58 30 30 30 14 14 14 6 6 6
40497- 0 0 0 0 0 0 0 0 0 0 0 0
40498- 0 0 0 0 0 0 0 0 0 0 0 0
40499- 0 0 0 0 0 0 0 0 0 0 0 0
40500- 0 0 0 0 0 0 0 0 0 0 0 0
40501- 0 0 0 0 0 0 0 0 0 0 0 0
40502- 0 0 0 0 0 0 0 0 0 0 0 0
40503- 0 0 0 0 0 0 0 0 0 0 0 0
40504- 0 0 0 0 0 0 0 0 0 0 0 0
40505- 0 0 0 6 6 6 10 10 10 14 14 14
40506- 18 18 18 26 26 26 38 38 38 54 54 54
40507- 70 70 70 86 86 86 94 86 76 89 81 66
40508- 89 81 66 86 86 86 74 74 74 50 50 50
40509- 30 30 30 14 14 14 6 6 6 0 0 0
40510- 0 0 0 0 0 0 0 0 0 0 0 0
40511- 0 0 0 0 0 0 0 0 0 0 0 0
40512- 0 0 0 0 0 0 0 0 0 0 0 0
40513- 6 6 6 18 18 18 34 34 34 58 58 58
40514- 82 82 82 89 81 66 89 81 66 89 81 66
40515- 94 86 66 94 86 76 74 74 74 50 50 50
40516- 26 26 26 14 14 14 6 6 6 0 0 0
40517- 0 0 0 0 0 0 0 0 0 0 0 0
40518- 0 0 0 0 0 0 0 0 0 0 0 0
40519- 0 0 0 0 0 0 0 0 0 0 0 0
40520- 0 0 0 0 0 0 0 0 0 0 0 0
40521- 0 0 0 0 0 0 0 0 0 0 0 0
40522- 0 0 0 0 0 0 0 0 0 0 0 0
40523- 0 0 0 0 0 0 0 0 0 0 0 0
40524- 0 0 0 0 0 0 0 0 0 0 0 0
40525- 0 0 0 0 0 0 0 0 0 0 0 0
40526- 6 6 6 6 6 6 14 14 14 18 18 18
40527- 30 30 30 38 38 38 46 46 46 54 54 54
40528- 50 50 50 42 42 42 30 30 30 18 18 18
40529- 10 10 10 0 0 0 0 0 0 0 0 0
40530- 0 0 0 0 0 0 0 0 0 0 0 0
40531- 0 0 0 0 0 0 0 0 0 0 0 0
40532- 0 0 0 0 0 0 0 0 0 0 0 0
40533- 0 0 0 6 6 6 14 14 14 26 26 26
40534- 38 38 38 50 50 50 58 58 58 58 58 58
40535- 54 54 54 42 42 42 30 30 30 18 18 18
40536- 10 10 10 0 0 0 0 0 0 0 0 0
40537- 0 0 0 0 0 0 0 0 0 0 0 0
40538- 0 0 0 0 0 0 0 0 0 0 0 0
40539- 0 0 0 0 0 0 0 0 0 0 0 0
40540- 0 0 0 0 0 0 0 0 0 0 0 0
40541- 0 0 0 0 0 0 0 0 0 0 0 0
40542- 0 0 0 0 0 0 0 0 0 0 0 0
40543- 0 0 0 0 0 0 0 0 0 0 0 0
40544- 0 0 0 0 0 0 0 0 0 0 0 0
40545- 0 0 0 0 0 0 0 0 0 0 0 0
40546- 0 0 0 0 0 0 0 0 0 6 6 6
40547- 6 6 6 10 10 10 14 14 14 18 18 18
40548- 18 18 18 14 14 14 10 10 10 6 6 6
40549- 0 0 0 0 0 0 0 0 0 0 0 0
40550- 0 0 0 0 0 0 0 0 0 0 0 0
40551- 0 0 0 0 0 0 0 0 0 0 0 0
40552- 0 0 0 0 0 0 0 0 0 0 0 0
40553- 0 0 0 0 0 0 0 0 0 6 6 6
40554- 14 14 14 18 18 18 22 22 22 22 22 22
40555- 18 18 18 14 14 14 10 10 10 6 6 6
40556- 0 0 0 0 0 0 0 0 0 0 0 0
40557- 0 0 0 0 0 0 0 0 0 0 0 0
40558- 0 0 0 0 0 0 0 0 0 0 0 0
40559- 0 0 0 0 0 0 0 0 0 0 0 0
40560- 0 0 0 0 0 0 0 0 0 0 0 0
40561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40574+4 4 4 4 4 4
40575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40588+4 4 4 4 4 4
40589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40602+4 4 4 4 4 4
40603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40616+4 4 4 4 4 4
40617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630+4 4 4 4 4 4
40631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644+4 4 4 4 4 4
40645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40649+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40650+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40654+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40655+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40656+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658+4 4 4 4 4 4
40659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40663+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40664+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40665+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40668+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40669+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40670+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40671+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672+4 4 4 4 4 4
40673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40677+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40678+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40679+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40682+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40683+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40684+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40685+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40686+4 4 4 4 4 4
40687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40690+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40691+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40692+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40693+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40695+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40696+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40697+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40698+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40699+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40700+4 4 4 4 4 4
40701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40704+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40705+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40706+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40707+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40708+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40709+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40710+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40711+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40712+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40713+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40714+4 4 4 4 4 4
40715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40718+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40719+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40720+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40721+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40722+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40723+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40724+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40725+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40726+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40727+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40728+4 4 4 4 4 4
40729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40731+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40732+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40733+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40734+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40735+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40736+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40737+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40738+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40739+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40740+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40741+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40742+4 4 4 4 4 4
40743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40745+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40746+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40747+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40748+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40749+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40750+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40751+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40752+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40753+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40754+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40755+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40756+4 4 4 4 4 4
40757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40759+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40760+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40761+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40762+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40763+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40764+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40765+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40766+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40767+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40768+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40769+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40770+4 4 4 4 4 4
40771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40773+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40774+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40775+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40776+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40777+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40778+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40779+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40780+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40781+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40782+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40783+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40784+4 4 4 4 4 4
40785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40786+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40787+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40788+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40789+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40790+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40791+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40792+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40793+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40794+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40795+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40796+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40797+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40798+4 4 4 4 4 4
40799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40800+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40801+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40802+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40803+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40804+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40805+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40806+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40807+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40808+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40809+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40810+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40811+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40812+0 0 0 4 4 4
40813+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40814+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40815+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40816+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40817+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40818+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40819+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40820+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40821+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40822+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40823+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40824+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40825+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40826+2 0 0 0 0 0
40827+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40828+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40829+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40830+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40831+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40832+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40833+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40834+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40835+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40836+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40837+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40838+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40839+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40840+37 38 37 0 0 0
40841+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40842+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40843+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40844+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40845+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40846+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40847+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40848+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40849+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40850+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40851+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40852+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40853+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40854+85 115 134 4 0 0
40855+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40856+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40857+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40858+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40859+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40860+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40861+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40862+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40863+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40864+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40865+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40866+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40867+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40868+60 73 81 4 0 0
40869+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40870+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40871+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40872+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40873+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40874+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40875+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40876+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40877+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40878+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40879+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40880+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40881+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40882+16 19 21 4 0 0
40883+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40884+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40885+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40886+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40887+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40888+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40889+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40890+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40891+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40892+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40893+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40894+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40895+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40896+4 0 0 4 3 3
40897+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40898+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40899+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40901+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40902+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40903+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40904+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40905+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40906+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40907+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40908+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40909+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40910+3 2 2 4 4 4
40911+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40912+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40913+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40914+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40915+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40916+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40917+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40918+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40919+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40920+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40921+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40922+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40923+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40924+4 4 4 4 4 4
40925+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40926+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40927+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40928+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40929+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40930+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40931+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40932+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40933+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40934+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40935+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40936+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40937+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40938+4 4 4 4 4 4
40939+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40940+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40941+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40942+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40943+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40944+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40945+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40946+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40947+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40948+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40949+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40950+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40951+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40952+5 5 5 5 5 5
40953+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40954+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40955+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40956+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40957+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40958+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40959+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40960+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40961+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40962+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40963+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40964+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40965+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40966+5 5 5 4 4 4
40967+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40968+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40969+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40970+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40971+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40972+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40973+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40974+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40975+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40976+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40977+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40978+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980+4 4 4 4 4 4
40981+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40982+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40983+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40984+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40985+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40986+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40987+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40988+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40989+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40990+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40991+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40992+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40994+4 4 4 4 4 4
40995+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40996+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40997+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40998+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40999+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41000+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41001+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41002+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41003+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41004+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41005+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41008+4 4 4 4 4 4
41009+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41010+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41011+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41012+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41013+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41014+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41015+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41016+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41017+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41018+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41019+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41022+4 4 4 4 4 4
41023+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41024+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41025+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41026+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41027+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41028+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41029+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41030+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41031+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41032+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41033+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41036+4 4 4 4 4 4
41037+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41038+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41039+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41040+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41041+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41042+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41043+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41044+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41045+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41046+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41047+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41050+4 4 4 4 4 4
41051+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41052+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41053+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41054+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41055+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41056+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41057+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41058+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41059+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41060+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41061+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41064+4 4 4 4 4 4
41065+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41066+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41067+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41068+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41069+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41070+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41071+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41072+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41073+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41074+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41075+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41078+4 4 4 4 4 4
41079+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41080+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41081+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41082+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41083+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41084+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41085+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41086+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41087+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41088+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41089+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41092+4 4 4 4 4 4
41093+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41094+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41095+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41096+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41097+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41098+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41099+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41100+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41101+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41102+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41103+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41106+4 4 4 4 4 4
41107+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41108+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41109+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41110+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41111+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41112+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41113+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41114+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41115+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41116+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41117+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41120+4 4 4 4 4 4
41121+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41122+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41123+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41124+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41125+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41126+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41127+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41128+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41129+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41130+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41131+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41134+4 4 4 4 4 4
41135+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41136+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41137+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41138+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41139+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41140+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41141+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41142+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41143+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41144+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41145+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41148+4 4 4 4 4 4
41149+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41150+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41151+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41152+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41153+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41154+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41155+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41156+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41157+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41158+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41159+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162+4 4 4 4 4 4
41163+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41164+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41165+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41166+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41167+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41168+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41169+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41170+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41171+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41172+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41173+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41176+4 4 4 4 4 4
41177+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41178+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41179+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41180+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41181+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41182+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41183+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41184+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41185+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41186+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41187+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41190+4 4 4 4 4 4
41191+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41192+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41193+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41194+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41195+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41196+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41197+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41198+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41199+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41200+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41201+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41204+4 4 4 4 4 4
41205+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41206+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41207+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41208+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41209+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41210+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41211+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41212+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41213+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41214+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41215+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41218+4 4 4 4 4 4
41219+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41220+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41221+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41222+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41223+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41224+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41225+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41226+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41227+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41228+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41229+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41232+4 4 4 4 4 4
41233+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41234+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41235+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41236+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41237+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41238+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41239+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41240+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41241+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41242+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41243+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41246+4 4 4 4 4 4
41247+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41248+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41249+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41250+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41251+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41252+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41253+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41254+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41255+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41256+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41257+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41260+4 4 4 4 4 4
41261+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41262+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41263+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41264+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41265+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41266+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41267+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41268+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41269+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41270+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41271+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41274+4 4 4 4 4 4
41275+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41276+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41277+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41278+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41279+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41280+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41281+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41282+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41283+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41284+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41285+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288+4 4 4 4 4 4
41289+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41290+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41291+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41292+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41293+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41294+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41295+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41296+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41297+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41298+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41299+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41302+4 4 4 4 4 4
41303+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41304+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41305+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41306+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41307+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41308+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41309+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41310+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41311+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41312+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41313+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41316+4 4 4 4 4 4
41317+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41318+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41319+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41320+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41321+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41322+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41323+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41324+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41325+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41326+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41327+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41330+4 4 4 4 4 4
41331+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41332+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41333+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41334+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41335+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41336+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41337+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41338+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41339+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41340+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41341+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41344+4 4 4 4 4 4
41345+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41346+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41347+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41348+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41349+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41350+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41351+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41353+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41354+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41355+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41358+4 4 4 4 4 4
41359+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41360+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41361+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41362+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41363+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41364+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41365+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41366+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41367+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41368+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41369+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41372+4 4 4 4 4 4
41373+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41374+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41375+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41376+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41377+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41378+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41379+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41380+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41381+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41382+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41386+4 4 4 4 4 4
41387+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41388+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41389+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41390+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41391+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41392+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41393+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41394+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41395+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41396+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41400+4 4 4 4 4 4
41401+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41402+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41403+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41404+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41405+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41406+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41407+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41408+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41409+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41410+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41414+4 4 4 4 4 4
41415+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41416+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41417+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41418+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41419+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41420+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41421+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41422+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41423+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41424+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41428+4 4 4 4 4 4
41429+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41430+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41431+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41432+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41433+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41434+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41435+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41436+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41437+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41442+4 4 4 4 4 4
41443+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41444+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41445+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41446+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41447+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41448+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41449+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41450+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41451+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41456+4 4 4 4 4 4
41457+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41458+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41459+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41460+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41461+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41462+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41463+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41464+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41465+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41470+4 4 4 4 4 4
41471+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41472+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41473+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41474+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41475+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41476+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41477+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41478+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41484+4 4 4 4 4 4
41485+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41486+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41487+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41488+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41489+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41490+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41491+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41492+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41498+4 4 4 4 4 4
41499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41500+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41501+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41502+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41503+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41504+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41505+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41506+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41512+4 4 4 4 4 4
41513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41514+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41515+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41516+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41517+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41518+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41519+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41520+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41526+4 4 4 4 4 4
41527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41528+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41529+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41530+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41531+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41532+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41533+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41534+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41540+4 4 4 4 4 4
41541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41543+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41544+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41545+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41546+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41547+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41548+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41554+4 4 4 4 4 4
41555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41558+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41559+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41560+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41561+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41568+4 4 4 4 4 4
41569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41572+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41573+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41574+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41575+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41582+4 4 4 4 4 4
41583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41586+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41587+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41588+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41589+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41596+4 4 4 4 4 4
41597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41600+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41601+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41602+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41603+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41610+4 4 4 4 4 4
41611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41615+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41616+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41617+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41624+4 4 4 4 4 4
41625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41629+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41630+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41631+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41638+4 4 4 4 4 4
41639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41643+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41644+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41645+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41652+4 4 4 4 4 4
41653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41657+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41658+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41666+4 4 4 4 4 4
41667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41671+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41672+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41680+4 4 4 4 4 4
41681diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41682index 087fc99..f85ed76 100644
41683--- a/drivers/video/udlfb.c
41684+++ b/drivers/video/udlfb.c
41685@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41686 dlfb_urb_completion(urb);
41687
41688 error:
41689- atomic_add(bytes_sent, &dev->bytes_sent);
41690- atomic_add(bytes_identical, &dev->bytes_identical);
41691- atomic_add(width*height*2, &dev->bytes_rendered);
41692+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41693+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41694+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41695 end_cycles = get_cycles();
41696- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41697+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41698 >> 10)), /* Kcycles */
41699 &dev->cpu_kcycles_used);
41700
41701@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41702 dlfb_urb_completion(urb);
41703
41704 error:
41705- atomic_add(bytes_sent, &dev->bytes_sent);
41706- atomic_add(bytes_identical, &dev->bytes_identical);
41707- atomic_add(bytes_rendered, &dev->bytes_rendered);
41708+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41709+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41710+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41711 end_cycles = get_cycles();
41712- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41713+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41714 >> 10)), /* Kcycles */
41715 &dev->cpu_kcycles_used);
41716 }
41717@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41718 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41719 struct dlfb_data *dev = fb_info->par;
41720 return snprintf(buf, PAGE_SIZE, "%u\n",
41721- atomic_read(&dev->bytes_rendered));
41722+ atomic_read_unchecked(&dev->bytes_rendered));
41723 }
41724
41725 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41726@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41728 struct dlfb_data *dev = fb_info->par;
41729 return snprintf(buf, PAGE_SIZE, "%u\n",
41730- atomic_read(&dev->bytes_identical));
41731+ atomic_read_unchecked(&dev->bytes_identical));
41732 }
41733
41734 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41735@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41736 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41737 struct dlfb_data *dev = fb_info->par;
41738 return snprintf(buf, PAGE_SIZE, "%u\n",
41739- atomic_read(&dev->bytes_sent));
41740+ atomic_read_unchecked(&dev->bytes_sent));
41741 }
41742
41743 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41744@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41745 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41746 struct dlfb_data *dev = fb_info->par;
41747 return snprintf(buf, PAGE_SIZE, "%u\n",
41748- atomic_read(&dev->cpu_kcycles_used));
41749+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41750 }
41751
41752 static ssize_t edid_show(
41753@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41754 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41755 struct dlfb_data *dev = fb_info->par;
41756
41757- atomic_set(&dev->bytes_rendered, 0);
41758- atomic_set(&dev->bytes_identical, 0);
41759- atomic_set(&dev->bytes_sent, 0);
41760- atomic_set(&dev->cpu_kcycles_used, 0);
41761+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41762+ atomic_set_unchecked(&dev->bytes_identical, 0);
41763+ atomic_set_unchecked(&dev->bytes_sent, 0);
41764+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41765
41766 return count;
41767 }
41768diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41769index 7f8472c..9842e87 100644
41770--- a/drivers/video/uvesafb.c
41771+++ b/drivers/video/uvesafb.c
41772@@ -19,6 +19,7 @@
41773 #include <linux/io.h>
41774 #include <linux/mutex.h>
41775 #include <linux/slab.h>
41776+#include <linux/moduleloader.h>
41777 #include <video/edid.h>
41778 #include <video/uvesafb.h>
41779 #ifdef CONFIG_X86
41780@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41781 NULL,
41782 };
41783
41784- return call_usermodehelper(v86d_path, argv, envp, 1);
41785+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41786 }
41787
41788 /*
41789@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41790 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41791 par->pmi_setpal = par->ypan = 0;
41792 } else {
41793+
41794+#ifdef CONFIG_PAX_KERNEXEC
41795+#ifdef CONFIG_MODULES
41796+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41797+#endif
41798+ if (!par->pmi_code) {
41799+ par->pmi_setpal = par->ypan = 0;
41800+ return 0;
41801+ }
41802+#endif
41803+
41804 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41805 + task->t.regs.edi);
41806+
41807+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41808+ pax_open_kernel();
41809+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41810+ pax_close_kernel();
41811+
41812+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41813+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41814+#else
41815 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41816 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41817+#endif
41818+
41819 printk(KERN_INFO "uvesafb: protected mode interface info at "
41820 "%04x:%04x\n",
41821 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41822@@ -1821,6 +1844,11 @@ out:
41823 if (par->vbe_modes)
41824 kfree(par->vbe_modes);
41825
41826+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41827+ if (par->pmi_code)
41828+ module_free_exec(NULL, par->pmi_code);
41829+#endif
41830+
41831 framebuffer_release(info);
41832 return err;
41833 }
41834@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41835 kfree(par->vbe_state_orig);
41836 if (par->vbe_state_saved)
41837 kfree(par->vbe_state_saved);
41838+
41839+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41840+ if (par->pmi_code)
41841+ module_free_exec(NULL, par->pmi_code);
41842+#endif
41843+
41844 }
41845
41846 framebuffer_release(info);
41847diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41848index 501b340..86bd4cf 100644
41849--- a/drivers/video/vesafb.c
41850+++ b/drivers/video/vesafb.c
41851@@ -9,6 +9,7 @@
41852 */
41853
41854 #include <linux/module.h>
41855+#include <linux/moduleloader.h>
41856 #include <linux/kernel.h>
41857 #include <linux/errno.h>
41858 #include <linux/string.h>
41859@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41860 static int vram_total __initdata; /* Set total amount of memory */
41861 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41862 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41863-static void (*pmi_start)(void) __read_mostly;
41864-static void (*pmi_pal) (void) __read_mostly;
41865+static void (*pmi_start)(void) __read_only;
41866+static void (*pmi_pal) (void) __read_only;
41867 static int depth __read_mostly;
41868 static int vga_compat __read_mostly;
41869 /* --------------------------------------------------------------------- */
41870@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41871 unsigned int size_vmode;
41872 unsigned int size_remap;
41873 unsigned int size_total;
41874+ void *pmi_code = NULL;
41875
41876 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41877 return -ENODEV;
41878@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41879 size_remap = size_total;
41880 vesafb_fix.smem_len = size_remap;
41881
41882-#ifndef __i386__
41883- screen_info.vesapm_seg = 0;
41884-#endif
41885-
41886 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41887 printk(KERN_WARNING
41888 "vesafb: cannot reserve video memory at 0x%lx\n",
41889@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41890 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41891 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41892
41893+#ifdef __i386__
41894+
41895+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41896+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41897+ if (!pmi_code)
41898+#elif !defined(CONFIG_PAX_KERNEXEC)
41899+ if (0)
41900+#endif
41901+
41902+#endif
41903+ screen_info.vesapm_seg = 0;
41904+
41905 if (screen_info.vesapm_seg) {
41906- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41907- screen_info.vesapm_seg,screen_info.vesapm_off);
41908+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41909+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41910 }
41911
41912 if (screen_info.vesapm_seg < 0xc000)
41913@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41914
41915 if (ypan || pmi_setpal) {
41916 unsigned short *pmi_base;
41917+
41918 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41919- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41920- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41921+
41922+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41923+ pax_open_kernel();
41924+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41925+#else
41926+ pmi_code = pmi_base;
41927+#endif
41928+
41929+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41930+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41931+
41932+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41933+ pmi_start = ktva_ktla(pmi_start);
41934+ pmi_pal = ktva_ktla(pmi_pal);
41935+ pax_close_kernel();
41936+#endif
41937+
41938 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41939 if (pmi_base[3]) {
41940 printk(KERN_INFO "vesafb: pmi: ports = ");
41941@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41942 info->node, info->fix.id);
41943 return 0;
41944 err:
41945+
41946+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41947+ module_free_exec(NULL, pmi_code);
41948+#endif
41949+
41950 if (info->screen_base)
41951 iounmap(info->screen_base);
41952 framebuffer_release(info);
41953diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41954index 88714ae..16c2e11 100644
41955--- a/drivers/video/via/via_clock.h
41956+++ b/drivers/video/via/via_clock.h
41957@@ -56,7 +56,7 @@ struct via_clock {
41958
41959 void (*set_engine_pll_state)(u8 state);
41960 void (*set_engine_pll)(struct via_pll_config config);
41961-};
41962+} __no_const;
41963
41964
41965 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41966diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
41967index e058ace..2424d93 100644
41968--- a/drivers/virtio/virtio_balloon.c
41969+++ b/drivers/virtio/virtio_balloon.c
41970@@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
41971 struct sysinfo i;
41972 int idx = 0;
41973
41974+ pax_track_stack();
41975+
41976 all_vm_events(events);
41977 si_meminfo(&i);
41978
41979diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41980index e56c934..fc22f4b 100644
41981--- a/drivers/xen/xen-pciback/conf_space.h
41982+++ b/drivers/xen/xen-pciback/conf_space.h
41983@@ -44,15 +44,15 @@ struct config_field {
41984 struct {
41985 conf_dword_write write;
41986 conf_dword_read read;
41987- } dw;
41988+ } __no_const dw;
41989 struct {
41990 conf_word_write write;
41991 conf_word_read read;
41992- } w;
41993+ } __no_const w;
41994 struct {
41995 conf_byte_write write;
41996 conf_byte_read read;
41997- } b;
41998+ } __no_const b;
41999 } u;
42000 struct list_head list;
42001 };
42002diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42003index e3c03db..93b0172 100644
42004--- a/fs/9p/vfs_inode.c
42005+++ b/fs/9p/vfs_inode.c
42006@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42007 void
42008 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42009 {
42010- char *s = nd_get_link(nd);
42011+ const char *s = nd_get_link(nd);
42012
42013 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
42014 IS_ERR(s) ? "<error>" : s);
42015diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42016index 79e2ca7..5828ad1 100644
42017--- a/fs/Kconfig.binfmt
42018+++ b/fs/Kconfig.binfmt
42019@@ -86,7 +86,7 @@ config HAVE_AOUT
42020
42021 config BINFMT_AOUT
42022 tristate "Kernel support for a.out and ECOFF binaries"
42023- depends on HAVE_AOUT
42024+ depends on HAVE_AOUT && BROKEN
42025 ---help---
42026 A.out (Assembler.OUTput) is a set of formats for libraries and
42027 executables used in the earliest versions of UNIX. Linux used
42028diff --git a/fs/aio.c b/fs/aio.c
42029index e29ec48..f083e5e 100644
42030--- a/fs/aio.c
42031+++ b/fs/aio.c
42032@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42033 size += sizeof(struct io_event) * nr_events;
42034 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42035
42036- if (nr_pages < 0)
42037+ if (nr_pages <= 0)
42038 return -EINVAL;
42039
42040 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42041@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx,
42042 struct aio_timeout to;
42043 int retry = 0;
42044
42045+ pax_track_stack();
42046+
42047 /* needed to zero any padding within an entry (there shouldn't be
42048 * any, but C is fun!
42049 */
42050@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42051 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42052 {
42053 ssize_t ret;
42054+ struct iovec iovstack;
42055
42056 #ifdef CONFIG_COMPAT
42057 if (compat)
42058 ret = compat_rw_copy_check_uvector(type,
42059 (struct compat_iovec __user *)kiocb->ki_buf,
42060- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42061+ kiocb->ki_nbytes, 1, &iovstack,
42062 &kiocb->ki_iovec);
42063 else
42064 #endif
42065 ret = rw_copy_check_uvector(type,
42066 (struct iovec __user *)kiocb->ki_buf,
42067- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42068+ kiocb->ki_nbytes, 1, &iovstack,
42069 &kiocb->ki_iovec);
42070 if (ret < 0)
42071 goto out;
42072
42073+ if (kiocb->ki_iovec == &iovstack) {
42074+ kiocb->ki_inline_vec = iovstack;
42075+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
42076+ }
42077 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42078 kiocb->ki_cur_seg = 0;
42079 /* ki_nbytes/left now reflect bytes instead of segs */
42080diff --git a/fs/attr.c b/fs/attr.c
42081index 538e279..046cc6d 100644
42082--- a/fs/attr.c
42083+++ b/fs/attr.c
42084@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42085 unsigned long limit;
42086
42087 limit = rlimit(RLIMIT_FSIZE);
42088+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42089 if (limit != RLIM_INFINITY && offset > limit)
42090 goto out_sig;
42091 if (offset > inode->i_sb->s_maxbytes)
42092diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42093index e1fbdee..cd5ea56 100644
42094--- a/fs/autofs4/waitq.c
42095+++ b/fs/autofs4/waitq.c
42096@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
42097 {
42098 unsigned long sigpipe, flags;
42099 mm_segment_t fs;
42100- const char *data = (const char *)addr;
42101+ const char __user *data = (const char __force_user *)addr;
42102 ssize_t wr = 0;
42103
42104 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
42105diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42106index 720d885..012e7f0 100644
42107--- a/fs/befs/linuxvfs.c
42108+++ b/fs/befs/linuxvfs.c
42109@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42110 {
42111 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42112 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42113- char *link = nd_get_link(nd);
42114+ const char *link = nd_get_link(nd);
42115 if (!IS_ERR(link))
42116 kfree(link);
42117 }
42118diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42119index a6395bd..a5b24c4 100644
42120--- a/fs/binfmt_aout.c
42121+++ b/fs/binfmt_aout.c
42122@@ -16,6 +16,7 @@
42123 #include <linux/string.h>
42124 #include <linux/fs.h>
42125 #include <linux/file.h>
42126+#include <linux/security.h>
42127 #include <linux/stat.h>
42128 #include <linux/fcntl.h>
42129 #include <linux/ptrace.h>
42130@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42131 #endif
42132 # define START_STACK(u) ((void __user *)u.start_stack)
42133
42134+ memset(&dump, 0, sizeof(dump));
42135+
42136 fs = get_fs();
42137 set_fs(KERNEL_DS);
42138 has_dumped = 1;
42139@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42140
42141 /* If the size of the dump file exceeds the rlimit, then see what would happen
42142 if we wrote the stack, but not the data area. */
42143+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42144 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42145 dump.u_dsize = 0;
42146
42147 /* Make sure we have enough room to write the stack and data areas. */
42148+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42149 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42150 dump.u_ssize = 0;
42151
42152@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42153 rlim = rlimit(RLIMIT_DATA);
42154 if (rlim >= RLIM_INFINITY)
42155 rlim = ~0;
42156+
42157+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42158 if (ex.a_data + ex.a_bss > rlim)
42159 return -ENOMEM;
42160
42161@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42162 install_exec_creds(bprm);
42163 current->flags &= ~PF_FORKNOEXEC;
42164
42165+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42166+ current->mm->pax_flags = 0UL;
42167+#endif
42168+
42169+#ifdef CONFIG_PAX_PAGEEXEC
42170+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42171+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42172+
42173+#ifdef CONFIG_PAX_EMUTRAMP
42174+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42175+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42176+#endif
42177+
42178+#ifdef CONFIG_PAX_MPROTECT
42179+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42180+ current->mm->pax_flags |= MF_PAX_MPROTECT;
42181+#endif
42182+
42183+ }
42184+#endif
42185+
42186 if (N_MAGIC(ex) == OMAGIC) {
42187 unsigned long text_addr, map_size;
42188 loff_t pos;
42189@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42190
42191 down_write(&current->mm->mmap_sem);
42192 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42193- PROT_READ | PROT_WRITE | PROT_EXEC,
42194+ PROT_READ | PROT_WRITE,
42195 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42196 fd_offset + ex.a_text);
42197 up_write(&current->mm->mmap_sem);
42198diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42199index 21ac5ee..f54fdd0 100644
42200--- a/fs/binfmt_elf.c
42201+++ b/fs/binfmt_elf.c
42202@@ -32,6 +32,7 @@
42203 #include <linux/elf.h>
42204 #include <linux/utsname.h>
42205 #include <linux/coredump.h>
42206+#include <linux/xattr.h>
42207 #include <asm/uaccess.h>
42208 #include <asm/param.h>
42209 #include <asm/page.h>
42210@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42211 #define elf_core_dump NULL
42212 #endif
42213
42214+#ifdef CONFIG_PAX_MPROTECT
42215+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42216+#endif
42217+
42218 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42219 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42220 #else
42221@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42222 .load_binary = load_elf_binary,
42223 .load_shlib = load_elf_library,
42224 .core_dump = elf_core_dump,
42225+
42226+#ifdef CONFIG_PAX_MPROTECT
42227+ .handle_mprotect= elf_handle_mprotect,
42228+#endif
42229+
42230 .min_coredump = ELF_EXEC_PAGESIZE,
42231 };
42232
42233@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42234
42235 static int set_brk(unsigned long start, unsigned long end)
42236 {
42237+ unsigned long e = end;
42238+
42239 start = ELF_PAGEALIGN(start);
42240 end = ELF_PAGEALIGN(end);
42241 if (end > start) {
42242@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42243 if (BAD_ADDR(addr))
42244 return addr;
42245 }
42246- current->mm->start_brk = current->mm->brk = end;
42247+ current->mm->start_brk = current->mm->brk = e;
42248 return 0;
42249 }
42250
42251@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42252 elf_addr_t __user *u_rand_bytes;
42253 const char *k_platform = ELF_PLATFORM;
42254 const char *k_base_platform = ELF_BASE_PLATFORM;
42255- unsigned char k_rand_bytes[16];
42256+ u32 k_rand_bytes[4];
42257 int items;
42258 elf_addr_t *elf_info;
42259 int ei_index = 0;
42260 const struct cred *cred = current_cred();
42261 struct vm_area_struct *vma;
42262+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42263+
42264+ pax_track_stack();
42265
42266 /*
42267 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42268@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42269 * Generate 16 random bytes for userspace PRNG seeding.
42270 */
42271 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42272- u_rand_bytes = (elf_addr_t __user *)
42273- STACK_ALLOC(p, sizeof(k_rand_bytes));
42274+ srandom32(k_rand_bytes[0] ^ random32());
42275+ srandom32(k_rand_bytes[1] ^ random32());
42276+ srandom32(k_rand_bytes[2] ^ random32());
42277+ srandom32(k_rand_bytes[3] ^ random32());
42278+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42279+ u_rand_bytes = (elf_addr_t __user *) p;
42280 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42281 return -EFAULT;
42282
42283@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42284 return -EFAULT;
42285 current->mm->env_end = p;
42286
42287+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42288+
42289 /* Put the elf_info on the stack in the right place. */
42290 sp = (elf_addr_t __user *)envp + 1;
42291- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42292+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42293 return -EFAULT;
42294 return 0;
42295 }
42296@@ -381,10 +402,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42297 {
42298 struct elf_phdr *elf_phdata;
42299 struct elf_phdr *eppnt;
42300- unsigned long load_addr = 0;
42301+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42302 int load_addr_set = 0;
42303 unsigned long last_bss = 0, elf_bss = 0;
42304- unsigned long error = ~0UL;
42305+ unsigned long error = -EINVAL;
42306 unsigned long total_size;
42307 int retval, i, size;
42308
42309@@ -430,6 +451,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42310 goto out_close;
42311 }
42312
42313+#ifdef CONFIG_PAX_SEGMEXEC
42314+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42315+ pax_task_size = SEGMEXEC_TASK_SIZE;
42316+#endif
42317+
42318 eppnt = elf_phdata;
42319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42320 if (eppnt->p_type == PT_LOAD) {
42321@@ -473,8 +499,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42322 k = load_addr + eppnt->p_vaddr;
42323 if (BAD_ADDR(k) ||
42324 eppnt->p_filesz > eppnt->p_memsz ||
42325- eppnt->p_memsz > TASK_SIZE ||
42326- TASK_SIZE - eppnt->p_memsz < k) {
42327+ eppnt->p_memsz > pax_task_size ||
42328+ pax_task_size - eppnt->p_memsz < k) {
42329 error = -ENOMEM;
42330 goto out_close;
42331 }
42332@@ -528,6 +554,348 @@ out:
42333 return error;
42334 }
42335
42336+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42337+{
42338+ unsigned long pax_flags = 0UL;
42339+
42340+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42341+
42342+#ifdef CONFIG_PAX_PAGEEXEC
42343+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42344+ pax_flags |= MF_PAX_PAGEEXEC;
42345+#endif
42346+
42347+#ifdef CONFIG_PAX_SEGMEXEC
42348+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42349+ pax_flags |= MF_PAX_SEGMEXEC;
42350+#endif
42351+
42352+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42353+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42354+ if ((__supported_pte_mask & _PAGE_NX))
42355+ pax_flags &= ~MF_PAX_SEGMEXEC;
42356+ else
42357+ pax_flags &= ~MF_PAX_PAGEEXEC;
42358+ }
42359+#endif
42360+
42361+#ifdef CONFIG_PAX_EMUTRAMP
42362+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42363+ pax_flags |= MF_PAX_EMUTRAMP;
42364+#endif
42365+
42366+#ifdef CONFIG_PAX_MPROTECT
42367+ if (elf_phdata->p_flags & PF_MPROTECT)
42368+ pax_flags |= MF_PAX_MPROTECT;
42369+#endif
42370+
42371+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42372+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42373+ pax_flags |= MF_PAX_RANDMMAP;
42374+#endif
42375+
42376+#endif
42377+
42378+ return pax_flags;
42379+}
42380+
42381+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42382+{
42383+ unsigned long pax_flags = 0UL;
42384+
42385+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42386+
42387+#ifdef CONFIG_PAX_PAGEEXEC
42388+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42389+ pax_flags |= MF_PAX_PAGEEXEC;
42390+#endif
42391+
42392+#ifdef CONFIG_PAX_SEGMEXEC
42393+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42394+ pax_flags |= MF_PAX_SEGMEXEC;
42395+#endif
42396+
42397+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42398+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42399+ if ((__supported_pte_mask & _PAGE_NX))
42400+ pax_flags &= ~MF_PAX_SEGMEXEC;
42401+ else
42402+ pax_flags &= ~MF_PAX_PAGEEXEC;
42403+ }
42404+#endif
42405+
42406+#ifdef CONFIG_PAX_EMUTRAMP
42407+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42408+ pax_flags |= MF_PAX_EMUTRAMP;
42409+#endif
42410+
42411+#ifdef CONFIG_PAX_MPROTECT
42412+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42413+ pax_flags |= MF_PAX_MPROTECT;
42414+#endif
42415+
42416+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42417+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42418+ pax_flags |= MF_PAX_RANDMMAP;
42419+#endif
42420+
42421+#endif
42422+
42423+ return pax_flags;
42424+}
42425+
42426+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42427+{
42428+ unsigned long pax_flags = 0UL;
42429+
42430+#ifdef CONFIG_PAX_EI_PAX
42431+
42432+#ifdef CONFIG_PAX_PAGEEXEC
42433+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42434+ pax_flags |= MF_PAX_PAGEEXEC;
42435+#endif
42436+
42437+#ifdef CONFIG_PAX_SEGMEXEC
42438+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42439+ pax_flags |= MF_PAX_SEGMEXEC;
42440+#endif
42441+
42442+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42443+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42444+ if ((__supported_pte_mask & _PAGE_NX))
42445+ pax_flags &= ~MF_PAX_SEGMEXEC;
42446+ else
42447+ pax_flags &= ~MF_PAX_PAGEEXEC;
42448+ }
42449+#endif
42450+
42451+#ifdef CONFIG_PAX_EMUTRAMP
42452+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42453+ pax_flags |= MF_PAX_EMUTRAMP;
42454+#endif
42455+
42456+#ifdef CONFIG_PAX_MPROTECT
42457+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42458+ pax_flags |= MF_PAX_MPROTECT;
42459+#endif
42460+
42461+#ifdef CONFIG_PAX_ASLR
42462+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42463+ pax_flags |= MF_PAX_RANDMMAP;
42464+#endif
42465+
42466+#else
42467+
42468+#ifdef CONFIG_PAX_PAGEEXEC
42469+ pax_flags |= MF_PAX_PAGEEXEC;
42470+#endif
42471+
42472+#ifdef CONFIG_PAX_MPROTECT
42473+ pax_flags |= MF_PAX_MPROTECT;
42474+#endif
42475+
42476+#ifdef CONFIG_PAX_RANDMMAP
42477+ pax_flags |= MF_PAX_RANDMMAP;
42478+#endif
42479+
42480+#ifdef CONFIG_PAX_SEGMEXEC
42481+ if (!(__supported_pte_mask & _PAGE_NX)) {
42482+ pax_flags &= ~MF_PAX_PAGEEXEC;
42483+ pax_flags |= MF_PAX_SEGMEXEC;
42484+ }
42485+#endif
42486+
42487+#endif
42488+
42489+ return pax_flags;
42490+}
42491+
42492+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42493+{
42494+
42495+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42496+ unsigned long i;
42497+
42498+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42499+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42500+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42501+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42502+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42503+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42504+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42505+ return ~0UL;
42506+
42507+#ifdef CONFIG_PAX_SOFTMODE
42508+ if (pax_softmode)
42509+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42510+ else
42511+#endif
42512+
42513+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42514+ break;
42515+ }
42516+#endif
42517+
42518+ return ~0UL;
42519+}
42520+
42521+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42522+{
42523+ unsigned long pax_flags = 0UL;
42524+
42525+#ifdef CONFIG_PAX_PAGEEXEC
42526+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42527+ pax_flags |= MF_PAX_PAGEEXEC;
42528+#endif
42529+
42530+#ifdef CONFIG_PAX_SEGMEXEC
42531+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42532+ pax_flags |= MF_PAX_SEGMEXEC;
42533+#endif
42534+
42535+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42536+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42537+ if ((__supported_pte_mask & _PAGE_NX))
42538+ pax_flags &= ~MF_PAX_SEGMEXEC;
42539+ else
42540+ pax_flags &= ~MF_PAX_PAGEEXEC;
42541+ }
42542+#endif
42543+
42544+#ifdef CONFIG_PAX_EMUTRAMP
42545+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42546+ pax_flags |= MF_PAX_EMUTRAMP;
42547+#endif
42548+
42549+#ifdef CONFIG_PAX_MPROTECT
42550+ if (pax_flags_softmode & MF_PAX_MPROTECT)
42551+ pax_flags |= MF_PAX_MPROTECT;
42552+#endif
42553+
42554+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42555+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42556+ pax_flags |= MF_PAX_RANDMMAP;
42557+#endif
42558+
42559+ return pax_flags;
42560+}
42561+
42562+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42563+{
42564+ unsigned long pax_flags = 0UL;
42565+
42566+#ifdef CONFIG_PAX_PAGEEXEC
42567+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42568+ pax_flags |= MF_PAX_PAGEEXEC;
42569+#endif
42570+
42571+#ifdef CONFIG_PAX_SEGMEXEC
42572+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42573+ pax_flags |= MF_PAX_SEGMEXEC;
42574+#endif
42575+
42576+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42577+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42578+ if ((__supported_pte_mask & _PAGE_NX))
42579+ pax_flags &= ~MF_PAX_SEGMEXEC;
42580+ else
42581+ pax_flags &= ~MF_PAX_PAGEEXEC;
42582+ }
42583+#endif
42584+
42585+#ifdef CONFIG_PAX_EMUTRAMP
42586+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42587+ pax_flags |= MF_PAX_EMUTRAMP;
42588+#endif
42589+
42590+#ifdef CONFIG_PAX_MPROTECT
42591+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42592+ pax_flags |= MF_PAX_MPROTECT;
42593+#endif
42594+
42595+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42596+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42597+ pax_flags |= MF_PAX_RANDMMAP;
42598+#endif
42599+
42600+ return pax_flags;
42601+}
42602+
42603+static unsigned long pax_parse_xattr_pax(struct file * const file)
42604+{
42605+
42606+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42607+ ssize_t xattr_size, i;
42608+ unsigned char xattr_value[5];
42609+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42610+
42611+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42612+ if (xattr_size <= 0)
42613+ return ~0UL;
42614+
42615+ for (i = 0; i < xattr_size; i++)
42616+ switch (xattr_value[i]) {
42617+ default:
42618+ return ~0UL;
42619+
42620+#define parse_flag(option1, option2, flag) \
42621+ case option1: \
42622+ pax_flags_hardmode |= MF_PAX_##flag; \
42623+ break; \
42624+ case option2: \
42625+ pax_flags_softmode |= MF_PAX_##flag; \
42626+ break;
42627+
42628+ parse_flag('p', 'P', PAGEEXEC);
42629+ parse_flag('e', 'E', EMUTRAMP);
42630+ parse_flag('m', 'M', MPROTECT);
42631+ parse_flag('r', 'R', RANDMMAP);
42632+ parse_flag('s', 'S', SEGMEXEC);
42633+
42634+#undef parse_flag
42635+ }
42636+
42637+ if (pax_flags_hardmode & pax_flags_softmode)
42638+ return ~0UL;
42639+
42640+#ifdef CONFIG_PAX_SOFTMODE
42641+ if (pax_softmode)
42642+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42643+ else
42644+#endif
42645+
42646+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42647+#else
42648+ return ~0UL;
42649+#endif
42650+}
42651+
42652+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42653+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42654+{
42655+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42656+
42657+ pax_flags = pax_parse_ei_pax(elf_ex);
42658+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42659+ xattr_pax_flags = pax_parse_xattr_pax(file);
42660+
42661+ if (pt_pax_flags == ~0UL)
42662+ pt_pax_flags = xattr_pax_flags;
42663+ else if (xattr_pax_flags == ~0UL)
42664+ xattr_pax_flags = pt_pax_flags;
42665+ if (pt_pax_flags != xattr_pax_flags)
42666+ return -EINVAL;
42667+ if (pt_pax_flags != ~0UL)
42668+ pax_flags = pt_pax_flags;
42669+
42670+ if (0 > pax_check_flags(&pax_flags))
42671+ return -EINVAL;
42672+
42673+ current->mm->pax_flags = pax_flags;
42674+ return 0;
42675+}
42676+#endif
42677+
42678 /*
42679 * These are the functions used to load ELF style executables and shared
42680 * libraries. There is no binary dependent code anywhere else.
42681@@ -544,6 +912,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42682 {
42683 unsigned int random_variable = 0;
42684
42685+#ifdef CONFIG_PAX_RANDUSTACK
42686+ if (randomize_va_space)
42687+ return stack_top - current->mm->delta_stack;
42688+#endif
42689+
42690 if ((current->flags & PF_RANDOMIZE) &&
42691 !(current->personality & ADDR_NO_RANDOMIZE)) {
42692 random_variable = get_random_int() & STACK_RND_MASK;
42693@@ -562,7 +935,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42694 unsigned long load_addr = 0, load_bias = 0;
42695 int load_addr_set = 0;
42696 char * elf_interpreter = NULL;
42697- unsigned long error;
42698+ unsigned long error = 0;
42699 struct elf_phdr *elf_ppnt, *elf_phdata;
42700 unsigned long elf_bss, elf_brk;
42701 int retval, i;
42702@@ -572,11 +945,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42703 unsigned long start_code, end_code, start_data, end_data;
42704 unsigned long reloc_func_desc __maybe_unused = 0;
42705 int executable_stack = EXSTACK_DEFAULT;
42706- unsigned long def_flags = 0;
42707 struct {
42708 struct elfhdr elf_ex;
42709 struct elfhdr interp_elf_ex;
42710 } *loc;
42711+ unsigned long pax_task_size = TASK_SIZE;
42712
42713 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42714 if (!loc) {
42715@@ -713,11 +1086,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42716
42717 /* OK, This is the point of no return */
42718 current->flags &= ~PF_FORKNOEXEC;
42719- current->mm->def_flags = def_flags;
42720+
42721+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42722+ current->mm->pax_flags = 0UL;
42723+#endif
42724+
42725+#ifdef CONFIG_PAX_DLRESOLVE
42726+ current->mm->call_dl_resolve = 0UL;
42727+#endif
42728+
42729+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42730+ current->mm->call_syscall = 0UL;
42731+#endif
42732+
42733+#ifdef CONFIG_PAX_ASLR
42734+ current->mm->delta_mmap = 0UL;
42735+ current->mm->delta_stack = 0UL;
42736+#endif
42737+
42738+ current->mm->def_flags = 0;
42739+
42740+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42741+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42742+ send_sig(SIGKILL, current, 0);
42743+ goto out_free_dentry;
42744+ }
42745+#endif
42746+
42747+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42748+ pax_set_initial_flags(bprm);
42749+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42750+ if (pax_set_initial_flags_func)
42751+ (pax_set_initial_flags_func)(bprm);
42752+#endif
42753+
42754+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42755+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42756+ current->mm->context.user_cs_limit = PAGE_SIZE;
42757+ current->mm->def_flags |= VM_PAGEEXEC;
42758+ }
42759+#endif
42760+
42761+#ifdef CONFIG_PAX_SEGMEXEC
42762+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42763+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42764+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42765+ pax_task_size = SEGMEXEC_TASK_SIZE;
42766+ current->mm->def_flags |= VM_NOHUGEPAGE;
42767+ }
42768+#endif
42769+
42770+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42771+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42772+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42773+ put_cpu();
42774+ }
42775+#endif
42776
42777 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42778 may depend on the personality. */
42779 SET_PERSONALITY(loc->elf_ex);
42780+
42781+#ifdef CONFIG_PAX_ASLR
42782+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42783+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42784+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42785+ }
42786+#endif
42787+
42788+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42789+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42790+ executable_stack = EXSTACK_DISABLE_X;
42791+ current->personality &= ~READ_IMPLIES_EXEC;
42792+ } else
42793+#endif
42794+
42795 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42796 current->personality |= READ_IMPLIES_EXEC;
42797
42798@@ -808,6 +1251,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42799 #else
42800 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42801 #endif
42802+
42803+#ifdef CONFIG_PAX_RANDMMAP
42804+ /* PaX: randomize base address at the default exe base if requested */
42805+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42806+#ifdef CONFIG_SPARC64
42807+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42808+#else
42809+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42810+#endif
42811+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42812+ elf_flags |= MAP_FIXED;
42813+ }
42814+#endif
42815+
42816 }
42817
42818 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42819@@ -840,9 +1297,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42820 * allowed task size. Note that p_filesz must always be
42821 * <= p_memsz so it is only necessary to check p_memsz.
42822 */
42823- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42824- elf_ppnt->p_memsz > TASK_SIZE ||
42825- TASK_SIZE - elf_ppnt->p_memsz < k) {
42826+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42827+ elf_ppnt->p_memsz > pax_task_size ||
42828+ pax_task_size - elf_ppnt->p_memsz < k) {
42829 /* set_brk can never work. Avoid overflows. */
42830 send_sig(SIGKILL, current, 0);
42831 retval = -EINVAL;
42832@@ -870,6 +1327,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42833 start_data += load_bias;
42834 end_data += load_bias;
42835
42836+#ifdef CONFIG_PAX_RANDMMAP
42837+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
42838+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
42839+#endif
42840+
42841 /* Calling set_brk effectively mmaps the pages that we need
42842 * for the bss and break sections. We must do this before
42843 * mapping in the interpreter, to make sure it doesn't wind
42844@@ -881,9 +1343,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42845 goto out_free_dentry;
42846 }
42847 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42848- send_sig(SIGSEGV, current, 0);
42849- retval = -EFAULT; /* Nobody gets to see this, but.. */
42850- goto out_free_dentry;
42851+ /*
42852+ * This bss-zeroing can fail if the ELF
42853+ * file specifies odd protections. So
42854+ * we don't check the return value
42855+ */
42856 }
42857
42858 if (elf_interpreter) {
42859@@ -1098,7 +1562,7 @@ out:
42860 * Decide what to dump of a segment, part, all or none.
42861 */
42862 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42863- unsigned long mm_flags)
42864+ unsigned long mm_flags, long signr)
42865 {
42866 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42867
42868@@ -1132,7 +1596,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42869 if (vma->vm_file == NULL)
42870 return 0;
42871
42872- if (FILTER(MAPPED_PRIVATE))
42873+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42874 goto whole;
42875
42876 /*
42877@@ -1354,9 +1818,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42878 {
42879 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42880 int i = 0;
42881- do
42882+ do {
42883 i += 2;
42884- while (auxv[i - 2] != AT_NULL);
42885+ } while (auxv[i - 2] != AT_NULL);
42886 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42887 }
42888
42889@@ -1862,14 +2326,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42890 }
42891
42892 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42893- unsigned long mm_flags)
42894+ struct coredump_params *cprm)
42895 {
42896 struct vm_area_struct *vma;
42897 size_t size = 0;
42898
42899 for (vma = first_vma(current, gate_vma); vma != NULL;
42900 vma = next_vma(vma, gate_vma))
42901- size += vma_dump_size(vma, mm_flags);
42902+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42903 return size;
42904 }
42905
42906@@ -1963,7 +2427,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42907
42908 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42909
42910- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42911+ offset += elf_core_vma_data_size(gate_vma, cprm);
42912 offset += elf_core_extra_data_size();
42913 e_shoff = offset;
42914
42915@@ -1977,10 +2441,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42916 offset = dataoff;
42917
42918 size += sizeof(*elf);
42919+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42920 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42921 goto end_coredump;
42922
42923 size += sizeof(*phdr4note);
42924+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42925 if (size > cprm->limit
42926 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42927 goto end_coredump;
42928@@ -1994,7 +2460,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42929 phdr.p_offset = offset;
42930 phdr.p_vaddr = vma->vm_start;
42931 phdr.p_paddr = 0;
42932- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42933+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42934 phdr.p_memsz = vma->vm_end - vma->vm_start;
42935 offset += phdr.p_filesz;
42936 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42937@@ -2005,6 +2471,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42938 phdr.p_align = ELF_EXEC_PAGESIZE;
42939
42940 size += sizeof(phdr);
42941+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42942 if (size > cprm->limit
42943 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42944 goto end_coredump;
42945@@ -2029,7 +2496,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42946 unsigned long addr;
42947 unsigned long end;
42948
42949- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42950+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42951
42952 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42953 struct page *page;
42954@@ -2038,6 +2505,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42955 page = get_dump_page(addr);
42956 if (page) {
42957 void *kaddr = kmap(page);
42958+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42959 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42960 !dump_write(cprm->file, kaddr,
42961 PAGE_SIZE);
42962@@ -2055,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42963
42964 if (e_phnum == PN_XNUM) {
42965 size += sizeof(*shdr4extnum);
42966+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42967 if (size > cprm->limit
42968 || !dump_write(cprm->file, shdr4extnum,
42969 sizeof(*shdr4extnum)))
42970@@ -2075,6 +2544,97 @@ out:
42971
42972 #endif /* CONFIG_ELF_CORE */
42973
42974+#ifdef CONFIG_PAX_MPROTECT
42975+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42976+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42977+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42978+ *
42979+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42980+ * basis because we want to allow the common case and not the special ones.
42981+ */
42982+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42983+{
42984+ struct elfhdr elf_h;
42985+ struct elf_phdr elf_p;
42986+ unsigned long i;
42987+ unsigned long oldflags;
42988+ bool is_textrel_rw, is_textrel_rx, is_relro;
42989+
42990+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42991+ return;
42992+
42993+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42994+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42995+
42996+#ifdef CONFIG_PAX_ELFRELOCS
42997+ /* possible TEXTREL */
42998+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42999+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43000+#else
43001+ is_textrel_rw = false;
43002+ is_textrel_rx = false;
43003+#endif
43004+
43005+ /* possible RELRO */
43006+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43007+
43008+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43009+ return;
43010+
43011+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43012+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43013+
43014+#ifdef CONFIG_PAX_ETEXECRELOCS
43015+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43016+#else
43017+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43018+#endif
43019+
43020+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43021+ !elf_check_arch(&elf_h) ||
43022+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43023+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43024+ return;
43025+
43026+ for (i = 0UL; i < elf_h.e_phnum; i++) {
43027+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43028+ return;
43029+ switch (elf_p.p_type) {
43030+ case PT_DYNAMIC:
43031+ if (!is_textrel_rw && !is_textrel_rx)
43032+ continue;
43033+ i = 0UL;
43034+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43035+ elf_dyn dyn;
43036+
43037+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43038+ return;
43039+ if (dyn.d_tag == DT_NULL)
43040+ return;
43041+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43042+ gr_log_textrel(vma);
43043+ if (is_textrel_rw)
43044+ vma->vm_flags |= VM_MAYWRITE;
43045+ else
43046+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43047+ vma->vm_flags &= ~VM_MAYWRITE;
43048+ return;
43049+ }
43050+ i++;
43051+ }
43052+ return;
43053+
43054+ case PT_GNU_RELRO:
43055+ if (!is_relro)
43056+ continue;
43057+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43058+ vma->vm_flags &= ~VM_MAYWRITE;
43059+ return;
43060+ }
43061+ }
43062+}
43063+#endif
43064+
43065 static int __init init_elf_binfmt(void)
43066 {
43067 return register_binfmt(&elf_format);
43068diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43069index 1bffbe0..c8c283e 100644
43070--- a/fs/binfmt_flat.c
43071+++ b/fs/binfmt_flat.c
43072@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43073 realdatastart = (unsigned long) -ENOMEM;
43074 printk("Unable to allocate RAM for process data, errno %d\n",
43075 (int)-realdatastart);
43076+ down_write(&current->mm->mmap_sem);
43077 do_munmap(current->mm, textpos, text_len);
43078+ up_write(&current->mm->mmap_sem);
43079 ret = realdatastart;
43080 goto err;
43081 }
43082@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43083 }
43084 if (IS_ERR_VALUE(result)) {
43085 printk("Unable to read data+bss, errno %d\n", (int)-result);
43086+ down_write(&current->mm->mmap_sem);
43087 do_munmap(current->mm, textpos, text_len);
43088 do_munmap(current->mm, realdatastart, len);
43089+ up_write(&current->mm->mmap_sem);
43090 ret = result;
43091 goto err;
43092 }
43093@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43094 }
43095 if (IS_ERR_VALUE(result)) {
43096 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43097+ down_write(&current->mm->mmap_sem);
43098 do_munmap(current->mm, textpos, text_len + data_len + extra +
43099 MAX_SHARED_LIBS * sizeof(unsigned long));
43100+ up_write(&current->mm->mmap_sem);
43101 ret = result;
43102 goto err;
43103 }
43104diff --git a/fs/bio.c b/fs/bio.c
43105index 9bfade8..782f3b9 100644
43106--- a/fs/bio.c
43107+++ b/fs/bio.c
43108@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43109 const int read = bio_data_dir(bio) == READ;
43110 struct bio_map_data *bmd = bio->bi_private;
43111 int i;
43112- char *p = bmd->sgvecs[0].iov_base;
43113+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43114
43115 __bio_for_each_segment(bvec, bio, i, 0) {
43116 char *addr = page_address(bvec->bv_page);
43117diff --git a/fs/block_dev.c b/fs/block_dev.c
43118index 1c44b8d..e2507b4 100644
43119--- a/fs/block_dev.c
43120+++ b/fs/block_dev.c
43121@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43122 else if (bdev->bd_contains == bdev)
43123 return true; /* is a whole device which isn't held */
43124
43125- else if (whole->bd_holder == bd_may_claim)
43126+ else if (whole->bd_holder == (void *)bd_may_claim)
43127 return true; /* is a partition of a device that is being partitioned */
43128 else if (whole->bd_holder != NULL)
43129 return false; /* is a partition of a held device */
43130diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43131index 011cab3..9ace713 100644
43132--- a/fs/btrfs/ctree.c
43133+++ b/fs/btrfs/ctree.c
43134@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43135 free_extent_buffer(buf);
43136 add_root_to_dirty_list(root);
43137 } else {
43138- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43139- parent_start = parent->start;
43140- else
43141+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43142+ if (parent)
43143+ parent_start = parent->start;
43144+ else
43145+ parent_start = 0;
43146+ } else
43147 parent_start = 0;
43148
43149 WARN_ON(trans->transid != btrfs_header_generation(parent));
43150diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43151index b2d004a..6bb543d 100644
43152--- a/fs/btrfs/inode.c
43153+++ b/fs/btrfs/inode.c
43154@@ -6922,7 +6922,7 @@ fail:
43155 return -ENOMEM;
43156 }
43157
43158-static int btrfs_getattr(struct vfsmount *mnt,
43159+int btrfs_getattr(struct vfsmount *mnt,
43160 struct dentry *dentry, struct kstat *stat)
43161 {
43162 struct inode *inode = dentry->d_inode;
43163@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43164 return 0;
43165 }
43166
43167+EXPORT_SYMBOL(btrfs_getattr);
43168+
43169+dev_t get_btrfs_dev_from_inode(struct inode *inode)
43170+{
43171+ return BTRFS_I(inode)->root->anon_dev;
43172+}
43173+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43174+
43175 /*
43176 * If a file is moved, it will inherit the cow and compression flags of the new
43177 * directory.
43178diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43179index dae5dfe..6aa01b1 100644
43180--- a/fs/btrfs/ioctl.c
43181+++ b/fs/btrfs/ioctl.c
43182@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43183 for (i = 0; i < num_types; i++) {
43184 struct btrfs_space_info *tmp;
43185
43186+ /* Don't copy in more than we allocated */
43187 if (!slot_count)
43188 break;
43189
43190+ slot_count--;
43191+
43192 info = NULL;
43193 rcu_read_lock();
43194 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43195@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43196 memcpy(dest, &space, sizeof(space));
43197 dest++;
43198 space_args.total_spaces++;
43199- slot_count--;
43200 }
43201- if (!slot_count)
43202- break;
43203 }
43204 up_read(&info->groups_sem);
43205 }
43206
43207- user_dest = (struct btrfs_ioctl_space_info *)
43208+ user_dest = (struct btrfs_ioctl_space_info __user *)
43209 (arg + sizeof(struct btrfs_ioctl_space_args));
43210
43211 if (copy_to_user(user_dest, dest_orig, alloc_size))
43212diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43213index 59bb176..be9977d 100644
43214--- a/fs/btrfs/relocation.c
43215+++ b/fs/btrfs/relocation.c
43216@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43217 }
43218 spin_unlock(&rc->reloc_root_tree.lock);
43219
43220- BUG_ON((struct btrfs_root *)node->data != root);
43221+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43222
43223 if (!del) {
43224 spin_lock(&rc->reloc_root_tree.lock);
43225diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43226index 622f469..e8d2d55 100644
43227--- a/fs/cachefiles/bind.c
43228+++ b/fs/cachefiles/bind.c
43229@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43230 args);
43231
43232 /* start by checking things over */
43233- ASSERT(cache->fstop_percent >= 0 &&
43234- cache->fstop_percent < cache->fcull_percent &&
43235+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43236 cache->fcull_percent < cache->frun_percent &&
43237 cache->frun_percent < 100);
43238
43239- ASSERT(cache->bstop_percent >= 0 &&
43240- cache->bstop_percent < cache->bcull_percent &&
43241+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43242 cache->bcull_percent < cache->brun_percent &&
43243 cache->brun_percent < 100);
43244
43245diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43246index 0a1467b..6a53245 100644
43247--- a/fs/cachefiles/daemon.c
43248+++ b/fs/cachefiles/daemon.c
43249@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43250 if (n > buflen)
43251 return -EMSGSIZE;
43252
43253- if (copy_to_user(_buffer, buffer, n) != 0)
43254+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43255 return -EFAULT;
43256
43257 return n;
43258@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43259 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43260 return -EIO;
43261
43262- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43263+ if (datalen > PAGE_SIZE - 1)
43264 return -EOPNOTSUPP;
43265
43266 /* drag the command string into the kernel so we can parse it */
43267@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43268 if (args[0] != '%' || args[1] != '\0')
43269 return -EINVAL;
43270
43271- if (fstop < 0 || fstop >= cache->fcull_percent)
43272+ if (fstop >= cache->fcull_percent)
43273 return cachefiles_daemon_range_error(cache, args);
43274
43275 cache->fstop_percent = fstop;
43276@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43277 if (args[0] != '%' || args[1] != '\0')
43278 return -EINVAL;
43279
43280- if (bstop < 0 || bstop >= cache->bcull_percent)
43281+ if (bstop >= cache->bcull_percent)
43282 return cachefiles_daemon_range_error(cache, args);
43283
43284 cache->bstop_percent = bstop;
43285diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43286index bd6bc1b..b627b53 100644
43287--- a/fs/cachefiles/internal.h
43288+++ b/fs/cachefiles/internal.h
43289@@ -57,7 +57,7 @@ struct cachefiles_cache {
43290 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43291 struct rb_root active_nodes; /* active nodes (can't be culled) */
43292 rwlock_t active_lock; /* lock for active_nodes */
43293- atomic_t gravecounter; /* graveyard uniquifier */
43294+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43295 unsigned frun_percent; /* when to stop culling (% files) */
43296 unsigned fcull_percent; /* when to start culling (% files) */
43297 unsigned fstop_percent; /* when to stop allocating (% files) */
43298@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43299 * proc.c
43300 */
43301 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43302-extern atomic_t cachefiles_lookup_histogram[HZ];
43303-extern atomic_t cachefiles_mkdir_histogram[HZ];
43304-extern atomic_t cachefiles_create_histogram[HZ];
43305+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43306+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43307+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43308
43309 extern int __init cachefiles_proc_init(void);
43310 extern void cachefiles_proc_cleanup(void);
43311 static inline
43312-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43313+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43314 {
43315 unsigned long jif = jiffies - start_jif;
43316 if (jif >= HZ)
43317 jif = HZ - 1;
43318- atomic_inc(&histogram[jif]);
43319+ atomic_inc_unchecked(&histogram[jif]);
43320 }
43321
43322 #else
43323diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43324index a0358c2..d6137f2 100644
43325--- a/fs/cachefiles/namei.c
43326+++ b/fs/cachefiles/namei.c
43327@@ -318,7 +318,7 @@ try_again:
43328 /* first step is to make up a grave dentry in the graveyard */
43329 sprintf(nbuffer, "%08x%08x",
43330 (uint32_t) get_seconds(),
43331- (uint32_t) atomic_inc_return(&cache->gravecounter));
43332+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43333
43334 /* do the multiway lock magic */
43335 trap = lock_rename(cache->graveyard, dir);
43336diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43337index eccd339..4c1d995 100644
43338--- a/fs/cachefiles/proc.c
43339+++ b/fs/cachefiles/proc.c
43340@@ -14,9 +14,9 @@
43341 #include <linux/seq_file.h>
43342 #include "internal.h"
43343
43344-atomic_t cachefiles_lookup_histogram[HZ];
43345-atomic_t cachefiles_mkdir_histogram[HZ];
43346-atomic_t cachefiles_create_histogram[HZ];
43347+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43348+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43349+atomic_unchecked_t cachefiles_create_histogram[HZ];
43350
43351 /*
43352 * display the latency histogram
43353@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43354 return 0;
43355 default:
43356 index = (unsigned long) v - 3;
43357- x = atomic_read(&cachefiles_lookup_histogram[index]);
43358- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43359- z = atomic_read(&cachefiles_create_histogram[index]);
43360+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43361+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43362+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43363 if (x == 0 && y == 0 && z == 0)
43364 return 0;
43365
43366diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43367index 0e3c092..818480e 100644
43368--- a/fs/cachefiles/rdwr.c
43369+++ b/fs/cachefiles/rdwr.c
43370@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43371 old_fs = get_fs();
43372 set_fs(KERNEL_DS);
43373 ret = file->f_op->write(
43374- file, (const void __user *) data, len, &pos);
43375+ file, (const void __force_user *) data, len, &pos);
43376 set_fs(old_fs);
43377 kunmap(page);
43378 if (ret != len)
43379diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43380index 382abc9..bd89646 100644
43381--- a/fs/ceph/dir.c
43382+++ b/fs/ceph/dir.c
43383@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43384 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43385 struct ceph_mds_client *mdsc = fsc->mdsc;
43386 unsigned frag = fpos_frag(filp->f_pos);
43387- int off = fpos_off(filp->f_pos);
43388+ unsigned int off = fpos_off(filp->f_pos);
43389 int err;
43390 u32 ftype;
43391 struct ceph_mds_reply_info_parsed *rinfo;
43392diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43393index 6d40656..bc1f825 100644
43394--- a/fs/cifs/cifs_debug.c
43395+++ b/fs/cifs/cifs_debug.c
43396@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43397
43398 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43399 #ifdef CONFIG_CIFS_STATS2
43400- atomic_set(&totBufAllocCount, 0);
43401- atomic_set(&totSmBufAllocCount, 0);
43402+ atomic_set_unchecked(&totBufAllocCount, 0);
43403+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43404 #endif /* CONFIG_CIFS_STATS2 */
43405 spin_lock(&cifs_tcp_ses_lock);
43406 list_for_each(tmp1, &cifs_tcp_ses_list) {
43407@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43408 tcon = list_entry(tmp3,
43409 struct cifs_tcon,
43410 tcon_list);
43411- atomic_set(&tcon->num_smbs_sent, 0);
43412- atomic_set(&tcon->num_writes, 0);
43413- atomic_set(&tcon->num_reads, 0);
43414- atomic_set(&tcon->num_oplock_brks, 0);
43415- atomic_set(&tcon->num_opens, 0);
43416- atomic_set(&tcon->num_posixopens, 0);
43417- atomic_set(&tcon->num_posixmkdirs, 0);
43418- atomic_set(&tcon->num_closes, 0);
43419- atomic_set(&tcon->num_deletes, 0);
43420- atomic_set(&tcon->num_mkdirs, 0);
43421- atomic_set(&tcon->num_rmdirs, 0);
43422- atomic_set(&tcon->num_renames, 0);
43423- atomic_set(&tcon->num_t2renames, 0);
43424- atomic_set(&tcon->num_ffirst, 0);
43425- atomic_set(&tcon->num_fnext, 0);
43426- atomic_set(&tcon->num_fclose, 0);
43427- atomic_set(&tcon->num_hardlinks, 0);
43428- atomic_set(&tcon->num_symlinks, 0);
43429- atomic_set(&tcon->num_locks, 0);
43430+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43431+ atomic_set_unchecked(&tcon->num_writes, 0);
43432+ atomic_set_unchecked(&tcon->num_reads, 0);
43433+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43434+ atomic_set_unchecked(&tcon->num_opens, 0);
43435+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43436+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43437+ atomic_set_unchecked(&tcon->num_closes, 0);
43438+ atomic_set_unchecked(&tcon->num_deletes, 0);
43439+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43440+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43441+ atomic_set_unchecked(&tcon->num_renames, 0);
43442+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43443+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43444+ atomic_set_unchecked(&tcon->num_fnext, 0);
43445+ atomic_set_unchecked(&tcon->num_fclose, 0);
43446+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43447+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43448+ atomic_set_unchecked(&tcon->num_locks, 0);
43449 }
43450 }
43451 }
43452@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43453 smBufAllocCount.counter, cifs_min_small);
43454 #ifdef CONFIG_CIFS_STATS2
43455 seq_printf(m, "Total Large %d Small %d Allocations\n",
43456- atomic_read(&totBufAllocCount),
43457- atomic_read(&totSmBufAllocCount));
43458+ atomic_read_unchecked(&totBufAllocCount),
43459+ atomic_read_unchecked(&totSmBufAllocCount));
43460 #endif /* CONFIG_CIFS_STATS2 */
43461
43462 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43463@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43464 if (tcon->need_reconnect)
43465 seq_puts(m, "\tDISCONNECTED ");
43466 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43467- atomic_read(&tcon->num_smbs_sent),
43468- atomic_read(&tcon->num_oplock_brks));
43469+ atomic_read_unchecked(&tcon->num_smbs_sent),
43470+ atomic_read_unchecked(&tcon->num_oplock_brks));
43471 seq_printf(m, "\nReads: %d Bytes: %lld",
43472- atomic_read(&tcon->num_reads),
43473+ atomic_read_unchecked(&tcon->num_reads),
43474 (long long)(tcon->bytes_read));
43475 seq_printf(m, "\nWrites: %d Bytes: %lld",
43476- atomic_read(&tcon->num_writes),
43477+ atomic_read_unchecked(&tcon->num_writes),
43478 (long long)(tcon->bytes_written));
43479 seq_printf(m, "\nFlushes: %d",
43480- atomic_read(&tcon->num_flushes));
43481+ atomic_read_unchecked(&tcon->num_flushes));
43482 seq_printf(m, "\nLocks: %d HardLinks: %d "
43483 "Symlinks: %d",
43484- atomic_read(&tcon->num_locks),
43485- atomic_read(&tcon->num_hardlinks),
43486- atomic_read(&tcon->num_symlinks));
43487+ atomic_read_unchecked(&tcon->num_locks),
43488+ atomic_read_unchecked(&tcon->num_hardlinks),
43489+ atomic_read_unchecked(&tcon->num_symlinks));
43490 seq_printf(m, "\nOpens: %d Closes: %d "
43491 "Deletes: %d",
43492- atomic_read(&tcon->num_opens),
43493- atomic_read(&tcon->num_closes),
43494- atomic_read(&tcon->num_deletes));
43495+ atomic_read_unchecked(&tcon->num_opens),
43496+ atomic_read_unchecked(&tcon->num_closes),
43497+ atomic_read_unchecked(&tcon->num_deletes));
43498 seq_printf(m, "\nPosix Opens: %d "
43499 "Posix Mkdirs: %d",
43500- atomic_read(&tcon->num_posixopens),
43501- atomic_read(&tcon->num_posixmkdirs));
43502+ atomic_read_unchecked(&tcon->num_posixopens),
43503+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43504 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43505- atomic_read(&tcon->num_mkdirs),
43506- atomic_read(&tcon->num_rmdirs));
43507+ atomic_read_unchecked(&tcon->num_mkdirs),
43508+ atomic_read_unchecked(&tcon->num_rmdirs));
43509 seq_printf(m, "\nRenames: %d T2 Renames %d",
43510- atomic_read(&tcon->num_renames),
43511- atomic_read(&tcon->num_t2renames));
43512+ atomic_read_unchecked(&tcon->num_renames),
43513+ atomic_read_unchecked(&tcon->num_t2renames));
43514 seq_printf(m, "\nFindFirst: %d FNext %d "
43515 "FClose %d",
43516- atomic_read(&tcon->num_ffirst),
43517- atomic_read(&tcon->num_fnext),
43518- atomic_read(&tcon->num_fclose));
43519+ atomic_read_unchecked(&tcon->num_ffirst),
43520+ atomic_read_unchecked(&tcon->num_fnext),
43521+ atomic_read_unchecked(&tcon->num_fclose));
43522 }
43523 }
43524 }
43525diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43526index 54b8f1e..f6a4c00 100644
43527--- a/fs/cifs/cifsfs.c
43528+++ b/fs/cifs/cifsfs.c
43529@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
43530 cifs_req_cachep = kmem_cache_create("cifs_request",
43531 CIFSMaxBufSize +
43532 MAX_CIFS_HDR_SIZE, 0,
43533- SLAB_HWCACHE_ALIGN, NULL);
43534+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43535 if (cifs_req_cachep == NULL)
43536 return -ENOMEM;
43537
43538@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
43539 efficient to alloc 1 per page off the slab compared to 17K (5page)
43540 alloc of large cifs buffers even when page debugging is on */
43541 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43542- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43543+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43544 NULL);
43545 if (cifs_sm_req_cachep == NULL) {
43546 mempool_destroy(cifs_req_poolp);
43547@@ -1093,8 +1093,8 @@ init_cifs(void)
43548 atomic_set(&bufAllocCount, 0);
43549 atomic_set(&smBufAllocCount, 0);
43550 #ifdef CONFIG_CIFS_STATS2
43551- atomic_set(&totBufAllocCount, 0);
43552- atomic_set(&totSmBufAllocCount, 0);
43553+ atomic_set_unchecked(&totBufAllocCount, 0);
43554+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43555 #endif /* CONFIG_CIFS_STATS2 */
43556
43557 atomic_set(&midCount, 0);
43558diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43559index 95dad9d..fe7af1a 100644
43560--- a/fs/cifs/cifsglob.h
43561+++ b/fs/cifs/cifsglob.h
43562@@ -381,28 +381,28 @@ struct cifs_tcon {
43563 __u16 Flags; /* optional support bits */
43564 enum statusEnum tidStatus;
43565 #ifdef CONFIG_CIFS_STATS
43566- atomic_t num_smbs_sent;
43567- atomic_t num_writes;
43568- atomic_t num_reads;
43569- atomic_t num_flushes;
43570- atomic_t num_oplock_brks;
43571- atomic_t num_opens;
43572- atomic_t num_closes;
43573- atomic_t num_deletes;
43574- atomic_t num_mkdirs;
43575- atomic_t num_posixopens;
43576- atomic_t num_posixmkdirs;
43577- atomic_t num_rmdirs;
43578- atomic_t num_renames;
43579- atomic_t num_t2renames;
43580- atomic_t num_ffirst;
43581- atomic_t num_fnext;
43582- atomic_t num_fclose;
43583- atomic_t num_hardlinks;
43584- atomic_t num_symlinks;
43585- atomic_t num_locks;
43586- atomic_t num_acl_get;
43587- atomic_t num_acl_set;
43588+ atomic_unchecked_t num_smbs_sent;
43589+ atomic_unchecked_t num_writes;
43590+ atomic_unchecked_t num_reads;
43591+ atomic_unchecked_t num_flushes;
43592+ atomic_unchecked_t num_oplock_brks;
43593+ atomic_unchecked_t num_opens;
43594+ atomic_unchecked_t num_closes;
43595+ atomic_unchecked_t num_deletes;
43596+ atomic_unchecked_t num_mkdirs;
43597+ atomic_unchecked_t num_posixopens;
43598+ atomic_unchecked_t num_posixmkdirs;
43599+ atomic_unchecked_t num_rmdirs;
43600+ atomic_unchecked_t num_renames;
43601+ atomic_unchecked_t num_t2renames;
43602+ atomic_unchecked_t num_ffirst;
43603+ atomic_unchecked_t num_fnext;
43604+ atomic_unchecked_t num_fclose;
43605+ atomic_unchecked_t num_hardlinks;
43606+ atomic_unchecked_t num_symlinks;
43607+ atomic_unchecked_t num_locks;
43608+ atomic_unchecked_t num_acl_get;
43609+ atomic_unchecked_t num_acl_set;
43610 #ifdef CONFIG_CIFS_STATS2
43611 unsigned long long time_writes;
43612 unsigned long long time_reads;
43613@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim)
43614 }
43615
43616 #ifdef CONFIG_CIFS_STATS
43617-#define cifs_stats_inc atomic_inc
43618+#define cifs_stats_inc atomic_inc_unchecked
43619
43620 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43621 unsigned int bytes)
43622@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43623 /* Various Debug counters */
43624 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43625 #ifdef CONFIG_CIFS_STATS2
43626-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43627-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43628+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43629+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43630 #endif
43631 GLOBAL_EXTERN atomic_t smBufAllocCount;
43632 GLOBAL_EXTERN atomic_t midCount;
43633diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43634index db3f18c..1f5955e 100644
43635--- a/fs/cifs/link.c
43636+++ b/fs/cifs/link.c
43637@@ -593,7 +593,7 @@ symlink_exit:
43638
43639 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43640 {
43641- char *p = nd_get_link(nd);
43642+ const char *p = nd_get_link(nd);
43643 if (!IS_ERR(p))
43644 kfree(p);
43645 }
43646diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43647index 7c16933..c8212b5 100644
43648--- a/fs/cifs/misc.c
43649+++ b/fs/cifs/misc.c
43650@@ -156,7 +156,7 @@ cifs_buf_get(void)
43651 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43652 atomic_inc(&bufAllocCount);
43653 #ifdef CONFIG_CIFS_STATS2
43654- atomic_inc(&totBufAllocCount);
43655+ atomic_inc_unchecked(&totBufAllocCount);
43656 #endif /* CONFIG_CIFS_STATS2 */
43657 }
43658
43659@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43660 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43661 atomic_inc(&smBufAllocCount);
43662 #ifdef CONFIG_CIFS_STATS2
43663- atomic_inc(&totSmBufAllocCount);
43664+ atomic_inc_unchecked(&totSmBufAllocCount);
43665 #endif /* CONFIG_CIFS_STATS2 */
43666
43667 }
43668diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43669index 6901578..d402eb5 100644
43670--- a/fs/coda/cache.c
43671+++ b/fs/coda/cache.c
43672@@ -24,7 +24,7 @@
43673 #include "coda_linux.h"
43674 #include "coda_cache.h"
43675
43676-static atomic_t permission_epoch = ATOMIC_INIT(0);
43677+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43678
43679 /* replace or extend an acl cache hit */
43680 void coda_cache_enter(struct inode *inode, int mask)
43681@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43682 struct coda_inode_info *cii = ITOC(inode);
43683
43684 spin_lock(&cii->c_lock);
43685- cii->c_cached_epoch = atomic_read(&permission_epoch);
43686+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43687 if (cii->c_uid != current_fsuid()) {
43688 cii->c_uid = current_fsuid();
43689 cii->c_cached_perm = mask;
43690@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43691 {
43692 struct coda_inode_info *cii = ITOC(inode);
43693 spin_lock(&cii->c_lock);
43694- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43695+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43696 spin_unlock(&cii->c_lock);
43697 }
43698
43699 /* remove all acl caches */
43700 void coda_cache_clear_all(struct super_block *sb)
43701 {
43702- atomic_inc(&permission_epoch);
43703+ atomic_inc_unchecked(&permission_epoch);
43704 }
43705
43706
43707@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43708 spin_lock(&cii->c_lock);
43709 hit = (mask & cii->c_cached_perm) == mask &&
43710 cii->c_uid == current_fsuid() &&
43711- cii->c_cached_epoch == atomic_read(&permission_epoch);
43712+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43713 spin_unlock(&cii->c_lock);
43714
43715 return hit;
43716diff --git a/fs/compat.c b/fs/compat.c
43717index 58b1da4..afcd9b8 100644
43718--- a/fs/compat.c
43719+++ b/fs/compat.c
43720@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
43721 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
43722 {
43723 compat_ino_t ino = stat->ino;
43724- typeof(ubuf->st_uid) uid = 0;
43725- typeof(ubuf->st_gid) gid = 0;
43726+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
43727+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
43728 int err;
43729
43730 SET_UID(uid, stat->uid);
43731@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43732
43733 set_fs(KERNEL_DS);
43734 /* The __user pointer cast is valid because of the set_fs() */
43735- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43736+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43737 set_fs(oldfs);
43738 /* truncating is ok because it's a user address */
43739 if (!ret)
43740@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43741 goto out;
43742
43743 ret = -EINVAL;
43744- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43745+ if (nr_segs > UIO_MAXIOV)
43746 goto out;
43747 if (nr_segs > fast_segs) {
43748 ret = -ENOMEM;
43749@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
43750
43751 struct compat_readdir_callback {
43752 struct compat_old_linux_dirent __user *dirent;
43753+ struct file * file;
43754 int result;
43755 };
43756
43757@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43758 buf->result = -EOVERFLOW;
43759 return -EOVERFLOW;
43760 }
43761+
43762+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43763+ return 0;
43764+
43765 buf->result++;
43766 dirent = buf->dirent;
43767 if (!access_ok(VERIFY_WRITE, dirent,
43768@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43769
43770 buf.result = 0;
43771 buf.dirent = dirent;
43772+ buf.file = file;
43773
43774 error = vfs_readdir(file, compat_fillonedir, &buf);
43775 if (buf.result)
43776@@ -917,6 +923,7 @@ struct compat_linux_dirent {
43777 struct compat_getdents_callback {
43778 struct compat_linux_dirent __user *current_dir;
43779 struct compat_linux_dirent __user *previous;
43780+ struct file * file;
43781 int count;
43782 int error;
43783 };
43784@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43785 buf->error = -EOVERFLOW;
43786 return -EOVERFLOW;
43787 }
43788+
43789+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43790+ return 0;
43791+
43792 dirent = buf->previous;
43793 if (dirent) {
43794 if (__put_user(offset, &dirent->d_off))
43795@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43796 buf.previous = NULL;
43797 buf.count = count;
43798 buf.error = 0;
43799+ buf.file = file;
43800
43801 error = vfs_readdir(file, compat_filldir, &buf);
43802 if (error >= 0)
43803@@ -1006,6 +1018,7 @@ out:
43804 struct compat_getdents_callback64 {
43805 struct linux_dirent64 __user *current_dir;
43806 struct linux_dirent64 __user *previous;
43807+ struct file * file;
43808 int count;
43809 int error;
43810 };
43811@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43812 buf->error = -EINVAL; /* only used if we fail.. */
43813 if (reclen > buf->count)
43814 return -EINVAL;
43815+
43816+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43817+ return 0;
43818+
43819 dirent = buf->previous;
43820
43821 if (dirent) {
43822@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43823 buf.previous = NULL;
43824 buf.count = count;
43825 buf.error = 0;
43826+ buf.file = file;
43827
43828 error = vfs_readdir(file, compat_filldir64, &buf);
43829 if (error >= 0)
43830 error = buf.error;
43831 lastdirent = buf.previous;
43832 if (lastdirent) {
43833- typeof(lastdirent->d_off) d_off = file->f_pos;
43834+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43835 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43836 error = -EFAULT;
43837 else
43838@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
43839 struct fdtable *fdt;
43840 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43841
43842+ pax_track_stack();
43843+
43844 if (n < 0)
43845 goto out_nofds;
43846
43847diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43848index 112e45a..b59845b 100644
43849--- a/fs/compat_binfmt_elf.c
43850+++ b/fs/compat_binfmt_elf.c
43851@@ -30,11 +30,13 @@
43852 #undef elf_phdr
43853 #undef elf_shdr
43854 #undef elf_note
43855+#undef elf_dyn
43856 #undef elf_addr_t
43857 #define elfhdr elf32_hdr
43858 #define elf_phdr elf32_phdr
43859 #define elf_shdr elf32_shdr
43860 #define elf_note elf32_note
43861+#define elf_dyn Elf32_Dyn
43862 #define elf_addr_t Elf32_Addr
43863
43864 /*
43865diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43866index 51352de..93292ff 100644
43867--- a/fs/compat_ioctl.c
43868+++ b/fs/compat_ioctl.c
43869@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43870
43871 err = get_user(palp, &up->palette);
43872 err |= get_user(length, &up->length);
43873+ if (err)
43874+ return -EFAULT;
43875
43876 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43877 err = put_user(compat_ptr(palp), &up_native->palette);
43878@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43879 return -EFAULT;
43880 if (__get_user(udata, &ss32->iomem_base))
43881 return -EFAULT;
43882- ss.iomem_base = compat_ptr(udata);
43883+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43884 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43885 __get_user(ss.port_high, &ss32->port_high))
43886 return -EFAULT;
43887@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43888 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43889 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43890 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43891- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43892+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43893 return -EFAULT;
43894
43895 return ioctl_preallocate(file, p);
43896@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43897 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43898 {
43899 unsigned int a, b;
43900- a = *(unsigned int *)p;
43901- b = *(unsigned int *)q;
43902+ a = *(const unsigned int *)p;
43903+ b = *(const unsigned int *)q;
43904 if (a > b)
43905 return 1;
43906 if (a < b)
43907diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43908index 9a37a9b..35792b6 100644
43909--- a/fs/configfs/dir.c
43910+++ b/fs/configfs/dir.c
43911@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43912 }
43913 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43914 struct configfs_dirent *next;
43915- const char * name;
43916+ const unsigned char * name;
43917+ char d_name[sizeof(next->s_dentry->d_iname)];
43918 int len;
43919 struct inode *inode = NULL;
43920
43921@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43922 continue;
43923
43924 name = configfs_get_name(next);
43925- len = strlen(name);
43926+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43927+ len = next->s_dentry->d_name.len;
43928+ memcpy(d_name, name, len);
43929+ name = d_name;
43930+ } else
43931+ len = strlen(name);
43932
43933 /*
43934 * We'll have a dentry and an inode for
43935diff --git a/fs/dcache.c b/fs/dcache.c
43936index 8b732a2..6db6c27 100644
43937--- a/fs/dcache.c
43938+++ b/fs/dcache.c
43939@@ -3015,7 +3015,7 @@ void __init vfs_caches_init(unsigned long mempages)
43940 mempages -= reserve;
43941
43942 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43943- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43944+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43945
43946 dcache_init();
43947 inode_init();
43948diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43949index 528da01..bd8c23d 100644
43950--- a/fs/ecryptfs/inode.c
43951+++ b/fs/ecryptfs/inode.c
43952@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43953 old_fs = get_fs();
43954 set_fs(get_ds());
43955 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43956- (char __user *)lower_buf,
43957+ (char __force_user *)lower_buf,
43958 lower_bufsiz);
43959 set_fs(old_fs);
43960 if (rc < 0)
43961@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43962 }
43963 old_fs = get_fs();
43964 set_fs(get_ds());
43965- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43966+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43967 set_fs(old_fs);
43968 if (rc < 0) {
43969 kfree(buf);
43970@@ -752,7 +752,7 @@ out:
43971 static void
43972 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43973 {
43974- char *buf = nd_get_link(nd);
43975+ const char *buf = nd_get_link(nd);
43976 if (!IS_ERR(buf)) {
43977 /* Free the char* */
43978 kfree(buf);
43979diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43980index 940a82e..63af89e 100644
43981--- a/fs/ecryptfs/miscdev.c
43982+++ b/fs/ecryptfs/miscdev.c
43983@@ -328,7 +328,7 @@ check_list:
43984 goto out_unlock_msg_ctx;
43985 i = 5;
43986 if (msg_ctx->msg) {
43987- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43988+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43989 goto out_unlock_msg_ctx;
43990 i += packet_length_size;
43991 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43992diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43993index 3745f7c..89cc7a3 100644
43994--- a/fs/ecryptfs/read_write.c
43995+++ b/fs/ecryptfs/read_write.c
43996@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43997 return -EIO;
43998 fs_save = get_fs();
43999 set_fs(get_ds());
44000- rc = vfs_write(lower_file, data, size, &offset);
44001+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44002 set_fs(fs_save);
44003 mark_inode_dirty_sync(ecryptfs_inode);
44004 return rc;
44005@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44006 return -EIO;
44007 fs_save = get_fs();
44008 set_fs(get_ds());
44009- rc = vfs_read(lower_file, data, size, &offset);
44010+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44011 set_fs(fs_save);
44012 return rc;
44013 }
44014diff --git a/fs/exec.c b/fs/exec.c
44015index 25dcbe5..09c172c 100644
44016--- a/fs/exec.c
44017+++ b/fs/exec.c
44018@@ -55,12 +55,28 @@
44019 #include <linux/pipe_fs_i.h>
44020 #include <linux/oom.h>
44021 #include <linux/compat.h>
44022+#include <linux/random.h>
44023+#include <linux/seq_file.h>
44024+
44025+#ifdef CONFIG_PAX_REFCOUNT
44026+#include <linux/kallsyms.h>
44027+#include <linux/kdebug.h>
44028+#endif
44029
44030 #include <asm/uaccess.h>
44031 #include <asm/mmu_context.h>
44032 #include <asm/tlb.h>
44033 #include "internal.h"
44034
44035+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
44036+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
44037+#endif
44038+
44039+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44040+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44041+EXPORT_SYMBOL(pax_set_initial_flags_func);
44042+#endif
44043+
44044 int core_uses_pid;
44045 char core_pattern[CORENAME_MAX_SIZE] = "core";
44046 unsigned int core_pipe_limit;
44047@@ -70,7 +86,7 @@ struct core_name {
44048 char *corename;
44049 int used, size;
44050 };
44051-static atomic_t call_count = ATOMIC_INIT(1);
44052+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44053
44054 /* The maximal length of core_pattern is also specified in sysctl.c */
44055
44056@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44057 int write)
44058 {
44059 struct page *page;
44060- int ret;
44061
44062-#ifdef CONFIG_STACK_GROWSUP
44063- if (write) {
44064- ret = expand_downwards(bprm->vma, pos);
44065- if (ret < 0)
44066- return NULL;
44067- }
44068-#endif
44069- ret = get_user_pages(current, bprm->mm, pos,
44070- 1, write, 1, &page, NULL);
44071- if (ret <= 0)
44072+ if (0 > expand_downwards(bprm->vma, pos))
44073+ return NULL;
44074+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44075 return NULL;
44076
44077 if (write) {
44078@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44079 vma->vm_end = STACK_TOP_MAX;
44080 vma->vm_start = vma->vm_end - PAGE_SIZE;
44081 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44082+
44083+#ifdef CONFIG_PAX_SEGMEXEC
44084+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44085+#endif
44086+
44087 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44088 INIT_LIST_HEAD(&vma->anon_vma_chain);
44089
44090@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44091 mm->stack_vm = mm->total_vm = 1;
44092 up_write(&mm->mmap_sem);
44093 bprm->p = vma->vm_end - sizeof(void *);
44094+
44095+#ifdef CONFIG_PAX_RANDUSTACK
44096+ if (randomize_va_space)
44097+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
44098+#endif
44099+
44100 return 0;
44101 err:
44102 up_write(&mm->mmap_sem);
44103@@ -396,19 +415,7 @@ err:
44104 return err;
44105 }
44106
44107-struct user_arg_ptr {
44108-#ifdef CONFIG_COMPAT
44109- bool is_compat;
44110-#endif
44111- union {
44112- const char __user *const __user *native;
44113-#ifdef CONFIG_COMPAT
44114- compat_uptr_t __user *compat;
44115-#endif
44116- } ptr;
44117-};
44118-
44119-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44120+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44121 {
44122 const char __user *native;
44123
44124@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44125 compat_uptr_t compat;
44126
44127 if (get_user(compat, argv.ptr.compat + nr))
44128- return ERR_PTR(-EFAULT);
44129+ return (const char __force_user *)ERR_PTR(-EFAULT);
44130
44131 return compat_ptr(compat);
44132 }
44133 #endif
44134
44135 if (get_user(native, argv.ptr.native + nr))
44136- return ERR_PTR(-EFAULT);
44137+ return (const char __force_user *)ERR_PTR(-EFAULT);
44138
44139 return native;
44140 }
44141@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
44142 if (!p)
44143 break;
44144
44145- if (IS_ERR(p))
44146+ if (IS_ERR((const char __force_kernel *)p))
44147 return -EFAULT;
44148
44149 if (i++ >= max)
44150@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44151
44152 ret = -EFAULT;
44153 str = get_user_arg_ptr(argv, argc);
44154- if (IS_ERR(str))
44155+ if (IS_ERR((const char __force_kernel *)str))
44156 goto out;
44157
44158 len = strnlen_user(str, MAX_ARG_STRLEN);
44159@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44160 int r;
44161 mm_segment_t oldfs = get_fs();
44162 struct user_arg_ptr argv = {
44163- .ptr.native = (const char __user *const __user *)__argv,
44164+ .ptr.native = (const char __force_user *const __force_user *)__argv,
44165 };
44166
44167 set_fs(KERNEL_DS);
44168@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44169 unsigned long new_end = old_end - shift;
44170 struct mmu_gather tlb;
44171
44172- BUG_ON(new_start > new_end);
44173+ if (new_start >= new_end || new_start < mmap_min_addr)
44174+ return -ENOMEM;
44175
44176 /*
44177 * ensure there are no vmas between where we want to go
44178@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44179 if (vma != find_vma(mm, new_start))
44180 return -EFAULT;
44181
44182+#ifdef CONFIG_PAX_SEGMEXEC
44183+ BUG_ON(pax_find_mirror_vma(vma));
44184+#endif
44185+
44186 /*
44187 * cover the whole range: [new_start, old_end)
44188 */
44189@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44190 stack_top = arch_align_stack(stack_top);
44191 stack_top = PAGE_ALIGN(stack_top);
44192
44193- if (unlikely(stack_top < mmap_min_addr) ||
44194- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44195- return -ENOMEM;
44196-
44197 stack_shift = vma->vm_end - stack_top;
44198
44199 bprm->p -= stack_shift;
44200@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44201 bprm->exec -= stack_shift;
44202
44203 down_write(&mm->mmap_sem);
44204+
44205+ /* Move stack pages down in memory. */
44206+ if (stack_shift) {
44207+ ret = shift_arg_pages(vma, stack_shift);
44208+ if (ret)
44209+ goto out_unlock;
44210+ }
44211+
44212 vm_flags = VM_STACK_FLAGS;
44213
44214+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44215+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44216+ vm_flags &= ~VM_EXEC;
44217+
44218+#ifdef CONFIG_PAX_MPROTECT
44219+ if (mm->pax_flags & MF_PAX_MPROTECT)
44220+ vm_flags &= ~VM_MAYEXEC;
44221+#endif
44222+
44223+ }
44224+#endif
44225+
44226 /*
44227 * Adjust stack execute permissions; explicitly enable for
44228 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44229@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44230 goto out_unlock;
44231 BUG_ON(prev != vma);
44232
44233- /* Move stack pages down in memory. */
44234- if (stack_shift) {
44235- ret = shift_arg_pages(vma, stack_shift);
44236- if (ret)
44237- goto out_unlock;
44238- }
44239-
44240 /* mprotect_fixup is overkill to remove the temporary stack flags */
44241 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44242
44243@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
44244 old_fs = get_fs();
44245 set_fs(get_ds());
44246 /* The cast to a user pointer is valid due to the set_fs() */
44247- result = vfs_read(file, (void __user *)addr, count, &pos);
44248+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
44249 set_fs(old_fs);
44250 return result;
44251 }
44252@@ -1251,7 +1272,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
44253 }
44254 rcu_read_unlock();
44255
44256- if (p->fs->users > n_fs) {
44257+ if (atomic_read(&p->fs->users) > n_fs) {
44258 bprm->unsafe |= LSM_UNSAFE_SHARE;
44259 } else {
44260 res = -EAGAIN;
44261@@ -1454,6 +1475,11 @@ static int do_execve_common(const char *filename,
44262 struct user_arg_ptr envp,
44263 struct pt_regs *regs)
44264 {
44265+#ifdef CONFIG_GRKERNSEC
44266+ struct file *old_exec_file;
44267+ struct acl_subject_label *old_acl;
44268+ struct rlimit old_rlim[RLIM_NLIMITS];
44269+#endif
44270 struct linux_binprm *bprm;
44271 struct file *file;
44272 struct files_struct *displaced;
44273@@ -1461,6 +1487,8 @@ static int do_execve_common(const char *filename,
44274 int retval;
44275 const struct cred *cred = current_cred();
44276
44277+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44278+
44279 /*
44280 * We move the actual failure in case of RLIMIT_NPROC excess from
44281 * set*uid() to execve() because too many poorly written programs
44282@@ -1507,6 +1535,16 @@ static int do_execve_common(const char *filename,
44283 bprm->filename = filename;
44284 bprm->interp = filename;
44285
44286+ if (gr_process_user_ban()) {
44287+ retval = -EPERM;
44288+ goto out_file;
44289+ }
44290+
44291+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44292+ retval = -EACCES;
44293+ goto out_file;
44294+ }
44295+
44296 retval = bprm_mm_init(bprm);
44297 if (retval)
44298 goto out_file;
44299@@ -1536,9 +1574,40 @@ static int do_execve_common(const char *filename,
44300 if (retval < 0)
44301 goto out;
44302
44303+ if (!gr_tpe_allow(file)) {
44304+ retval = -EACCES;
44305+ goto out;
44306+ }
44307+
44308+ if (gr_check_crash_exec(file)) {
44309+ retval = -EACCES;
44310+ goto out;
44311+ }
44312+
44313+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44314+
44315+ gr_handle_exec_args(bprm, argv);
44316+
44317+#ifdef CONFIG_GRKERNSEC
44318+ old_acl = current->acl;
44319+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44320+ old_exec_file = current->exec_file;
44321+ get_file(file);
44322+ current->exec_file = file;
44323+#endif
44324+
44325+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44326+ bprm->unsafe & LSM_UNSAFE_SHARE);
44327+ if (retval < 0)
44328+ goto out_fail;
44329+
44330 retval = search_binary_handler(bprm,regs);
44331 if (retval < 0)
44332- goto out;
44333+ goto out_fail;
44334+#ifdef CONFIG_GRKERNSEC
44335+ if (old_exec_file)
44336+ fput(old_exec_file);
44337+#endif
44338
44339 /* execve succeeded */
44340 current->fs->in_exec = 0;
44341@@ -1549,6 +1618,14 @@ static int do_execve_common(const char *filename,
44342 put_files_struct(displaced);
44343 return retval;
44344
44345+out_fail:
44346+#ifdef CONFIG_GRKERNSEC
44347+ current->acl = old_acl;
44348+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44349+ fput(current->exec_file);
44350+ current->exec_file = old_exec_file;
44351+#endif
44352+
44353 out:
44354 if (bprm->mm) {
44355 acct_arg_size(bprm, 0);
44356@@ -1622,7 +1699,7 @@ static int expand_corename(struct core_name *cn)
44357 {
44358 char *old_corename = cn->corename;
44359
44360- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44361+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44362 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44363
44364 if (!cn->corename) {
44365@@ -1719,7 +1796,7 @@ static int format_corename(struct core_name *cn, long signr)
44366 int pid_in_pattern = 0;
44367 int err = 0;
44368
44369- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44370+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44371 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44372 cn->used = 0;
44373
44374@@ -1816,6 +1893,218 @@ out:
44375 return ispipe;
44376 }
44377
44378+int pax_check_flags(unsigned long *flags)
44379+{
44380+ int retval = 0;
44381+
44382+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44383+ if (*flags & MF_PAX_SEGMEXEC)
44384+ {
44385+ *flags &= ~MF_PAX_SEGMEXEC;
44386+ retval = -EINVAL;
44387+ }
44388+#endif
44389+
44390+ if ((*flags & MF_PAX_PAGEEXEC)
44391+
44392+#ifdef CONFIG_PAX_PAGEEXEC
44393+ && (*flags & MF_PAX_SEGMEXEC)
44394+#endif
44395+
44396+ )
44397+ {
44398+ *flags &= ~MF_PAX_PAGEEXEC;
44399+ retval = -EINVAL;
44400+ }
44401+
44402+ if ((*flags & MF_PAX_MPROTECT)
44403+
44404+#ifdef CONFIG_PAX_MPROTECT
44405+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44406+#endif
44407+
44408+ )
44409+ {
44410+ *flags &= ~MF_PAX_MPROTECT;
44411+ retval = -EINVAL;
44412+ }
44413+
44414+ if ((*flags & MF_PAX_EMUTRAMP)
44415+
44416+#ifdef CONFIG_PAX_EMUTRAMP
44417+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44418+#endif
44419+
44420+ )
44421+ {
44422+ *flags &= ~MF_PAX_EMUTRAMP;
44423+ retval = -EINVAL;
44424+ }
44425+
44426+ return retval;
44427+}
44428+
44429+EXPORT_SYMBOL(pax_check_flags);
44430+
44431+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44432+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44433+{
44434+ struct task_struct *tsk = current;
44435+ struct mm_struct *mm = current->mm;
44436+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44437+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44438+ char *path_exec = NULL;
44439+ char *path_fault = NULL;
44440+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44441+
44442+ if (buffer_exec && buffer_fault) {
44443+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44444+
44445+ down_read(&mm->mmap_sem);
44446+ vma = mm->mmap;
44447+ while (vma && (!vma_exec || !vma_fault)) {
44448+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44449+ vma_exec = vma;
44450+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44451+ vma_fault = vma;
44452+ vma = vma->vm_next;
44453+ }
44454+ if (vma_exec) {
44455+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44456+ if (IS_ERR(path_exec))
44457+ path_exec = "<path too long>";
44458+ else {
44459+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44460+ if (path_exec) {
44461+ *path_exec = 0;
44462+ path_exec = buffer_exec;
44463+ } else
44464+ path_exec = "<path too long>";
44465+ }
44466+ }
44467+ if (vma_fault) {
44468+ start = vma_fault->vm_start;
44469+ end = vma_fault->vm_end;
44470+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44471+ if (vma_fault->vm_file) {
44472+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44473+ if (IS_ERR(path_fault))
44474+ path_fault = "<path too long>";
44475+ else {
44476+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44477+ if (path_fault) {
44478+ *path_fault = 0;
44479+ path_fault = buffer_fault;
44480+ } else
44481+ path_fault = "<path too long>";
44482+ }
44483+ } else
44484+ path_fault = "<anonymous mapping>";
44485+ }
44486+ up_read(&mm->mmap_sem);
44487+ }
44488+ if (tsk->signal->curr_ip)
44489+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44490+ else
44491+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44492+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44493+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44494+ task_uid(tsk), task_euid(tsk), pc, sp);
44495+ free_page((unsigned long)buffer_exec);
44496+ free_page((unsigned long)buffer_fault);
44497+ pax_report_insns(regs, pc, sp);
44498+ do_coredump(SIGKILL, SIGKILL, regs);
44499+}
44500+#endif
44501+
44502+#ifdef CONFIG_PAX_REFCOUNT
44503+void pax_report_refcount_overflow(struct pt_regs *regs)
44504+{
44505+ if (current->signal->curr_ip)
44506+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44507+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44508+ else
44509+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44510+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44511+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44512+ show_regs(regs);
44513+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44514+}
44515+#endif
44516+
44517+#ifdef CONFIG_PAX_USERCOPY
44518+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44519+int object_is_on_stack(const void *obj, unsigned long len)
44520+{
44521+ const void * const stack = task_stack_page(current);
44522+ const void * const stackend = stack + THREAD_SIZE;
44523+
44524+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44525+ const void *frame = NULL;
44526+ const void *oldframe;
44527+#endif
44528+
44529+ if (obj + len < obj)
44530+ return -1;
44531+
44532+ if (obj + len <= stack || stackend <= obj)
44533+ return 0;
44534+
44535+ if (obj < stack || stackend < obj + len)
44536+ return -1;
44537+
44538+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44539+ oldframe = __builtin_frame_address(1);
44540+ if (oldframe)
44541+ frame = __builtin_frame_address(2);
44542+ /*
44543+ low ----------------------------------------------> high
44544+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44545+ ^----------------^
44546+ allow copies only within here
44547+ */
44548+ while (stack <= frame && frame < stackend) {
44549+ /* if obj + len extends past the last frame, this
44550+ check won't pass and the next frame will be 0,
44551+ causing us to bail out and correctly report
44552+ the copy as invalid
44553+ */
44554+ if (obj + len <= frame)
44555+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44556+ oldframe = frame;
44557+ frame = *(const void * const *)frame;
44558+ }
44559+ return -1;
44560+#else
44561+ return 1;
44562+#endif
44563+}
44564+
44565+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44566+{
44567+ if (current->signal->curr_ip)
44568+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44569+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44570+ else
44571+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44572+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44573+ dump_stack();
44574+ gr_handle_kernel_exploit();
44575+ do_group_exit(SIGKILL);
44576+}
44577+#endif
44578+
44579+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44580+void pax_track_stack(void)
44581+{
44582+ unsigned long sp = (unsigned long)&sp;
44583+ if (sp < current_thread_info()->lowest_stack &&
44584+ sp > (unsigned long)task_stack_page(current))
44585+ current_thread_info()->lowest_stack = sp;
44586+}
44587+EXPORT_SYMBOL(pax_track_stack);
44588+#endif
44589+
44590 static int zap_process(struct task_struct *start, int exit_code)
44591 {
44592 struct task_struct *t;
44593@@ -2027,17 +2316,17 @@ static void wait_for_dump_helpers(struct file *file)
44594 pipe = file->f_path.dentry->d_inode->i_pipe;
44595
44596 pipe_lock(pipe);
44597- pipe->readers++;
44598- pipe->writers--;
44599+ atomic_inc(&pipe->readers);
44600+ atomic_dec(&pipe->writers);
44601
44602- while ((pipe->readers > 1) && (!signal_pending(current))) {
44603+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44604 wake_up_interruptible_sync(&pipe->wait);
44605 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44606 pipe_wait(pipe);
44607 }
44608
44609- pipe->readers--;
44610- pipe->writers++;
44611+ atomic_dec(&pipe->readers);
44612+ atomic_inc(&pipe->writers);
44613 pipe_unlock(pipe);
44614
44615 }
44616@@ -2098,7 +2387,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44617 int retval = 0;
44618 int flag = 0;
44619 int ispipe;
44620- static atomic_t core_dump_count = ATOMIC_INIT(0);
44621+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44622 struct coredump_params cprm = {
44623 .signr = signr,
44624 .regs = regs,
44625@@ -2113,6 +2402,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44626
44627 audit_core_dumps(signr);
44628
44629+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44630+ gr_handle_brute_attach(current, cprm.mm_flags);
44631+
44632 binfmt = mm->binfmt;
44633 if (!binfmt || !binfmt->core_dump)
44634 goto fail;
44635@@ -2180,7 +2472,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44636 }
44637 cprm.limit = RLIM_INFINITY;
44638
44639- dump_count = atomic_inc_return(&core_dump_count);
44640+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44641 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44642 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44643 task_tgid_vnr(current), current->comm);
44644@@ -2207,6 +2499,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44645 } else {
44646 struct inode *inode;
44647
44648+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44649+
44650 if (cprm.limit < binfmt->min_coredump)
44651 goto fail_unlock;
44652
44653@@ -2250,7 +2544,7 @@ close_fail:
44654 filp_close(cprm.file, NULL);
44655 fail_dropcount:
44656 if (ispipe)
44657- atomic_dec(&core_dump_count);
44658+ atomic_dec_unchecked(&core_dump_count);
44659 fail_unlock:
44660 kfree(cn.corename);
44661 fail_corename:
44662@@ -2269,7 +2563,7 @@ fail:
44663 */
44664 int dump_write(struct file *file, const void *addr, int nr)
44665 {
44666- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44667+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44668 }
44669 EXPORT_SYMBOL(dump_write);
44670
44671diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44672index 8f44cef..cb07120 100644
44673--- a/fs/ext2/balloc.c
44674+++ b/fs/ext2/balloc.c
44675@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44676
44677 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44678 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44679- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44680+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44681 sbi->s_resuid != current_fsuid() &&
44682 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44683 return 0;
44684diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44685index 6386d76..0a266b1 100644
44686--- a/fs/ext3/balloc.c
44687+++ b/fs/ext3/balloc.c
44688@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
44689
44690 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44691 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44692- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44693+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44694 sbi->s_resuid != current_fsuid() &&
44695 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44696 return 0;
44697diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44698index f8224ad..fbef97c 100644
44699--- a/fs/ext4/balloc.c
44700+++ b/fs/ext4/balloc.c
44701@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
44702 /* Hm, nope. Are (enough) root reserved blocks available? */
44703 if (sbi->s_resuid == current_fsuid() ||
44704 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44705- capable(CAP_SYS_RESOURCE) ||
44706- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44707+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44708+ capable_nolog(CAP_SYS_RESOURCE)) {
44709
44710 if (free_blocks >= (nblocks + dirty_blocks))
44711 return 1;
44712diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44713index 5c38120..2291d18 100644
44714--- a/fs/ext4/ext4.h
44715+++ b/fs/ext4/ext4.h
44716@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
44717 unsigned long s_mb_last_start;
44718
44719 /* stats for buddy allocator */
44720- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44721- atomic_t s_bal_success; /* we found long enough chunks */
44722- atomic_t s_bal_allocated; /* in blocks */
44723- atomic_t s_bal_ex_scanned; /* total extents scanned */
44724- atomic_t s_bal_goals; /* goal hits */
44725- atomic_t s_bal_breaks; /* too long searches */
44726- atomic_t s_bal_2orders; /* 2^order hits */
44727+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44728+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44729+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44730+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44731+ atomic_unchecked_t s_bal_goals; /* goal hits */
44732+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44733+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44734 spinlock_t s_bal_lock;
44735 unsigned long s_mb_buddies_generated;
44736 unsigned long long s_mb_generation_time;
44737- atomic_t s_mb_lost_chunks;
44738- atomic_t s_mb_preallocated;
44739- atomic_t s_mb_discarded;
44740+ atomic_unchecked_t s_mb_lost_chunks;
44741+ atomic_unchecked_t s_mb_preallocated;
44742+ atomic_unchecked_t s_mb_discarded;
44743 atomic_t s_lock_busy;
44744
44745 /* locality groups */
44746diff --git a/fs/ext4/file.c b/fs/ext4/file.c
44747index e4095e9..1c006c5 100644
44748--- a/fs/ext4/file.c
44749+++ b/fs/ext4/file.c
44750@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
44751 path.dentry = mnt->mnt_root;
44752 cp = d_path(&path, buf, sizeof(buf));
44753 if (!IS_ERR(cp)) {
44754- memcpy(sbi->s_es->s_last_mounted, cp,
44755- sizeof(sbi->s_es->s_last_mounted));
44756+ strlcpy(sbi->s_es->s_last_mounted, cp,
44757+ sizeof(sbi->s_es->s_last_mounted));
44758 ext4_mark_super_dirty(sb);
44759 }
44760 }
44761diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44762index f18bfe3..43759b1 100644
44763--- a/fs/ext4/ioctl.c
44764+++ b/fs/ext4/ioctl.c
44765@@ -348,7 +348,7 @@ mext_out:
44766 if (!blk_queue_discard(q))
44767 return -EOPNOTSUPP;
44768
44769- if (copy_from_user(&range, (struct fstrim_range *)arg,
44770+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
44771 sizeof(range)))
44772 return -EFAULT;
44773
44774@@ -358,7 +358,7 @@ mext_out:
44775 if (ret < 0)
44776 return ret;
44777
44778- if (copy_to_user((struct fstrim_range *)arg, &range,
44779+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
44780 sizeof(range)))
44781 return -EFAULT;
44782
44783diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44784index 17a5a57..b6be3c5 100644
44785--- a/fs/ext4/mballoc.c
44786+++ b/fs/ext4/mballoc.c
44787@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44788 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44789
44790 if (EXT4_SB(sb)->s_mb_stats)
44791- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44792+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44793
44794 break;
44795 }
44796@@ -2089,7 +2089,7 @@ repeat:
44797 ac->ac_status = AC_STATUS_CONTINUE;
44798 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44799 cr = 3;
44800- atomic_inc(&sbi->s_mb_lost_chunks);
44801+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44802 goto repeat;
44803 }
44804 }
44805@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
44806 ext4_grpblk_t counters[16];
44807 } sg;
44808
44809+ pax_track_stack();
44810+
44811 group--;
44812 if (group == 0)
44813 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
44814@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb)
44815 if (sbi->s_mb_stats) {
44816 ext4_msg(sb, KERN_INFO,
44817 "mballoc: %u blocks %u reqs (%u success)",
44818- atomic_read(&sbi->s_bal_allocated),
44819- atomic_read(&sbi->s_bal_reqs),
44820- atomic_read(&sbi->s_bal_success));
44821+ atomic_read_unchecked(&sbi->s_bal_allocated),
44822+ atomic_read_unchecked(&sbi->s_bal_reqs),
44823+ atomic_read_unchecked(&sbi->s_bal_success));
44824 ext4_msg(sb, KERN_INFO,
44825 "mballoc: %u extents scanned, %u goal hits, "
44826 "%u 2^N hits, %u breaks, %u lost",
44827- atomic_read(&sbi->s_bal_ex_scanned),
44828- atomic_read(&sbi->s_bal_goals),
44829- atomic_read(&sbi->s_bal_2orders),
44830- atomic_read(&sbi->s_bal_breaks),
44831- atomic_read(&sbi->s_mb_lost_chunks));
44832+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44833+ atomic_read_unchecked(&sbi->s_bal_goals),
44834+ atomic_read_unchecked(&sbi->s_bal_2orders),
44835+ atomic_read_unchecked(&sbi->s_bal_breaks),
44836+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44837 ext4_msg(sb, KERN_INFO,
44838 "mballoc: %lu generated and it took %Lu",
44839 sbi->s_mb_buddies_generated,
44840 sbi->s_mb_generation_time);
44841 ext4_msg(sb, KERN_INFO,
44842 "mballoc: %u preallocated, %u discarded",
44843- atomic_read(&sbi->s_mb_preallocated),
44844- atomic_read(&sbi->s_mb_discarded));
44845+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44846+ atomic_read_unchecked(&sbi->s_mb_discarded));
44847 }
44848
44849 free_percpu(sbi->s_locality_groups);
44850@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44851 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44852
44853 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44854- atomic_inc(&sbi->s_bal_reqs);
44855- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44856+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44857+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44858 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44859- atomic_inc(&sbi->s_bal_success);
44860- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44861+ atomic_inc_unchecked(&sbi->s_bal_success);
44862+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44863 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44864 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44865- atomic_inc(&sbi->s_bal_goals);
44866+ atomic_inc_unchecked(&sbi->s_bal_goals);
44867 if (ac->ac_found > sbi->s_mb_max_to_scan)
44868- atomic_inc(&sbi->s_bal_breaks);
44869+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44870 }
44871
44872 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44873@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44874 trace_ext4_mb_new_inode_pa(ac, pa);
44875
44876 ext4_mb_use_inode_pa(ac, pa);
44877- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44878+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44879
44880 ei = EXT4_I(ac->ac_inode);
44881 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44882@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44883 trace_ext4_mb_new_group_pa(ac, pa);
44884
44885 ext4_mb_use_group_pa(ac, pa);
44886- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44887+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44888
44889 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44890 lg = ac->ac_lg;
44891@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44892 * from the bitmap and continue.
44893 */
44894 }
44895- atomic_add(free, &sbi->s_mb_discarded);
44896+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44897
44898 return err;
44899 }
44900@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44901 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44902 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44903 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44904- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44905+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44906 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44907
44908 return 0;
44909diff --git a/fs/fcntl.c b/fs/fcntl.c
44910index 22764c7..86372c9 100644
44911--- a/fs/fcntl.c
44912+++ b/fs/fcntl.c
44913@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44914 if (err)
44915 return err;
44916
44917+ if (gr_handle_chroot_fowner(pid, type))
44918+ return -ENOENT;
44919+ if (gr_check_protected_task_fowner(pid, type))
44920+ return -EACCES;
44921+
44922 f_modown(filp, pid, type, force);
44923 return 0;
44924 }
44925@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44926
44927 static int f_setown_ex(struct file *filp, unsigned long arg)
44928 {
44929- struct f_owner_ex * __user owner_p = (void * __user)arg;
44930+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44931 struct f_owner_ex owner;
44932 struct pid *pid;
44933 int type;
44934@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44935
44936 static int f_getown_ex(struct file *filp, unsigned long arg)
44937 {
44938- struct f_owner_ex * __user owner_p = (void * __user)arg;
44939+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44940 struct f_owner_ex owner;
44941 int ret = 0;
44942
44943@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44944 switch (cmd) {
44945 case F_DUPFD:
44946 case F_DUPFD_CLOEXEC:
44947+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44948 if (arg >= rlimit(RLIMIT_NOFILE))
44949 break;
44950 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44951diff --git a/fs/fifo.c b/fs/fifo.c
44952index b1a524d..4ee270e 100644
44953--- a/fs/fifo.c
44954+++ b/fs/fifo.c
44955@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44956 */
44957 filp->f_op = &read_pipefifo_fops;
44958 pipe->r_counter++;
44959- if (pipe->readers++ == 0)
44960+ if (atomic_inc_return(&pipe->readers) == 1)
44961 wake_up_partner(inode);
44962
44963- if (!pipe->writers) {
44964+ if (!atomic_read(&pipe->writers)) {
44965 if ((filp->f_flags & O_NONBLOCK)) {
44966 /* suppress POLLHUP until we have
44967 * seen a writer */
44968@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44969 * errno=ENXIO when there is no process reading the FIFO.
44970 */
44971 ret = -ENXIO;
44972- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44973+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44974 goto err;
44975
44976 filp->f_op = &write_pipefifo_fops;
44977 pipe->w_counter++;
44978- if (!pipe->writers++)
44979+ if (atomic_inc_return(&pipe->writers) == 1)
44980 wake_up_partner(inode);
44981
44982- if (!pipe->readers) {
44983+ if (!atomic_read(&pipe->readers)) {
44984 wait_for_partner(inode, &pipe->r_counter);
44985 if (signal_pending(current))
44986 goto err_wr;
44987@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44988 */
44989 filp->f_op = &rdwr_pipefifo_fops;
44990
44991- pipe->readers++;
44992- pipe->writers++;
44993+ atomic_inc(&pipe->readers);
44994+ atomic_inc(&pipe->writers);
44995 pipe->r_counter++;
44996 pipe->w_counter++;
44997- if (pipe->readers == 1 || pipe->writers == 1)
44998+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44999 wake_up_partner(inode);
45000 break;
45001
45002@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45003 return 0;
45004
45005 err_rd:
45006- if (!--pipe->readers)
45007+ if (atomic_dec_and_test(&pipe->readers))
45008 wake_up_interruptible(&pipe->wait);
45009 ret = -ERESTARTSYS;
45010 goto err;
45011
45012 err_wr:
45013- if (!--pipe->writers)
45014+ if (atomic_dec_and_test(&pipe->writers))
45015 wake_up_interruptible(&pipe->wait);
45016 ret = -ERESTARTSYS;
45017 goto err;
45018
45019 err:
45020- if (!pipe->readers && !pipe->writers)
45021+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45022 free_pipe_info(inode);
45023
45024 err_nocleanup:
45025diff --git a/fs/file.c b/fs/file.c
45026index 4c6992d..104cdea 100644
45027--- a/fs/file.c
45028+++ b/fs/file.c
45029@@ -15,6 +15,7 @@
45030 #include <linux/slab.h>
45031 #include <linux/vmalloc.h>
45032 #include <linux/file.h>
45033+#include <linux/security.h>
45034 #include <linux/fdtable.h>
45035 #include <linux/bitops.h>
45036 #include <linux/interrupt.h>
45037@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
45038 * N.B. For clone tasks sharing a files structure, this test
45039 * will limit the total number of files that can be opened.
45040 */
45041+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45042 if (nr >= rlimit(RLIMIT_NOFILE))
45043 return -EMFILE;
45044
45045diff --git a/fs/filesystems.c b/fs/filesystems.c
45046index 0845f84..7b4ebef 100644
45047--- a/fs/filesystems.c
45048+++ b/fs/filesystems.c
45049@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
45050 int len = dot ? dot - name : strlen(name);
45051
45052 fs = __get_fs_type(name, len);
45053+
45054+#ifdef CONFIG_GRKERNSEC_MODHARDEN
45055+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45056+#else
45057 if (!fs && (request_module("%.*s", len, name) == 0))
45058+#endif
45059 fs = __get_fs_type(name, len);
45060
45061 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45062diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45063index 78b519c..212c0d0 100644
45064--- a/fs/fs_struct.c
45065+++ b/fs/fs_struct.c
45066@@ -4,6 +4,7 @@
45067 #include <linux/path.h>
45068 #include <linux/slab.h>
45069 #include <linux/fs_struct.h>
45070+#include <linux/grsecurity.h>
45071 #include "internal.h"
45072
45073 static inline void path_get_longterm(struct path *path)
45074@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45075 old_root = fs->root;
45076 fs->root = *path;
45077 path_get_longterm(path);
45078+ gr_set_chroot_entries(current, path);
45079 write_seqcount_end(&fs->seq);
45080 spin_unlock(&fs->lock);
45081 if (old_root.dentry)
45082@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45083 && fs->root.mnt == old_root->mnt) {
45084 path_get_longterm(new_root);
45085 fs->root = *new_root;
45086+ gr_set_chroot_entries(p, new_root);
45087 count++;
45088 }
45089 if (fs->pwd.dentry == old_root->dentry
45090@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45091 spin_lock(&fs->lock);
45092 write_seqcount_begin(&fs->seq);
45093 tsk->fs = NULL;
45094- kill = !--fs->users;
45095+ gr_clear_chroot_entries(tsk);
45096+ kill = !atomic_dec_return(&fs->users);
45097 write_seqcount_end(&fs->seq);
45098 spin_unlock(&fs->lock);
45099 task_unlock(tsk);
45100@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45101 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45102 /* We don't need to lock fs - think why ;-) */
45103 if (fs) {
45104- fs->users = 1;
45105+ atomic_set(&fs->users, 1);
45106 fs->in_exec = 0;
45107 spin_lock_init(&fs->lock);
45108 seqcount_init(&fs->seq);
45109@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45110 spin_lock(&old->lock);
45111 fs->root = old->root;
45112 path_get_longterm(&fs->root);
45113+ /* instead of calling gr_set_chroot_entries here,
45114+ we call it from every caller of this function
45115+ */
45116 fs->pwd = old->pwd;
45117 path_get_longterm(&fs->pwd);
45118 spin_unlock(&old->lock);
45119@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45120
45121 task_lock(current);
45122 spin_lock(&fs->lock);
45123- kill = !--fs->users;
45124+ kill = !atomic_dec_return(&fs->users);
45125 current->fs = new_fs;
45126+ gr_set_chroot_entries(current, &new_fs->root);
45127 spin_unlock(&fs->lock);
45128 task_unlock(current);
45129
45130@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
45131
45132 /* to be mentioned only in INIT_TASK */
45133 struct fs_struct init_fs = {
45134- .users = 1,
45135+ .users = ATOMIC_INIT(1),
45136 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45137 .seq = SEQCNT_ZERO,
45138 .umask = 0022,
45139@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45140 task_lock(current);
45141
45142 spin_lock(&init_fs.lock);
45143- init_fs.users++;
45144+ atomic_inc(&init_fs.users);
45145 spin_unlock(&init_fs.lock);
45146
45147 spin_lock(&fs->lock);
45148 current->fs = &init_fs;
45149- kill = !--fs->users;
45150+ gr_set_chroot_entries(current, &current->fs->root);
45151+ kill = !atomic_dec_return(&fs->users);
45152 spin_unlock(&fs->lock);
45153
45154 task_unlock(current);
45155diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45156index 9905350..02eaec4 100644
45157--- a/fs/fscache/cookie.c
45158+++ b/fs/fscache/cookie.c
45159@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45160 parent ? (char *) parent->def->name : "<no-parent>",
45161 def->name, netfs_data);
45162
45163- fscache_stat(&fscache_n_acquires);
45164+ fscache_stat_unchecked(&fscache_n_acquires);
45165
45166 /* if there's no parent cookie, then we don't create one here either */
45167 if (!parent) {
45168- fscache_stat(&fscache_n_acquires_null);
45169+ fscache_stat_unchecked(&fscache_n_acquires_null);
45170 _leave(" [no parent]");
45171 return NULL;
45172 }
45173@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45174 /* allocate and initialise a cookie */
45175 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45176 if (!cookie) {
45177- fscache_stat(&fscache_n_acquires_oom);
45178+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45179 _leave(" [ENOMEM]");
45180 return NULL;
45181 }
45182@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45183
45184 switch (cookie->def->type) {
45185 case FSCACHE_COOKIE_TYPE_INDEX:
45186- fscache_stat(&fscache_n_cookie_index);
45187+ fscache_stat_unchecked(&fscache_n_cookie_index);
45188 break;
45189 case FSCACHE_COOKIE_TYPE_DATAFILE:
45190- fscache_stat(&fscache_n_cookie_data);
45191+ fscache_stat_unchecked(&fscache_n_cookie_data);
45192 break;
45193 default:
45194- fscache_stat(&fscache_n_cookie_special);
45195+ fscache_stat_unchecked(&fscache_n_cookie_special);
45196 break;
45197 }
45198
45199@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45200 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45201 atomic_dec(&parent->n_children);
45202 __fscache_cookie_put(cookie);
45203- fscache_stat(&fscache_n_acquires_nobufs);
45204+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45205 _leave(" = NULL");
45206 return NULL;
45207 }
45208 }
45209
45210- fscache_stat(&fscache_n_acquires_ok);
45211+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45212 _leave(" = %p", cookie);
45213 return cookie;
45214 }
45215@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45216 cache = fscache_select_cache_for_object(cookie->parent);
45217 if (!cache) {
45218 up_read(&fscache_addremove_sem);
45219- fscache_stat(&fscache_n_acquires_no_cache);
45220+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45221 _leave(" = -ENOMEDIUM [no cache]");
45222 return -ENOMEDIUM;
45223 }
45224@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45225 object = cache->ops->alloc_object(cache, cookie);
45226 fscache_stat_d(&fscache_n_cop_alloc_object);
45227 if (IS_ERR(object)) {
45228- fscache_stat(&fscache_n_object_no_alloc);
45229+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45230 ret = PTR_ERR(object);
45231 goto error;
45232 }
45233
45234- fscache_stat(&fscache_n_object_alloc);
45235+ fscache_stat_unchecked(&fscache_n_object_alloc);
45236
45237 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45238
45239@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45240 struct fscache_object *object;
45241 struct hlist_node *_p;
45242
45243- fscache_stat(&fscache_n_updates);
45244+ fscache_stat_unchecked(&fscache_n_updates);
45245
45246 if (!cookie) {
45247- fscache_stat(&fscache_n_updates_null);
45248+ fscache_stat_unchecked(&fscache_n_updates_null);
45249 _leave(" [no cookie]");
45250 return;
45251 }
45252@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45253 struct fscache_object *object;
45254 unsigned long event;
45255
45256- fscache_stat(&fscache_n_relinquishes);
45257+ fscache_stat_unchecked(&fscache_n_relinquishes);
45258 if (retire)
45259- fscache_stat(&fscache_n_relinquishes_retire);
45260+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45261
45262 if (!cookie) {
45263- fscache_stat(&fscache_n_relinquishes_null);
45264+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45265 _leave(" [no cookie]");
45266 return;
45267 }
45268@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45269
45270 /* wait for the cookie to finish being instantiated (or to fail) */
45271 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45272- fscache_stat(&fscache_n_relinquishes_waitcrt);
45273+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45274 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45275 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45276 }
45277diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45278index f6aad48..88dcf26 100644
45279--- a/fs/fscache/internal.h
45280+++ b/fs/fscache/internal.h
45281@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45282 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45283 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45284
45285-extern atomic_t fscache_n_op_pend;
45286-extern atomic_t fscache_n_op_run;
45287-extern atomic_t fscache_n_op_enqueue;
45288-extern atomic_t fscache_n_op_deferred_release;
45289-extern atomic_t fscache_n_op_release;
45290-extern atomic_t fscache_n_op_gc;
45291-extern atomic_t fscache_n_op_cancelled;
45292-extern atomic_t fscache_n_op_rejected;
45293+extern atomic_unchecked_t fscache_n_op_pend;
45294+extern atomic_unchecked_t fscache_n_op_run;
45295+extern atomic_unchecked_t fscache_n_op_enqueue;
45296+extern atomic_unchecked_t fscache_n_op_deferred_release;
45297+extern atomic_unchecked_t fscache_n_op_release;
45298+extern atomic_unchecked_t fscache_n_op_gc;
45299+extern atomic_unchecked_t fscache_n_op_cancelled;
45300+extern atomic_unchecked_t fscache_n_op_rejected;
45301
45302-extern atomic_t fscache_n_attr_changed;
45303-extern atomic_t fscache_n_attr_changed_ok;
45304-extern atomic_t fscache_n_attr_changed_nobufs;
45305-extern atomic_t fscache_n_attr_changed_nomem;
45306-extern atomic_t fscache_n_attr_changed_calls;
45307+extern atomic_unchecked_t fscache_n_attr_changed;
45308+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45309+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45310+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45311+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45312
45313-extern atomic_t fscache_n_allocs;
45314-extern atomic_t fscache_n_allocs_ok;
45315-extern atomic_t fscache_n_allocs_wait;
45316-extern atomic_t fscache_n_allocs_nobufs;
45317-extern atomic_t fscache_n_allocs_intr;
45318-extern atomic_t fscache_n_allocs_object_dead;
45319-extern atomic_t fscache_n_alloc_ops;
45320-extern atomic_t fscache_n_alloc_op_waits;
45321+extern atomic_unchecked_t fscache_n_allocs;
45322+extern atomic_unchecked_t fscache_n_allocs_ok;
45323+extern atomic_unchecked_t fscache_n_allocs_wait;
45324+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45325+extern atomic_unchecked_t fscache_n_allocs_intr;
45326+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45327+extern atomic_unchecked_t fscache_n_alloc_ops;
45328+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45329
45330-extern atomic_t fscache_n_retrievals;
45331-extern atomic_t fscache_n_retrievals_ok;
45332-extern atomic_t fscache_n_retrievals_wait;
45333-extern atomic_t fscache_n_retrievals_nodata;
45334-extern atomic_t fscache_n_retrievals_nobufs;
45335-extern atomic_t fscache_n_retrievals_intr;
45336-extern atomic_t fscache_n_retrievals_nomem;
45337-extern atomic_t fscache_n_retrievals_object_dead;
45338-extern atomic_t fscache_n_retrieval_ops;
45339-extern atomic_t fscache_n_retrieval_op_waits;
45340+extern atomic_unchecked_t fscache_n_retrievals;
45341+extern atomic_unchecked_t fscache_n_retrievals_ok;
45342+extern atomic_unchecked_t fscache_n_retrievals_wait;
45343+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45344+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45345+extern atomic_unchecked_t fscache_n_retrievals_intr;
45346+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45347+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45348+extern atomic_unchecked_t fscache_n_retrieval_ops;
45349+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45350
45351-extern atomic_t fscache_n_stores;
45352-extern atomic_t fscache_n_stores_ok;
45353-extern atomic_t fscache_n_stores_again;
45354-extern atomic_t fscache_n_stores_nobufs;
45355-extern atomic_t fscache_n_stores_oom;
45356-extern atomic_t fscache_n_store_ops;
45357-extern atomic_t fscache_n_store_calls;
45358-extern atomic_t fscache_n_store_pages;
45359-extern atomic_t fscache_n_store_radix_deletes;
45360-extern atomic_t fscache_n_store_pages_over_limit;
45361+extern atomic_unchecked_t fscache_n_stores;
45362+extern atomic_unchecked_t fscache_n_stores_ok;
45363+extern atomic_unchecked_t fscache_n_stores_again;
45364+extern atomic_unchecked_t fscache_n_stores_nobufs;
45365+extern atomic_unchecked_t fscache_n_stores_oom;
45366+extern atomic_unchecked_t fscache_n_store_ops;
45367+extern atomic_unchecked_t fscache_n_store_calls;
45368+extern atomic_unchecked_t fscache_n_store_pages;
45369+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45370+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45371
45372-extern atomic_t fscache_n_store_vmscan_not_storing;
45373-extern atomic_t fscache_n_store_vmscan_gone;
45374-extern atomic_t fscache_n_store_vmscan_busy;
45375-extern atomic_t fscache_n_store_vmscan_cancelled;
45376+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45377+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45378+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45379+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45380
45381-extern atomic_t fscache_n_marks;
45382-extern atomic_t fscache_n_uncaches;
45383+extern atomic_unchecked_t fscache_n_marks;
45384+extern atomic_unchecked_t fscache_n_uncaches;
45385
45386-extern atomic_t fscache_n_acquires;
45387-extern atomic_t fscache_n_acquires_null;
45388-extern atomic_t fscache_n_acquires_no_cache;
45389-extern atomic_t fscache_n_acquires_ok;
45390-extern atomic_t fscache_n_acquires_nobufs;
45391-extern atomic_t fscache_n_acquires_oom;
45392+extern atomic_unchecked_t fscache_n_acquires;
45393+extern atomic_unchecked_t fscache_n_acquires_null;
45394+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45395+extern atomic_unchecked_t fscache_n_acquires_ok;
45396+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45397+extern atomic_unchecked_t fscache_n_acquires_oom;
45398
45399-extern atomic_t fscache_n_updates;
45400-extern atomic_t fscache_n_updates_null;
45401-extern atomic_t fscache_n_updates_run;
45402+extern atomic_unchecked_t fscache_n_updates;
45403+extern atomic_unchecked_t fscache_n_updates_null;
45404+extern atomic_unchecked_t fscache_n_updates_run;
45405
45406-extern atomic_t fscache_n_relinquishes;
45407-extern atomic_t fscache_n_relinquishes_null;
45408-extern atomic_t fscache_n_relinquishes_waitcrt;
45409-extern atomic_t fscache_n_relinquishes_retire;
45410+extern atomic_unchecked_t fscache_n_relinquishes;
45411+extern atomic_unchecked_t fscache_n_relinquishes_null;
45412+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45413+extern atomic_unchecked_t fscache_n_relinquishes_retire;
45414
45415-extern atomic_t fscache_n_cookie_index;
45416-extern atomic_t fscache_n_cookie_data;
45417-extern atomic_t fscache_n_cookie_special;
45418+extern atomic_unchecked_t fscache_n_cookie_index;
45419+extern atomic_unchecked_t fscache_n_cookie_data;
45420+extern atomic_unchecked_t fscache_n_cookie_special;
45421
45422-extern atomic_t fscache_n_object_alloc;
45423-extern atomic_t fscache_n_object_no_alloc;
45424-extern atomic_t fscache_n_object_lookups;
45425-extern atomic_t fscache_n_object_lookups_negative;
45426-extern atomic_t fscache_n_object_lookups_positive;
45427-extern atomic_t fscache_n_object_lookups_timed_out;
45428-extern atomic_t fscache_n_object_created;
45429-extern atomic_t fscache_n_object_avail;
45430-extern atomic_t fscache_n_object_dead;
45431+extern atomic_unchecked_t fscache_n_object_alloc;
45432+extern atomic_unchecked_t fscache_n_object_no_alloc;
45433+extern atomic_unchecked_t fscache_n_object_lookups;
45434+extern atomic_unchecked_t fscache_n_object_lookups_negative;
45435+extern atomic_unchecked_t fscache_n_object_lookups_positive;
45436+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45437+extern atomic_unchecked_t fscache_n_object_created;
45438+extern atomic_unchecked_t fscache_n_object_avail;
45439+extern atomic_unchecked_t fscache_n_object_dead;
45440
45441-extern atomic_t fscache_n_checkaux_none;
45442-extern atomic_t fscache_n_checkaux_okay;
45443-extern atomic_t fscache_n_checkaux_update;
45444-extern atomic_t fscache_n_checkaux_obsolete;
45445+extern atomic_unchecked_t fscache_n_checkaux_none;
45446+extern atomic_unchecked_t fscache_n_checkaux_okay;
45447+extern atomic_unchecked_t fscache_n_checkaux_update;
45448+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45449
45450 extern atomic_t fscache_n_cop_alloc_object;
45451 extern atomic_t fscache_n_cop_lookup_object;
45452@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45453 atomic_inc(stat);
45454 }
45455
45456+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45457+{
45458+ atomic_inc_unchecked(stat);
45459+}
45460+
45461 static inline void fscache_stat_d(atomic_t *stat)
45462 {
45463 atomic_dec(stat);
45464@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45465
45466 #define __fscache_stat(stat) (NULL)
45467 #define fscache_stat(stat) do {} while (0)
45468+#define fscache_stat_unchecked(stat) do {} while (0)
45469 #define fscache_stat_d(stat) do {} while (0)
45470 #endif
45471
45472diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45473index b6b897c..0ffff9c 100644
45474--- a/fs/fscache/object.c
45475+++ b/fs/fscache/object.c
45476@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45477 /* update the object metadata on disk */
45478 case FSCACHE_OBJECT_UPDATING:
45479 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45480- fscache_stat(&fscache_n_updates_run);
45481+ fscache_stat_unchecked(&fscache_n_updates_run);
45482 fscache_stat(&fscache_n_cop_update_object);
45483 object->cache->ops->update_object(object);
45484 fscache_stat_d(&fscache_n_cop_update_object);
45485@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45486 spin_lock(&object->lock);
45487 object->state = FSCACHE_OBJECT_DEAD;
45488 spin_unlock(&object->lock);
45489- fscache_stat(&fscache_n_object_dead);
45490+ fscache_stat_unchecked(&fscache_n_object_dead);
45491 goto terminal_transit;
45492
45493 /* handle the parent cache of this object being withdrawn from
45494@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45495 spin_lock(&object->lock);
45496 object->state = FSCACHE_OBJECT_DEAD;
45497 spin_unlock(&object->lock);
45498- fscache_stat(&fscache_n_object_dead);
45499+ fscache_stat_unchecked(&fscache_n_object_dead);
45500 goto terminal_transit;
45501
45502 /* complain about the object being woken up once it is
45503@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45504 parent->cookie->def->name, cookie->def->name,
45505 object->cache->tag->name);
45506
45507- fscache_stat(&fscache_n_object_lookups);
45508+ fscache_stat_unchecked(&fscache_n_object_lookups);
45509 fscache_stat(&fscache_n_cop_lookup_object);
45510 ret = object->cache->ops->lookup_object(object);
45511 fscache_stat_d(&fscache_n_cop_lookup_object);
45512@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45513 if (ret == -ETIMEDOUT) {
45514 /* probably stuck behind another object, so move this one to
45515 * the back of the queue */
45516- fscache_stat(&fscache_n_object_lookups_timed_out);
45517+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45518 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45519 }
45520
45521@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45522
45523 spin_lock(&object->lock);
45524 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45525- fscache_stat(&fscache_n_object_lookups_negative);
45526+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45527
45528 /* transit here to allow write requests to begin stacking up
45529 * and read requests to begin returning ENODATA */
45530@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45531 * result, in which case there may be data available */
45532 spin_lock(&object->lock);
45533 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45534- fscache_stat(&fscache_n_object_lookups_positive);
45535+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45536
45537 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45538
45539@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45540 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45541 } else {
45542 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45543- fscache_stat(&fscache_n_object_created);
45544+ fscache_stat_unchecked(&fscache_n_object_created);
45545
45546 object->state = FSCACHE_OBJECT_AVAILABLE;
45547 spin_unlock(&object->lock);
45548@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45549 fscache_enqueue_dependents(object);
45550
45551 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45552- fscache_stat(&fscache_n_object_avail);
45553+ fscache_stat_unchecked(&fscache_n_object_avail);
45554
45555 _leave("");
45556 }
45557@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45558 enum fscache_checkaux result;
45559
45560 if (!object->cookie->def->check_aux) {
45561- fscache_stat(&fscache_n_checkaux_none);
45562+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45563 return FSCACHE_CHECKAUX_OKAY;
45564 }
45565
45566@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45567 switch (result) {
45568 /* entry okay as is */
45569 case FSCACHE_CHECKAUX_OKAY:
45570- fscache_stat(&fscache_n_checkaux_okay);
45571+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45572 break;
45573
45574 /* entry requires update */
45575 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45576- fscache_stat(&fscache_n_checkaux_update);
45577+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45578 break;
45579
45580 /* entry requires deletion */
45581 case FSCACHE_CHECKAUX_OBSOLETE:
45582- fscache_stat(&fscache_n_checkaux_obsolete);
45583+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45584 break;
45585
45586 default:
45587diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45588index 30afdfa..2256596 100644
45589--- a/fs/fscache/operation.c
45590+++ b/fs/fscache/operation.c
45591@@ -17,7 +17,7 @@
45592 #include <linux/slab.h>
45593 #include "internal.h"
45594
45595-atomic_t fscache_op_debug_id;
45596+atomic_unchecked_t fscache_op_debug_id;
45597 EXPORT_SYMBOL(fscache_op_debug_id);
45598
45599 /**
45600@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45601 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45602 ASSERTCMP(atomic_read(&op->usage), >, 0);
45603
45604- fscache_stat(&fscache_n_op_enqueue);
45605+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45606 switch (op->flags & FSCACHE_OP_TYPE) {
45607 case FSCACHE_OP_ASYNC:
45608 _debug("queue async");
45609@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45610 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45611 if (op->processor)
45612 fscache_enqueue_operation(op);
45613- fscache_stat(&fscache_n_op_run);
45614+ fscache_stat_unchecked(&fscache_n_op_run);
45615 }
45616
45617 /*
45618@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45619 if (object->n_ops > 1) {
45620 atomic_inc(&op->usage);
45621 list_add_tail(&op->pend_link, &object->pending_ops);
45622- fscache_stat(&fscache_n_op_pend);
45623+ fscache_stat_unchecked(&fscache_n_op_pend);
45624 } else if (!list_empty(&object->pending_ops)) {
45625 atomic_inc(&op->usage);
45626 list_add_tail(&op->pend_link, &object->pending_ops);
45627- fscache_stat(&fscache_n_op_pend);
45628+ fscache_stat_unchecked(&fscache_n_op_pend);
45629 fscache_start_operations(object);
45630 } else {
45631 ASSERTCMP(object->n_in_progress, ==, 0);
45632@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45633 object->n_exclusive++; /* reads and writes must wait */
45634 atomic_inc(&op->usage);
45635 list_add_tail(&op->pend_link, &object->pending_ops);
45636- fscache_stat(&fscache_n_op_pend);
45637+ fscache_stat_unchecked(&fscache_n_op_pend);
45638 ret = 0;
45639 } else {
45640 /* not allowed to submit ops in any other state */
45641@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45642 if (object->n_exclusive > 0) {
45643 atomic_inc(&op->usage);
45644 list_add_tail(&op->pend_link, &object->pending_ops);
45645- fscache_stat(&fscache_n_op_pend);
45646+ fscache_stat_unchecked(&fscache_n_op_pend);
45647 } else if (!list_empty(&object->pending_ops)) {
45648 atomic_inc(&op->usage);
45649 list_add_tail(&op->pend_link, &object->pending_ops);
45650- fscache_stat(&fscache_n_op_pend);
45651+ fscache_stat_unchecked(&fscache_n_op_pend);
45652 fscache_start_operations(object);
45653 } else {
45654 ASSERTCMP(object->n_exclusive, ==, 0);
45655@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45656 object->n_ops++;
45657 atomic_inc(&op->usage);
45658 list_add_tail(&op->pend_link, &object->pending_ops);
45659- fscache_stat(&fscache_n_op_pend);
45660+ fscache_stat_unchecked(&fscache_n_op_pend);
45661 ret = 0;
45662 } else if (object->state == FSCACHE_OBJECT_DYING ||
45663 object->state == FSCACHE_OBJECT_LC_DYING ||
45664 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45665- fscache_stat(&fscache_n_op_rejected);
45666+ fscache_stat_unchecked(&fscache_n_op_rejected);
45667 ret = -ENOBUFS;
45668 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45669 fscache_report_unexpected_submission(object, op, ostate);
45670@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45671
45672 ret = -EBUSY;
45673 if (!list_empty(&op->pend_link)) {
45674- fscache_stat(&fscache_n_op_cancelled);
45675+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45676 list_del_init(&op->pend_link);
45677 object->n_ops--;
45678 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45679@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45680 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45681 BUG();
45682
45683- fscache_stat(&fscache_n_op_release);
45684+ fscache_stat_unchecked(&fscache_n_op_release);
45685
45686 if (op->release) {
45687 op->release(op);
45688@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45689 * lock, and defer it otherwise */
45690 if (!spin_trylock(&object->lock)) {
45691 _debug("defer put");
45692- fscache_stat(&fscache_n_op_deferred_release);
45693+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45694
45695 cache = object->cache;
45696 spin_lock(&cache->op_gc_list_lock);
45697@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45698
45699 _debug("GC DEFERRED REL OBJ%x OP%x",
45700 object->debug_id, op->debug_id);
45701- fscache_stat(&fscache_n_op_gc);
45702+ fscache_stat_unchecked(&fscache_n_op_gc);
45703
45704 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45705
45706diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45707index 3f7a59b..cf196cc 100644
45708--- a/fs/fscache/page.c
45709+++ b/fs/fscache/page.c
45710@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45711 val = radix_tree_lookup(&cookie->stores, page->index);
45712 if (!val) {
45713 rcu_read_unlock();
45714- fscache_stat(&fscache_n_store_vmscan_not_storing);
45715+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45716 __fscache_uncache_page(cookie, page);
45717 return true;
45718 }
45719@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45720 spin_unlock(&cookie->stores_lock);
45721
45722 if (xpage) {
45723- fscache_stat(&fscache_n_store_vmscan_cancelled);
45724- fscache_stat(&fscache_n_store_radix_deletes);
45725+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45726+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45727 ASSERTCMP(xpage, ==, page);
45728 } else {
45729- fscache_stat(&fscache_n_store_vmscan_gone);
45730+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45731 }
45732
45733 wake_up_bit(&cookie->flags, 0);
45734@@ -107,7 +107,7 @@ page_busy:
45735 /* we might want to wait here, but that could deadlock the allocator as
45736 * the work threads writing to the cache may all end up sleeping
45737 * on memory allocation */
45738- fscache_stat(&fscache_n_store_vmscan_busy);
45739+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45740 return false;
45741 }
45742 EXPORT_SYMBOL(__fscache_maybe_release_page);
45743@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45744 FSCACHE_COOKIE_STORING_TAG);
45745 if (!radix_tree_tag_get(&cookie->stores, page->index,
45746 FSCACHE_COOKIE_PENDING_TAG)) {
45747- fscache_stat(&fscache_n_store_radix_deletes);
45748+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45749 xpage = radix_tree_delete(&cookie->stores, page->index);
45750 }
45751 spin_unlock(&cookie->stores_lock);
45752@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45753
45754 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45755
45756- fscache_stat(&fscache_n_attr_changed_calls);
45757+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45758
45759 if (fscache_object_is_active(object)) {
45760 fscache_stat(&fscache_n_cop_attr_changed);
45761@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45762
45763 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45764
45765- fscache_stat(&fscache_n_attr_changed);
45766+ fscache_stat_unchecked(&fscache_n_attr_changed);
45767
45768 op = kzalloc(sizeof(*op), GFP_KERNEL);
45769 if (!op) {
45770- fscache_stat(&fscache_n_attr_changed_nomem);
45771+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45772 _leave(" = -ENOMEM");
45773 return -ENOMEM;
45774 }
45775@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45776 if (fscache_submit_exclusive_op(object, op) < 0)
45777 goto nobufs;
45778 spin_unlock(&cookie->lock);
45779- fscache_stat(&fscache_n_attr_changed_ok);
45780+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45781 fscache_put_operation(op);
45782 _leave(" = 0");
45783 return 0;
45784@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45785 nobufs:
45786 spin_unlock(&cookie->lock);
45787 kfree(op);
45788- fscache_stat(&fscache_n_attr_changed_nobufs);
45789+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45790 _leave(" = %d", -ENOBUFS);
45791 return -ENOBUFS;
45792 }
45793@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45794 /* allocate a retrieval operation and attempt to submit it */
45795 op = kzalloc(sizeof(*op), GFP_NOIO);
45796 if (!op) {
45797- fscache_stat(&fscache_n_retrievals_nomem);
45798+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45799 return NULL;
45800 }
45801
45802@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45803 return 0;
45804 }
45805
45806- fscache_stat(&fscache_n_retrievals_wait);
45807+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45808
45809 jif = jiffies;
45810 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45811 fscache_wait_bit_interruptible,
45812 TASK_INTERRUPTIBLE) != 0) {
45813- fscache_stat(&fscache_n_retrievals_intr);
45814+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45815 _leave(" = -ERESTARTSYS");
45816 return -ERESTARTSYS;
45817 }
45818@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45819 */
45820 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45821 struct fscache_retrieval *op,
45822- atomic_t *stat_op_waits,
45823- atomic_t *stat_object_dead)
45824+ atomic_unchecked_t *stat_op_waits,
45825+ atomic_unchecked_t *stat_object_dead)
45826 {
45827 int ret;
45828
45829@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45830 goto check_if_dead;
45831
45832 _debug(">>> WT");
45833- fscache_stat(stat_op_waits);
45834+ fscache_stat_unchecked(stat_op_waits);
45835 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45836 fscache_wait_bit_interruptible,
45837 TASK_INTERRUPTIBLE) < 0) {
45838@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45839
45840 check_if_dead:
45841 if (unlikely(fscache_object_is_dead(object))) {
45842- fscache_stat(stat_object_dead);
45843+ fscache_stat_unchecked(stat_object_dead);
45844 return -ENOBUFS;
45845 }
45846 return 0;
45847@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45848
45849 _enter("%p,%p,,,", cookie, page);
45850
45851- fscache_stat(&fscache_n_retrievals);
45852+ fscache_stat_unchecked(&fscache_n_retrievals);
45853
45854 if (hlist_empty(&cookie->backing_objects))
45855 goto nobufs;
45856@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45857 goto nobufs_unlock;
45858 spin_unlock(&cookie->lock);
45859
45860- fscache_stat(&fscache_n_retrieval_ops);
45861+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45862
45863 /* pin the netfs read context in case we need to do the actual netfs
45864 * read because we've encountered a cache read failure */
45865@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45866
45867 error:
45868 if (ret == -ENOMEM)
45869- fscache_stat(&fscache_n_retrievals_nomem);
45870+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45871 else if (ret == -ERESTARTSYS)
45872- fscache_stat(&fscache_n_retrievals_intr);
45873+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45874 else if (ret == -ENODATA)
45875- fscache_stat(&fscache_n_retrievals_nodata);
45876+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45877 else if (ret < 0)
45878- fscache_stat(&fscache_n_retrievals_nobufs);
45879+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45880 else
45881- fscache_stat(&fscache_n_retrievals_ok);
45882+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45883
45884 fscache_put_retrieval(op);
45885 _leave(" = %d", ret);
45886@@ -429,7 +429,7 @@ nobufs_unlock:
45887 spin_unlock(&cookie->lock);
45888 kfree(op);
45889 nobufs:
45890- fscache_stat(&fscache_n_retrievals_nobufs);
45891+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45892 _leave(" = -ENOBUFS");
45893 return -ENOBUFS;
45894 }
45895@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45896
45897 _enter("%p,,%d,,,", cookie, *nr_pages);
45898
45899- fscache_stat(&fscache_n_retrievals);
45900+ fscache_stat_unchecked(&fscache_n_retrievals);
45901
45902 if (hlist_empty(&cookie->backing_objects))
45903 goto nobufs;
45904@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45905 goto nobufs_unlock;
45906 spin_unlock(&cookie->lock);
45907
45908- fscache_stat(&fscache_n_retrieval_ops);
45909+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45910
45911 /* pin the netfs read context in case we need to do the actual netfs
45912 * read because we've encountered a cache read failure */
45913@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45914
45915 error:
45916 if (ret == -ENOMEM)
45917- fscache_stat(&fscache_n_retrievals_nomem);
45918+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45919 else if (ret == -ERESTARTSYS)
45920- fscache_stat(&fscache_n_retrievals_intr);
45921+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45922 else if (ret == -ENODATA)
45923- fscache_stat(&fscache_n_retrievals_nodata);
45924+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45925 else if (ret < 0)
45926- fscache_stat(&fscache_n_retrievals_nobufs);
45927+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45928 else
45929- fscache_stat(&fscache_n_retrievals_ok);
45930+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45931
45932 fscache_put_retrieval(op);
45933 _leave(" = %d", ret);
45934@@ -545,7 +545,7 @@ nobufs_unlock:
45935 spin_unlock(&cookie->lock);
45936 kfree(op);
45937 nobufs:
45938- fscache_stat(&fscache_n_retrievals_nobufs);
45939+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45940 _leave(" = -ENOBUFS");
45941 return -ENOBUFS;
45942 }
45943@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45944
45945 _enter("%p,%p,,,", cookie, page);
45946
45947- fscache_stat(&fscache_n_allocs);
45948+ fscache_stat_unchecked(&fscache_n_allocs);
45949
45950 if (hlist_empty(&cookie->backing_objects))
45951 goto nobufs;
45952@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45953 goto nobufs_unlock;
45954 spin_unlock(&cookie->lock);
45955
45956- fscache_stat(&fscache_n_alloc_ops);
45957+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45958
45959 ret = fscache_wait_for_retrieval_activation(
45960 object, op,
45961@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45962
45963 error:
45964 if (ret == -ERESTARTSYS)
45965- fscache_stat(&fscache_n_allocs_intr);
45966+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45967 else if (ret < 0)
45968- fscache_stat(&fscache_n_allocs_nobufs);
45969+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45970 else
45971- fscache_stat(&fscache_n_allocs_ok);
45972+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45973
45974 fscache_put_retrieval(op);
45975 _leave(" = %d", ret);
45976@@ -625,7 +625,7 @@ nobufs_unlock:
45977 spin_unlock(&cookie->lock);
45978 kfree(op);
45979 nobufs:
45980- fscache_stat(&fscache_n_allocs_nobufs);
45981+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45982 _leave(" = -ENOBUFS");
45983 return -ENOBUFS;
45984 }
45985@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45986
45987 spin_lock(&cookie->stores_lock);
45988
45989- fscache_stat(&fscache_n_store_calls);
45990+ fscache_stat_unchecked(&fscache_n_store_calls);
45991
45992 /* find a page to store */
45993 page = NULL;
45994@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45995 page = results[0];
45996 _debug("gang %d [%lx]", n, page->index);
45997 if (page->index > op->store_limit) {
45998- fscache_stat(&fscache_n_store_pages_over_limit);
45999+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46000 goto superseded;
46001 }
46002
46003@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46004 spin_unlock(&cookie->stores_lock);
46005 spin_unlock(&object->lock);
46006
46007- fscache_stat(&fscache_n_store_pages);
46008+ fscache_stat_unchecked(&fscache_n_store_pages);
46009 fscache_stat(&fscache_n_cop_write_page);
46010 ret = object->cache->ops->write_page(op, page);
46011 fscache_stat_d(&fscache_n_cop_write_page);
46012@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46013 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46014 ASSERT(PageFsCache(page));
46015
46016- fscache_stat(&fscache_n_stores);
46017+ fscache_stat_unchecked(&fscache_n_stores);
46018
46019 op = kzalloc(sizeof(*op), GFP_NOIO);
46020 if (!op)
46021@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46022 spin_unlock(&cookie->stores_lock);
46023 spin_unlock(&object->lock);
46024
46025- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46026+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46027 op->store_limit = object->store_limit;
46028
46029 if (fscache_submit_op(object, &op->op) < 0)
46030@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46031
46032 spin_unlock(&cookie->lock);
46033 radix_tree_preload_end();
46034- fscache_stat(&fscache_n_store_ops);
46035- fscache_stat(&fscache_n_stores_ok);
46036+ fscache_stat_unchecked(&fscache_n_store_ops);
46037+ fscache_stat_unchecked(&fscache_n_stores_ok);
46038
46039 /* the work queue now carries its own ref on the object */
46040 fscache_put_operation(&op->op);
46041@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46042 return 0;
46043
46044 already_queued:
46045- fscache_stat(&fscache_n_stores_again);
46046+ fscache_stat_unchecked(&fscache_n_stores_again);
46047 already_pending:
46048 spin_unlock(&cookie->stores_lock);
46049 spin_unlock(&object->lock);
46050 spin_unlock(&cookie->lock);
46051 radix_tree_preload_end();
46052 kfree(op);
46053- fscache_stat(&fscache_n_stores_ok);
46054+ fscache_stat_unchecked(&fscache_n_stores_ok);
46055 _leave(" = 0");
46056 return 0;
46057
46058@@ -851,14 +851,14 @@ nobufs:
46059 spin_unlock(&cookie->lock);
46060 radix_tree_preload_end();
46061 kfree(op);
46062- fscache_stat(&fscache_n_stores_nobufs);
46063+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
46064 _leave(" = -ENOBUFS");
46065 return -ENOBUFS;
46066
46067 nomem_free:
46068 kfree(op);
46069 nomem:
46070- fscache_stat(&fscache_n_stores_oom);
46071+ fscache_stat_unchecked(&fscache_n_stores_oom);
46072 _leave(" = -ENOMEM");
46073 return -ENOMEM;
46074 }
46075@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46076 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46077 ASSERTCMP(page, !=, NULL);
46078
46079- fscache_stat(&fscache_n_uncaches);
46080+ fscache_stat_unchecked(&fscache_n_uncaches);
46081
46082 /* cache withdrawal may beat us to it */
46083 if (!PageFsCache(page))
46084@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46085 unsigned long loop;
46086
46087 #ifdef CONFIG_FSCACHE_STATS
46088- atomic_add(pagevec->nr, &fscache_n_marks);
46089+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46090 #endif
46091
46092 for (loop = 0; loop < pagevec->nr; loop++) {
46093diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46094index 4765190..2a067f2 100644
46095--- a/fs/fscache/stats.c
46096+++ b/fs/fscache/stats.c
46097@@ -18,95 +18,95 @@
46098 /*
46099 * operation counters
46100 */
46101-atomic_t fscache_n_op_pend;
46102-atomic_t fscache_n_op_run;
46103-atomic_t fscache_n_op_enqueue;
46104-atomic_t fscache_n_op_requeue;
46105-atomic_t fscache_n_op_deferred_release;
46106-atomic_t fscache_n_op_release;
46107-atomic_t fscache_n_op_gc;
46108-atomic_t fscache_n_op_cancelled;
46109-atomic_t fscache_n_op_rejected;
46110+atomic_unchecked_t fscache_n_op_pend;
46111+atomic_unchecked_t fscache_n_op_run;
46112+atomic_unchecked_t fscache_n_op_enqueue;
46113+atomic_unchecked_t fscache_n_op_requeue;
46114+atomic_unchecked_t fscache_n_op_deferred_release;
46115+atomic_unchecked_t fscache_n_op_release;
46116+atomic_unchecked_t fscache_n_op_gc;
46117+atomic_unchecked_t fscache_n_op_cancelled;
46118+atomic_unchecked_t fscache_n_op_rejected;
46119
46120-atomic_t fscache_n_attr_changed;
46121-atomic_t fscache_n_attr_changed_ok;
46122-atomic_t fscache_n_attr_changed_nobufs;
46123-atomic_t fscache_n_attr_changed_nomem;
46124-atomic_t fscache_n_attr_changed_calls;
46125+atomic_unchecked_t fscache_n_attr_changed;
46126+atomic_unchecked_t fscache_n_attr_changed_ok;
46127+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46128+atomic_unchecked_t fscache_n_attr_changed_nomem;
46129+atomic_unchecked_t fscache_n_attr_changed_calls;
46130
46131-atomic_t fscache_n_allocs;
46132-atomic_t fscache_n_allocs_ok;
46133-atomic_t fscache_n_allocs_wait;
46134-atomic_t fscache_n_allocs_nobufs;
46135-atomic_t fscache_n_allocs_intr;
46136-atomic_t fscache_n_allocs_object_dead;
46137-atomic_t fscache_n_alloc_ops;
46138-atomic_t fscache_n_alloc_op_waits;
46139+atomic_unchecked_t fscache_n_allocs;
46140+atomic_unchecked_t fscache_n_allocs_ok;
46141+atomic_unchecked_t fscache_n_allocs_wait;
46142+atomic_unchecked_t fscache_n_allocs_nobufs;
46143+atomic_unchecked_t fscache_n_allocs_intr;
46144+atomic_unchecked_t fscache_n_allocs_object_dead;
46145+atomic_unchecked_t fscache_n_alloc_ops;
46146+atomic_unchecked_t fscache_n_alloc_op_waits;
46147
46148-atomic_t fscache_n_retrievals;
46149-atomic_t fscache_n_retrievals_ok;
46150-atomic_t fscache_n_retrievals_wait;
46151-atomic_t fscache_n_retrievals_nodata;
46152-atomic_t fscache_n_retrievals_nobufs;
46153-atomic_t fscache_n_retrievals_intr;
46154-atomic_t fscache_n_retrievals_nomem;
46155-atomic_t fscache_n_retrievals_object_dead;
46156-atomic_t fscache_n_retrieval_ops;
46157-atomic_t fscache_n_retrieval_op_waits;
46158+atomic_unchecked_t fscache_n_retrievals;
46159+atomic_unchecked_t fscache_n_retrievals_ok;
46160+atomic_unchecked_t fscache_n_retrievals_wait;
46161+atomic_unchecked_t fscache_n_retrievals_nodata;
46162+atomic_unchecked_t fscache_n_retrievals_nobufs;
46163+atomic_unchecked_t fscache_n_retrievals_intr;
46164+atomic_unchecked_t fscache_n_retrievals_nomem;
46165+atomic_unchecked_t fscache_n_retrievals_object_dead;
46166+atomic_unchecked_t fscache_n_retrieval_ops;
46167+atomic_unchecked_t fscache_n_retrieval_op_waits;
46168
46169-atomic_t fscache_n_stores;
46170-atomic_t fscache_n_stores_ok;
46171-atomic_t fscache_n_stores_again;
46172-atomic_t fscache_n_stores_nobufs;
46173-atomic_t fscache_n_stores_oom;
46174-atomic_t fscache_n_store_ops;
46175-atomic_t fscache_n_store_calls;
46176-atomic_t fscache_n_store_pages;
46177-atomic_t fscache_n_store_radix_deletes;
46178-atomic_t fscache_n_store_pages_over_limit;
46179+atomic_unchecked_t fscache_n_stores;
46180+atomic_unchecked_t fscache_n_stores_ok;
46181+atomic_unchecked_t fscache_n_stores_again;
46182+atomic_unchecked_t fscache_n_stores_nobufs;
46183+atomic_unchecked_t fscache_n_stores_oom;
46184+atomic_unchecked_t fscache_n_store_ops;
46185+atomic_unchecked_t fscache_n_store_calls;
46186+atomic_unchecked_t fscache_n_store_pages;
46187+atomic_unchecked_t fscache_n_store_radix_deletes;
46188+atomic_unchecked_t fscache_n_store_pages_over_limit;
46189
46190-atomic_t fscache_n_store_vmscan_not_storing;
46191-atomic_t fscache_n_store_vmscan_gone;
46192-atomic_t fscache_n_store_vmscan_busy;
46193-atomic_t fscache_n_store_vmscan_cancelled;
46194+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46195+atomic_unchecked_t fscache_n_store_vmscan_gone;
46196+atomic_unchecked_t fscache_n_store_vmscan_busy;
46197+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46198
46199-atomic_t fscache_n_marks;
46200-atomic_t fscache_n_uncaches;
46201+atomic_unchecked_t fscache_n_marks;
46202+atomic_unchecked_t fscache_n_uncaches;
46203
46204-atomic_t fscache_n_acquires;
46205-atomic_t fscache_n_acquires_null;
46206-atomic_t fscache_n_acquires_no_cache;
46207-atomic_t fscache_n_acquires_ok;
46208-atomic_t fscache_n_acquires_nobufs;
46209-atomic_t fscache_n_acquires_oom;
46210+atomic_unchecked_t fscache_n_acquires;
46211+atomic_unchecked_t fscache_n_acquires_null;
46212+atomic_unchecked_t fscache_n_acquires_no_cache;
46213+atomic_unchecked_t fscache_n_acquires_ok;
46214+atomic_unchecked_t fscache_n_acquires_nobufs;
46215+atomic_unchecked_t fscache_n_acquires_oom;
46216
46217-atomic_t fscache_n_updates;
46218-atomic_t fscache_n_updates_null;
46219-atomic_t fscache_n_updates_run;
46220+atomic_unchecked_t fscache_n_updates;
46221+atomic_unchecked_t fscache_n_updates_null;
46222+atomic_unchecked_t fscache_n_updates_run;
46223
46224-atomic_t fscache_n_relinquishes;
46225-atomic_t fscache_n_relinquishes_null;
46226-atomic_t fscache_n_relinquishes_waitcrt;
46227-atomic_t fscache_n_relinquishes_retire;
46228+atomic_unchecked_t fscache_n_relinquishes;
46229+atomic_unchecked_t fscache_n_relinquishes_null;
46230+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46231+atomic_unchecked_t fscache_n_relinquishes_retire;
46232
46233-atomic_t fscache_n_cookie_index;
46234-atomic_t fscache_n_cookie_data;
46235-atomic_t fscache_n_cookie_special;
46236+atomic_unchecked_t fscache_n_cookie_index;
46237+atomic_unchecked_t fscache_n_cookie_data;
46238+atomic_unchecked_t fscache_n_cookie_special;
46239
46240-atomic_t fscache_n_object_alloc;
46241-atomic_t fscache_n_object_no_alloc;
46242-atomic_t fscache_n_object_lookups;
46243-atomic_t fscache_n_object_lookups_negative;
46244-atomic_t fscache_n_object_lookups_positive;
46245-atomic_t fscache_n_object_lookups_timed_out;
46246-atomic_t fscache_n_object_created;
46247-atomic_t fscache_n_object_avail;
46248-atomic_t fscache_n_object_dead;
46249+atomic_unchecked_t fscache_n_object_alloc;
46250+atomic_unchecked_t fscache_n_object_no_alloc;
46251+atomic_unchecked_t fscache_n_object_lookups;
46252+atomic_unchecked_t fscache_n_object_lookups_negative;
46253+atomic_unchecked_t fscache_n_object_lookups_positive;
46254+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46255+atomic_unchecked_t fscache_n_object_created;
46256+atomic_unchecked_t fscache_n_object_avail;
46257+atomic_unchecked_t fscache_n_object_dead;
46258
46259-atomic_t fscache_n_checkaux_none;
46260-atomic_t fscache_n_checkaux_okay;
46261-atomic_t fscache_n_checkaux_update;
46262-atomic_t fscache_n_checkaux_obsolete;
46263+atomic_unchecked_t fscache_n_checkaux_none;
46264+atomic_unchecked_t fscache_n_checkaux_okay;
46265+atomic_unchecked_t fscache_n_checkaux_update;
46266+atomic_unchecked_t fscache_n_checkaux_obsolete;
46267
46268 atomic_t fscache_n_cop_alloc_object;
46269 atomic_t fscache_n_cop_lookup_object;
46270@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46271 seq_puts(m, "FS-Cache statistics\n");
46272
46273 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46274- atomic_read(&fscache_n_cookie_index),
46275- atomic_read(&fscache_n_cookie_data),
46276- atomic_read(&fscache_n_cookie_special));
46277+ atomic_read_unchecked(&fscache_n_cookie_index),
46278+ atomic_read_unchecked(&fscache_n_cookie_data),
46279+ atomic_read_unchecked(&fscache_n_cookie_special));
46280
46281 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46282- atomic_read(&fscache_n_object_alloc),
46283- atomic_read(&fscache_n_object_no_alloc),
46284- atomic_read(&fscache_n_object_avail),
46285- atomic_read(&fscache_n_object_dead));
46286+ atomic_read_unchecked(&fscache_n_object_alloc),
46287+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46288+ atomic_read_unchecked(&fscache_n_object_avail),
46289+ atomic_read_unchecked(&fscache_n_object_dead));
46290 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46291- atomic_read(&fscache_n_checkaux_none),
46292- atomic_read(&fscache_n_checkaux_okay),
46293- atomic_read(&fscache_n_checkaux_update),
46294- atomic_read(&fscache_n_checkaux_obsolete));
46295+ atomic_read_unchecked(&fscache_n_checkaux_none),
46296+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46297+ atomic_read_unchecked(&fscache_n_checkaux_update),
46298+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46299
46300 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46301- atomic_read(&fscache_n_marks),
46302- atomic_read(&fscache_n_uncaches));
46303+ atomic_read_unchecked(&fscache_n_marks),
46304+ atomic_read_unchecked(&fscache_n_uncaches));
46305
46306 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46307 " oom=%u\n",
46308- atomic_read(&fscache_n_acquires),
46309- atomic_read(&fscache_n_acquires_null),
46310- atomic_read(&fscache_n_acquires_no_cache),
46311- atomic_read(&fscache_n_acquires_ok),
46312- atomic_read(&fscache_n_acquires_nobufs),
46313- atomic_read(&fscache_n_acquires_oom));
46314+ atomic_read_unchecked(&fscache_n_acquires),
46315+ atomic_read_unchecked(&fscache_n_acquires_null),
46316+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46317+ atomic_read_unchecked(&fscache_n_acquires_ok),
46318+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46319+ atomic_read_unchecked(&fscache_n_acquires_oom));
46320
46321 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46322- atomic_read(&fscache_n_object_lookups),
46323- atomic_read(&fscache_n_object_lookups_negative),
46324- atomic_read(&fscache_n_object_lookups_positive),
46325- atomic_read(&fscache_n_object_created),
46326- atomic_read(&fscache_n_object_lookups_timed_out));
46327+ atomic_read_unchecked(&fscache_n_object_lookups),
46328+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46329+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46330+ atomic_read_unchecked(&fscache_n_object_created),
46331+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46332
46333 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46334- atomic_read(&fscache_n_updates),
46335- atomic_read(&fscache_n_updates_null),
46336- atomic_read(&fscache_n_updates_run));
46337+ atomic_read_unchecked(&fscache_n_updates),
46338+ atomic_read_unchecked(&fscache_n_updates_null),
46339+ atomic_read_unchecked(&fscache_n_updates_run));
46340
46341 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46342- atomic_read(&fscache_n_relinquishes),
46343- atomic_read(&fscache_n_relinquishes_null),
46344- atomic_read(&fscache_n_relinquishes_waitcrt),
46345- atomic_read(&fscache_n_relinquishes_retire));
46346+ atomic_read_unchecked(&fscache_n_relinquishes),
46347+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46348+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46349+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46350
46351 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46352- atomic_read(&fscache_n_attr_changed),
46353- atomic_read(&fscache_n_attr_changed_ok),
46354- atomic_read(&fscache_n_attr_changed_nobufs),
46355- atomic_read(&fscache_n_attr_changed_nomem),
46356- atomic_read(&fscache_n_attr_changed_calls));
46357+ atomic_read_unchecked(&fscache_n_attr_changed),
46358+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46359+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46360+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46361+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46362
46363 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46364- atomic_read(&fscache_n_allocs),
46365- atomic_read(&fscache_n_allocs_ok),
46366- atomic_read(&fscache_n_allocs_wait),
46367- atomic_read(&fscache_n_allocs_nobufs),
46368- atomic_read(&fscache_n_allocs_intr));
46369+ atomic_read_unchecked(&fscache_n_allocs),
46370+ atomic_read_unchecked(&fscache_n_allocs_ok),
46371+ atomic_read_unchecked(&fscache_n_allocs_wait),
46372+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46373+ atomic_read_unchecked(&fscache_n_allocs_intr));
46374 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46375- atomic_read(&fscache_n_alloc_ops),
46376- atomic_read(&fscache_n_alloc_op_waits),
46377- atomic_read(&fscache_n_allocs_object_dead));
46378+ atomic_read_unchecked(&fscache_n_alloc_ops),
46379+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46380+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46381
46382 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46383 " int=%u oom=%u\n",
46384- atomic_read(&fscache_n_retrievals),
46385- atomic_read(&fscache_n_retrievals_ok),
46386- atomic_read(&fscache_n_retrievals_wait),
46387- atomic_read(&fscache_n_retrievals_nodata),
46388- atomic_read(&fscache_n_retrievals_nobufs),
46389- atomic_read(&fscache_n_retrievals_intr),
46390- atomic_read(&fscache_n_retrievals_nomem));
46391+ atomic_read_unchecked(&fscache_n_retrievals),
46392+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46393+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46394+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46395+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46396+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46397+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46398 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46399- atomic_read(&fscache_n_retrieval_ops),
46400- atomic_read(&fscache_n_retrieval_op_waits),
46401- atomic_read(&fscache_n_retrievals_object_dead));
46402+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46403+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46404+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46405
46406 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46407- atomic_read(&fscache_n_stores),
46408- atomic_read(&fscache_n_stores_ok),
46409- atomic_read(&fscache_n_stores_again),
46410- atomic_read(&fscache_n_stores_nobufs),
46411- atomic_read(&fscache_n_stores_oom));
46412+ atomic_read_unchecked(&fscache_n_stores),
46413+ atomic_read_unchecked(&fscache_n_stores_ok),
46414+ atomic_read_unchecked(&fscache_n_stores_again),
46415+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46416+ atomic_read_unchecked(&fscache_n_stores_oom));
46417 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46418- atomic_read(&fscache_n_store_ops),
46419- atomic_read(&fscache_n_store_calls),
46420- atomic_read(&fscache_n_store_pages),
46421- atomic_read(&fscache_n_store_radix_deletes),
46422- atomic_read(&fscache_n_store_pages_over_limit));
46423+ atomic_read_unchecked(&fscache_n_store_ops),
46424+ atomic_read_unchecked(&fscache_n_store_calls),
46425+ atomic_read_unchecked(&fscache_n_store_pages),
46426+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
46427+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46428
46429 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46430- atomic_read(&fscache_n_store_vmscan_not_storing),
46431- atomic_read(&fscache_n_store_vmscan_gone),
46432- atomic_read(&fscache_n_store_vmscan_busy),
46433- atomic_read(&fscache_n_store_vmscan_cancelled));
46434+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46435+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46436+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46437+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46438
46439 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46440- atomic_read(&fscache_n_op_pend),
46441- atomic_read(&fscache_n_op_run),
46442- atomic_read(&fscache_n_op_enqueue),
46443- atomic_read(&fscache_n_op_cancelled),
46444- atomic_read(&fscache_n_op_rejected));
46445+ atomic_read_unchecked(&fscache_n_op_pend),
46446+ atomic_read_unchecked(&fscache_n_op_run),
46447+ atomic_read_unchecked(&fscache_n_op_enqueue),
46448+ atomic_read_unchecked(&fscache_n_op_cancelled),
46449+ atomic_read_unchecked(&fscache_n_op_rejected));
46450 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46451- atomic_read(&fscache_n_op_deferred_release),
46452- atomic_read(&fscache_n_op_release),
46453- atomic_read(&fscache_n_op_gc));
46454+ atomic_read_unchecked(&fscache_n_op_deferred_release),
46455+ atomic_read_unchecked(&fscache_n_op_release),
46456+ atomic_read_unchecked(&fscache_n_op_gc));
46457
46458 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46459 atomic_read(&fscache_n_cop_alloc_object),
46460diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46461index b6cca47..ec782c3 100644
46462--- a/fs/fuse/cuse.c
46463+++ b/fs/fuse/cuse.c
46464@@ -586,10 +586,12 @@ static int __init cuse_init(void)
46465 INIT_LIST_HEAD(&cuse_conntbl[i]);
46466
46467 /* inherit and extend fuse_dev_operations */
46468- cuse_channel_fops = fuse_dev_operations;
46469- cuse_channel_fops.owner = THIS_MODULE;
46470- cuse_channel_fops.open = cuse_channel_open;
46471- cuse_channel_fops.release = cuse_channel_release;
46472+ pax_open_kernel();
46473+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46474+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46475+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46476+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46477+ pax_close_kernel();
46478
46479 cuse_class = class_create(THIS_MODULE, "cuse");
46480 if (IS_ERR(cuse_class))
46481diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46482index 2aaf3ea..8e50863 100644
46483--- a/fs/fuse/dev.c
46484+++ b/fs/fuse/dev.c
46485@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46486 ret = 0;
46487 pipe_lock(pipe);
46488
46489- if (!pipe->readers) {
46490+ if (!atomic_read(&pipe->readers)) {
46491 send_sig(SIGPIPE, current, 0);
46492 if (!ret)
46493 ret = -EPIPE;
46494diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46495index 9f63e49..d8a64c0 100644
46496--- a/fs/fuse/dir.c
46497+++ b/fs/fuse/dir.c
46498@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
46499 return link;
46500 }
46501
46502-static void free_link(char *link)
46503+static void free_link(const char *link)
46504 {
46505 if (!IS_ERR(link))
46506 free_page((unsigned long) link);
46507diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46508index 900cf98..3896726 100644
46509--- a/fs/gfs2/inode.c
46510+++ b/fs/gfs2/inode.c
46511@@ -1517,7 +1517,7 @@ out:
46512
46513 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46514 {
46515- char *s = nd_get_link(nd);
46516+ const char *s = nd_get_link(nd);
46517 if (!IS_ERR(s))
46518 kfree(s);
46519 }
46520diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
46521index 4dfbfec..947c9c2 100644
46522--- a/fs/hfsplus/catalog.c
46523+++ b/fs/hfsplus/catalog.c
46524@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
46525 int err;
46526 u16 type;
46527
46528+ pax_track_stack();
46529+
46530 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
46531 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
46532 if (err)
46533@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
46534 int entry_size;
46535 int err;
46536
46537+ pax_track_stack();
46538+
46539 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
46540 str->name, cnid, inode->i_nlink);
46541 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
46542@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
46543 int entry_size, type;
46544 int err;
46545
46546+ pax_track_stack();
46547+
46548 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
46549 cnid, src_dir->i_ino, src_name->name,
46550 dst_dir->i_ino, dst_name->name);
46551diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
46552index 25b2443..09a3341 100644
46553--- a/fs/hfsplus/dir.c
46554+++ b/fs/hfsplus/dir.c
46555@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
46556 struct hfsplus_readdir_data *rd;
46557 u16 type;
46558
46559+ pax_track_stack();
46560+
46561 if (filp->f_pos >= inode->i_size)
46562 return 0;
46563
46564diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
46565index 4cc1e3a..ad0f70b 100644
46566--- a/fs/hfsplus/inode.c
46567+++ b/fs/hfsplus/inode.c
46568@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
46569 int res = 0;
46570 u16 type;
46571
46572+ pax_track_stack();
46573+
46574 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
46575
46576 HFSPLUS_I(inode)->linkid = 0;
46577@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
46578 struct hfs_find_data fd;
46579 hfsplus_cat_entry entry;
46580
46581+ pax_track_stack();
46582+
46583 if (HFSPLUS_IS_RSRC(inode))
46584 main_inode = HFSPLUS_I(inode)->rsrc_inode;
46585
46586diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
46587index fbaa669..c548cd0 100644
46588--- a/fs/hfsplus/ioctl.c
46589+++ b/fs/hfsplus/ioctl.c
46590@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
46591 struct hfsplus_cat_file *file;
46592 int res;
46593
46594+ pax_track_stack();
46595+
46596 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46597 return -EOPNOTSUPP;
46598
46599@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
46600 struct hfsplus_cat_file *file;
46601 ssize_t res = 0;
46602
46603+ pax_track_stack();
46604+
46605 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46606 return -EOPNOTSUPP;
46607
46608diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
46609index d24a9b6..dd9b3dd 100644
46610--- a/fs/hfsplus/super.c
46611+++ b/fs/hfsplus/super.c
46612@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
46613 u64 last_fs_block, last_fs_page;
46614 int err;
46615
46616+ pax_track_stack();
46617+
46618 err = -EINVAL;
46619 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46620 if (!sbi)
46621diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46622index ec88953..cb5e98e 100644
46623--- a/fs/hugetlbfs/inode.c
46624+++ b/fs/hugetlbfs/inode.c
46625@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46626 .kill_sb = kill_litter_super,
46627 };
46628
46629-static struct vfsmount *hugetlbfs_vfsmount;
46630+struct vfsmount *hugetlbfs_vfsmount;
46631
46632 static int can_do_hugetlb_shm(void)
46633 {
46634diff --git a/fs/inode.c b/fs/inode.c
46635index ec79246..054c36a 100644
46636--- a/fs/inode.c
46637+++ b/fs/inode.c
46638@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
46639
46640 #ifdef CONFIG_SMP
46641 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46642- static atomic_t shared_last_ino;
46643- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46644+ static atomic_unchecked_t shared_last_ino;
46645+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46646
46647 res = next - LAST_INO_BATCH;
46648 }
46649diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
46650index f94fc48..3bb8d30 100644
46651--- a/fs/jbd/checkpoint.c
46652+++ b/fs/jbd/checkpoint.c
46653@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal)
46654 tid_t this_tid;
46655 int result;
46656
46657+ pax_track_stack();
46658+
46659 jbd_debug(1, "Start checkpoint\n");
46660
46661 /*
46662diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
46663index 16a5047..88ff6ca 100644
46664--- a/fs/jffs2/compr_rtime.c
46665+++ b/fs/jffs2/compr_rtime.c
46666@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
46667 int outpos = 0;
46668 int pos=0;
46669
46670+ pax_track_stack();
46671+
46672 memset(positions,0,sizeof(positions));
46673
46674 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
46675@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
46676 int outpos = 0;
46677 int pos=0;
46678
46679+ pax_track_stack();
46680+
46681 memset(positions,0,sizeof(positions));
46682
46683 while (outpos<destlen) {
46684diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
46685index 9e7cec8..4713089 100644
46686--- a/fs/jffs2/compr_rubin.c
46687+++ b/fs/jffs2/compr_rubin.c
46688@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
46689 int ret;
46690 uint32_t mysrclen, mydstlen;
46691
46692+ pax_track_stack();
46693+
46694 mysrclen = *sourcelen;
46695 mydstlen = *dstlen - 8;
46696
46697diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46698index e513f19..2ab1351 100644
46699--- a/fs/jffs2/erase.c
46700+++ b/fs/jffs2/erase.c
46701@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46702 struct jffs2_unknown_node marker = {
46703 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46704 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46705- .totlen = cpu_to_je32(c->cleanmarker_size)
46706+ .totlen = cpu_to_je32(c->cleanmarker_size),
46707+ .hdr_crc = cpu_to_je32(0)
46708 };
46709
46710 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46711diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46712index 4515bea..178f2d6 100644
46713--- a/fs/jffs2/wbuf.c
46714+++ b/fs/jffs2/wbuf.c
46715@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46716 {
46717 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46718 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46719- .totlen = constant_cpu_to_je32(8)
46720+ .totlen = constant_cpu_to_je32(8),
46721+ .hdr_crc = constant_cpu_to_je32(0)
46722 };
46723
46724 /*
46725diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
46726index 3e93cdd..c8a80e1 100644
46727--- a/fs/jffs2/xattr.c
46728+++ b/fs/jffs2/xattr.c
46729@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
46730
46731 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
46732
46733+ pax_track_stack();
46734+
46735 /* Phase.1 : Merge same xref */
46736 for (i=0; i < XREF_TMPHASH_SIZE; i++)
46737 xref_tmphash[i] = NULL;
46738diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46739index 06c8a67..589dbbd 100644
46740--- a/fs/jfs/super.c
46741+++ b/fs/jfs/super.c
46742@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
46743
46744 jfs_inode_cachep =
46745 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46746- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46747+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46748 init_once);
46749 if (jfs_inode_cachep == NULL)
46750 return -ENOMEM;
46751diff --git a/fs/libfs.c b/fs/libfs.c
46752index c18e9a1..0b04e2c 100644
46753--- a/fs/libfs.c
46754+++ b/fs/libfs.c
46755@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46756
46757 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46758 struct dentry *next;
46759+ char d_name[sizeof(next->d_iname)];
46760+ const unsigned char *name;
46761+
46762 next = list_entry(p, struct dentry, d_u.d_child);
46763 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46764 if (!simple_positive(next)) {
46765@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46766
46767 spin_unlock(&next->d_lock);
46768 spin_unlock(&dentry->d_lock);
46769- if (filldir(dirent, next->d_name.name,
46770+ name = next->d_name.name;
46771+ if (name == next->d_iname) {
46772+ memcpy(d_name, name, next->d_name.len);
46773+ name = d_name;
46774+ }
46775+ if (filldir(dirent, name,
46776 next->d_name.len, filp->f_pos,
46777 next->d_inode->i_ino,
46778 dt_type(next->d_inode)) < 0)
46779diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46780index 8392cb8..ae8ed40 100644
46781--- a/fs/lockd/clntproc.c
46782+++ b/fs/lockd/clntproc.c
46783@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46784 /*
46785 * Cookie counter for NLM requests
46786 */
46787-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46788+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46789
46790 void nlmclnt_next_cookie(struct nlm_cookie *c)
46791 {
46792- u32 cookie = atomic_inc_return(&nlm_cookie);
46793+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46794
46795 memcpy(c->data, &cookie, 4);
46796 c->len=4;
46797@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
46798 struct nlm_rqst reqst, *req;
46799 int status;
46800
46801+ pax_track_stack();
46802+
46803 req = &reqst;
46804 memset(req, 0, sizeof(*req));
46805 locks_init_lock(&req->a_args.lock.fl);
46806diff --git a/fs/locks.c b/fs/locks.c
46807index 703f545..150a552 100644
46808--- a/fs/locks.c
46809+++ b/fs/locks.c
46810@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp)
46811 return;
46812
46813 if (filp->f_op && filp->f_op->flock) {
46814- struct file_lock fl = {
46815+ struct file_lock flock = {
46816 .fl_pid = current->tgid,
46817 .fl_file = filp,
46818 .fl_flags = FL_FLOCK,
46819 .fl_type = F_UNLCK,
46820 .fl_end = OFFSET_MAX,
46821 };
46822- filp->f_op->flock(filp, F_SETLKW, &fl);
46823- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46824- fl.fl_ops->fl_release_private(&fl);
46825+ filp->f_op->flock(filp, F_SETLKW, &flock);
46826+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46827+ flock.fl_ops->fl_release_private(&flock);
46828 }
46829
46830 lock_flocks();
46831diff --git a/fs/logfs/super.c b/fs/logfs/super.c
46832index ce03a18..ac8c14f 100644
46833--- a/fs/logfs/super.c
46834+++ b/fs/logfs/super.c
46835@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb)
46836 struct logfs_disk_super _ds1, *ds1 = &_ds1;
46837 int err, valid0, valid1;
46838
46839+ pax_track_stack();
46840+
46841 /* read first superblock */
46842 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
46843 if (err)
46844diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
46845index 3f32bcb..7c82c29 100644
46846--- a/fs/minix/bitmap.c
46847+++ b/fs/minix/bitmap.c
46848@@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
46849
46850 static DEFINE_SPINLOCK(bitmap_lock);
46851
46852-static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
46853+static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
46854 {
46855 unsigned i, j, sum = 0;
46856 struct buffer_head *bh;
46857+ unsigned numblocks = minix_blocks_needed(numbits, blocksize);
46858
46859 for (i=0; i<numblocks-1; i++) {
46860 if (!(bh=map[i]))
46861@@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode)
46862 return 0;
46863 }
46864
46865-unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
46866+unsigned long minix_count_free_blocks(struct super_block *sb)
46867 {
46868- return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
46869- sbi->s_nzones - sbi->s_firstdatazone + 1)
46870+ struct minix_sb_info *sbi = minix_sb(sb);
46871+ u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
46872+
46873+ return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
46874 << sbi->s_log_zone_size);
46875 }
46876
46877@@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
46878 return inode;
46879 }
46880
46881-unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
46882+unsigned long minix_count_free_inodes(struct super_block *sb)
46883 {
46884- return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
46885+ struct minix_sb_info *sbi = minix_sb(sb);
46886+ u32 bits = sbi->s_ninodes + 1;
46887+
46888+ return count_free(sbi->s_imap, sb->s_blocksize, bits);
46889 }
46890diff --git a/fs/minix/inode.c b/fs/minix/inode.c
46891index e7d23e2..1ed1351 100644
46892--- a/fs/minix/inode.c
46893+++ b/fs/minix/inode.c
46894@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
46895 else if (sbi->s_mount_state & MINIX_ERROR_FS)
46896 printk("MINIX-fs: mounting file system with errors, "
46897 "running fsck is recommended\n");
46898+
46899+ /* Apparently minix can create filesystems that allocate more blocks for
46900+ * the bitmaps than needed. We simply ignore that, but verify it didn't
46901+ * create one with not enough blocks and bail out if so.
46902+ */
46903+ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
46904+ if (sbi->s_imap_blocks < block) {
46905+ printk("MINIX-fs: file system does not have enough "
46906+ "imap blocks allocated. Refusing to mount\n");
46907+ goto out_iput;
46908+ }
46909+
46910+ block = minix_blocks_needed(
46911+ (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
46912+ s->s_blocksize);
46913+ if (sbi->s_zmap_blocks < block) {
46914+ printk("MINIX-fs: file system does not have enough "
46915+ "zmap blocks allocated. Refusing to mount.\n");
46916+ goto out_iput;
46917+ }
46918+
46919 return 0;
46920
46921 out_iput:
46922@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
46923 buf->f_type = sb->s_magic;
46924 buf->f_bsize = sb->s_blocksize;
46925 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
46926- buf->f_bfree = minix_count_free_blocks(sbi);
46927+ buf->f_bfree = minix_count_free_blocks(sb);
46928 buf->f_bavail = buf->f_bfree;
46929 buf->f_files = sbi->s_ninodes;
46930- buf->f_ffree = minix_count_free_inodes(sbi);
46931+ buf->f_ffree = minix_count_free_inodes(sb);
46932 buf->f_namelen = sbi->s_namelen;
46933 buf->f_fsid.val[0] = (u32)id;
46934 buf->f_fsid.val[1] = (u32)(id >> 32);
46935diff --git a/fs/minix/minix.h b/fs/minix/minix.h
46936index 341e212..6415fe0 100644
46937--- a/fs/minix/minix.h
46938+++ b/fs/minix/minix.h
46939@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
46940 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
46941 extern struct inode * minix_new_inode(const struct inode *, int, int *);
46942 extern void minix_free_inode(struct inode * inode);
46943-extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
46944+extern unsigned long minix_count_free_inodes(struct super_block *sb);
46945 extern int minix_new_block(struct inode * inode);
46946 extern void minix_free_block(struct inode *inode, unsigned long block);
46947-extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
46948+extern unsigned long minix_count_free_blocks(struct super_block *sb);
46949 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
46950 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
46951
46952@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
46953 return list_entry(inode, struct minix_inode_info, vfs_inode);
46954 }
46955
46956+static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
46957+{
46958+ return DIV_ROUND_UP(bits, blocksize * 8);
46959+}
46960+
46961 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
46962 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
46963
46964diff --git a/fs/namei.c b/fs/namei.c
46965index 3d15072..c1ddf9c 100644
46966--- a/fs/namei.c
46967+++ b/fs/namei.c
46968@@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask)
46969 if (ret != -EACCES)
46970 return ret;
46971
46972+#ifdef CONFIG_GRKERNSEC
46973+ /* we'll block if we have to log due to a denied capability use */
46974+ if (mask & MAY_NOT_BLOCK)
46975+ return -ECHILD;
46976+#endif
46977+
46978 if (S_ISDIR(inode->i_mode)) {
46979 /* DACs are overridable for directories */
46980- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46981- return 0;
46982 if (!(mask & MAY_WRITE))
46983- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46984+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46985+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46986 return 0;
46987+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46988+ return 0;
46989 return -EACCES;
46990 }
46991 /*
46992+ * Searching includes executable on directories, else just read.
46993+ */
46994+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46995+ if (mask == MAY_READ)
46996+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46997+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46998+ return 0;
46999+
47000+ /*
47001 * Read/write DACs are always overridable.
47002 * Executable DACs are overridable when there is
47003 * at least one exec bit set.
47004@@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask)
47005 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47006 return 0;
47007
47008- /*
47009- * Searching includes executable on directories, else just read.
47010- */
47011- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47012- if (mask == MAY_READ)
47013- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47014- return 0;
47015-
47016 return -EACCES;
47017 }
47018
47019@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47020 return error;
47021 }
47022
47023+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
47024+ dentry->d_inode, dentry, nd->path.mnt)) {
47025+ error = -EACCES;
47026+ *p = ERR_PTR(error); /* no ->put_link(), please */
47027+ path_put(&nd->path);
47028+ return error;
47029+ }
47030+
47031 nd->last_type = LAST_BIND;
47032 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47033 error = PTR_ERR(*p);
47034 if (!IS_ERR(*p)) {
47035- char *s = nd_get_link(nd);
47036+ const char *s = nd_get_link(nd);
47037 error = 0;
47038 if (s)
47039 error = __vfs_follow_link(nd, s);
47040@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
47041 if (!err)
47042 err = complete_walk(nd);
47043
47044+ if (!(nd->flags & LOOKUP_PARENT)) {
47045+#ifdef CONFIG_GRKERNSEC
47046+ if (flags & LOOKUP_RCU) {
47047+ if (!err)
47048+ path_put(&nd->path);
47049+ err = -ECHILD;
47050+ } else
47051+#endif
47052+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47053+ if (!err)
47054+ path_put(&nd->path);
47055+ err = -ENOENT;
47056+ }
47057+ }
47058+
47059 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47060 if (!nd->inode->i_op->lookup) {
47061 path_put(&nd->path);
47062@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
47063 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47064
47065 if (likely(!retval)) {
47066+ if (*name != '/' && nd->path.dentry && nd->inode) {
47067+#ifdef CONFIG_GRKERNSEC
47068+ if (flags & LOOKUP_RCU)
47069+ return -ECHILD;
47070+#endif
47071+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47072+ return -ENOENT;
47073+ }
47074+
47075 if (unlikely(!audit_dummy_context())) {
47076 if (nd->path.dentry && nd->inode)
47077 audit_inode(name, nd->path.dentry);
47078@@ -2049,7 +2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag)
47079 /*
47080 * Ensure there are no outstanding leases on the file.
47081 */
47082- return break_lease(inode, flag);
47083+ error = break_lease(inode, flag);
47084+
47085+ if (error)
47086+ return error;
47087+
47088+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
47089+ error = -EPERM;
47090+ goto exit;
47091+ }
47092+
47093+ if (gr_handle_rawio(inode)) {
47094+ error = -EPERM;
47095+ goto exit;
47096+ }
47097+
47098+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
47099+ error = -EACCES;
47100+ goto exit;
47101+ }
47102+exit:
47103+ return error;
47104 }
47105
47106 static int handle_truncate(struct file *filp)
47107@@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47108 error = complete_walk(nd);
47109 if (error)
47110 return ERR_PTR(error);
47111+#ifdef CONFIG_GRKERNSEC
47112+ if (nd->flags & LOOKUP_RCU) {
47113+ error = -ECHILD;
47114+ goto exit;
47115+ }
47116+#endif
47117+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47118+ error = -ENOENT;
47119+ goto exit;
47120+ }
47121 audit_inode(pathname, nd->path.dentry);
47122 if (open_flag & O_CREAT) {
47123 error = -EISDIR;
47124@@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47125 error = complete_walk(nd);
47126 if (error)
47127 return ERR_PTR(error);
47128+#ifdef CONFIG_GRKERNSEC
47129+ if (nd->flags & LOOKUP_RCU) {
47130+ error = -ECHILD;
47131+ goto exit;
47132+ }
47133+#endif
47134+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47135+ error = -ENOENT;
47136+ goto exit;
47137+ }
47138 audit_inode(pathname, dir);
47139 goto ok;
47140 }
47141@@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47142 error = complete_walk(nd);
47143 if (error)
47144 return ERR_PTR(-ECHILD);
47145+#ifdef CONFIG_GRKERNSEC
47146+ if (nd->flags & LOOKUP_RCU) {
47147+ error = -ECHILD;
47148+ goto exit;
47149+ }
47150+#endif
47151+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47152+ error = -ENOENT;
47153+ goto exit;
47154+ }
47155
47156 error = -ENOTDIR;
47157 if (nd->flags & LOOKUP_DIRECTORY) {
47158@@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47159 /* Negative dentry, just create the file */
47160 if (!dentry->d_inode) {
47161 int mode = op->mode;
47162+
47163+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47164+ error = -EACCES;
47165+ goto exit_mutex_unlock;
47166+ }
47167+
47168 if (!IS_POSIXACL(dir->d_inode))
47169 mode &= ~current_umask();
47170 /*
47171@@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47172 error = vfs_create(dir->d_inode, dentry, mode, nd);
47173 if (error)
47174 goto exit_mutex_unlock;
47175+ else
47176+ gr_handle_create(path->dentry, path->mnt);
47177 mutex_unlock(&dir->d_inode->i_mutex);
47178 dput(nd->path.dentry);
47179 nd->path.dentry = dentry;
47180@@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47181 /*
47182 * It already exists.
47183 */
47184+
47185+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47186+ error = -ENOENT;
47187+ goto exit_mutex_unlock;
47188+ }
47189+
47190+ /* only check if O_CREAT is specified, all other checks need to go
47191+ into may_open */
47192+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47193+ error = -EACCES;
47194+ goto exit_mutex_unlock;
47195+ }
47196+
47197 mutex_unlock(&dir->d_inode->i_mutex);
47198 audit_inode(pathname, path->dentry);
47199
47200@@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47201 *path = nd.path;
47202 return dentry;
47203 eexist:
47204+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47205+ dput(dentry);
47206+ dentry = ERR_PTR(-ENOENT);
47207+ goto fail;
47208+ }
47209 dput(dentry);
47210 dentry = ERR_PTR(-EEXIST);
47211 fail:
47212@@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47213 }
47214 EXPORT_SYMBOL(user_path_create);
47215
47216+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47217+{
47218+ char *tmp = getname(pathname);
47219+ struct dentry *res;
47220+ if (IS_ERR(tmp))
47221+ return ERR_CAST(tmp);
47222+ res = kern_path_create(dfd, tmp, path, is_dir);
47223+ if (IS_ERR(res))
47224+ putname(tmp);
47225+ else
47226+ *to = tmp;
47227+ return res;
47228+}
47229+
47230 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
47231 {
47232 int error = may_create(dir, dentry);
47233@@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
47234 error = mnt_want_write(path.mnt);
47235 if (error)
47236 goto out_dput;
47237+
47238+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47239+ error = -EPERM;
47240+ goto out_drop_write;
47241+ }
47242+
47243+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47244+ error = -EACCES;
47245+ goto out_drop_write;
47246+ }
47247+
47248 error = security_path_mknod(&path, dentry, mode, dev);
47249 if (error)
47250 goto out_drop_write;
47251@@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
47252 }
47253 out_drop_write:
47254 mnt_drop_write(path.mnt);
47255+
47256+ if (!error)
47257+ gr_handle_create(dentry, path.mnt);
47258 out_dput:
47259 dput(dentry);
47260 mutex_unlock(&path.dentry->d_inode->i_mutex);
47261@@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
47262 error = mnt_want_write(path.mnt);
47263 if (error)
47264 goto out_dput;
47265+
47266+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47267+ error = -EACCES;
47268+ goto out_drop_write;
47269+ }
47270+
47271 error = security_path_mkdir(&path, dentry, mode);
47272 if (error)
47273 goto out_drop_write;
47274 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47275 out_drop_write:
47276 mnt_drop_write(path.mnt);
47277+
47278+ if (!error)
47279+ gr_handle_create(dentry, path.mnt);
47280 out_dput:
47281 dput(dentry);
47282 mutex_unlock(&path.dentry->d_inode->i_mutex);
47283@@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47284 char * name;
47285 struct dentry *dentry;
47286 struct nameidata nd;
47287+ ino_t saved_ino = 0;
47288+ dev_t saved_dev = 0;
47289
47290 error = user_path_parent(dfd, pathname, &nd, &name);
47291 if (error)
47292@@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47293 error = -ENOENT;
47294 goto exit3;
47295 }
47296+
47297+ saved_ino = dentry->d_inode->i_ino;
47298+ saved_dev = gr_get_dev_from_dentry(dentry);
47299+
47300+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47301+ error = -EACCES;
47302+ goto exit3;
47303+ }
47304+
47305 error = mnt_want_write(nd.path.mnt);
47306 if (error)
47307 goto exit3;
47308@@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47309 if (error)
47310 goto exit4;
47311 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47312+ if (!error && (saved_dev || saved_ino))
47313+ gr_handle_delete(saved_ino, saved_dev);
47314 exit4:
47315 mnt_drop_write(nd.path.mnt);
47316 exit3:
47317@@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47318 struct dentry *dentry;
47319 struct nameidata nd;
47320 struct inode *inode = NULL;
47321+ ino_t saved_ino = 0;
47322+ dev_t saved_dev = 0;
47323
47324 error = user_path_parent(dfd, pathname, &nd, &name);
47325 if (error)
47326@@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47327 if (!inode)
47328 goto slashes;
47329 ihold(inode);
47330+
47331+ if (inode->i_nlink <= 1) {
47332+ saved_ino = inode->i_ino;
47333+ saved_dev = gr_get_dev_from_dentry(dentry);
47334+ }
47335+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47336+ error = -EACCES;
47337+ goto exit2;
47338+ }
47339+
47340 error = mnt_want_write(nd.path.mnt);
47341 if (error)
47342 goto exit2;
47343@@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47344 if (error)
47345 goto exit3;
47346 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47347+ if (!error && (saved_ino || saved_dev))
47348+ gr_handle_delete(saved_ino, saved_dev);
47349 exit3:
47350 mnt_drop_write(nd.path.mnt);
47351 exit2:
47352@@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47353 error = mnt_want_write(path.mnt);
47354 if (error)
47355 goto out_dput;
47356+
47357+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47358+ error = -EACCES;
47359+ goto out_drop_write;
47360+ }
47361+
47362 error = security_path_symlink(&path, dentry, from);
47363 if (error)
47364 goto out_drop_write;
47365 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47366+ if (!error)
47367+ gr_handle_create(dentry, path.mnt);
47368 out_drop_write:
47369 mnt_drop_write(path.mnt);
47370 out_dput:
47371@@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47372 {
47373 struct dentry *new_dentry;
47374 struct path old_path, new_path;
47375+ char *to = NULL;
47376 int how = 0;
47377 int error;
47378
47379@@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47380 if (error)
47381 return error;
47382
47383- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47384+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47385 error = PTR_ERR(new_dentry);
47386 if (IS_ERR(new_dentry))
47387 goto out;
47388@@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47389 error = mnt_want_write(new_path.mnt);
47390 if (error)
47391 goto out_dput;
47392+
47393+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47394+ old_path.dentry->d_inode,
47395+ old_path.dentry->d_inode->i_mode, to)) {
47396+ error = -EACCES;
47397+ goto out_drop_write;
47398+ }
47399+
47400+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47401+ old_path.dentry, old_path.mnt, to)) {
47402+ error = -EACCES;
47403+ goto out_drop_write;
47404+ }
47405+
47406 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47407 if (error)
47408 goto out_drop_write;
47409 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47410+ if (!error)
47411+ gr_handle_create(new_dentry, new_path.mnt);
47412 out_drop_write:
47413 mnt_drop_write(new_path.mnt);
47414 out_dput:
47415+ putname(to);
47416 dput(new_dentry);
47417 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47418 path_put(&new_path);
47419@@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47420 char *to;
47421 int error;
47422
47423+ pax_track_stack();
47424+
47425 error = user_path_parent(olddfd, oldname, &oldnd, &from);
47426 if (error)
47427 goto exit;
47428@@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47429 if (new_dentry == trap)
47430 goto exit5;
47431
47432+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47433+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47434+ to);
47435+ if (error)
47436+ goto exit5;
47437+
47438 error = mnt_want_write(oldnd.path.mnt);
47439 if (error)
47440 goto exit5;
47441@@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47442 goto exit6;
47443 error = vfs_rename(old_dir->d_inode, old_dentry,
47444 new_dir->d_inode, new_dentry);
47445+ if (!error)
47446+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47447+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47448 exit6:
47449 mnt_drop_write(oldnd.path.mnt);
47450 exit5:
47451@@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47452
47453 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47454 {
47455+ char tmpbuf[64];
47456+ const char *newlink;
47457 int len;
47458
47459 len = PTR_ERR(link);
47460@@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47461 len = strlen(link);
47462 if (len > (unsigned) buflen)
47463 len = buflen;
47464- if (copy_to_user(buffer, link, len))
47465+
47466+ if (len < sizeof(tmpbuf)) {
47467+ memcpy(tmpbuf, link, len);
47468+ newlink = tmpbuf;
47469+ } else
47470+ newlink = link;
47471+
47472+ if (copy_to_user(buffer, newlink, len))
47473 len = -EFAULT;
47474 out:
47475 return len;
47476diff --git a/fs/namespace.c b/fs/namespace.c
47477index 5e7f2e9..cd13685 100644
47478--- a/fs/namespace.c
47479+++ b/fs/namespace.c
47480@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47481 if (!(sb->s_flags & MS_RDONLY))
47482 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47483 up_write(&sb->s_umount);
47484+
47485+ gr_log_remount(mnt->mnt_devname, retval);
47486+
47487 return retval;
47488 }
47489
47490@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47491 br_write_unlock(vfsmount_lock);
47492 up_write(&namespace_sem);
47493 release_mounts(&umount_list);
47494+
47495+ gr_log_unmount(mnt->mnt_devname, retval);
47496+
47497 return retval;
47498 }
47499
47500@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47501 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47502 MS_STRICTATIME);
47503
47504+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47505+ retval = -EPERM;
47506+ goto dput_out;
47507+ }
47508+
47509+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47510+ retval = -EPERM;
47511+ goto dput_out;
47512+ }
47513+
47514 if (flags & MS_REMOUNT)
47515 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47516 data_page);
47517@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47518 dev_name, data_page);
47519 dput_out:
47520 path_put(&path);
47521+
47522+ gr_log_mount(dev_name, dir_name, retval);
47523+
47524 return retval;
47525 }
47526
47527@@ -2573,6 +2592,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47528 if (error)
47529 goto out2;
47530
47531+ if (gr_handle_chroot_pivot()) {
47532+ error = -EPERM;
47533+ goto out2;
47534+ }
47535+
47536 get_fs_root(current->fs, &root);
47537 error = lock_mount(&old);
47538 if (error)
47539diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
47540index 9c51f62..503b252 100644
47541--- a/fs/ncpfs/dir.c
47542+++ b/fs/ncpfs/dir.c
47543@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
47544 int res, val = 0, len;
47545 __u8 __name[NCP_MAXPATHLEN + 1];
47546
47547+ pax_track_stack();
47548+
47549 if (dentry == dentry->d_sb->s_root)
47550 return 1;
47551
47552@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
47553 int error, res, len;
47554 __u8 __name[NCP_MAXPATHLEN + 1];
47555
47556+ pax_track_stack();
47557+
47558 error = -EIO;
47559 if (!ncp_conn_valid(server))
47560 goto finished;
47561@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
47562 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
47563 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
47564
47565+ pax_track_stack();
47566+
47567 ncp_age_dentry(server, dentry);
47568 len = sizeof(__name);
47569 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
47570@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
47571 int error, len;
47572 __u8 __name[NCP_MAXPATHLEN + 1];
47573
47574+ pax_track_stack();
47575+
47576 DPRINTK("ncp_mkdir: making %s/%s\n",
47577 dentry->d_parent->d_name.name, dentry->d_name.name);
47578
47579@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
47580 int old_len, new_len;
47581 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
47582
47583+ pax_track_stack();
47584+
47585 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47586 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47587 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
47588diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
47589index 202f370..9d4565e 100644
47590--- a/fs/ncpfs/inode.c
47591+++ b/fs/ncpfs/inode.c
47592@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
47593 #endif
47594 struct ncp_entry_info finfo;
47595
47596+ pax_track_stack();
47597+
47598 memset(&data, 0, sizeof(data));
47599 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47600 if (!server)
47601diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
47602index 281ae95..dd895b9 100644
47603--- a/fs/nfs/blocklayout/blocklayout.c
47604+++ b/fs/nfs/blocklayout/blocklayout.c
47605@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
47606 */
47607 struct parallel_io {
47608 struct kref refcnt;
47609- struct rpc_call_ops call_ops;
47610+ rpc_call_ops_no_const call_ops;
47611 void (*pnfs_callback) (void *data);
47612 void *data;
47613 };
47614diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47615index 679d2f5..ef1ffec 100644
47616--- a/fs/nfs/inode.c
47617+++ b/fs/nfs/inode.c
47618@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47619 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47620 nfsi->attrtimeo_timestamp = jiffies;
47621
47622- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47623+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47624 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47625 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47626 else
47627@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47628 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47629 }
47630
47631-static atomic_long_t nfs_attr_generation_counter;
47632+static atomic_long_unchecked_t nfs_attr_generation_counter;
47633
47634 static unsigned long nfs_read_attr_generation_counter(void)
47635 {
47636- return atomic_long_read(&nfs_attr_generation_counter);
47637+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47638 }
47639
47640 unsigned long nfs_inc_attr_generation_counter(void)
47641 {
47642- return atomic_long_inc_return(&nfs_attr_generation_counter);
47643+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47644 }
47645
47646 void nfs_fattr_init(struct nfs_fattr *fattr)
47647diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
47648index 6f8bcc7..8f823c5 100644
47649--- a/fs/nfsd/nfs4state.c
47650+++ b/fs/nfsd/nfs4state.c
47651@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
47652 unsigned int strhashval;
47653 int err;
47654
47655+ pax_track_stack();
47656+
47657 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47658 (long long) lock->lk_offset,
47659 (long long) lock->lk_length);
47660diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
47661index f810996..cec8977 100644
47662--- a/fs/nfsd/nfs4xdr.c
47663+++ b/fs/nfsd/nfs4xdr.c
47664@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
47665 .dentry = dentry,
47666 };
47667
47668+ pax_track_stack();
47669+
47670 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47671 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47672 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
47673diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47674index acf88ae..4fd6245 100644
47675--- a/fs/nfsd/vfs.c
47676+++ b/fs/nfsd/vfs.c
47677@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47678 } else {
47679 oldfs = get_fs();
47680 set_fs(KERNEL_DS);
47681- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47682+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47683 set_fs(oldfs);
47684 }
47685
47686@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47687
47688 /* Write the data. */
47689 oldfs = get_fs(); set_fs(KERNEL_DS);
47690- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47691+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47692 set_fs(oldfs);
47693 if (host_err < 0)
47694 goto out_nfserr;
47695@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47696 */
47697
47698 oldfs = get_fs(); set_fs(KERNEL_DS);
47699- host_err = inode->i_op->readlink(dentry, buf, *lenp);
47700+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47701 set_fs(oldfs);
47702
47703 if (host_err < 0)
47704diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
47705index 3e65427..ac258be 100644
47706--- a/fs/nilfs2/ioctl.c
47707+++ b/fs/nilfs2/ioctl.c
47708@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
47709 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
47710 goto out_free;
47711
47712+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
47713+ goto out_free;
47714+
47715 len = argv[n].v_size * argv[n].v_nmembs;
47716 base = (void __user *)(unsigned long)argv[n].v_base;
47717 if (len == 0) {
47718diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47719index 9fde1c0..14e8827 100644
47720--- a/fs/notify/fanotify/fanotify_user.c
47721+++ b/fs/notify/fanotify/fanotify_user.c
47722@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47723 goto out_close_fd;
47724
47725 ret = -EFAULT;
47726- if (copy_to_user(buf, &fanotify_event_metadata,
47727+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47728+ copy_to_user(buf, &fanotify_event_metadata,
47729 fanotify_event_metadata.event_len))
47730 goto out_kill_access_response;
47731
47732diff --git a/fs/notify/mark.c b/fs/notify/mark.c
47733index e14587d..f104d56 100644
47734--- a/fs/notify/mark.c
47735+++ b/fs/notify/mark.c
47736@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
47737
47738 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
47739
47740- /* 1 from caller and 1 for being on i_list/g_list */
47741- BUG_ON(atomic_read(&mark->refcnt) < 2);
47742-
47743 spin_lock(&group->mark_lock);
47744
47745 if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
47746@@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
47747 iput(inode);
47748
47749 /*
47750+ * We don't necessarily have a ref on mark from caller so the above iput
47751+ * may have already destroyed it. Don't touch from now on.
47752+ */
47753+
47754+ /*
47755 * it's possible that this group tried to destroy itself, but this
47756 * this mark was simultaneously being freed by inode. If that's the
47757 * case, we finish freeing the group here.
47758diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47759index ee18815..7aa5d01 100644
47760--- a/fs/notify/notification.c
47761+++ b/fs/notify/notification.c
47762@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47763 * get set to 0 so it will never get 'freed'
47764 */
47765 static struct fsnotify_event *q_overflow_event;
47766-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47767+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47768
47769 /**
47770 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47771@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47772 */
47773 u32 fsnotify_get_cookie(void)
47774 {
47775- return atomic_inc_return(&fsnotify_sync_cookie);
47776+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47777 }
47778 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47779
47780diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47781index 99e3610..02c1068 100644
47782--- a/fs/ntfs/dir.c
47783+++ b/fs/ntfs/dir.c
47784@@ -1329,7 +1329,7 @@ find_next_index_buffer:
47785 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47786 ~(s64)(ndir->itype.index.block_size - 1)));
47787 /* Bounds checks. */
47788- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47789+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47790 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47791 "inode 0x%lx or driver bug.", vdir->i_ino);
47792 goto err_out;
47793diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47794index c587e2d..3641eaa 100644
47795--- a/fs/ntfs/file.c
47796+++ b/fs/ntfs/file.c
47797@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47798 #endif /* NTFS_RW */
47799 };
47800
47801-const struct file_operations ntfs_empty_file_ops = {};
47802+const struct file_operations ntfs_empty_file_ops __read_only;
47803
47804-const struct inode_operations ntfs_empty_inode_ops = {};
47805+const struct inode_operations ntfs_empty_inode_ops __read_only;
47806diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47807index 210c352..a174f83 100644
47808--- a/fs/ocfs2/localalloc.c
47809+++ b/fs/ocfs2/localalloc.c
47810@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47811 goto bail;
47812 }
47813
47814- atomic_inc(&osb->alloc_stats.moves);
47815+ atomic_inc_unchecked(&osb->alloc_stats.moves);
47816
47817 bail:
47818 if (handle)
47819diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
47820index 53aa41e..d7df9f1 100644
47821--- a/fs/ocfs2/namei.c
47822+++ b/fs/ocfs2/namei.c
47823@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir,
47824 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
47825 struct ocfs2_dir_lookup_result target_insert = { NULL, };
47826
47827+ pax_track_stack();
47828+
47829 /* At some point it might be nice to break this function up a
47830 * bit. */
47831
47832diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47833index 4092858..51c70ff 100644
47834--- a/fs/ocfs2/ocfs2.h
47835+++ b/fs/ocfs2/ocfs2.h
47836@@ -235,11 +235,11 @@ enum ocfs2_vol_state
47837
47838 struct ocfs2_alloc_stats
47839 {
47840- atomic_t moves;
47841- atomic_t local_data;
47842- atomic_t bitmap_data;
47843- atomic_t bg_allocs;
47844- atomic_t bg_extends;
47845+ atomic_unchecked_t moves;
47846+ atomic_unchecked_t local_data;
47847+ atomic_unchecked_t bitmap_data;
47848+ atomic_unchecked_t bg_allocs;
47849+ atomic_unchecked_t bg_extends;
47850 };
47851
47852 enum ocfs2_local_alloc_state
47853diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47854index ba5d97e..c77db25 100644
47855--- a/fs/ocfs2/suballoc.c
47856+++ b/fs/ocfs2/suballoc.c
47857@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47858 mlog_errno(status);
47859 goto bail;
47860 }
47861- atomic_inc(&osb->alloc_stats.bg_extends);
47862+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47863
47864 /* You should never ask for this much metadata */
47865 BUG_ON(bits_wanted >
47866@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47867 mlog_errno(status);
47868 goto bail;
47869 }
47870- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47871+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47872
47873 *suballoc_loc = res.sr_bg_blkno;
47874 *suballoc_bit_start = res.sr_bit_offset;
47875@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47876 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47877 res->sr_bits);
47878
47879- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47880+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47881
47882 BUG_ON(res->sr_bits != 1);
47883
47884@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47885 mlog_errno(status);
47886 goto bail;
47887 }
47888- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47889+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47890
47891 BUG_ON(res.sr_bits != 1);
47892
47893@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47894 cluster_start,
47895 num_clusters);
47896 if (!status)
47897- atomic_inc(&osb->alloc_stats.local_data);
47898+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
47899 } else {
47900 if (min_clusters > (osb->bitmap_cpg - 1)) {
47901 /* The only paths asking for contiguousness
47902@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47903 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47904 res.sr_bg_blkno,
47905 res.sr_bit_offset);
47906- atomic_inc(&osb->alloc_stats.bitmap_data);
47907+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47908 *num_clusters = res.sr_bits;
47909 }
47910 }
47911diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47912index 56f6102..1433c29 100644
47913--- a/fs/ocfs2/super.c
47914+++ b/fs/ocfs2/super.c
47915@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47916 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47917 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47918 "Stats",
47919- atomic_read(&osb->alloc_stats.bitmap_data),
47920- atomic_read(&osb->alloc_stats.local_data),
47921- atomic_read(&osb->alloc_stats.bg_allocs),
47922- atomic_read(&osb->alloc_stats.moves),
47923- atomic_read(&osb->alloc_stats.bg_extends));
47924+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47925+ atomic_read_unchecked(&osb->alloc_stats.local_data),
47926+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47927+ atomic_read_unchecked(&osb->alloc_stats.moves),
47928+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47929
47930 out += snprintf(buf + out, len - out,
47931 "%10s => State: %u Descriptor: %llu Size: %u bits "
47932@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47933 spin_lock_init(&osb->osb_xattr_lock);
47934 ocfs2_init_steal_slots(osb);
47935
47936- atomic_set(&osb->alloc_stats.moves, 0);
47937- atomic_set(&osb->alloc_stats.local_data, 0);
47938- atomic_set(&osb->alloc_stats.bitmap_data, 0);
47939- atomic_set(&osb->alloc_stats.bg_allocs, 0);
47940- atomic_set(&osb->alloc_stats.bg_extends, 0);
47941+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47942+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47943+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47944+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47945+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47946
47947 /* Copy the blockcheck stats from the superblock probe */
47948 osb->osb_ecc_stats = *stats;
47949diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47950index 5d22872..523db20 100644
47951--- a/fs/ocfs2/symlink.c
47952+++ b/fs/ocfs2/symlink.c
47953@@ -142,7 +142,7 @@ bail:
47954
47955 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47956 {
47957- char *link = nd_get_link(nd);
47958+ const char *link = nd_get_link(nd);
47959 if (!IS_ERR(link))
47960 kfree(link);
47961 }
47962diff --git a/fs/open.c b/fs/open.c
47963index f711921..28d5958 100644
47964--- a/fs/open.c
47965+++ b/fs/open.c
47966@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47967 error = locks_verify_truncate(inode, NULL, length);
47968 if (!error)
47969 error = security_path_truncate(&path);
47970+
47971+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47972+ error = -EACCES;
47973+
47974 if (!error)
47975 error = do_truncate(path.dentry, length, 0, NULL);
47976
47977@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47978 if (__mnt_is_readonly(path.mnt))
47979 res = -EROFS;
47980
47981+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47982+ res = -EACCES;
47983+
47984 out_path_release:
47985 path_put(&path);
47986 out:
47987@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47988 if (error)
47989 goto dput_and_out;
47990
47991+ gr_log_chdir(path.dentry, path.mnt);
47992+
47993 set_fs_pwd(current->fs, &path);
47994
47995 dput_and_out:
47996@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47997 goto out_putf;
47998
47999 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
48000+
48001+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48002+ error = -EPERM;
48003+
48004+ if (!error)
48005+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48006+
48007 if (!error)
48008 set_fs_pwd(current->fs, &file->f_path);
48009 out_putf:
48010@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
48011 if (error)
48012 goto dput_and_out;
48013
48014+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48015+ goto dput_and_out;
48016+
48017 set_fs_root(current->fs, &path);
48018+
48019+ gr_handle_chroot_chdir(&path);
48020+
48021 error = 0;
48022 dput_and_out:
48023 path_put(&path);
48024@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
48025 if (error)
48026 return error;
48027 mutex_lock(&inode->i_mutex);
48028+
48029+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
48030+ error = -EACCES;
48031+ goto out_unlock;
48032+ }
48033+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
48034+ error = -EACCES;
48035+ goto out_unlock;
48036+ }
48037+
48038 error = security_path_chmod(path->dentry, path->mnt, mode);
48039 if (error)
48040 goto out_unlock;
48041@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
48042 int error;
48043 struct iattr newattrs;
48044
48045+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
48046+ return -EACCES;
48047+
48048 newattrs.ia_valid = ATTR_CTIME;
48049 if (user != (uid_t) -1) {
48050 newattrs.ia_valid |= ATTR_UID;
48051diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
48052index 6296b40..417c00f 100644
48053--- a/fs/partitions/efi.c
48054+++ b/fs/partitions/efi.c
48055@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
48056 if (!gpt)
48057 return NULL;
48058
48059+ if (!le32_to_cpu(gpt->num_partition_entries))
48060+ return NULL;
48061+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
48062+ if (!pte)
48063+ return NULL;
48064+
48065 count = le32_to_cpu(gpt->num_partition_entries) *
48066 le32_to_cpu(gpt->sizeof_partition_entry);
48067- if (!count)
48068- return NULL;
48069- pte = kzalloc(count, GFP_KERNEL);
48070- if (!pte)
48071- return NULL;
48072-
48073 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
48074 (u8 *) pte,
48075 count) < count) {
48076diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
48077index af9fdf0..75b15c3 100644
48078--- a/fs/partitions/ldm.c
48079+++ b/fs/partitions/ldm.c
48080@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
48081 goto found;
48082 }
48083
48084- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
48085+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
48086 if (!f) {
48087 ldm_crit ("Out of memory.");
48088 return false;
48089diff --git a/fs/pipe.c b/fs/pipe.c
48090index 0e0be1d..f62a72d 100644
48091--- a/fs/pipe.c
48092+++ b/fs/pipe.c
48093@@ -420,9 +420,9 @@ redo:
48094 }
48095 if (bufs) /* More to do? */
48096 continue;
48097- if (!pipe->writers)
48098+ if (!atomic_read(&pipe->writers))
48099 break;
48100- if (!pipe->waiting_writers) {
48101+ if (!atomic_read(&pipe->waiting_writers)) {
48102 /* syscall merging: Usually we must not sleep
48103 * if O_NONBLOCK is set, or if we got some data.
48104 * But if a writer sleeps in kernel space, then
48105@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48106 mutex_lock(&inode->i_mutex);
48107 pipe = inode->i_pipe;
48108
48109- if (!pipe->readers) {
48110+ if (!atomic_read(&pipe->readers)) {
48111 send_sig(SIGPIPE, current, 0);
48112 ret = -EPIPE;
48113 goto out;
48114@@ -530,7 +530,7 @@ redo1:
48115 for (;;) {
48116 int bufs;
48117
48118- if (!pipe->readers) {
48119+ if (!atomic_read(&pipe->readers)) {
48120 send_sig(SIGPIPE, current, 0);
48121 if (!ret)
48122 ret = -EPIPE;
48123@@ -616,9 +616,9 @@ redo2:
48124 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48125 do_wakeup = 0;
48126 }
48127- pipe->waiting_writers++;
48128+ atomic_inc(&pipe->waiting_writers);
48129 pipe_wait(pipe);
48130- pipe->waiting_writers--;
48131+ atomic_dec(&pipe->waiting_writers);
48132 }
48133 out:
48134 mutex_unlock(&inode->i_mutex);
48135@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48136 mask = 0;
48137 if (filp->f_mode & FMODE_READ) {
48138 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48139- if (!pipe->writers && filp->f_version != pipe->w_counter)
48140+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48141 mask |= POLLHUP;
48142 }
48143
48144@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48145 * Most Unices do not set POLLERR for FIFOs but on Linux they
48146 * behave exactly like pipes for poll().
48147 */
48148- if (!pipe->readers)
48149+ if (!atomic_read(&pipe->readers))
48150 mask |= POLLERR;
48151 }
48152
48153@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48154
48155 mutex_lock(&inode->i_mutex);
48156 pipe = inode->i_pipe;
48157- pipe->readers -= decr;
48158- pipe->writers -= decw;
48159+ atomic_sub(decr, &pipe->readers);
48160+ atomic_sub(decw, &pipe->writers);
48161
48162- if (!pipe->readers && !pipe->writers) {
48163+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48164 free_pipe_info(inode);
48165 } else {
48166 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48167@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48168
48169 if (inode->i_pipe) {
48170 ret = 0;
48171- inode->i_pipe->readers++;
48172+ atomic_inc(&inode->i_pipe->readers);
48173 }
48174
48175 mutex_unlock(&inode->i_mutex);
48176@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48177
48178 if (inode->i_pipe) {
48179 ret = 0;
48180- inode->i_pipe->writers++;
48181+ atomic_inc(&inode->i_pipe->writers);
48182 }
48183
48184 mutex_unlock(&inode->i_mutex);
48185@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48186 if (inode->i_pipe) {
48187 ret = 0;
48188 if (filp->f_mode & FMODE_READ)
48189- inode->i_pipe->readers++;
48190+ atomic_inc(&inode->i_pipe->readers);
48191 if (filp->f_mode & FMODE_WRITE)
48192- inode->i_pipe->writers++;
48193+ atomic_inc(&inode->i_pipe->writers);
48194 }
48195
48196 mutex_unlock(&inode->i_mutex);
48197@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
48198 inode->i_pipe = NULL;
48199 }
48200
48201-static struct vfsmount *pipe_mnt __read_mostly;
48202+struct vfsmount *pipe_mnt __read_mostly;
48203
48204 /*
48205 * pipefs_dname() is called from d_path().
48206@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
48207 goto fail_iput;
48208 inode->i_pipe = pipe;
48209
48210- pipe->readers = pipe->writers = 1;
48211+ atomic_set(&pipe->readers, 1);
48212+ atomic_set(&pipe->writers, 1);
48213 inode->i_fop = &rdwr_pipefifo_fops;
48214
48215 /*
48216diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48217index 15af622..0e9f4467 100644
48218--- a/fs/proc/Kconfig
48219+++ b/fs/proc/Kconfig
48220@@ -30,12 +30,12 @@ config PROC_FS
48221
48222 config PROC_KCORE
48223 bool "/proc/kcore support" if !ARM
48224- depends on PROC_FS && MMU
48225+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48226
48227 config PROC_VMCORE
48228 bool "/proc/vmcore support"
48229- depends on PROC_FS && CRASH_DUMP
48230- default y
48231+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48232+ default n
48233 help
48234 Exports the dump image of crashed kernel in ELF format.
48235
48236@@ -59,8 +59,8 @@ config PROC_SYSCTL
48237 limited in memory.
48238
48239 config PROC_PAGE_MONITOR
48240- default y
48241- depends on PROC_FS && MMU
48242+ default n
48243+ depends on PROC_FS && MMU && !GRKERNSEC
48244 bool "Enable /proc page monitoring" if EXPERT
48245 help
48246 Various /proc files exist to monitor process memory utilization:
48247diff --git a/fs/proc/array.c b/fs/proc/array.c
48248index 3a1dafd..c7fed72 100644
48249--- a/fs/proc/array.c
48250+++ b/fs/proc/array.c
48251@@ -60,6 +60,7 @@
48252 #include <linux/tty.h>
48253 #include <linux/string.h>
48254 #include <linux/mman.h>
48255+#include <linux/grsecurity.h>
48256 #include <linux/proc_fs.h>
48257 #include <linux/ioport.h>
48258 #include <linux/uaccess.h>
48259@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48260 seq_putc(m, '\n');
48261 }
48262
48263+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48264+static inline void task_pax(struct seq_file *m, struct task_struct *p)
48265+{
48266+ if (p->mm)
48267+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48268+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48269+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48270+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48271+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48272+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48273+ else
48274+ seq_printf(m, "PaX:\t-----\n");
48275+}
48276+#endif
48277+
48278 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48279 struct pid *pid, struct task_struct *task)
48280 {
48281@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48282 task_cpus_allowed(m, task);
48283 cpuset_task_status_allowed(m, task);
48284 task_context_switch_counts(m, task);
48285+
48286+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48287+ task_pax(m, task);
48288+#endif
48289+
48290+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48291+ task_grsec_rbac(m, task);
48292+#endif
48293+
48294 return 0;
48295 }
48296
48297+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48298+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48299+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48300+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48301+#endif
48302+
48303 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48304 struct pid *pid, struct task_struct *task, int whole)
48305 {
48306@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48307 char tcomm[sizeof(task->comm)];
48308 unsigned long flags;
48309
48310+ pax_track_stack();
48311+
48312 state = *get_task_state(task);
48313 vsize = eip = esp = 0;
48314 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
48315@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48316 gtime = task->gtime;
48317 }
48318
48319+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48320+ if (PAX_RAND_FLAGS(mm)) {
48321+ eip = 0;
48322+ esp = 0;
48323+ wchan = 0;
48324+ }
48325+#endif
48326+#ifdef CONFIG_GRKERNSEC_HIDESYM
48327+ wchan = 0;
48328+ eip =0;
48329+ esp =0;
48330+#endif
48331+
48332 /* scale priority and nice values from timeslices to -20..20 */
48333 /* to make it look like a "normal" Unix priority/nice value */
48334 priority = task_prio(task);
48335@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48336 vsize,
48337 mm ? get_mm_rss(mm) : 0,
48338 rsslim,
48339+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48340+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48341+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
48342+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48343+#else
48344 mm ? (permitted ? mm->start_code : 1) : 0,
48345 mm ? (permitted ? mm->end_code : 1) : 0,
48346 (permitted && mm) ? mm->start_stack : 0,
48347+#endif
48348 esp,
48349 eip,
48350 /* The signal information here is obsolete.
48351@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48352
48353 return 0;
48354 }
48355+
48356+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48357+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48358+{
48359+ u32 curr_ip = 0;
48360+ unsigned long flags;
48361+
48362+ if (lock_task_sighand(task, &flags)) {
48363+ curr_ip = task->signal->curr_ip;
48364+ unlock_task_sighand(task, &flags);
48365+ }
48366+
48367+ return sprintf(buffer, "%pI4\n", &curr_ip);
48368+}
48369+#endif
48370diff --git a/fs/proc/base.c b/fs/proc/base.c
48371index 5eb0206..f8f1974 100644
48372--- a/fs/proc/base.c
48373+++ b/fs/proc/base.c
48374@@ -107,6 +107,22 @@ struct pid_entry {
48375 union proc_op op;
48376 };
48377
48378+struct getdents_callback {
48379+ struct linux_dirent __user * current_dir;
48380+ struct linux_dirent __user * previous;
48381+ struct file * file;
48382+ int count;
48383+ int error;
48384+};
48385+
48386+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
48387+ loff_t offset, u64 ino, unsigned int d_type)
48388+{
48389+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
48390+ buf->error = -EINVAL;
48391+ return 0;
48392+}
48393+
48394 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48395 .name = (NAME), \
48396 .len = sizeof(NAME) - 1, \
48397@@ -194,65 +210,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
48398 return result;
48399 }
48400
48401-static struct mm_struct *__check_mem_permission(struct task_struct *task)
48402-{
48403- struct mm_struct *mm;
48404-
48405- mm = get_task_mm(task);
48406- if (!mm)
48407- return ERR_PTR(-EINVAL);
48408-
48409- /*
48410- * A task can always look at itself, in case it chooses
48411- * to use system calls instead of load instructions.
48412- */
48413- if (task == current)
48414- return mm;
48415-
48416- /*
48417- * If current is actively ptrace'ing, and would also be
48418- * permitted to freshly attach with ptrace now, permit it.
48419- */
48420- if (task_is_stopped_or_traced(task)) {
48421- int match;
48422- rcu_read_lock();
48423- match = (ptrace_parent(task) == current);
48424- rcu_read_unlock();
48425- if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
48426- return mm;
48427- }
48428-
48429- /*
48430- * No one else is allowed.
48431- */
48432- mmput(mm);
48433- return ERR_PTR(-EPERM);
48434-}
48435-
48436-/*
48437- * If current may access user memory in @task return a reference to the
48438- * corresponding mm, otherwise ERR_PTR.
48439- */
48440-static struct mm_struct *check_mem_permission(struct task_struct *task)
48441-{
48442- struct mm_struct *mm;
48443- int err;
48444-
48445- /*
48446- * Avoid racing if task exec's as we might get a new mm but validate
48447- * against old credentials.
48448- */
48449- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
48450- if (err)
48451- return ERR_PTR(err);
48452-
48453- mm = __check_mem_permission(task);
48454- mutex_unlock(&task->signal->cred_guard_mutex);
48455-
48456- return mm;
48457-}
48458-
48459-struct mm_struct *mm_for_maps(struct task_struct *task)
48460+static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
48461 {
48462 struct mm_struct *mm;
48463 int err;
48464@@ -262,16 +220,23 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
48465 return ERR_PTR(err);
48466
48467 mm = get_task_mm(task);
48468- if (mm && mm != current->mm &&
48469- !ptrace_may_access(task, PTRACE_MODE_READ)) {
48470- mmput(mm);
48471- mm = ERR_PTR(-EACCES);
48472+ if (mm) {
48473+ if ((mm != current->mm && !ptrace_may_access(task, mode)) ||
48474+ (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))) {
48475+ mmput(mm);
48476+ mm = ERR_PTR(-EACCES);
48477+ }
48478 }
48479 mutex_unlock(&task->signal->cred_guard_mutex);
48480
48481 return mm;
48482 }
48483
48484+struct mm_struct *mm_for_maps(struct task_struct *task)
48485+{
48486+ return mm_access(task, PTRACE_MODE_READ);
48487+}
48488+
48489 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48490 {
48491 int res = 0;
48492@@ -282,6 +247,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48493 if (!mm->arg_end)
48494 goto out_mm; /* Shh! No looking before we're done */
48495
48496+ if (gr_acl_handle_procpidmem(task))
48497+ goto out_mm;
48498+
48499 len = mm->arg_end - mm->arg_start;
48500
48501 if (len > PAGE_SIZE)
48502@@ -309,12 +277,28 @@ out:
48503 return res;
48504 }
48505
48506+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48507+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48508+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48509+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48510+#endif
48511+
48512 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48513 {
48514 struct mm_struct *mm = mm_for_maps(task);
48515 int res = PTR_ERR(mm);
48516 if (mm && !IS_ERR(mm)) {
48517 unsigned int nwords = 0;
48518+
48519+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48520+ /* allow if we're currently ptracing this task */
48521+ if (PAX_RAND_FLAGS(mm) &&
48522+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48523+ mmput(mm);
48524+ return 0;
48525+ }
48526+#endif
48527+
48528 do {
48529 nwords += 2;
48530 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48531@@ -328,7 +312,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48532 }
48533
48534
48535-#ifdef CONFIG_KALLSYMS
48536+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48537 /*
48538 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48539 * Returns the resolved symbol. If that fails, simply return the address.
48540@@ -367,7 +351,7 @@ static void unlock_trace(struct task_struct *task)
48541 mutex_unlock(&task->signal->cred_guard_mutex);
48542 }
48543
48544-#ifdef CONFIG_STACKTRACE
48545+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48546
48547 #define MAX_STACK_TRACE_DEPTH 64
48548
48549@@ -558,7 +542,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48550 return count;
48551 }
48552
48553-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48554+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48555 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48556 {
48557 long nr;
48558@@ -587,7 +571,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48559 /************************************************************************/
48560
48561 /* permission checks */
48562-static int proc_fd_access_allowed(struct inode *inode)
48563+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48564 {
48565 struct task_struct *task;
48566 int allowed = 0;
48567@@ -597,7 +581,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48568 */
48569 task = get_proc_task(inode);
48570 if (task) {
48571- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48572+ if (log)
48573+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
48574+ else
48575+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48576 put_task_struct(task);
48577 }
48578 return allowed;
48579@@ -816,38 +803,39 @@ static const struct file_operations proc_single_file_operations = {
48580
48581 static int mem_open(struct inode* inode, struct file* file)
48582 {
48583- file->private_data = (void*)((long)current->self_exec_id);
48584+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
48585+ struct mm_struct *mm;
48586+
48587+ if (!task)
48588+ return -ESRCH;
48589+
48590+ mm = mm_access(task, PTRACE_MODE_ATTACH);
48591+ put_task_struct(task);
48592+
48593+ if (IS_ERR(mm))
48594+ return PTR_ERR(mm);
48595+
48596 /* OK to pass negative loff_t, we can catch out-of-range */
48597 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48598+ file->private_data = mm;
48599+
48600 return 0;
48601 }
48602
48603 static ssize_t mem_read(struct file * file, char __user * buf,
48604 size_t count, loff_t *ppos)
48605 {
48606- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
48607+ int ret;
48608 char *page;
48609 unsigned long src = *ppos;
48610- int ret = -ESRCH;
48611- struct mm_struct *mm;
48612+ struct mm_struct *mm = file->private_data;
48613
48614- if (!task)
48615- goto out_no_task;
48616+ if (!mm)
48617+ return 0;
48618
48619- ret = -ENOMEM;
48620 page = (char *)__get_free_page(GFP_TEMPORARY);
48621 if (!page)
48622- goto out;
48623-
48624- mm = check_mem_permission(task);
48625- ret = PTR_ERR(mm);
48626- if (IS_ERR(mm))
48627- goto out_free;
48628-
48629- ret = -EIO;
48630-
48631- if (file->private_data != (void*)((long)current->self_exec_id))
48632- goto out_put;
48633+ return -ENOMEM;
48634
48635 ret = 0;
48636
48637@@ -874,42 +862,28 @@ static ssize_t mem_read(struct file * file, char __user * buf,
48638 }
48639 *ppos = src;
48640
48641-out_put:
48642- mmput(mm);
48643-out_free:
48644 free_page((unsigned long) page);
48645-out:
48646- put_task_struct(task);
48647-out_no_task:
48648 return ret;
48649 }
48650
48651+#define mem_write NULL
48652+
48653+#ifndef mem_write
48654+/* They were right the first time */
48655 static ssize_t mem_write(struct file * file, const char __user *buf,
48656 size_t count, loff_t *ppos)
48657 {
48658 int copied;
48659 char *page;
48660- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
48661 unsigned long dst = *ppos;
48662- struct mm_struct *mm;
48663+ struct mm_struct *mm = file->private_data;
48664
48665- copied = -ESRCH;
48666- if (!task)
48667- goto out_no_task;
48668+ if (!mm)
48669+ return 0;
48670
48671- copied = -ENOMEM;
48672 page = (char *)__get_free_page(GFP_TEMPORARY);
48673 if (!page)
48674- goto out_task;
48675-
48676- mm = check_mem_permission(task);
48677- copied = PTR_ERR(mm);
48678- if (IS_ERR(mm))
48679- goto out_free;
48680-
48681- copied = -EIO;
48682- if (file->private_data != (void *)((long)current->self_exec_id))
48683- goto out_mm;
48684+ return -ENOMEM;
48685
48686 copied = 0;
48687 while (count > 0) {
48688@@ -933,15 +907,10 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
48689 }
48690 *ppos = dst;
48691
48692-out_mm:
48693- mmput(mm);
48694-out_free:
48695 free_page((unsigned long) page);
48696-out_task:
48697- put_task_struct(task);
48698-out_no_task:
48699 return copied;
48700 }
48701+#endif
48702
48703 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
48704 {
48705@@ -959,11 +928,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
48706 return file->f_pos;
48707 }
48708
48709+static int mem_release(struct inode *inode, struct file *file)
48710+{
48711+ struct mm_struct *mm = file->private_data;
48712+
48713+ mmput(mm);
48714+ return 0;
48715+}
48716+
48717 static const struct file_operations proc_mem_operations = {
48718 .llseek = mem_lseek,
48719 .read = mem_read,
48720 .write = mem_write,
48721 .open = mem_open,
48722+ .release = mem_release,
48723 };
48724
48725 static ssize_t environ_read(struct file *file, char __user *buf,
48726@@ -978,6 +956,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48727 if (!task)
48728 goto out_no_task;
48729
48730+ if (gr_acl_handle_procpidmem(task))
48731+ goto out;
48732+
48733 ret = -ENOMEM;
48734 page = (char *)__get_free_page(GFP_TEMPORARY);
48735 if (!page)
48736@@ -1613,7 +1594,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48737 path_put(&nd->path);
48738
48739 /* Are we allowed to snoop on the tasks file descriptors? */
48740- if (!proc_fd_access_allowed(inode))
48741+ if (!proc_fd_access_allowed(inode,0))
48742 goto out;
48743
48744 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
48745@@ -1652,8 +1633,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48746 struct path path;
48747
48748 /* Are we allowed to snoop on the tasks file descriptors? */
48749- if (!proc_fd_access_allowed(inode))
48750- goto out;
48751+ /* logging this is needed for learning on chromium to work properly,
48752+ but we don't want to flood the logs from 'ps' which does a readlink
48753+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48754+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48755+ */
48756+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48757+ if (!proc_fd_access_allowed(inode,0))
48758+ goto out;
48759+ } else {
48760+ if (!proc_fd_access_allowed(inode,1))
48761+ goto out;
48762+ }
48763
48764 error = PROC_I(inode)->op.proc_get_link(inode, &path);
48765 if (error)
48766@@ -1718,7 +1709,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48767 rcu_read_lock();
48768 cred = __task_cred(task);
48769 inode->i_uid = cred->euid;
48770+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48771+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48772+#else
48773 inode->i_gid = cred->egid;
48774+#endif
48775 rcu_read_unlock();
48776 }
48777 security_task_to_inode(task, inode);
48778@@ -1736,6 +1731,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48779 struct inode *inode = dentry->d_inode;
48780 struct task_struct *task;
48781 const struct cred *cred;
48782+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48783+ const struct cred *tmpcred = current_cred();
48784+#endif
48785
48786 generic_fillattr(inode, stat);
48787
48788@@ -1743,13 +1741,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48789 stat->uid = 0;
48790 stat->gid = 0;
48791 task = pid_task(proc_pid(inode), PIDTYPE_PID);
48792+
48793+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
48794+ rcu_read_unlock();
48795+ return -ENOENT;
48796+ }
48797+
48798 if (task) {
48799+ cred = __task_cred(task);
48800+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48801+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48802+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48803+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48804+#endif
48805+ ) {
48806+#endif
48807 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48808+#ifdef CONFIG_GRKERNSEC_PROC_USER
48809+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48810+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48811+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48812+#endif
48813 task_dumpable(task)) {
48814- cred = __task_cred(task);
48815 stat->uid = cred->euid;
48816+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48817+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48818+#else
48819 stat->gid = cred->egid;
48820+#endif
48821 }
48822+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48823+ } else {
48824+ rcu_read_unlock();
48825+ return -ENOENT;
48826+ }
48827+#endif
48828 }
48829 rcu_read_unlock();
48830 return 0;
48831@@ -1786,11 +1812,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48832
48833 if (task) {
48834 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48835+#ifdef CONFIG_GRKERNSEC_PROC_USER
48836+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48837+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48838+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48839+#endif
48840 task_dumpable(task)) {
48841 rcu_read_lock();
48842 cred = __task_cred(task);
48843 inode->i_uid = cred->euid;
48844+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48845+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48846+#else
48847 inode->i_gid = cred->egid;
48848+#endif
48849 rcu_read_unlock();
48850 } else {
48851 inode->i_uid = 0;
48852@@ -1908,7 +1943,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48853 int fd = proc_fd(inode);
48854
48855 if (task) {
48856- files = get_files_struct(task);
48857+ if (!gr_acl_handle_procpidmem(task))
48858+ files = get_files_struct(task);
48859 put_task_struct(task);
48860 }
48861 if (files) {
48862@@ -2176,11 +2212,21 @@ static const struct file_operations proc_fd_operations = {
48863 */
48864 static int proc_fd_permission(struct inode *inode, int mask)
48865 {
48866+ struct task_struct *task;
48867 int rv = generic_permission(inode, mask);
48868- if (rv == 0)
48869- return 0;
48870+
48871 if (task_pid(current) == proc_pid(inode))
48872 rv = 0;
48873+
48874+ task = get_proc_task(inode);
48875+ if (task == NULL)
48876+ return rv;
48877+
48878+ if (gr_acl_handle_procpidmem(task))
48879+ rv = -EACCES;
48880+
48881+ put_task_struct(task);
48882+
48883 return rv;
48884 }
48885
48886@@ -2290,6 +2336,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48887 if (!task)
48888 goto out_no_task;
48889
48890+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48891+ goto out;
48892+
48893 /*
48894 * Yes, it does not scale. And it should not. Don't add
48895 * new entries into /proc/<tgid>/ without very good reasons.
48896@@ -2334,6 +2383,9 @@ static int proc_pident_readdir(struct file *filp,
48897 if (!task)
48898 goto out_no_task;
48899
48900+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48901+ goto out;
48902+
48903 ret = 0;
48904 i = filp->f_pos;
48905 switch (i) {
48906@@ -2604,7 +2656,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48907 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48908 void *cookie)
48909 {
48910- char *s = nd_get_link(nd);
48911+ const char *s = nd_get_link(nd);
48912 if (!IS_ERR(s))
48913 __putname(s);
48914 }
48915@@ -2802,7 +2854,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48916 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48917 #endif
48918 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48919-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48920+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48921 INF("syscall", S_IRUGO, proc_pid_syscall),
48922 #endif
48923 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48924@@ -2827,10 +2879,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48925 #ifdef CONFIG_SECURITY
48926 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48927 #endif
48928-#ifdef CONFIG_KALLSYMS
48929+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48930 INF("wchan", S_IRUGO, proc_pid_wchan),
48931 #endif
48932-#ifdef CONFIG_STACKTRACE
48933+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48934 ONE("stack", S_IRUGO, proc_pid_stack),
48935 #endif
48936 #ifdef CONFIG_SCHEDSTATS
48937@@ -2864,6 +2916,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48938 #ifdef CONFIG_HARDWALL
48939 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48940 #endif
48941+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48942+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48943+#endif
48944 };
48945
48946 static int proc_tgid_base_readdir(struct file * filp,
48947@@ -2989,7 +3044,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48948 if (!inode)
48949 goto out;
48950
48951+#ifdef CONFIG_GRKERNSEC_PROC_USER
48952+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48953+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48954+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48955+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48956+#else
48957 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48958+#endif
48959 inode->i_op = &proc_tgid_base_inode_operations;
48960 inode->i_fop = &proc_tgid_base_operations;
48961 inode->i_flags|=S_IMMUTABLE;
48962@@ -3031,7 +3093,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48963 if (!task)
48964 goto out;
48965
48966+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48967+ goto out_put_task;
48968+
48969 result = proc_pid_instantiate(dir, dentry, task, NULL);
48970+out_put_task:
48971 put_task_struct(task);
48972 out:
48973 return result;
48974@@ -3096,6 +3162,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48975 {
48976 unsigned int nr;
48977 struct task_struct *reaper;
48978+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48979+ const struct cred *tmpcred = current_cred();
48980+ const struct cred *itercred;
48981+#endif
48982+ filldir_t __filldir = filldir;
48983 struct tgid_iter iter;
48984 struct pid_namespace *ns;
48985
48986@@ -3119,8 +3190,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48987 for (iter = next_tgid(ns, iter);
48988 iter.task;
48989 iter.tgid += 1, iter = next_tgid(ns, iter)) {
48990+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48991+ rcu_read_lock();
48992+ itercred = __task_cred(iter.task);
48993+#endif
48994+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
48995+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48996+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
48997+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48998+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48999+#endif
49000+ )
49001+#endif
49002+ )
49003+ __filldir = &gr_fake_filldir;
49004+ else
49005+ __filldir = filldir;
49006+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49007+ rcu_read_unlock();
49008+#endif
49009 filp->f_pos = iter.tgid + TGID_OFFSET;
49010- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
49011+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
49012 put_task_struct(iter.task);
49013 goto out;
49014 }
49015@@ -3148,7 +3238,7 @@ static const struct pid_entry tid_base_stuff[] = {
49016 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49017 #endif
49018 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49019-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49020+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49021 INF("syscall", S_IRUGO, proc_pid_syscall),
49022 #endif
49023 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49024@@ -3172,10 +3262,10 @@ static const struct pid_entry tid_base_stuff[] = {
49025 #ifdef CONFIG_SECURITY
49026 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49027 #endif
49028-#ifdef CONFIG_KALLSYMS
49029+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49030 INF("wchan", S_IRUGO, proc_pid_wchan),
49031 #endif
49032-#ifdef CONFIG_STACKTRACE
49033+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49034 ONE("stack", S_IRUGO, proc_pid_stack),
49035 #endif
49036 #ifdef CONFIG_SCHEDSTATS
49037diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49038index 82676e3..5f8518a 100644
49039--- a/fs/proc/cmdline.c
49040+++ b/fs/proc/cmdline.c
49041@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
49042
49043 static int __init proc_cmdline_init(void)
49044 {
49045+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49046+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49047+#else
49048 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49049+#endif
49050 return 0;
49051 }
49052 module_init(proc_cmdline_init);
49053diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49054index b143471..bb105e5 100644
49055--- a/fs/proc/devices.c
49056+++ b/fs/proc/devices.c
49057@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
49058
49059 static int __init proc_devices_init(void)
49060 {
49061+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49062+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49063+#else
49064 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49065+#endif
49066 return 0;
49067 }
49068 module_init(proc_devices_init);
49069diff --git a/fs/proc/inode.c b/fs/proc/inode.c
49070index 7ed72d6..d5f061a 100644
49071--- a/fs/proc/inode.c
49072+++ b/fs/proc/inode.c
49073@@ -18,12 +18,18 @@
49074 #include <linux/module.h>
49075 #include <linux/sysctl.h>
49076 #include <linux/slab.h>
49077+#include <linux/grsecurity.h>
49078
49079 #include <asm/system.h>
49080 #include <asm/uaccess.h>
49081
49082 #include "internal.h"
49083
49084+#ifdef CONFIG_PROC_SYSCTL
49085+extern const struct inode_operations proc_sys_inode_operations;
49086+extern const struct inode_operations proc_sys_dir_operations;
49087+#endif
49088+
49089 static void proc_evict_inode(struct inode *inode)
49090 {
49091 struct proc_dir_entry *de;
49092@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
49093 ns_ops = PROC_I(inode)->ns_ops;
49094 if (ns_ops && ns_ops->put)
49095 ns_ops->put(PROC_I(inode)->ns);
49096+
49097+#ifdef CONFIG_PROC_SYSCTL
49098+ if (inode->i_op == &proc_sys_inode_operations ||
49099+ inode->i_op == &proc_sys_dir_operations)
49100+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49101+#endif
49102+
49103 }
49104
49105 static struct kmem_cache * proc_inode_cachep;
49106@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
49107 if (de->mode) {
49108 inode->i_mode = de->mode;
49109 inode->i_uid = de->uid;
49110+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49111+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49112+#else
49113 inode->i_gid = de->gid;
49114+#endif
49115 }
49116 if (de->size)
49117 inode->i_size = de->size;
49118diff --git a/fs/proc/internal.h b/fs/proc/internal.h
49119index 7838e5c..ff92cbc 100644
49120--- a/fs/proc/internal.h
49121+++ b/fs/proc/internal.h
49122@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
49123 struct pid *pid, struct task_struct *task);
49124 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49125 struct pid *pid, struct task_struct *task);
49126+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49127+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49128+#endif
49129 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49130
49131 extern const struct file_operations proc_maps_operations;
49132diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
49133index d245cb2..7e645bd 100644
49134--- a/fs/proc/kcore.c
49135+++ b/fs/proc/kcore.c
49136@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
49137 off_t offset = 0;
49138 struct kcore_list *m;
49139
49140+ pax_track_stack();
49141+
49142 /* setup ELF header */
49143 elf = (struct elfhdr *) bufp;
49144 bufp += sizeof(struct elfhdr);
49145@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49146 * the addresses in the elf_phdr on our list.
49147 */
49148 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49149- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49150+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49151+ if (tsz > buflen)
49152 tsz = buflen;
49153-
49154+
49155 while (buflen) {
49156 struct kcore_list *m;
49157
49158@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49159 kfree(elf_buf);
49160 } else {
49161 if (kern_addr_valid(start)) {
49162- unsigned long n;
49163+ char *elf_buf;
49164+ mm_segment_t oldfs;
49165
49166- n = copy_to_user(buffer, (char *)start, tsz);
49167- /*
49168- * We cannot distingush between fault on source
49169- * and fault on destination. When this happens
49170- * we clear too and hope it will trigger the
49171- * EFAULT again.
49172- */
49173- if (n) {
49174- if (clear_user(buffer + tsz - n,
49175- n))
49176+ elf_buf = kmalloc(tsz, GFP_KERNEL);
49177+ if (!elf_buf)
49178+ return -ENOMEM;
49179+ oldfs = get_fs();
49180+ set_fs(KERNEL_DS);
49181+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49182+ set_fs(oldfs);
49183+ if (copy_to_user(buffer, elf_buf, tsz)) {
49184+ kfree(elf_buf);
49185 return -EFAULT;
49186+ }
49187 }
49188+ set_fs(oldfs);
49189+ kfree(elf_buf);
49190 } else {
49191 if (clear_user(buffer, tsz))
49192 return -EFAULT;
49193@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49194
49195 static int open_kcore(struct inode *inode, struct file *filp)
49196 {
49197+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49198+ return -EPERM;
49199+#endif
49200 if (!capable(CAP_SYS_RAWIO))
49201 return -EPERM;
49202 if (kcore_need_update)
49203diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
49204index 80e4645..d2689e9 100644
49205--- a/fs/proc/meminfo.c
49206+++ b/fs/proc/meminfo.c
49207@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49208 unsigned long pages[NR_LRU_LISTS];
49209 int lru;
49210
49211+ pax_track_stack();
49212+
49213 /*
49214 * display in kilobytes.
49215 */
49216@@ -158,7 +160,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49217 vmi.used >> 10,
49218 vmi.largest_chunk >> 10
49219 #ifdef CONFIG_MEMORY_FAILURE
49220- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49221+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49222 #endif
49223 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49224 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
49225diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49226index b1822dd..df622cb 100644
49227--- a/fs/proc/nommu.c
49228+++ b/fs/proc/nommu.c
49229@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
49230 if (len < 1)
49231 len = 1;
49232 seq_printf(m, "%*c", len, ' ');
49233- seq_path(m, &file->f_path, "");
49234+ seq_path(m, &file->f_path, "\n\\");
49235 }
49236
49237 seq_putc(m, '\n');
49238diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
49239index f738024..876984a 100644
49240--- a/fs/proc/proc_net.c
49241+++ b/fs/proc/proc_net.c
49242@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
49243 struct task_struct *task;
49244 struct nsproxy *ns;
49245 struct net *net = NULL;
49246+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49247+ const struct cred *cred = current_cred();
49248+#endif
49249+
49250+#ifdef CONFIG_GRKERNSEC_PROC_USER
49251+ if (cred->fsuid)
49252+ return net;
49253+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49254+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49255+ return net;
49256+#endif
49257
49258 rcu_read_lock();
49259 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49260diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
49261index 1a77dbe..56ec911 100644
49262--- a/fs/proc/proc_sysctl.c
49263+++ b/fs/proc/proc_sysctl.c
49264@@ -8,11 +8,13 @@
49265 #include <linux/namei.h>
49266 #include "internal.h"
49267
49268+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
49269+
49270 static const struct dentry_operations proc_sys_dentry_operations;
49271 static const struct file_operations proc_sys_file_operations;
49272-static const struct inode_operations proc_sys_inode_operations;
49273+const struct inode_operations proc_sys_inode_operations;
49274 static const struct file_operations proc_sys_dir_file_operations;
49275-static const struct inode_operations proc_sys_dir_operations;
49276+const struct inode_operations proc_sys_dir_operations;
49277
49278 static struct inode *proc_sys_make_inode(struct super_block *sb,
49279 struct ctl_table_header *head, struct ctl_table *table)
49280@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
49281
49282 err = NULL;
49283 d_set_d_op(dentry, &proc_sys_dentry_operations);
49284+
49285+ gr_handle_proc_create(dentry, inode);
49286+
49287 d_add(dentry, inode);
49288
49289+ if (gr_handle_sysctl(p, MAY_EXEC))
49290+ err = ERR_PTR(-ENOENT);
49291+
49292 out:
49293 sysctl_head_finish(head);
49294 return err;
49295@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
49296 return -ENOMEM;
49297 } else {
49298 d_set_d_op(child, &proc_sys_dentry_operations);
49299+
49300+ gr_handle_proc_create(child, inode);
49301+
49302 d_add(child, inode);
49303 }
49304 } else {
49305@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
49306 if (*pos < file->f_pos)
49307 continue;
49308
49309+ if (gr_handle_sysctl(table, 0))
49310+ continue;
49311+
49312 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
49313 if (res)
49314 return res;
49315@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49316 if (IS_ERR(head))
49317 return PTR_ERR(head);
49318
49319+ if (table && gr_handle_sysctl(table, MAY_EXEC))
49320+ return -ENOENT;
49321+
49322 generic_fillattr(inode, stat);
49323 if (table)
49324 stat->mode = (stat->mode & S_IFMT) | table->mode;
49325@@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = {
49326 };
49327
49328 static const struct file_operations proc_sys_dir_file_operations = {
49329+ .read = generic_read_dir,
49330 .readdir = proc_sys_readdir,
49331 .llseek = generic_file_llseek,
49332 };
49333
49334-static const struct inode_operations proc_sys_inode_operations = {
49335+const struct inode_operations proc_sys_inode_operations = {
49336 .permission = proc_sys_permission,
49337 .setattr = proc_sys_setattr,
49338 .getattr = proc_sys_getattr,
49339 };
49340
49341-static const struct inode_operations proc_sys_dir_operations = {
49342+const struct inode_operations proc_sys_dir_operations = {
49343 .lookup = proc_sys_lookup,
49344 .permission = proc_sys_permission,
49345 .setattr = proc_sys_setattr,
49346diff --git a/fs/proc/root.c b/fs/proc/root.c
49347index 9a8a2b7..3018df6 100644
49348--- a/fs/proc/root.c
49349+++ b/fs/proc/root.c
49350@@ -123,7 +123,15 @@ void __init proc_root_init(void)
49351 #ifdef CONFIG_PROC_DEVICETREE
49352 proc_device_tree_init();
49353 #endif
49354+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49355+#ifdef CONFIG_GRKERNSEC_PROC_USER
49356+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49357+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49358+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49359+#endif
49360+#else
49361 proc_mkdir("bus", NULL);
49362+#endif
49363 proc_sys_init();
49364 }
49365
49366diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49367index c7d4ee6..41c5564 100644
49368--- a/fs/proc/task_mmu.c
49369+++ b/fs/proc/task_mmu.c
49370@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49371 "VmExe:\t%8lu kB\n"
49372 "VmLib:\t%8lu kB\n"
49373 "VmPTE:\t%8lu kB\n"
49374- "VmSwap:\t%8lu kB\n",
49375- hiwater_vm << (PAGE_SHIFT-10),
49376+ "VmSwap:\t%8lu kB\n"
49377+
49378+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49379+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49380+#endif
49381+
49382+ ,hiwater_vm << (PAGE_SHIFT-10),
49383 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49384 mm->locked_vm << (PAGE_SHIFT-10),
49385 hiwater_rss << (PAGE_SHIFT-10),
49386@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49387 data << (PAGE_SHIFT-10),
49388 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49389 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49390- swap << (PAGE_SHIFT-10));
49391+ swap << (PAGE_SHIFT-10)
49392+
49393+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49394+ , mm->context.user_cs_base, mm->context.user_cs_limit
49395+#endif
49396+
49397+ );
49398 }
49399
49400 unsigned long task_vsize(struct mm_struct *mm)
49401@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
49402 return ret;
49403 }
49404
49405+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49406+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49407+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
49408+ _mm->pax_flags & MF_PAX_SEGMEXEC))
49409+#endif
49410+
49411 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49412 {
49413 struct mm_struct *mm = vma->vm_mm;
49414@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49415 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49416 }
49417
49418- /* We don't show the stack guard page in /proc/maps */
49419+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49420+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49421+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49422+#else
49423 start = vma->vm_start;
49424- if (stack_guard_page_start(vma, start))
49425- start += PAGE_SIZE;
49426 end = vma->vm_end;
49427- if (stack_guard_page_end(vma, end))
49428- end -= PAGE_SIZE;
49429+#endif
49430
49431 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49432 start,
49433@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49434 flags & VM_WRITE ? 'w' : '-',
49435 flags & VM_EXEC ? 'x' : '-',
49436 flags & VM_MAYSHARE ? 's' : 'p',
49437+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49438+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49439+#else
49440 pgoff,
49441+#endif
49442 MAJOR(dev), MINOR(dev), ino, &len);
49443
49444 /*
49445@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49446 */
49447 if (file) {
49448 pad_len_spaces(m, len);
49449- seq_path(m, &file->f_path, "\n");
49450+ seq_path(m, &file->f_path, "\n\\");
49451 } else {
49452 const char *name = arch_vma_name(vma);
49453 if (!name) {
49454@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49455 if (vma->vm_start <= mm->brk &&
49456 vma->vm_end >= mm->start_brk) {
49457 name = "[heap]";
49458- } else if (vma->vm_start <= mm->start_stack &&
49459- vma->vm_end >= mm->start_stack) {
49460+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49461+ (vma->vm_start <= mm->start_stack &&
49462+ vma->vm_end >= mm->start_stack)) {
49463 name = "[stack]";
49464 }
49465 } else {
49466@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v)
49467 };
49468
49469 memset(&mss, 0, sizeof mss);
49470- mss.vma = vma;
49471- /* mmap_sem is held in m_start */
49472- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49473- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49474-
49475+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49476+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49477+#endif
49478+ mss.vma = vma;
49479+ /* mmap_sem is held in m_start */
49480+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49481+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49482+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49483+ }
49484+#endif
49485 show_map_vma(m, vma);
49486
49487 seq_printf(m,
49488@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v)
49489 "KernelPageSize: %8lu kB\n"
49490 "MMUPageSize: %8lu kB\n"
49491 "Locked: %8lu kB\n",
49492+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49493+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49494+#else
49495 (vma->vm_end - vma->vm_start) >> 10,
49496+#endif
49497 mss.resident >> 10,
49498 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49499 mss.shared_clean >> 10,
49500@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v)
49501
49502 if (file) {
49503 seq_printf(m, " file=");
49504- seq_path(m, &file->f_path, "\n\t= ");
49505+ seq_path(m, &file->f_path, "\n\t\\= ");
49506 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49507 seq_printf(m, " heap");
49508 } else if (vma->vm_start <= mm->start_stack &&
49509diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49510index 980de54..2a4db5f 100644
49511--- a/fs/proc/task_nommu.c
49512+++ b/fs/proc/task_nommu.c
49513@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49514 else
49515 bytes += kobjsize(mm);
49516
49517- if (current->fs && current->fs->users > 1)
49518+ if (current->fs && atomic_read(&current->fs->users) > 1)
49519 sbytes += kobjsize(current->fs);
49520 else
49521 bytes += kobjsize(current->fs);
49522@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
49523
49524 if (file) {
49525 pad_len_spaces(m, len);
49526- seq_path(m, &file->f_path, "");
49527+ seq_path(m, &file->f_path, "\n\\");
49528 } else if (mm) {
49529 if (vma->vm_start <= mm->start_stack &&
49530 vma->vm_end >= mm->start_stack) {
49531diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49532index d67908b..d13f6a6 100644
49533--- a/fs/quota/netlink.c
49534+++ b/fs/quota/netlink.c
49535@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49536 void quota_send_warning(short type, unsigned int id, dev_t dev,
49537 const char warntype)
49538 {
49539- static atomic_t seq;
49540+ static atomic_unchecked_t seq;
49541 struct sk_buff *skb;
49542 void *msg_head;
49543 int ret;
49544@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49545 "VFS: Not enough memory to send quota warning.\n");
49546 return;
49547 }
49548- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49549+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49550 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49551 if (!msg_head) {
49552 printk(KERN_ERR
49553diff --git a/fs/readdir.c b/fs/readdir.c
49554index 356f715..c918d38 100644
49555--- a/fs/readdir.c
49556+++ b/fs/readdir.c
49557@@ -17,6 +17,7 @@
49558 #include <linux/security.h>
49559 #include <linux/syscalls.h>
49560 #include <linux/unistd.h>
49561+#include <linux/namei.h>
49562
49563 #include <asm/uaccess.h>
49564
49565@@ -67,6 +68,7 @@ struct old_linux_dirent {
49566
49567 struct readdir_callback {
49568 struct old_linux_dirent __user * dirent;
49569+ struct file * file;
49570 int result;
49571 };
49572
49573@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49574 buf->result = -EOVERFLOW;
49575 return -EOVERFLOW;
49576 }
49577+
49578+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49579+ return 0;
49580+
49581 buf->result++;
49582 dirent = buf->dirent;
49583 if (!access_ok(VERIFY_WRITE, dirent,
49584@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49585
49586 buf.result = 0;
49587 buf.dirent = dirent;
49588+ buf.file = file;
49589
49590 error = vfs_readdir(file, fillonedir, &buf);
49591 if (buf.result)
49592@@ -142,6 +149,7 @@ struct linux_dirent {
49593 struct getdents_callback {
49594 struct linux_dirent __user * current_dir;
49595 struct linux_dirent __user * previous;
49596+ struct file * file;
49597 int count;
49598 int error;
49599 };
49600@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49601 buf->error = -EOVERFLOW;
49602 return -EOVERFLOW;
49603 }
49604+
49605+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49606+ return 0;
49607+
49608 dirent = buf->previous;
49609 if (dirent) {
49610 if (__put_user(offset, &dirent->d_off))
49611@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49612 buf.previous = NULL;
49613 buf.count = count;
49614 buf.error = 0;
49615+ buf.file = file;
49616
49617 error = vfs_readdir(file, filldir, &buf);
49618 if (error >= 0)
49619@@ -229,6 +242,7 @@ out:
49620 struct getdents_callback64 {
49621 struct linux_dirent64 __user * current_dir;
49622 struct linux_dirent64 __user * previous;
49623+ struct file *file;
49624 int count;
49625 int error;
49626 };
49627@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49628 buf->error = -EINVAL; /* only used if we fail.. */
49629 if (reclen > buf->count)
49630 return -EINVAL;
49631+
49632+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49633+ return 0;
49634+
49635 dirent = buf->previous;
49636 if (dirent) {
49637 if (__put_user(offset, &dirent->d_off))
49638@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49639
49640 buf.current_dir = dirent;
49641 buf.previous = NULL;
49642+ buf.file = file;
49643 buf.count = count;
49644 buf.error = 0;
49645
49646@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49647 error = buf.error;
49648 lastdirent = buf.previous;
49649 if (lastdirent) {
49650- typeof(lastdirent->d_off) d_off = file->f_pos;
49651+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49652 if (__put_user(d_off, &lastdirent->d_off))
49653 error = -EFAULT;
49654 else
49655diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
49656index 133e935..349ef18 100644
49657--- a/fs/reiserfs/dir.c
49658+++ b/fs/reiserfs/dir.c
49659@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
49660 struct reiserfs_dir_entry de;
49661 int ret = 0;
49662
49663+ pax_track_stack();
49664+
49665 reiserfs_write_lock(inode->i_sb);
49666
49667 reiserfs_check_lock_depth(inode->i_sb, "readdir");
49668diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49669index 60c0804..d814f98 100644
49670--- a/fs/reiserfs/do_balan.c
49671+++ b/fs/reiserfs/do_balan.c
49672@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49673 return;
49674 }
49675
49676- atomic_inc(&(fs_generation(tb->tb_sb)));
49677+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49678 do_balance_starts(tb);
49679
49680 /* balance leaf returns 0 except if combining L R and S into
49681diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
49682index a159ba5..0396a76 100644
49683--- a/fs/reiserfs/journal.c
49684+++ b/fs/reiserfs/journal.c
49685@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
49686 struct buffer_head *bh;
49687 int i, j;
49688
49689+ pax_track_stack();
49690+
49691 bh = __getblk(dev, block, bufsize);
49692 if (buffer_uptodate(bh))
49693 return (bh);
49694diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
49695index ef39232..0fa91ba 100644
49696--- a/fs/reiserfs/namei.c
49697+++ b/fs/reiserfs/namei.c
49698@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
49699 unsigned long savelink = 1;
49700 struct timespec ctime;
49701
49702+ pax_track_stack();
49703+
49704 /* three balancings: (1) old name removal, (2) new name insertion
49705 and (3) maybe "save" link insertion
49706 stat data updates: (1) old directory,
49707diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49708index 7a99811..2c9286f 100644
49709--- a/fs/reiserfs/procfs.c
49710+++ b/fs/reiserfs/procfs.c
49711@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49712 "SMALL_TAILS " : "NO_TAILS ",
49713 replay_only(sb) ? "REPLAY_ONLY " : "",
49714 convert_reiserfs(sb) ? "CONV " : "",
49715- atomic_read(&r->s_generation_counter),
49716+ atomic_read_unchecked(&r->s_generation_counter),
49717 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49718 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49719 SF(s_good_search_by_key_reada), SF(s_bmaps),
49720@@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
49721 struct journal_params *jp = &rs->s_v1.s_journal;
49722 char b[BDEVNAME_SIZE];
49723
49724+ pax_track_stack();
49725+
49726 seq_printf(m, /* on-disk fields */
49727 "jp_journal_1st_block: \t%i\n"
49728 "jp_journal_dev: \t%s[%x]\n"
49729diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
49730index 313d39d..3a5811b 100644
49731--- a/fs/reiserfs/stree.c
49732+++ b/fs/reiserfs/stree.c
49733@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
49734 int iter = 0;
49735 #endif
49736
49737+ pax_track_stack();
49738+
49739 BUG_ON(!th->t_trans_id);
49740
49741 init_tb_struct(th, &s_del_balance, sb, path,
49742@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
49743 int retval;
49744 int quota_cut_bytes = 0;
49745
49746+ pax_track_stack();
49747+
49748 BUG_ON(!th->t_trans_id);
49749
49750 le_key2cpu_key(&cpu_key, key);
49751@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
49752 int quota_cut_bytes;
49753 loff_t tail_pos = 0;
49754
49755+ pax_track_stack();
49756+
49757 BUG_ON(!th->t_trans_id);
49758
49759 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
49760@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
49761 int retval;
49762 int fs_gen;
49763
49764+ pax_track_stack();
49765+
49766 BUG_ON(!th->t_trans_id);
49767
49768 fs_gen = get_generation(inode->i_sb);
49769@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
49770 int fs_gen = 0;
49771 int quota_bytes = 0;
49772
49773+ pax_track_stack();
49774+
49775 BUG_ON(!th->t_trans_id);
49776
49777 if (inode) { /* Do we count quotas for item? */
49778diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
49779index 5e3527b..e55e569 100644
49780--- a/fs/reiserfs/super.c
49781+++ b/fs/reiserfs/super.c
49782@@ -931,6 +931,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
49783 {.option_name = NULL}
49784 };
49785
49786+ pax_track_stack();
49787+
49788 *blocks = 0;
49789 if (!options || !*options)
49790 /* use default configuration: create tails, journaling on, no
49791diff --git a/fs/select.c b/fs/select.c
49792index d33418f..f8e06bc 100644
49793--- a/fs/select.c
49794+++ b/fs/select.c
49795@@ -20,6 +20,7 @@
49796 #include <linux/module.h>
49797 #include <linux/slab.h>
49798 #include <linux/poll.h>
49799+#include <linux/security.h>
49800 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49801 #include <linux/file.h>
49802 #include <linux/fdtable.h>
49803@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
49804 int retval, i, timed_out = 0;
49805 unsigned long slack = 0;
49806
49807+ pax_track_stack();
49808+
49809 rcu_read_lock();
49810 retval = max_select_fd(n, fds);
49811 rcu_read_unlock();
49812@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
49813 /* Allocate small arguments on the stack to save memory and be faster */
49814 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49815
49816+ pax_track_stack();
49817+
49818 ret = -EINVAL;
49819 if (n < 0)
49820 goto out_nofds;
49821@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49822 struct poll_list *walk = head;
49823 unsigned long todo = nfds;
49824
49825+ pax_track_stack();
49826+
49827+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49828 if (nfds > rlimit(RLIMIT_NOFILE))
49829 return -EINVAL;
49830
49831diff --git a/fs/seq_file.c b/fs/seq_file.c
49832index dba43c3..a99fb63 100644
49833--- a/fs/seq_file.c
49834+++ b/fs/seq_file.c
49835@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49836 return 0;
49837 }
49838 if (!m->buf) {
49839- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49840+ m->size = PAGE_SIZE;
49841+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49842 if (!m->buf)
49843 return -ENOMEM;
49844 }
49845@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49846 Eoverflow:
49847 m->op->stop(m, p);
49848 kfree(m->buf);
49849- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49850+ m->size <<= 1;
49851+ m->buf = kmalloc(m->size, GFP_KERNEL);
49852 return !m->buf ? -ENOMEM : -EAGAIN;
49853 }
49854
49855@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49856 m->version = file->f_version;
49857 /* grab buffer if we didn't have one */
49858 if (!m->buf) {
49859- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49860+ m->size = PAGE_SIZE;
49861+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49862 if (!m->buf)
49863 goto Enomem;
49864 }
49865@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49866 goto Fill;
49867 m->op->stop(m, p);
49868 kfree(m->buf);
49869- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49870+ m->size <<= 1;
49871+ m->buf = kmalloc(m->size, GFP_KERNEL);
49872 if (!m->buf)
49873 goto Enomem;
49874 m->count = 0;
49875@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49876 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49877 void *data)
49878 {
49879- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49880+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49881 int res = -ENOMEM;
49882
49883 if (op) {
49884diff --git a/fs/splice.c b/fs/splice.c
49885index fa2defa..9a697a5 100644
49886--- a/fs/splice.c
49887+++ b/fs/splice.c
49888@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49889 pipe_lock(pipe);
49890
49891 for (;;) {
49892- if (!pipe->readers) {
49893+ if (!atomic_read(&pipe->readers)) {
49894 send_sig(SIGPIPE, current, 0);
49895 if (!ret)
49896 ret = -EPIPE;
49897@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49898 do_wakeup = 0;
49899 }
49900
49901- pipe->waiting_writers++;
49902+ atomic_inc(&pipe->waiting_writers);
49903 pipe_wait(pipe);
49904- pipe->waiting_writers--;
49905+ atomic_dec(&pipe->waiting_writers);
49906 }
49907
49908 pipe_unlock(pipe);
49909@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
49910 .spd_release = spd_release_page,
49911 };
49912
49913+ pax_track_stack();
49914+
49915 if (splice_grow_spd(pipe, &spd))
49916 return -ENOMEM;
49917
49918@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49919 old_fs = get_fs();
49920 set_fs(get_ds());
49921 /* The cast to a user pointer is valid due to the set_fs() */
49922- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49923+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49924 set_fs(old_fs);
49925
49926 return res;
49927@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49928 old_fs = get_fs();
49929 set_fs(get_ds());
49930 /* The cast to a user pointer is valid due to the set_fs() */
49931- res = vfs_write(file, (const char __user *)buf, count, &pos);
49932+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49933 set_fs(old_fs);
49934
49935 return res;
49936@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49937 .spd_release = spd_release_page,
49938 };
49939
49940+ pax_track_stack();
49941+
49942 if (splice_grow_spd(pipe, &spd))
49943 return -ENOMEM;
49944
49945@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49946 goto err;
49947
49948 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49949- vec[i].iov_base = (void __user *) page_address(page);
49950+ vec[i].iov_base = (void __force_user *) page_address(page);
49951 vec[i].iov_len = this_len;
49952 spd.pages[i] = page;
49953 spd.nr_pages++;
49954@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49955 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49956 {
49957 while (!pipe->nrbufs) {
49958- if (!pipe->writers)
49959+ if (!atomic_read(&pipe->writers))
49960 return 0;
49961
49962- if (!pipe->waiting_writers && sd->num_spliced)
49963+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49964 return 0;
49965
49966 if (sd->flags & SPLICE_F_NONBLOCK)
49967@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49968 * out of the pipe right after the splice_to_pipe(). So set
49969 * PIPE_READERS appropriately.
49970 */
49971- pipe->readers = 1;
49972+ atomic_set(&pipe->readers, 1);
49973
49974 current->splice_pipe = pipe;
49975 }
49976@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
49977 };
49978 long ret;
49979
49980+ pax_track_stack();
49981+
49982 pipe = get_pipe_info(file);
49983 if (!pipe)
49984 return -EBADF;
49985@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49986 ret = -ERESTARTSYS;
49987 break;
49988 }
49989- if (!pipe->writers)
49990+ if (!atomic_read(&pipe->writers))
49991 break;
49992- if (!pipe->waiting_writers) {
49993+ if (!atomic_read(&pipe->waiting_writers)) {
49994 if (flags & SPLICE_F_NONBLOCK) {
49995 ret = -EAGAIN;
49996 break;
49997@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49998 pipe_lock(pipe);
49999
50000 while (pipe->nrbufs >= pipe->buffers) {
50001- if (!pipe->readers) {
50002+ if (!atomic_read(&pipe->readers)) {
50003 send_sig(SIGPIPE, current, 0);
50004 ret = -EPIPE;
50005 break;
50006@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
50007 ret = -ERESTARTSYS;
50008 break;
50009 }
50010- pipe->waiting_writers++;
50011+ atomic_inc(&pipe->waiting_writers);
50012 pipe_wait(pipe);
50013- pipe->waiting_writers--;
50014+ atomic_dec(&pipe->waiting_writers);
50015 }
50016
50017 pipe_unlock(pipe);
50018@@ -1819,14 +1825,14 @@ retry:
50019 pipe_double_lock(ipipe, opipe);
50020
50021 do {
50022- if (!opipe->readers) {
50023+ if (!atomic_read(&opipe->readers)) {
50024 send_sig(SIGPIPE, current, 0);
50025 if (!ret)
50026 ret = -EPIPE;
50027 break;
50028 }
50029
50030- if (!ipipe->nrbufs && !ipipe->writers)
50031+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
50032 break;
50033
50034 /*
50035@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50036 pipe_double_lock(ipipe, opipe);
50037
50038 do {
50039- if (!opipe->readers) {
50040+ if (!atomic_read(&opipe->readers)) {
50041 send_sig(SIGPIPE, current, 0);
50042 if (!ret)
50043 ret = -EPIPE;
50044@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50045 * return EAGAIN if we have the potential of some data in the
50046 * future, otherwise just return 0
50047 */
50048- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
50049+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
50050 ret = -EAGAIN;
50051
50052 pipe_unlock(ipipe);
50053diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
50054index 1ad8c93..6633545 100644
50055--- a/fs/sysfs/file.c
50056+++ b/fs/sysfs/file.c
50057@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
50058
50059 struct sysfs_open_dirent {
50060 atomic_t refcnt;
50061- atomic_t event;
50062+ atomic_unchecked_t event;
50063 wait_queue_head_t poll;
50064 struct list_head buffers; /* goes through sysfs_buffer.list */
50065 };
50066@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
50067 if (!sysfs_get_active(attr_sd))
50068 return -ENODEV;
50069
50070- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50071+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50072 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50073
50074 sysfs_put_active(attr_sd);
50075@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
50076 return -ENOMEM;
50077
50078 atomic_set(&new_od->refcnt, 0);
50079- atomic_set(&new_od->event, 1);
50080+ atomic_set_unchecked(&new_od->event, 1);
50081 init_waitqueue_head(&new_od->poll);
50082 INIT_LIST_HEAD(&new_od->buffers);
50083 goto retry;
50084@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
50085
50086 sysfs_put_active(attr_sd);
50087
50088- if (buffer->event != atomic_read(&od->event))
50089+ if (buffer->event != atomic_read_unchecked(&od->event))
50090 goto trigger;
50091
50092 return DEFAULT_POLLMASK;
50093@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
50094
50095 od = sd->s_attr.open;
50096 if (od) {
50097- atomic_inc(&od->event);
50098+ atomic_inc_unchecked(&od->event);
50099 wake_up_interruptible(&od->poll);
50100 }
50101
50102diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
50103index e34f0d9..740ea7b 100644
50104--- a/fs/sysfs/mount.c
50105+++ b/fs/sysfs/mount.c
50106@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
50107 .s_name = "",
50108 .s_count = ATOMIC_INIT(1),
50109 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
50110+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50111+ .s_mode = S_IFDIR | S_IRWXU,
50112+#else
50113 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50114+#endif
50115 .s_ino = 1,
50116 };
50117
50118diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50119index a7ac78f..02158e1 100644
50120--- a/fs/sysfs/symlink.c
50121+++ b/fs/sysfs/symlink.c
50122@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50123
50124 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50125 {
50126- char *page = nd_get_link(nd);
50127+ const char *page = nd_get_link(nd);
50128 if (!IS_ERR(page))
50129 free_page((unsigned long)page);
50130 }
50131diff --git a/fs/udf/inode.c b/fs/udf/inode.c
50132index 262050f..d2df565 100644
50133--- a/fs/udf/inode.c
50134+++ b/fs/udf/inode.c
50135@@ -576,6 +576,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
50136 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
50137 int lastblock = 0;
50138
50139+ pax_track_stack();
50140+
50141 prev_epos.offset = udf_file_entry_alloc_offset(inode);
50142 prev_epos.block = iinfo->i_location;
50143 prev_epos.bh = NULL;
50144diff --git a/fs/udf/misc.c b/fs/udf/misc.c
50145index 9215700..bf1f68e 100644
50146--- a/fs/udf/misc.c
50147+++ b/fs/udf/misc.c
50148@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
50149
50150 u8 udf_tag_checksum(const struct tag *t)
50151 {
50152- u8 *data = (u8 *)t;
50153+ const u8 *data = (const u8 *)t;
50154 u8 checksum = 0;
50155 int i;
50156 for (i = 0; i < sizeof(struct tag); ++i)
50157diff --git a/fs/utimes.c b/fs/utimes.c
50158index ba653f3..06ea4b1 100644
50159--- a/fs/utimes.c
50160+++ b/fs/utimes.c
50161@@ -1,6 +1,7 @@
50162 #include <linux/compiler.h>
50163 #include <linux/file.h>
50164 #include <linux/fs.h>
50165+#include <linux/security.h>
50166 #include <linux/linkage.h>
50167 #include <linux/mount.h>
50168 #include <linux/namei.h>
50169@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
50170 goto mnt_drop_write_and_out;
50171 }
50172 }
50173+
50174+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50175+ error = -EACCES;
50176+ goto mnt_drop_write_and_out;
50177+ }
50178+
50179 mutex_lock(&inode->i_mutex);
50180 error = notify_change(path->dentry, &newattrs);
50181 mutex_unlock(&inode->i_mutex);
50182diff --git a/fs/xattr.c b/fs/xattr.c
50183index f060663..def7007 100644
50184--- a/fs/xattr.c
50185+++ b/fs/xattr.c
50186@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50187 * Extended attribute SET operations
50188 */
50189 static long
50190-setxattr(struct dentry *d, const char __user *name, const void __user *value,
50191+setxattr(struct path *path, const char __user *name, const void __user *value,
50192 size_t size, int flags)
50193 {
50194 int error;
50195@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
50196 return PTR_ERR(kvalue);
50197 }
50198
50199- error = vfs_setxattr(d, kname, kvalue, size, flags);
50200+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50201+ error = -EACCES;
50202+ goto out;
50203+ }
50204+
50205+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50206+out:
50207 kfree(kvalue);
50208 return error;
50209 }
50210@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
50211 return error;
50212 error = mnt_want_write(path.mnt);
50213 if (!error) {
50214- error = setxattr(path.dentry, name, value, size, flags);
50215+ error = setxattr(&path, name, value, size, flags);
50216 mnt_drop_write(path.mnt);
50217 }
50218 path_put(&path);
50219@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
50220 return error;
50221 error = mnt_want_write(path.mnt);
50222 if (!error) {
50223- error = setxattr(path.dentry, name, value, size, flags);
50224+ error = setxattr(&path, name, value, size, flags);
50225 mnt_drop_write(path.mnt);
50226 }
50227 path_put(&path);
50228@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
50229 const void __user *,value, size_t, size, int, flags)
50230 {
50231 struct file *f;
50232- struct dentry *dentry;
50233 int error = -EBADF;
50234
50235 f = fget(fd);
50236 if (!f)
50237 return error;
50238- dentry = f->f_path.dentry;
50239- audit_inode(NULL, dentry);
50240+ audit_inode(NULL, f->f_path.dentry);
50241 error = mnt_want_write_file(f);
50242 if (!error) {
50243- error = setxattr(dentry, name, value, size, flags);
50244+ error = setxattr(&f->f_path, name, value, size, flags);
50245 mnt_drop_write(f->f_path.mnt);
50246 }
50247 fput(f);
50248diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50249index 8d5a506..7f62712 100644
50250--- a/fs/xattr_acl.c
50251+++ b/fs/xattr_acl.c
50252@@ -17,8 +17,8 @@
50253 struct posix_acl *
50254 posix_acl_from_xattr(const void *value, size_t size)
50255 {
50256- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50257- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50258+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50259+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50260 int count;
50261 struct posix_acl *acl;
50262 struct posix_acl_entry *acl_e;
50263diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
50264index 452a291..91a95f3b 100644
50265--- a/fs/xfs/xfs_bmap.c
50266+++ b/fs/xfs/xfs_bmap.c
50267@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
50268 int nmap,
50269 int ret_nmap);
50270 #else
50271-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50272+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50273 #endif /* DEBUG */
50274
50275 STATIC int
50276diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50277index 79d05e8..e3e5861 100644
50278--- a/fs/xfs/xfs_dir2_sf.c
50279+++ b/fs/xfs/xfs_dir2_sf.c
50280@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
50281 }
50282
50283 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
50284- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50285+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50286+ char name[sfep->namelen];
50287+ memcpy(name, sfep->name, sfep->namelen);
50288+ if (filldir(dirent, name, sfep->namelen,
50289+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
50290+ *offset = off & 0x7fffffff;
50291+ return 0;
50292+ }
50293+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50294 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50295 *offset = off & 0x7fffffff;
50296 return 0;
50297diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
50298index f7ce7de..e1a5db0 100644
50299--- a/fs/xfs/xfs_ioctl.c
50300+++ b/fs/xfs/xfs_ioctl.c
50301@@ -128,7 +128,7 @@ xfs_find_handle(
50302 }
50303
50304 error = -EFAULT;
50305- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50306+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50307 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50308 goto out_put;
50309
50310diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
50311index 474920b..97169a9 100644
50312--- a/fs/xfs/xfs_iops.c
50313+++ b/fs/xfs/xfs_iops.c
50314@@ -446,7 +446,7 @@ xfs_vn_put_link(
50315 struct nameidata *nd,
50316 void *p)
50317 {
50318- char *s = nd_get_link(nd);
50319+ const char *s = nd_get_link(nd);
50320
50321 if (!IS_ERR(s))
50322 kfree(s);
50323diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50324new file mode 100644
50325index 0000000..4639511
50326--- /dev/null
50327+++ b/grsecurity/Kconfig
50328@@ -0,0 +1,1051 @@
50329+#
50330+# grecurity configuration
50331+#
50332+
50333+menu "Grsecurity"
50334+
50335+config GRKERNSEC
50336+ bool "Grsecurity"
50337+ select CRYPTO
50338+ select CRYPTO_SHA256
50339+ help
50340+ If you say Y here, you will be able to configure many features
50341+ that will enhance the security of your system. It is highly
50342+ recommended that you say Y here and read through the help
50343+ for each option so that you fully understand the features and
50344+ can evaluate their usefulness for your machine.
50345+
50346+choice
50347+ prompt "Security Level"
50348+ depends on GRKERNSEC
50349+ default GRKERNSEC_CUSTOM
50350+
50351+config GRKERNSEC_LOW
50352+ bool "Low"
50353+ select GRKERNSEC_LINK
50354+ select GRKERNSEC_FIFO
50355+ select GRKERNSEC_RANDNET
50356+ select GRKERNSEC_DMESG
50357+ select GRKERNSEC_CHROOT
50358+ select GRKERNSEC_CHROOT_CHDIR
50359+
50360+ help
50361+ If you choose this option, several of the grsecurity options will
50362+ be enabled that will give you greater protection against a number
50363+ of attacks, while assuring that none of your software will have any
50364+ conflicts with the additional security measures. If you run a lot
50365+ of unusual software, or you are having problems with the higher
50366+ security levels, you should say Y here. With this option, the
50367+ following features are enabled:
50368+
50369+ - Linking restrictions
50370+ - FIFO restrictions
50371+ - Restricted dmesg
50372+ - Enforced chdir("/") on chroot
50373+ - Runtime module disabling
50374+
50375+config GRKERNSEC_MEDIUM
50376+ bool "Medium"
50377+ select PAX
50378+ select PAX_EI_PAX
50379+ select PAX_PT_PAX_FLAGS
50380+ select PAX_HAVE_ACL_FLAGS
50381+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50382+ select GRKERNSEC_CHROOT
50383+ select GRKERNSEC_CHROOT_SYSCTL
50384+ select GRKERNSEC_LINK
50385+ select GRKERNSEC_FIFO
50386+ select GRKERNSEC_DMESG
50387+ select GRKERNSEC_RANDNET
50388+ select GRKERNSEC_FORKFAIL
50389+ select GRKERNSEC_TIME
50390+ select GRKERNSEC_SIGNAL
50391+ select GRKERNSEC_CHROOT
50392+ select GRKERNSEC_CHROOT_UNIX
50393+ select GRKERNSEC_CHROOT_MOUNT
50394+ select GRKERNSEC_CHROOT_PIVOT
50395+ select GRKERNSEC_CHROOT_DOUBLE
50396+ select GRKERNSEC_CHROOT_CHDIR
50397+ select GRKERNSEC_CHROOT_MKNOD
50398+ select GRKERNSEC_PROC
50399+ select GRKERNSEC_PROC_USERGROUP
50400+ select PAX_RANDUSTACK
50401+ select PAX_ASLR
50402+ select PAX_RANDMMAP
50403+ select PAX_REFCOUNT if (X86 || SPARC64)
50404+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50405+
50406+ help
50407+ If you say Y here, several features in addition to those included
50408+ in the low additional security level will be enabled. These
50409+ features provide even more security to your system, though in rare
50410+ cases they may be incompatible with very old or poorly written
50411+ software. If you enable this option, make sure that your auth
50412+ service (identd) is running as gid 1001. With this option,
50413+ the following features (in addition to those provided in the
50414+ low additional security level) will be enabled:
50415+
50416+ - Failed fork logging
50417+ - Time change logging
50418+ - Signal logging
50419+ - Deny mounts in chroot
50420+ - Deny double chrooting
50421+ - Deny sysctl writes in chroot
50422+ - Deny mknod in chroot
50423+ - Deny access to abstract AF_UNIX sockets out of chroot
50424+ - Deny pivot_root in chroot
50425+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
50426+ - /proc restrictions with special GID set to 10 (usually wheel)
50427+ - Address Space Layout Randomization (ASLR)
50428+ - Prevent exploitation of most refcount overflows
50429+ - Bounds checking of copying between the kernel and userland
50430+
50431+config GRKERNSEC_HIGH
50432+ bool "High"
50433+ select GRKERNSEC_LINK
50434+ select GRKERNSEC_FIFO
50435+ select GRKERNSEC_DMESG
50436+ select GRKERNSEC_FORKFAIL
50437+ select GRKERNSEC_TIME
50438+ select GRKERNSEC_SIGNAL
50439+ select GRKERNSEC_CHROOT
50440+ select GRKERNSEC_CHROOT_SHMAT
50441+ select GRKERNSEC_CHROOT_UNIX
50442+ select GRKERNSEC_CHROOT_MOUNT
50443+ select GRKERNSEC_CHROOT_FCHDIR
50444+ select GRKERNSEC_CHROOT_PIVOT
50445+ select GRKERNSEC_CHROOT_DOUBLE
50446+ select GRKERNSEC_CHROOT_CHDIR
50447+ select GRKERNSEC_CHROOT_MKNOD
50448+ select GRKERNSEC_CHROOT_CAPS
50449+ select GRKERNSEC_CHROOT_SYSCTL
50450+ select GRKERNSEC_CHROOT_FINDTASK
50451+ select GRKERNSEC_SYSFS_RESTRICT
50452+ select GRKERNSEC_PROC
50453+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50454+ select GRKERNSEC_HIDESYM
50455+ select GRKERNSEC_BRUTE
50456+ select GRKERNSEC_PROC_USERGROUP
50457+ select GRKERNSEC_KMEM
50458+ select GRKERNSEC_RESLOG
50459+ select GRKERNSEC_RANDNET
50460+ select GRKERNSEC_PROC_ADD
50461+ select GRKERNSEC_CHROOT_CHMOD
50462+ select GRKERNSEC_CHROOT_NICE
50463+ select GRKERNSEC_SETXID
50464+ select GRKERNSEC_AUDIT_MOUNT
50465+ select GRKERNSEC_MODHARDEN if (MODULES)
50466+ select GRKERNSEC_HARDEN_PTRACE
50467+ select GRKERNSEC_VM86 if (X86_32)
50468+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50469+ select PAX
50470+ select PAX_RANDUSTACK
50471+ select PAX_ASLR
50472+ select PAX_RANDMMAP
50473+ select PAX_NOEXEC
50474+ select PAX_MPROTECT
50475+ select PAX_EI_PAX
50476+ select PAX_PT_PAX_FLAGS
50477+ select PAX_HAVE_ACL_FLAGS
50478+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50479+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50480+ select PAX_RANDKSTACK if (X86_TSC && X86)
50481+ select PAX_SEGMEXEC if (X86_32)
50482+ select PAX_PAGEEXEC
50483+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50484+ select PAX_EMUTRAMP if (PARISC)
50485+ select PAX_EMUSIGRT if (PARISC)
50486+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50487+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50488+ select PAX_REFCOUNT if (X86 || SPARC64)
50489+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50490+ help
50491+ If you say Y here, many of the features of grsecurity will be
50492+ enabled, which will protect you against many kinds of attacks
50493+ against your system. The heightened security comes at a cost
50494+ of an increased chance of incompatibilities with rare software
50495+ on your machine. Since this security level enables PaX, you should
50496+ view <http://pax.grsecurity.net> and read about the PaX
50497+ project. While you are there, download chpax and run it on
50498+ binaries that cause problems with PaX. Also remember that
50499+ since the /proc restrictions are enabled, you must run your
50500+ identd as gid 1001. This security level enables the following
50501+ features in addition to those listed in the low and medium
50502+ security levels:
50503+
50504+ - Additional /proc restrictions
50505+ - Chmod restrictions in chroot
50506+ - No signals, ptrace, or viewing of processes outside of chroot
50507+ - Capability restrictions in chroot
50508+ - Deny fchdir out of chroot
50509+ - Priority restrictions in chroot
50510+ - Segmentation-based implementation of PaX
50511+ - Mprotect restrictions
50512+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50513+ - Kernel stack randomization
50514+ - Mount/unmount/remount logging
50515+ - Kernel symbol hiding
50516+ - Hardening of module auto-loading
50517+ - Ptrace restrictions
50518+ - Restricted vm86 mode
50519+ - Restricted sysfs/debugfs
50520+ - Active kernel exploit response
50521+
50522+config GRKERNSEC_CUSTOM
50523+ bool "Custom"
50524+ help
50525+ If you say Y here, you will be able to configure every grsecurity
50526+ option, which allows you to enable many more features that aren't
50527+ covered in the basic security levels. These additional features
50528+ include TPE, socket restrictions, and the sysctl system for
50529+ grsecurity. It is advised that you read through the help for
50530+ each option to determine its usefulness in your situation.
50531+
50532+endchoice
50533+
50534+menu "Address Space Protection"
50535+depends on GRKERNSEC
50536+
50537+config GRKERNSEC_KMEM
50538+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50539+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50540+ help
50541+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50542+ be written to or read from to modify or leak the contents of the running
50543+ kernel. /dev/port will also not be allowed to be opened. If you have module
50544+ support disabled, enabling this will close up four ways that are
50545+ currently used to insert malicious code into the running kernel.
50546+ Even with all these features enabled, we still highly recommend that
50547+ you use the RBAC system, as it is still possible for an attacker to
50548+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50549+ If you are not using XFree86, you may be able to stop this additional
50550+ case by enabling the 'Disable privileged I/O' option. Though nothing
50551+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50552+ but only to video memory, which is the only writing we allow in this
50553+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50554+ not be allowed to mprotect it with PROT_WRITE later.
50555+ It is highly recommended that you say Y here if you meet all the
50556+ conditions above.
50557+
50558+config GRKERNSEC_VM86
50559+ bool "Restrict VM86 mode"
50560+ depends on X86_32
50561+
50562+ help
50563+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50564+ make use of a special execution mode on 32bit x86 processors called
50565+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50566+ video cards and will still work with this option enabled. The purpose
50567+ of the option is to prevent exploitation of emulation errors in
50568+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50569+ Nearly all users should be able to enable this option.
50570+
50571+config GRKERNSEC_IO
50572+ bool "Disable privileged I/O"
50573+ depends on X86
50574+ select RTC_CLASS
50575+ select RTC_INTF_DEV
50576+ select RTC_DRV_CMOS
50577+
50578+ help
50579+ If you say Y here, all ioperm and iopl calls will return an error.
50580+ Ioperm and iopl can be used to modify the running kernel.
50581+ Unfortunately, some programs need this access to operate properly,
50582+ the most notable of which are XFree86 and hwclock. hwclock can be
50583+ remedied by having RTC support in the kernel, so real-time
50584+ clock support is enabled if this option is enabled, to ensure
50585+ that hwclock operates correctly. XFree86 still will not
50586+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50587+ IF YOU USE XFree86. If you use XFree86 and you still want to
50588+ protect your kernel against modification, use the RBAC system.
50589+
50590+config GRKERNSEC_PROC_MEMMAP
50591+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50592+ default y if (PAX_NOEXEC || PAX_ASLR)
50593+ depends on PAX_NOEXEC || PAX_ASLR
50594+ help
50595+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50596+ give no information about the addresses of its mappings if
50597+ PaX features that rely on random addresses are enabled on the task.
50598+ If you use PaX it is greatly recommended that you say Y here as it
50599+ closes up a hole that makes the full ASLR useless for suid
50600+ binaries.
50601+
50602+config GRKERNSEC_BRUTE
50603+ bool "Deter exploit bruteforcing"
50604+ help
50605+ If you say Y here, attempts to bruteforce exploits against forking
50606+ daemons such as apache or sshd, as well as against suid/sgid binaries
50607+ will be deterred. When a child of a forking daemon is killed by PaX
50608+ or crashes due to an illegal instruction or other suspicious signal,
50609+ the parent process will be delayed 30 seconds upon every subsequent
50610+ fork until the administrator is able to assess the situation and
50611+ restart the daemon.
50612+ In the suid/sgid case, the attempt is logged, the user has all their
50613+ processes terminated, and they are prevented from executing any further
50614+ processes for 15 minutes.
50615+ It is recommended that you also enable signal logging in the auditing
50616+ section so that logs are generated when a process triggers a suspicious
50617+ signal.
50618+ If the sysctl option is enabled, a sysctl option with name
50619+ "deter_bruteforce" is created.
50620+
50621+
50622+config GRKERNSEC_MODHARDEN
50623+ bool "Harden module auto-loading"
50624+ depends on MODULES
50625+ help
50626+ If you say Y here, module auto-loading in response to use of some
50627+ feature implemented by an unloaded module will be restricted to
50628+ root users. Enabling this option helps defend against attacks
50629+ by unprivileged users who abuse the auto-loading behavior to
50630+ cause a vulnerable module to load that is then exploited.
50631+
50632+ If this option prevents a legitimate use of auto-loading for a
50633+ non-root user, the administrator can execute modprobe manually
50634+ with the exact name of the module mentioned in the alert log.
50635+ Alternatively, the administrator can add the module to the list
50636+ of modules loaded at boot by modifying init scripts.
50637+
50638+ Modification of init scripts will most likely be needed on
50639+ Ubuntu servers with encrypted home directory support enabled,
50640+ as the first non-root user logging in will cause the ecb(aes),
50641+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50642+
50643+config GRKERNSEC_HIDESYM
50644+ bool "Hide kernel symbols"
50645+ help
50646+ If you say Y here, getting information on loaded modules, and
50647+ displaying all kernel symbols through a syscall will be restricted
50648+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50649+ /proc/kallsyms will be restricted to the root user. The RBAC
50650+ system can hide that entry even from root.
50651+
50652+ This option also prevents leaking of kernel addresses through
50653+ several /proc entries.
50654+
50655+ Note that this option is only effective provided the following
50656+ conditions are met:
50657+ 1) The kernel using grsecurity is not precompiled by some distribution
50658+ 2) You have also enabled GRKERNSEC_DMESG
50659+ 3) You are using the RBAC system and hiding other files such as your
50660+ kernel image and System.map. Alternatively, enabling this option
50661+ causes the permissions on /boot, /lib/modules, and the kernel
50662+ source directory to change at compile time to prevent
50663+ reading by non-root users.
50664+ If the above conditions are met, this option will aid in providing a
50665+ useful protection against local kernel exploitation of overflows
50666+ and arbitrary read/write vulnerabilities.
50667+
50668+config GRKERNSEC_KERN_LOCKOUT
50669+ bool "Active kernel exploit response"
50670+ depends on X86 || ARM || PPC || SPARC
50671+ help
50672+ If you say Y here, when a PaX alert is triggered due to suspicious
50673+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50674+ or an OOPs occurs due to bad memory accesses, instead of just
50675+ terminating the offending process (and potentially allowing
50676+ a subsequent exploit from the same user), we will take one of two
50677+ actions:
50678+ If the user was root, we will panic the system
50679+ If the user was non-root, we will log the attempt, terminate
50680+ all processes owned by the user, then prevent them from creating
50681+ any new processes until the system is restarted
50682+ This deters repeated kernel exploitation/bruteforcing attempts
50683+ and is useful for later forensics.
50684+
50685+endmenu
50686+menu "Role Based Access Control Options"
50687+depends on GRKERNSEC
50688+
50689+config GRKERNSEC_RBAC_DEBUG
50690+ bool
50691+
50692+config GRKERNSEC_NO_RBAC
50693+ bool "Disable RBAC system"
50694+ help
50695+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50696+ preventing the RBAC system from being enabled. You should only say Y
50697+ here if you have no intention of using the RBAC system, so as to prevent
50698+ an attacker with root access from misusing the RBAC system to hide files
50699+ and processes when loadable module support and /dev/[k]mem have been
50700+ locked down.
50701+
50702+config GRKERNSEC_ACL_HIDEKERN
50703+ bool "Hide kernel processes"
50704+ help
50705+ If you say Y here, all kernel threads will be hidden to all
50706+ processes but those whose subject has the "view hidden processes"
50707+ flag.
50708+
50709+config GRKERNSEC_ACL_MAXTRIES
50710+ int "Maximum tries before password lockout"
50711+ default 3
50712+ help
50713+ This option enforces the maximum number of times a user can attempt
50714+ to authorize themselves with the grsecurity RBAC system before being
50715+ denied the ability to attempt authorization again for a specified time.
50716+ The lower the number, the harder it will be to brute-force a password.
50717+
50718+config GRKERNSEC_ACL_TIMEOUT
50719+ int "Time to wait after max password tries, in seconds"
50720+ default 30
50721+ help
50722+ This option specifies the time the user must wait after attempting to
50723+ authorize to the RBAC system with the maximum number of invalid
50724+ passwords. The higher the number, the harder it will be to brute-force
50725+ a password.
50726+
50727+endmenu
50728+menu "Filesystem Protections"
50729+depends on GRKERNSEC
50730+
50731+config GRKERNSEC_PROC
50732+ bool "Proc restrictions"
50733+ help
50734+ If you say Y here, the permissions of the /proc filesystem
50735+ will be altered to enhance system security and privacy. You MUST
50736+ choose either a user only restriction or a user and group restriction.
50737+ Depending upon the option you choose, you can either restrict users to
50738+ see only the processes they themselves run, or choose a group that can
50739+ view all processes and files normally restricted to root if you choose
50740+ the "restrict to user only" option. NOTE: If you're running identd as
50741+ a non-root user, you will have to run it as the group you specify here.
50742+
50743+config GRKERNSEC_PROC_USER
50744+ bool "Restrict /proc to user only"
50745+ depends on GRKERNSEC_PROC
50746+ help
50747+ If you say Y here, non-root users will only be able to view their own
50748+ processes, and restricts them from viewing network-related information,
50749+ and viewing kernel symbol and module information.
50750+
50751+config GRKERNSEC_PROC_USERGROUP
50752+ bool "Allow special group"
50753+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50754+ help
50755+ If you say Y here, you will be able to select a group that will be
50756+ able to view all processes and network-related information. If you've
50757+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50758+ remain hidden. This option is useful if you want to run identd as
50759+ a non-root user.
50760+
50761+config GRKERNSEC_PROC_GID
50762+ int "GID for special group"
50763+ depends on GRKERNSEC_PROC_USERGROUP
50764+ default 1001
50765+
50766+config GRKERNSEC_PROC_ADD
50767+ bool "Additional restrictions"
50768+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50769+ help
50770+ If you say Y here, additional restrictions will be placed on
50771+ /proc that keep normal users from viewing device information and
50772+ slabinfo information that could be useful for exploits.
50773+
50774+config GRKERNSEC_LINK
50775+ bool "Linking restrictions"
50776+ help
50777+ If you say Y here, /tmp race exploits will be prevented, since users
50778+ will no longer be able to follow symlinks owned by other users in
50779+ world-writable +t directories (e.g. /tmp), unless the owner of the
50780+ symlink is the owner of the directory. users will also not be
50781+ able to hardlink to files they do not own. If the sysctl option is
50782+ enabled, a sysctl option with name "linking_restrictions" is created.
50783+
50784+config GRKERNSEC_FIFO
50785+ bool "FIFO restrictions"
50786+ help
50787+ If you say Y here, users will not be able to write to FIFOs they don't
50788+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50789+ the FIFO is the same owner of the directory it's held in. If the sysctl
50790+ option is enabled, a sysctl option with name "fifo_restrictions" is
50791+ created.
50792+
50793+config GRKERNSEC_SYSFS_RESTRICT
50794+ bool "Sysfs/debugfs restriction"
50795+ depends on SYSFS
50796+ help
50797+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50798+ any filesystem normally mounted under it (e.g. debugfs) will only
50799+ be accessible by root. These filesystems generally provide access
50800+ to hardware and debug information that isn't appropriate for unprivileged
50801+ users of the system. Sysfs and debugfs have also become a large source
50802+ of new vulnerabilities, ranging from infoleaks to local compromise.
50803+ There has been very little oversight with an eye toward security involved
50804+ in adding new exporters of information to these filesystems, so their
50805+ use is discouraged.
50806+ This option is equivalent to a chmod 0700 of the mount paths.
50807+
50808+config GRKERNSEC_ROFS
50809+ bool "Runtime read-only mount protection"
50810+ help
50811+ If you say Y here, a sysctl option with name "romount_protect" will
50812+ be created. By setting this option to 1 at runtime, filesystems
50813+ will be protected in the following ways:
50814+ * No new writable mounts will be allowed
50815+ * Existing read-only mounts won't be able to be remounted read/write
50816+ * Write operations will be denied on all block devices
50817+ This option acts independently of grsec_lock: once it is set to 1,
50818+ it cannot be turned off. Therefore, please be mindful of the resulting
50819+ behavior if this option is enabled in an init script on a read-only
50820+ filesystem. This feature is mainly intended for secure embedded systems.
50821+
50822+config GRKERNSEC_CHROOT
50823+ bool "Chroot jail restrictions"
50824+ help
50825+ If you say Y here, you will be able to choose several options that will
50826+ make breaking out of a chrooted jail much more difficult. If you
50827+ encounter no software incompatibilities with the following options, it
50828+ is recommended that you enable each one.
50829+
50830+config GRKERNSEC_CHROOT_MOUNT
50831+ bool "Deny mounts"
50832+ depends on GRKERNSEC_CHROOT
50833+ help
50834+ If you say Y here, processes inside a chroot will not be able to
50835+ mount or remount filesystems. If the sysctl option is enabled, a
50836+ sysctl option with name "chroot_deny_mount" is created.
50837+
50838+config GRKERNSEC_CHROOT_DOUBLE
50839+ bool "Deny double-chroots"
50840+ depends on GRKERNSEC_CHROOT
50841+ help
50842+ If you say Y here, processes inside a chroot will not be able to chroot
50843+ again outside the chroot. This is a widely used method of breaking
50844+ out of a chroot jail and should not be allowed. If the sysctl
50845+ option is enabled, a sysctl option with name
50846+ "chroot_deny_chroot" is created.
50847+
50848+config GRKERNSEC_CHROOT_PIVOT
50849+ bool "Deny pivot_root in chroot"
50850+ depends on GRKERNSEC_CHROOT
50851+ help
50852+ If you say Y here, processes inside a chroot will not be able to use
50853+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50854+ works similar to chroot in that it changes the root filesystem. This
50855+ function could be misused in a chrooted process to attempt to break out
50856+ of the chroot, and therefore should not be allowed. If the sysctl
50857+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50858+ created.
50859+
50860+config GRKERNSEC_CHROOT_CHDIR
50861+ bool "Enforce chdir(\"/\") on all chroots"
50862+ depends on GRKERNSEC_CHROOT
50863+ help
50864+ If you say Y here, the current working directory of all newly-chrooted
50865+ applications will be set to the root directory of the chroot.
50866+ The man page on chroot(2) states:
50867+ Note that this call does not change the current working
50868+ directory, so that `.' can be outside the tree rooted at
50869+ `/'. In particular, the super-user can escape from a
50870+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50871+
50872+ It is recommended that you say Y here, since it's not known to break
50873+ any software. If the sysctl option is enabled, a sysctl option with
50874+ name "chroot_enforce_chdir" is created.
50875+
50876+config GRKERNSEC_CHROOT_CHMOD
50877+ bool "Deny (f)chmod +s"
50878+ depends on GRKERNSEC_CHROOT
50879+ help
50880+ If you say Y here, processes inside a chroot will not be able to chmod
50881+ or fchmod files to make them have suid or sgid bits. This protects
50882+ against another published method of breaking a chroot. If the sysctl
50883+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50884+ created.
50885+
50886+config GRKERNSEC_CHROOT_FCHDIR
50887+ bool "Deny fchdir out of chroot"
50888+ depends on GRKERNSEC_CHROOT
50889+ help
50890+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50891+ to a file descriptor of the chrooting process that points to a directory
50892+ outside the filesystem will be stopped. If the sysctl option
50893+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50894+
50895+config GRKERNSEC_CHROOT_MKNOD
50896+ bool "Deny mknod"
50897+ depends on GRKERNSEC_CHROOT
50898+ help
50899+ If you say Y here, processes inside a chroot will not be allowed to
50900+ mknod. The problem with using mknod inside a chroot is that it
50901+ would allow an attacker to create a device entry that is the same
50902+ as one on the physical root of your system, which could range from
50903+ the console device to a device for your hard drive (which
50904+ they could then use to wipe the drive or steal data). It is recommended
50905+ that you say Y here, unless you run into software incompatibilities.
50906+ If the sysctl option is enabled, a sysctl option with name
50907+ "chroot_deny_mknod" is created.
50908+
50909+config GRKERNSEC_CHROOT_SHMAT
50910+ bool "Deny shmat() out of chroot"
50911+ depends on GRKERNSEC_CHROOT
50912+ help
50913+ If you say Y here, processes inside a chroot will not be able to attach
50914+ to shared memory segments that were created outside of the chroot jail.
50915+ It is recommended that you say Y here. If the sysctl option is enabled,
50916+ a sysctl option with name "chroot_deny_shmat" is created.
50917+
50918+config GRKERNSEC_CHROOT_UNIX
50919+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50920+ depends on GRKERNSEC_CHROOT
50921+ help
50922+ If you say Y here, processes inside a chroot will not be able to
50923+ connect to abstract (meaning not belonging to a filesystem) Unix
50924+ domain sockets that were bound outside of a chroot. It is recommended
50925+ that you say Y here. If the sysctl option is enabled, a sysctl option
50926+ with name "chroot_deny_unix" is created.
50927+
50928+config GRKERNSEC_CHROOT_FINDTASK
50929+ bool "Protect outside processes"
50930+ depends on GRKERNSEC_CHROOT
50931+ help
50932+ If you say Y here, processes inside a chroot will not be able to
50933+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50934+ getsid, or view any process outside of the chroot. If the sysctl
50935+ option is enabled, a sysctl option with name "chroot_findtask" is
50936+ created.
50937+
50938+config GRKERNSEC_CHROOT_NICE
50939+ bool "Restrict priority changes"
50940+ depends on GRKERNSEC_CHROOT
50941+ help
50942+ If you say Y here, processes inside a chroot will not be able to raise
50943+ the priority of processes in the chroot, or alter the priority of
50944+ processes outside the chroot. This provides more security than simply
50945+ removing CAP_SYS_NICE from the process' capability set. If the
50946+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50947+ is created.
50948+
50949+config GRKERNSEC_CHROOT_SYSCTL
50950+ bool "Deny sysctl writes"
50951+ depends on GRKERNSEC_CHROOT
50952+ help
50953+ If you say Y here, an attacker in a chroot will not be able to
50954+ write to sysctl entries, either by sysctl(2) or through a /proc
50955+ interface. It is strongly recommended that you say Y here. If the
50956+ sysctl option is enabled, a sysctl option with name
50957+ "chroot_deny_sysctl" is created.
50958+
50959+config GRKERNSEC_CHROOT_CAPS
50960+ bool "Capability restrictions"
50961+ depends on GRKERNSEC_CHROOT
50962+ help
50963+ If you say Y here, the capabilities on all processes within a
50964+ chroot jail will be lowered to stop module insertion, raw i/o,
50965+ system and net admin tasks, rebooting the system, modifying immutable
50966+ files, modifying IPC owned by another, and changing the system time.
50967+ This is left an option because it can break some apps. Disable this
50968+ if your chrooted apps are having problems performing those kinds of
50969+ tasks. If the sysctl option is enabled, a sysctl option with
50970+ name "chroot_caps" is created.
50971+
50972+endmenu
50973+menu "Kernel Auditing"
50974+depends on GRKERNSEC
50975+
50976+config GRKERNSEC_AUDIT_GROUP
50977+ bool "Single group for auditing"
50978+ help
50979+ If you say Y here, the exec, chdir, and (un)mount logging features
50980+ will only operate on a group you specify. This option is recommended
50981+ if you only want to watch certain users instead of having a large
50982+ amount of logs from the entire system. If the sysctl option is enabled,
50983+ a sysctl option with name "audit_group" is created.
50984+
50985+config GRKERNSEC_AUDIT_GID
50986+ int "GID for auditing"
50987+ depends on GRKERNSEC_AUDIT_GROUP
50988+ default 1007
50989+
50990+config GRKERNSEC_EXECLOG
50991+ bool "Exec logging"
50992+ help
50993+ If you say Y here, all execve() calls will be logged (since the
50994+ other exec*() calls are frontends to execve(), all execution
50995+ will be logged). Useful for shell-servers that like to keep track
50996+ of their users. If the sysctl option is enabled, a sysctl option with
50997+ name "exec_logging" is created.
50998+ WARNING: This option when enabled will produce a LOT of logs, especially
50999+ on an active system.
51000+
51001+config GRKERNSEC_RESLOG
51002+ bool "Resource logging"
51003+ help
51004+ If you say Y here, all attempts to overstep resource limits will
51005+ be logged with the resource name, the requested size, and the current
51006+ limit. It is highly recommended that you say Y here. If the sysctl
51007+ option is enabled, a sysctl option with name "resource_logging" is
51008+ created. If the RBAC system is enabled, the sysctl value is ignored.
51009+
51010+config GRKERNSEC_CHROOT_EXECLOG
51011+ bool "Log execs within chroot"
51012+ help
51013+ If you say Y here, all executions inside a chroot jail will be logged
51014+ to syslog. This can cause a large amount of logs if certain
51015+ applications (eg. djb's daemontools) are installed on the system, and
51016+ is therefore left as an option. If the sysctl option is enabled, a
51017+ sysctl option with name "chroot_execlog" is created.
51018+
51019+config GRKERNSEC_AUDIT_PTRACE
51020+ bool "Ptrace logging"
51021+ help
51022+ If you say Y here, all attempts to attach to a process via ptrace
51023+ will be logged. If the sysctl option is enabled, a sysctl option
51024+ with name "audit_ptrace" is created.
51025+
51026+config GRKERNSEC_AUDIT_CHDIR
51027+ bool "Chdir logging"
51028+ help
51029+ If you say Y here, all chdir() calls will be logged. If the sysctl
51030+ option is enabled, a sysctl option with name "audit_chdir" is created.
51031+
51032+config GRKERNSEC_AUDIT_MOUNT
51033+ bool "(Un)Mount logging"
51034+ help
51035+ If you say Y here, all mounts and unmounts will be logged. If the
51036+ sysctl option is enabled, a sysctl option with name "audit_mount" is
51037+ created.
51038+
51039+config GRKERNSEC_SIGNAL
51040+ bool "Signal logging"
51041+ help
51042+ If you say Y here, certain important signals will be logged, such as
51043+ SIGSEGV, which will as a result inform you of when an error in a program
51044+ occurred, which in some cases could mean a possible exploit attempt.
51045+ If the sysctl option is enabled, a sysctl option with name
51046+ "signal_logging" is created.
51047+
51048+config GRKERNSEC_FORKFAIL
51049+ bool "Fork failure logging"
51050+ help
51051+ If you say Y here, all failed fork() attempts will be logged.
51052+ This could suggest a fork bomb, or someone attempting to overstep
51053+ their process limit. If the sysctl option is enabled, a sysctl option
51054+ with name "forkfail_logging" is created.
51055+
51056+config GRKERNSEC_TIME
51057+ bool "Time change logging"
51058+ help
51059+ If you say Y here, any changes of the system clock will be logged.
51060+ If the sysctl option is enabled, a sysctl option with name
51061+ "timechange_logging" is created.
51062+
51063+config GRKERNSEC_PROC_IPADDR
51064+ bool "/proc/<pid>/ipaddr support"
51065+ help
51066+ If you say Y here, a new entry will be added to each /proc/<pid>
51067+ directory that contains the IP address of the person using the task.
51068+ The IP is carried across local TCP and AF_UNIX stream sockets.
51069+ This information can be useful for IDS/IPSes to perform remote response
51070+ to a local attack. The entry is readable by only the owner of the
51071+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
51072+ the RBAC system), and thus does not create privacy concerns.
51073+
51074+config GRKERNSEC_RWXMAP_LOG
51075+ bool 'Denied RWX mmap/mprotect logging'
51076+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
51077+ help
51078+ If you say Y here, calls to mmap() and mprotect() with explicit
51079+ usage of PROT_WRITE and PROT_EXEC together will be logged when
51080+ denied by the PAX_MPROTECT feature. If the sysctl option is
51081+ enabled, a sysctl option with name "rwxmap_logging" is created.
51082+
51083+config GRKERNSEC_AUDIT_TEXTREL
51084+ bool 'ELF text relocations logging (READ HELP)'
51085+ depends on PAX_MPROTECT
51086+ help
51087+ If you say Y here, text relocations will be logged with the filename
51088+ of the offending library or binary. The purpose of the feature is
51089+ to help Linux distribution developers get rid of libraries and
51090+ binaries that need text relocations which hinder the future progress
51091+ of PaX. Only Linux distribution developers should say Y here, and
51092+ never on a production machine, as this option creates an information
51093+ leak that could aid an attacker in defeating the randomization of
51094+ a single memory region. If the sysctl option is enabled, a sysctl
51095+ option with name "audit_textrel" is created.
51096+
51097+endmenu
51098+
51099+menu "Executable Protections"
51100+depends on GRKERNSEC
51101+
51102+config GRKERNSEC_DMESG
51103+ bool "Dmesg(8) restriction"
51104+ help
51105+ If you say Y here, non-root users will not be able to use dmesg(8)
51106+ to view up to the last 4kb of messages in the kernel's log buffer.
51107+ The kernel's log buffer often contains kernel addresses and other
51108+ identifying information useful to an attacker in fingerprinting a
51109+ system for a targeted exploit.
51110+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
51111+ created.
51112+
51113+config GRKERNSEC_HARDEN_PTRACE
51114+ bool "Deter ptrace-based process snooping"
51115+ help
51116+ If you say Y here, TTY sniffers and other malicious monitoring
51117+ programs implemented through ptrace will be defeated. If you
51118+ have been using the RBAC system, this option has already been
51119+ enabled for several years for all users, with the ability to make
51120+ fine-grained exceptions.
51121+
51122+ This option only affects the ability of non-root users to ptrace
51123+ processes that are not a descendant of the ptracing process.
51124+ This means that strace ./binary and gdb ./binary will still work,
51125+ but attaching to arbitrary processes will not. If the sysctl
51126+ option is enabled, a sysctl option with name "harden_ptrace" is
51127+ created.
51128+
51129+config GRKERNSEC_SETXID
51130+ bool "Enforce consistent multithreaded privileges"
51131+ help
51132+ If you say Y here, a change from a root uid to a non-root uid
51133+ in a multithreaded application will cause the resulting uids,
51134+ gids, supplementary groups, and capabilities in that thread
51135+ to be propagated to the other threads of the process. In most
51136+ cases this is unnecessary, as glibc will emulate this behavior
51137+ on behalf of the application. Other libcs do not act in the
51138+ same way, allowing the other threads of the process to continue
51139+ running with root privileges. If the sysctl option is enabled,
51140+ a sysctl option with name "consistent_setxid" is created.
51141+
51142+config GRKERNSEC_TPE
51143+ bool "Trusted Path Execution (TPE)"
51144+ help
51145+ If you say Y here, you will be able to choose a gid to add to the
51146+ supplementary groups of users you want to mark as "untrusted."
51147+ These users will not be able to execute any files that are not in
51148+ root-owned directories writable only by root. If the sysctl option
51149+ is enabled, a sysctl option with name "tpe" is created.
51150+
51151+config GRKERNSEC_TPE_ALL
51152+ bool "Partially restrict all non-root users"
51153+ depends on GRKERNSEC_TPE
51154+ help
51155+ If you say Y here, all non-root users will be covered under
51156+ a weaker TPE restriction. This is separate from, and in addition to,
51157+ the main TPE options that you have selected elsewhere. Thus, if a
51158+ "trusted" GID is chosen, this restriction applies to even that GID.
51159+ Under this restriction, all non-root users will only be allowed to
51160+ execute files in directories they own that are not group or
51161+ world-writable, or in directories owned by root and writable only by
51162+ root. If the sysctl option is enabled, a sysctl option with name
51163+ "tpe_restrict_all" is created.
51164+
51165+config GRKERNSEC_TPE_INVERT
51166+ bool "Invert GID option"
51167+ depends on GRKERNSEC_TPE
51168+ help
51169+ If you say Y here, the group you specify in the TPE configuration will
51170+ decide what group TPE restrictions will be *disabled* for. This
51171+ option is useful if you want TPE restrictions to be applied to most
51172+ users on the system. If the sysctl option is enabled, a sysctl option
51173+ with name "tpe_invert" is created. Unlike other sysctl options, this
51174+ entry will default to on for backward-compatibility.
51175+
51176+config GRKERNSEC_TPE_GID
51177+ int "GID for untrusted users"
51178+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51179+ default 1005
51180+ help
51181+ Setting this GID determines what group TPE restrictions will be
51182+ *enabled* for. If the sysctl option is enabled, a sysctl option
51183+ with name "tpe_gid" is created.
51184+
51185+config GRKERNSEC_TPE_GID
51186+ int "GID for trusted users"
51187+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51188+ default 1005
51189+ help
51190+ Setting this GID determines what group TPE restrictions will be
51191+ *disabled* for. If the sysctl option is enabled, a sysctl option
51192+ with name "tpe_gid" is created.
51193+
51194+endmenu
51195+menu "Network Protections"
51196+depends on GRKERNSEC
51197+
51198+config GRKERNSEC_RANDNET
51199+ bool "Larger entropy pools"
51200+ help
51201+ If you say Y here, the entropy pools used for many features of Linux
51202+ and grsecurity will be doubled in size. Since several grsecurity
51203+ features use additional randomness, it is recommended that you say Y
51204+ here. Saying Y here has a similar effect as modifying
51205+ /proc/sys/kernel/random/poolsize.
51206+
51207+config GRKERNSEC_BLACKHOLE
51208+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51209+ depends on NET
51210+ help
51211+ If you say Y here, neither TCP resets nor ICMP
51212+ destination-unreachable packets will be sent in response to packets
51213+ sent to ports for which no associated listening process exists.
51214+ This feature supports both IPV4 and IPV6 and exempts the
51215+ loopback interface from blackholing. Enabling this feature
51216+ makes a host more resilient to DoS attacks and reduces network
51217+ visibility against scanners.
51218+
51219+ The blackhole feature as-implemented is equivalent to the FreeBSD
51220+ blackhole feature, as it prevents RST responses to all packets, not
51221+ just SYNs. Under most application behavior this causes no
51222+ problems, but applications (like haproxy) may not close certain
51223+ connections in a way that cleanly terminates them on the remote
51224+ end, leaving the remote host in LAST_ACK state. Because of this
51225+ side-effect and to prevent intentional LAST_ACK DoSes, this
51226+ feature also adds automatic mitigation against such attacks.
51227+ The mitigation drastically reduces the amount of time a socket
51228+ can spend in LAST_ACK state. If you're using haproxy and not
51229+ all servers it connects to have this option enabled, consider
51230+ disabling this feature on the haproxy host.
51231+
51232+ If the sysctl option is enabled, two sysctl options with names
51233+ "ip_blackhole" and "lastack_retries" will be created.
51234+ While "ip_blackhole" takes the standard zero/non-zero on/off
51235+ toggle, "lastack_retries" uses the same kinds of values as
51236+ "tcp_retries1" and "tcp_retries2". The default value of 4
51237+ prevents a socket from lasting more than 45 seconds in LAST_ACK
51238+ state.
51239+
51240+config GRKERNSEC_SOCKET
51241+ bool "Socket restrictions"
51242+ depends on NET
51243+ help
51244+ If you say Y here, you will be able to choose from several options.
51245+ If you assign a GID on your system and add it to the supplementary
51246+ groups of users you want to restrict socket access to, this patch
51247+ will perform up to three things, based on the option(s) you choose.
51248+
51249+config GRKERNSEC_SOCKET_ALL
51250+ bool "Deny any sockets to group"
51251+ depends on GRKERNSEC_SOCKET
51252+ help
51253+ If you say Y here, you will be able to choose a GID of whose users will
51254+ be unable to connect to other hosts from your machine or run server
51255+ applications from your machine. If the sysctl option is enabled, a
51256+ sysctl option with name "socket_all" is created.
51257+
51258+config GRKERNSEC_SOCKET_ALL_GID
51259+ int "GID to deny all sockets for"
51260+ depends on GRKERNSEC_SOCKET_ALL
51261+ default 1004
51262+ help
51263+ Here you can choose the GID to disable socket access for. Remember to
51264+ add the users you want socket access disabled for to the GID
51265+ specified here. If the sysctl option is enabled, a sysctl option
51266+ with name "socket_all_gid" is created.
51267+
51268+config GRKERNSEC_SOCKET_CLIENT
51269+ bool "Deny client sockets to group"
51270+ depends on GRKERNSEC_SOCKET
51271+ help
51272+ If you say Y here, you will be able to choose a GID of whose users will
51273+ be unable to connect to other hosts from your machine, but will be
51274+ able to run servers. If this option is enabled, all users in the group
51275+ you specify will have to use passive mode when initiating ftp transfers
51276+ from the shell on your machine. If the sysctl option is enabled, a
51277+ sysctl option with name "socket_client" is created.
51278+
51279+config GRKERNSEC_SOCKET_CLIENT_GID
51280+ int "GID to deny client sockets for"
51281+ depends on GRKERNSEC_SOCKET_CLIENT
51282+ default 1003
51283+ help
51284+ Here you can choose the GID to disable client socket access for.
51285+ Remember to add the users you want client socket access disabled for to
51286+ the GID specified here. If the sysctl option is enabled, a sysctl
51287+ option with name "socket_client_gid" is created.
51288+
51289+config GRKERNSEC_SOCKET_SERVER
51290+ bool "Deny server sockets to group"
51291+ depends on GRKERNSEC_SOCKET
51292+ help
51293+ If you say Y here, you will be able to choose a GID of whose users will
51294+ be unable to run server applications from your machine. If the sysctl
51295+ option is enabled, a sysctl option with name "socket_server" is created.
51296+
51297+config GRKERNSEC_SOCKET_SERVER_GID
51298+ int "GID to deny server sockets for"
51299+ depends on GRKERNSEC_SOCKET_SERVER
51300+ default 1002
51301+ help
51302+ Here you can choose the GID to disable server socket access for.
51303+ Remember to add the users you want server socket access disabled for to
51304+ the GID specified here. If the sysctl option is enabled, a sysctl
51305+ option with name "socket_server_gid" is created.
51306+
51307+endmenu
51308+menu "Sysctl support"
51309+depends on GRKERNSEC && SYSCTL
51310+
51311+config GRKERNSEC_SYSCTL
51312+ bool "Sysctl support"
51313+ help
51314+ If you say Y here, you will be able to change the options that
51315+ grsecurity runs with at bootup, without having to recompile your
51316+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51317+ to enable (1) or disable (0) various features. All the sysctl entries
51318+ are mutable until the "grsec_lock" entry is set to a non-zero value.
51319+ All features enabled in the kernel configuration are disabled at boot
51320+ if you do not say Y to the "Turn on features by default" option.
51321+ All options should be set at startup, and the grsec_lock entry should
51322+ be set to a non-zero value after all the options are set.
51323+ *THIS IS EXTREMELY IMPORTANT*
51324+
51325+config GRKERNSEC_SYSCTL_DISTRO
51326+ bool "Extra sysctl support for distro makers (READ HELP)"
51327+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51328+ help
51329+ If you say Y here, additional sysctl options will be created
51330+ for features that affect processes running as root. Therefore,
51331+ it is critical when using this option that the grsec_lock entry be
51332+ enabled after boot. Only distros with prebuilt kernel packages
51333+ with this option enabled that can ensure grsec_lock is enabled
51334+ after boot should use this option.
51335+ *Failure to set grsec_lock after boot makes all grsec features
51336+ this option covers useless*
51337+
51338+ Currently this option creates the following sysctl entries:
51339+ "Disable Privileged I/O": "disable_priv_io"
51340+
51341+config GRKERNSEC_SYSCTL_ON
51342+ bool "Turn on features by default"
51343+ depends on GRKERNSEC_SYSCTL
51344+ help
51345+ If you say Y here, instead of having all features enabled in the
51346+ kernel configuration disabled at boot time, the features will be
51347+ enabled at boot time. It is recommended you say Y here unless
51348+ there is some reason you would want all sysctl-tunable features to
51349+ be disabled by default. As mentioned elsewhere, it is important
51350+ to enable the grsec_lock entry once you have finished modifying
51351+ the sysctl entries.
51352+
51353+endmenu
51354+menu "Logging Options"
51355+depends on GRKERNSEC
51356+
51357+config GRKERNSEC_FLOODTIME
51358+ int "Seconds in between log messages (minimum)"
51359+ default 10
51360+ help
51361+ This option allows you to enforce the number of seconds between
51362+ grsecurity log messages. The default should be suitable for most
51363+ people, however, if you choose to change it, choose a value small enough
51364+ to allow informative logs to be produced, but large enough to
51365+ prevent flooding.
51366+
51367+config GRKERNSEC_FLOODBURST
51368+ int "Number of messages in a burst (maximum)"
51369+ default 6
51370+ help
51371+ This option allows you to choose the maximum number of messages allowed
51372+ within the flood time interval you chose in a separate option. The
51373+ default should be suitable for most people, however if you find that
51374+ many of your logs are being interpreted as flooding, you may want to
51375+ raise this value.
51376+
51377+endmenu
51378+
51379+endmenu
51380diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51381new file mode 100644
51382index 0000000..be9ae3a
51383--- /dev/null
51384+++ b/grsecurity/Makefile
51385@@ -0,0 +1,36 @@
51386+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51387+# during 2001-2009 it has been completely redesigned by Brad Spengler
51388+# into an RBAC system
51389+#
51390+# All code in this directory and various hooks inserted throughout the kernel
51391+# are copyright Brad Spengler - Open Source Security, Inc., and released
51392+# under the GPL v2 or higher
51393+
51394+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51395+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
51396+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51397+
51398+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51399+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51400+ gracl_learn.o grsec_log.o
51401+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51402+
51403+ifdef CONFIG_NET
51404+obj-y += grsec_sock.o
51405+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51406+endif
51407+
51408+ifndef CONFIG_GRKERNSEC
51409+obj-y += grsec_disabled.o
51410+endif
51411+
51412+ifdef CONFIG_GRKERNSEC_HIDESYM
51413+extra-y := grsec_hidesym.o
51414+$(obj)/grsec_hidesym.o:
51415+ @-chmod -f 500 /boot
51416+ @-chmod -f 500 /lib/modules
51417+ @-chmod -f 500 /lib64/modules
51418+ @-chmod -f 500 /lib32/modules
51419+ @-chmod -f 700 .
51420+ @echo ' grsec: protected kernel image paths'
51421+endif
51422diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51423new file mode 100644
51424index 0000000..09258e0
51425--- /dev/null
51426+++ b/grsecurity/gracl.c
51427@@ -0,0 +1,4156 @@
51428+#include <linux/kernel.h>
51429+#include <linux/module.h>
51430+#include <linux/sched.h>
51431+#include <linux/mm.h>
51432+#include <linux/file.h>
51433+#include <linux/fs.h>
51434+#include <linux/namei.h>
51435+#include <linux/mount.h>
51436+#include <linux/tty.h>
51437+#include <linux/proc_fs.h>
51438+#include <linux/lglock.h>
51439+#include <linux/slab.h>
51440+#include <linux/vmalloc.h>
51441+#include <linux/types.h>
51442+#include <linux/sysctl.h>
51443+#include <linux/netdevice.h>
51444+#include <linux/ptrace.h>
51445+#include <linux/gracl.h>
51446+#include <linux/gralloc.h>
51447+#include <linux/grsecurity.h>
51448+#include <linux/grinternal.h>
51449+#include <linux/pid_namespace.h>
51450+#include <linux/fdtable.h>
51451+#include <linux/percpu.h>
51452+
51453+#include <asm/uaccess.h>
51454+#include <asm/errno.h>
51455+#include <asm/mman.h>
51456+
51457+static struct acl_role_db acl_role_set;
51458+static struct name_db name_set;
51459+static struct inodev_db inodev_set;
51460+
51461+/* for keeping track of userspace pointers used for subjects, so we
51462+ can share references in the kernel as well
51463+*/
51464+
51465+static struct path real_root;
51466+
51467+static struct acl_subj_map_db subj_map_set;
51468+
51469+static struct acl_role_label *default_role;
51470+
51471+static struct acl_role_label *role_list;
51472+
51473+static u16 acl_sp_role_value;
51474+
51475+extern char *gr_shared_page[4];
51476+static DEFINE_MUTEX(gr_dev_mutex);
51477+DEFINE_RWLOCK(gr_inode_lock);
51478+
51479+struct gr_arg *gr_usermode;
51480+
51481+static unsigned int gr_status __read_only = GR_STATUS_INIT;
51482+
51483+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51484+extern void gr_clear_learn_entries(void);
51485+
51486+#ifdef CONFIG_GRKERNSEC_RESLOG
51487+extern void gr_log_resource(const struct task_struct *task,
51488+ const int res, const unsigned long wanted, const int gt);
51489+#endif
51490+
51491+unsigned char *gr_system_salt;
51492+unsigned char *gr_system_sum;
51493+
51494+static struct sprole_pw **acl_special_roles = NULL;
51495+static __u16 num_sprole_pws = 0;
51496+
51497+static struct acl_role_label *kernel_role = NULL;
51498+
51499+static unsigned int gr_auth_attempts = 0;
51500+static unsigned long gr_auth_expires = 0UL;
51501+
51502+#ifdef CONFIG_NET
51503+extern struct vfsmount *sock_mnt;
51504+#endif
51505+
51506+extern struct vfsmount *pipe_mnt;
51507+extern struct vfsmount *shm_mnt;
51508+#ifdef CONFIG_HUGETLBFS
51509+extern struct vfsmount *hugetlbfs_vfsmount;
51510+#endif
51511+
51512+static struct acl_object_label *fakefs_obj_rw;
51513+static struct acl_object_label *fakefs_obj_rwx;
51514+
51515+extern int gr_init_uidset(void);
51516+extern void gr_free_uidset(void);
51517+extern void gr_remove_uid(uid_t uid);
51518+extern int gr_find_uid(uid_t uid);
51519+
51520+DECLARE_BRLOCK(vfsmount_lock);
51521+
51522+__inline__ int
51523+gr_acl_is_enabled(void)
51524+{
51525+ return (gr_status & GR_READY);
51526+}
51527+
51528+#ifdef CONFIG_BTRFS_FS
51529+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51530+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51531+#endif
51532+
51533+static inline dev_t __get_dev(const struct dentry *dentry)
51534+{
51535+#ifdef CONFIG_BTRFS_FS
51536+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51537+ return get_btrfs_dev_from_inode(dentry->d_inode);
51538+ else
51539+#endif
51540+ return dentry->d_inode->i_sb->s_dev;
51541+}
51542+
51543+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51544+{
51545+ return __get_dev(dentry);
51546+}
51547+
51548+static char gr_task_roletype_to_char(struct task_struct *task)
51549+{
51550+ switch (task->role->roletype &
51551+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51552+ GR_ROLE_SPECIAL)) {
51553+ case GR_ROLE_DEFAULT:
51554+ return 'D';
51555+ case GR_ROLE_USER:
51556+ return 'U';
51557+ case GR_ROLE_GROUP:
51558+ return 'G';
51559+ case GR_ROLE_SPECIAL:
51560+ return 'S';
51561+ }
51562+
51563+ return 'X';
51564+}
51565+
51566+char gr_roletype_to_char(void)
51567+{
51568+ return gr_task_roletype_to_char(current);
51569+}
51570+
51571+__inline__ int
51572+gr_acl_tpe_check(void)
51573+{
51574+ if (unlikely(!(gr_status & GR_READY)))
51575+ return 0;
51576+ if (current->role->roletype & GR_ROLE_TPE)
51577+ return 1;
51578+ else
51579+ return 0;
51580+}
51581+
51582+int
51583+gr_handle_rawio(const struct inode *inode)
51584+{
51585+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51586+ if (inode && S_ISBLK(inode->i_mode) &&
51587+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51588+ !capable(CAP_SYS_RAWIO))
51589+ return 1;
51590+#endif
51591+ return 0;
51592+}
51593+
51594+static int
51595+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51596+{
51597+ if (likely(lena != lenb))
51598+ return 0;
51599+
51600+ return !memcmp(a, b, lena);
51601+}
51602+
51603+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51604+{
51605+ *buflen -= namelen;
51606+ if (*buflen < 0)
51607+ return -ENAMETOOLONG;
51608+ *buffer -= namelen;
51609+ memcpy(*buffer, str, namelen);
51610+ return 0;
51611+}
51612+
51613+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51614+{
51615+ return prepend(buffer, buflen, name->name, name->len);
51616+}
51617+
51618+static int prepend_path(const struct path *path, struct path *root,
51619+ char **buffer, int *buflen)
51620+{
51621+ struct dentry *dentry = path->dentry;
51622+ struct vfsmount *vfsmnt = path->mnt;
51623+ bool slash = false;
51624+ int error = 0;
51625+
51626+ while (dentry != root->dentry || vfsmnt != root->mnt) {
51627+ struct dentry * parent;
51628+
51629+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51630+ /* Global root? */
51631+ if (vfsmnt->mnt_parent == vfsmnt) {
51632+ goto out;
51633+ }
51634+ dentry = vfsmnt->mnt_mountpoint;
51635+ vfsmnt = vfsmnt->mnt_parent;
51636+ continue;
51637+ }
51638+ parent = dentry->d_parent;
51639+ prefetch(parent);
51640+ spin_lock(&dentry->d_lock);
51641+ error = prepend_name(buffer, buflen, &dentry->d_name);
51642+ spin_unlock(&dentry->d_lock);
51643+ if (!error)
51644+ error = prepend(buffer, buflen, "/", 1);
51645+ if (error)
51646+ break;
51647+
51648+ slash = true;
51649+ dentry = parent;
51650+ }
51651+
51652+out:
51653+ if (!error && !slash)
51654+ error = prepend(buffer, buflen, "/", 1);
51655+
51656+ return error;
51657+}
51658+
51659+/* this must be called with vfsmount_lock and rename_lock held */
51660+
51661+static char *__our_d_path(const struct path *path, struct path *root,
51662+ char *buf, int buflen)
51663+{
51664+ char *res = buf + buflen;
51665+ int error;
51666+
51667+ prepend(&res, &buflen, "\0", 1);
51668+ error = prepend_path(path, root, &res, &buflen);
51669+ if (error)
51670+ return ERR_PTR(error);
51671+
51672+ return res;
51673+}
51674+
51675+static char *
51676+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51677+{
51678+ char *retval;
51679+
51680+ retval = __our_d_path(path, root, buf, buflen);
51681+ if (unlikely(IS_ERR(retval)))
51682+ retval = strcpy(buf, "<path too long>");
51683+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51684+ retval[1] = '\0';
51685+
51686+ return retval;
51687+}
51688+
51689+static char *
51690+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51691+ char *buf, int buflen)
51692+{
51693+ struct path path;
51694+ char *res;
51695+
51696+ path.dentry = (struct dentry *)dentry;
51697+ path.mnt = (struct vfsmount *)vfsmnt;
51698+
51699+ /* we can use real_root.dentry, real_root.mnt, because this is only called
51700+ by the RBAC system */
51701+ res = gen_full_path(&path, &real_root, buf, buflen);
51702+
51703+ return res;
51704+}
51705+
51706+static char *
51707+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51708+ char *buf, int buflen)
51709+{
51710+ char *res;
51711+ struct path path;
51712+ struct path root;
51713+ struct task_struct *reaper = &init_task;
51714+
51715+ path.dentry = (struct dentry *)dentry;
51716+ path.mnt = (struct vfsmount *)vfsmnt;
51717+
51718+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51719+ get_fs_root(reaper->fs, &root);
51720+
51721+ write_seqlock(&rename_lock);
51722+ br_read_lock(vfsmount_lock);
51723+ res = gen_full_path(&path, &root, buf, buflen);
51724+ br_read_unlock(vfsmount_lock);
51725+ write_sequnlock(&rename_lock);
51726+
51727+ path_put(&root);
51728+ return res;
51729+}
51730+
51731+static char *
51732+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51733+{
51734+ char *ret;
51735+ write_seqlock(&rename_lock);
51736+ br_read_lock(vfsmount_lock);
51737+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51738+ PAGE_SIZE);
51739+ br_read_unlock(vfsmount_lock);
51740+ write_sequnlock(&rename_lock);
51741+ return ret;
51742+}
51743+
51744+static char *
51745+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51746+{
51747+ char *ret;
51748+ char *buf;
51749+ int buflen;
51750+
51751+ write_seqlock(&rename_lock);
51752+ br_read_lock(vfsmount_lock);
51753+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51754+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51755+ buflen = (int)(ret - buf);
51756+ if (buflen >= 5)
51757+ prepend(&ret, &buflen, "/proc", 5);
51758+ else
51759+ ret = strcpy(buf, "<path too long>");
51760+ br_read_unlock(vfsmount_lock);
51761+ write_sequnlock(&rename_lock);
51762+ return ret;
51763+}
51764+
51765+char *
51766+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51767+{
51768+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51769+ PAGE_SIZE);
51770+}
51771+
51772+char *
51773+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51774+{
51775+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51776+ PAGE_SIZE);
51777+}
51778+
51779+char *
51780+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51781+{
51782+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51783+ PAGE_SIZE);
51784+}
51785+
51786+char *
51787+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51788+{
51789+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51790+ PAGE_SIZE);
51791+}
51792+
51793+char *
51794+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51795+{
51796+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51797+ PAGE_SIZE);
51798+}
51799+
51800+__inline__ __u32
51801+to_gr_audit(const __u32 reqmode)
51802+{
51803+ /* masks off auditable permission flags, then shifts them to create
51804+ auditing flags, and adds the special case of append auditing if
51805+ we're requesting write */
51806+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51807+}
51808+
51809+struct acl_subject_label *
51810+lookup_subject_map(const struct acl_subject_label *userp)
51811+{
51812+ unsigned int index = shash(userp, subj_map_set.s_size);
51813+ struct subject_map *match;
51814+
51815+ match = subj_map_set.s_hash[index];
51816+
51817+ while (match && match->user != userp)
51818+ match = match->next;
51819+
51820+ if (match != NULL)
51821+ return match->kernel;
51822+ else
51823+ return NULL;
51824+}
51825+
51826+static void
51827+insert_subj_map_entry(struct subject_map *subjmap)
51828+{
51829+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51830+ struct subject_map **curr;
51831+
51832+ subjmap->prev = NULL;
51833+
51834+ curr = &subj_map_set.s_hash[index];
51835+ if (*curr != NULL)
51836+ (*curr)->prev = subjmap;
51837+
51838+ subjmap->next = *curr;
51839+ *curr = subjmap;
51840+
51841+ return;
51842+}
51843+
51844+static struct acl_role_label *
51845+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51846+ const gid_t gid)
51847+{
51848+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51849+ struct acl_role_label *match;
51850+ struct role_allowed_ip *ipp;
51851+ unsigned int x;
51852+ u32 curr_ip = task->signal->curr_ip;
51853+
51854+ task->signal->saved_ip = curr_ip;
51855+
51856+ match = acl_role_set.r_hash[index];
51857+
51858+ while (match) {
51859+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51860+ for (x = 0; x < match->domain_child_num; x++) {
51861+ if (match->domain_children[x] == uid)
51862+ goto found;
51863+ }
51864+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51865+ break;
51866+ match = match->next;
51867+ }
51868+found:
51869+ if (match == NULL) {
51870+ try_group:
51871+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51872+ match = acl_role_set.r_hash[index];
51873+
51874+ while (match) {
51875+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51876+ for (x = 0; x < match->domain_child_num; x++) {
51877+ if (match->domain_children[x] == gid)
51878+ goto found2;
51879+ }
51880+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51881+ break;
51882+ match = match->next;
51883+ }
51884+found2:
51885+ if (match == NULL)
51886+ match = default_role;
51887+ if (match->allowed_ips == NULL)
51888+ return match;
51889+ else {
51890+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51891+ if (likely
51892+ ((ntohl(curr_ip) & ipp->netmask) ==
51893+ (ntohl(ipp->addr) & ipp->netmask)))
51894+ return match;
51895+ }
51896+ match = default_role;
51897+ }
51898+ } else if (match->allowed_ips == NULL) {
51899+ return match;
51900+ } else {
51901+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51902+ if (likely
51903+ ((ntohl(curr_ip) & ipp->netmask) ==
51904+ (ntohl(ipp->addr) & ipp->netmask)))
51905+ return match;
51906+ }
51907+ goto try_group;
51908+ }
51909+
51910+ return match;
51911+}
51912+
51913+struct acl_subject_label *
51914+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51915+ const struct acl_role_label *role)
51916+{
51917+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51918+ struct acl_subject_label *match;
51919+
51920+ match = role->subj_hash[index];
51921+
51922+ while (match && (match->inode != ino || match->device != dev ||
51923+ (match->mode & GR_DELETED))) {
51924+ match = match->next;
51925+ }
51926+
51927+ if (match && !(match->mode & GR_DELETED))
51928+ return match;
51929+ else
51930+ return NULL;
51931+}
51932+
51933+struct acl_subject_label *
51934+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51935+ const struct acl_role_label *role)
51936+{
51937+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51938+ struct acl_subject_label *match;
51939+
51940+ match = role->subj_hash[index];
51941+
51942+ while (match && (match->inode != ino || match->device != dev ||
51943+ !(match->mode & GR_DELETED))) {
51944+ match = match->next;
51945+ }
51946+
51947+ if (match && (match->mode & GR_DELETED))
51948+ return match;
51949+ else
51950+ return NULL;
51951+}
51952+
51953+static struct acl_object_label *
51954+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51955+ const struct acl_subject_label *subj)
51956+{
51957+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51958+ struct acl_object_label *match;
51959+
51960+ match = subj->obj_hash[index];
51961+
51962+ while (match && (match->inode != ino || match->device != dev ||
51963+ (match->mode & GR_DELETED))) {
51964+ match = match->next;
51965+ }
51966+
51967+ if (match && !(match->mode & GR_DELETED))
51968+ return match;
51969+ else
51970+ return NULL;
51971+}
51972+
51973+static struct acl_object_label *
51974+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51975+ const struct acl_subject_label *subj)
51976+{
51977+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51978+ struct acl_object_label *match;
51979+
51980+ match = subj->obj_hash[index];
51981+
51982+ while (match && (match->inode != ino || match->device != dev ||
51983+ !(match->mode & GR_DELETED))) {
51984+ match = match->next;
51985+ }
51986+
51987+ if (match && (match->mode & GR_DELETED))
51988+ return match;
51989+
51990+ match = subj->obj_hash[index];
51991+
51992+ while (match && (match->inode != ino || match->device != dev ||
51993+ (match->mode & GR_DELETED))) {
51994+ match = match->next;
51995+ }
51996+
51997+ if (match && !(match->mode & GR_DELETED))
51998+ return match;
51999+ else
52000+ return NULL;
52001+}
52002+
52003+static struct name_entry *
52004+lookup_name_entry(const char *name)
52005+{
52006+ unsigned int len = strlen(name);
52007+ unsigned int key = full_name_hash(name, len);
52008+ unsigned int index = key % name_set.n_size;
52009+ struct name_entry *match;
52010+
52011+ match = name_set.n_hash[index];
52012+
52013+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
52014+ match = match->next;
52015+
52016+ return match;
52017+}
52018+
52019+static struct name_entry *
52020+lookup_name_entry_create(const char *name)
52021+{
52022+ unsigned int len = strlen(name);
52023+ unsigned int key = full_name_hash(name, len);
52024+ unsigned int index = key % name_set.n_size;
52025+ struct name_entry *match;
52026+
52027+ match = name_set.n_hash[index];
52028+
52029+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52030+ !match->deleted))
52031+ match = match->next;
52032+
52033+ if (match && match->deleted)
52034+ return match;
52035+
52036+ match = name_set.n_hash[index];
52037+
52038+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
52039+ match->deleted))
52040+ match = match->next;
52041+
52042+ if (match && !match->deleted)
52043+ return match;
52044+ else
52045+ return NULL;
52046+}
52047+
52048+static struct inodev_entry *
52049+lookup_inodev_entry(const ino_t ino, const dev_t dev)
52050+{
52051+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
52052+ struct inodev_entry *match;
52053+
52054+ match = inodev_set.i_hash[index];
52055+
52056+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
52057+ match = match->next;
52058+
52059+ return match;
52060+}
52061+
52062+static void
52063+insert_inodev_entry(struct inodev_entry *entry)
52064+{
52065+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
52066+ inodev_set.i_size);
52067+ struct inodev_entry **curr;
52068+
52069+ entry->prev = NULL;
52070+
52071+ curr = &inodev_set.i_hash[index];
52072+ if (*curr != NULL)
52073+ (*curr)->prev = entry;
52074+
52075+ entry->next = *curr;
52076+ *curr = entry;
52077+
52078+ return;
52079+}
52080+
52081+static void
52082+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52083+{
52084+ unsigned int index =
52085+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52086+ struct acl_role_label **curr;
52087+ struct acl_role_label *tmp;
52088+
52089+ curr = &acl_role_set.r_hash[index];
52090+
52091+ /* if role was already inserted due to domains and already has
52092+ a role in the same bucket as it attached, then we need to
52093+ combine these two buckets
52094+ */
52095+ if (role->next) {
52096+ tmp = role->next;
52097+ while (tmp->next)
52098+ tmp = tmp->next;
52099+ tmp->next = *curr;
52100+ } else
52101+ role->next = *curr;
52102+ *curr = role;
52103+
52104+ return;
52105+}
52106+
52107+static void
52108+insert_acl_role_label(struct acl_role_label *role)
52109+{
52110+ int i;
52111+
52112+ if (role_list == NULL) {
52113+ role_list = role;
52114+ role->prev = NULL;
52115+ } else {
52116+ role->prev = role_list;
52117+ role_list = role;
52118+ }
52119+
52120+ /* used for hash chains */
52121+ role->next = NULL;
52122+
52123+ if (role->roletype & GR_ROLE_DOMAIN) {
52124+ for (i = 0; i < role->domain_child_num; i++)
52125+ __insert_acl_role_label(role, role->domain_children[i]);
52126+ } else
52127+ __insert_acl_role_label(role, role->uidgid);
52128+}
52129+
52130+static int
52131+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52132+{
52133+ struct name_entry **curr, *nentry;
52134+ struct inodev_entry *ientry;
52135+ unsigned int len = strlen(name);
52136+ unsigned int key = full_name_hash(name, len);
52137+ unsigned int index = key % name_set.n_size;
52138+
52139+ curr = &name_set.n_hash[index];
52140+
52141+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52142+ curr = &((*curr)->next);
52143+
52144+ if (*curr != NULL)
52145+ return 1;
52146+
52147+ nentry = acl_alloc(sizeof (struct name_entry));
52148+ if (nentry == NULL)
52149+ return 0;
52150+ ientry = acl_alloc(sizeof (struct inodev_entry));
52151+ if (ientry == NULL)
52152+ return 0;
52153+ ientry->nentry = nentry;
52154+
52155+ nentry->key = key;
52156+ nentry->name = name;
52157+ nentry->inode = inode;
52158+ nentry->device = device;
52159+ nentry->len = len;
52160+ nentry->deleted = deleted;
52161+
52162+ nentry->prev = NULL;
52163+ curr = &name_set.n_hash[index];
52164+ if (*curr != NULL)
52165+ (*curr)->prev = nentry;
52166+ nentry->next = *curr;
52167+ *curr = nentry;
52168+
52169+ /* insert us into the table searchable by inode/dev */
52170+ insert_inodev_entry(ientry);
52171+
52172+ return 1;
52173+}
52174+
52175+static void
52176+insert_acl_obj_label(struct acl_object_label *obj,
52177+ struct acl_subject_label *subj)
52178+{
52179+ unsigned int index =
52180+ fhash(obj->inode, obj->device, subj->obj_hash_size);
52181+ struct acl_object_label **curr;
52182+
52183+
52184+ obj->prev = NULL;
52185+
52186+ curr = &subj->obj_hash[index];
52187+ if (*curr != NULL)
52188+ (*curr)->prev = obj;
52189+
52190+ obj->next = *curr;
52191+ *curr = obj;
52192+
52193+ return;
52194+}
52195+
52196+static void
52197+insert_acl_subj_label(struct acl_subject_label *obj,
52198+ struct acl_role_label *role)
52199+{
52200+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52201+ struct acl_subject_label **curr;
52202+
52203+ obj->prev = NULL;
52204+
52205+ curr = &role->subj_hash[index];
52206+ if (*curr != NULL)
52207+ (*curr)->prev = obj;
52208+
52209+ obj->next = *curr;
52210+ *curr = obj;
52211+
52212+ return;
52213+}
52214+
52215+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52216+
52217+static void *
52218+create_table(__u32 * len, int elementsize)
52219+{
52220+ unsigned int table_sizes[] = {
52221+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52222+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52223+ 4194301, 8388593, 16777213, 33554393, 67108859
52224+ };
52225+ void *newtable = NULL;
52226+ unsigned int pwr = 0;
52227+
52228+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52229+ table_sizes[pwr] <= *len)
52230+ pwr++;
52231+
52232+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52233+ return newtable;
52234+
52235+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52236+ newtable =
52237+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52238+ else
52239+ newtable = vmalloc(table_sizes[pwr] * elementsize);
52240+
52241+ *len = table_sizes[pwr];
52242+
52243+ return newtable;
52244+}
52245+
52246+static int
52247+init_variables(const struct gr_arg *arg)
52248+{
52249+ struct task_struct *reaper = &init_task;
52250+ unsigned int stacksize;
52251+
52252+ subj_map_set.s_size = arg->role_db.num_subjects;
52253+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52254+ name_set.n_size = arg->role_db.num_objects;
52255+ inodev_set.i_size = arg->role_db.num_objects;
52256+
52257+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
52258+ !name_set.n_size || !inodev_set.i_size)
52259+ return 1;
52260+
52261+ if (!gr_init_uidset())
52262+ return 1;
52263+
52264+ /* set up the stack that holds allocation info */
52265+
52266+ stacksize = arg->role_db.num_pointers + 5;
52267+
52268+ if (!acl_alloc_stack_init(stacksize))
52269+ return 1;
52270+
52271+ /* grab reference for the real root dentry and vfsmount */
52272+ get_fs_root(reaper->fs, &real_root);
52273+
52274+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52275+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52276+#endif
52277+
52278+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52279+ if (fakefs_obj_rw == NULL)
52280+ return 1;
52281+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52282+
52283+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52284+ if (fakefs_obj_rwx == NULL)
52285+ return 1;
52286+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
52287+
52288+ subj_map_set.s_hash =
52289+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52290+ acl_role_set.r_hash =
52291+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52292+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52293+ inodev_set.i_hash =
52294+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52295+
52296+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52297+ !name_set.n_hash || !inodev_set.i_hash)
52298+ return 1;
52299+
52300+ memset(subj_map_set.s_hash, 0,
52301+ sizeof(struct subject_map *) * subj_map_set.s_size);
52302+ memset(acl_role_set.r_hash, 0,
52303+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
52304+ memset(name_set.n_hash, 0,
52305+ sizeof (struct name_entry *) * name_set.n_size);
52306+ memset(inodev_set.i_hash, 0,
52307+ sizeof (struct inodev_entry *) * inodev_set.i_size);
52308+
52309+ return 0;
52310+}
52311+
52312+/* free information not needed after startup
52313+ currently contains user->kernel pointer mappings for subjects
52314+*/
52315+
52316+static void
52317+free_init_variables(void)
52318+{
52319+ __u32 i;
52320+
52321+ if (subj_map_set.s_hash) {
52322+ for (i = 0; i < subj_map_set.s_size; i++) {
52323+ if (subj_map_set.s_hash[i]) {
52324+ kfree(subj_map_set.s_hash[i]);
52325+ subj_map_set.s_hash[i] = NULL;
52326+ }
52327+ }
52328+
52329+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52330+ PAGE_SIZE)
52331+ kfree(subj_map_set.s_hash);
52332+ else
52333+ vfree(subj_map_set.s_hash);
52334+ }
52335+
52336+ return;
52337+}
52338+
52339+static void
52340+free_variables(void)
52341+{
52342+ struct acl_subject_label *s;
52343+ struct acl_role_label *r;
52344+ struct task_struct *task, *task2;
52345+ unsigned int x;
52346+
52347+ gr_clear_learn_entries();
52348+
52349+ read_lock(&tasklist_lock);
52350+ do_each_thread(task2, task) {
52351+ task->acl_sp_role = 0;
52352+ task->acl_role_id = 0;
52353+ task->acl = NULL;
52354+ task->role = NULL;
52355+ } while_each_thread(task2, task);
52356+ read_unlock(&tasklist_lock);
52357+
52358+ /* release the reference to the real root dentry and vfsmount */
52359+ path_put(&real_root);
52360+
52361+ /* free all object hash tables */
52362+
52363+ FOR_EACH_ROLE_START(r)
52364+ if (r->subj_hash == NULL)
52365+ goto next_role;
52366+ FOR_EACH_SUBJECT_START(r, s, x)
52367+ if (s->obj_hash == NULL)
52368+ break;
52369+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52370+ kfree(s->obj_hash);
52371+ else
52372+ vfree(s->obj_hash);
52373+ FOR_EACH_SUBJECT_END(s, x)
52374+ FOR_EACH_NESTED_SUBJECT_START(r, s)
52375+ if (s->obj_hash == NULL)
52376+ break;
52377+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52378+ kfree(s->obj_hash);
52379+ else
52380+ vfree(s->obj_hash);
52381+ FOR_EACH_NESTED_SUBJECT_END(s)
52382+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52383+ kfree(r->subj_hash);
52384+ else
52385+ vfree(r->subj_hash);
52386+ r->subj_hash = NULL;
52387+next_role:
52388+ FOR_EACH_ROLE_END(r)
52389+
52390+ acl_free_all();
52391+
52392+ if (acl_role_set.r_hash) {
52393+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52394+ PAGE_SIZE)
52395+ kfree(acl_role_set.r_hash);
52396+ else
52397+ vfree(acl_role_set.r_hash);
52398+ }
52399+ if (name_set.n_hash) {
52400+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
52401+ PAGE_SIZE)
52402+ kfree(name_set.n_hash);
52403+ else
52404+ vfree(name_set.n_hash);
52405+ }
52406+
52407+ if (inodev_set.i_hash) {
52408+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52409+ PAGE_SIZE)
52410+ kfree(inodev_set.i_hash);
52411+ else
52412+ vfree(inodev_set.i_hash);
52413+ }
52414+
52415+ gr_free_uidset();
52416+
52417+ memset(&name_set, 0, sizeof (struct name_db));
52418+ memset(&inodev_set, 0, sizeof (struct inodev_db));
52419+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52420+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52421+
52422+ default_role = NULL;
52423+ role_list = NULL;
52424+
52425+ return;
52426+}
52427+
52428+static __u32
52429+count_user_objs(struct acl_object_label *userp)
52430+{
52431+ struct acl_object_label o_tmp;
52432+ __u32 num = 0;
52433+
52434+ while (userp) {
52435+ if (copy_from_user(&o_tmp, userp,
52436+ sizeof (struct acl_object_label)))
52437+ break;
52438+
52439+ userp = o_tmp.prev;
52440+ num++;
52441+ }
52442+
52443+ return num;
52444+}
52445+
52446+static struct acl_subject_label *
52447+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52448+
52449+static int
52450+copy_user_glob(struct acl_object_label *obj)
52451+{
52452+ struct acl_object_label *g_tmp, **guser;
52453+ unsigned int len;
52454+ char *tmp;
52455+
52456+ if (obj->globbed == NULL)
52457+ return 0;
52458+
52459+ guser = &obj->globbed;
52460+ while (*guser) {
52461+ g_tmp = (struct acl_object_label *)
52462+ acl_alloc(sizeof (struct acl_object_label));
52463+ if (g_tmp == NULL)
52464+ return -ENOMEM;
52465+
52466+ if (copy_from_user(g_tmp, *guser,
52467+ sizeof (struct acl_object_label)))
52468+ return -EFAULT;
52469+
52470+ len = strnlen_user(g_tmp->filename, PATH_MAX);
52471+
52472+ if (!len || len >= PATH_MAX)
52473+ return -EINVAL;
52474+
52475+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52476+ return -ENOMEM;
52477+
52478+ if (copy_from_user(tmp, g_tmp->filename, len))
52479+ return -EFAULT;
52480+ tmp[len-1] = '\0';
52481+ g_tmp->filename = tmp;
52482+
52483+ *guser = g_tmp;
52484+ guser = &(g_tmp->next);
52485+ }
52486+
52487+ return 0;
52488+}
52489+
52490+static int
52491+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52492+ struct acl_role_label *role)
52493+{
52494+ struct acl_object_label *o_tmp;
52495+ unsigned int len;
52496+ int ret;
52497+ char *tmp;
52498+
52499+ while (userp) {
52500+ if ((o_tmp = (struct acl_object_label *)
52501+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
52502+ return -ENOMEM;
52503+
52504+ if (copy_from_user(o_tmp, userp,
52505+ sizeof (struct acl_object_label)))
52506+ return -EFAULT;
52507+
52508+ userp = o_tmp->prev;
52509+
52510+ len = strnlen_user(o_tmp->filename, PATH_MAX);
52511+
52512+ if (!len || len >= PATH_MAX)
52513+ return -EINVAL;
52514+
52515+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52516+ return -ENOMEM;
52517+
52518+ if (copy_from_user(tmp, o_tmp->filename, len))
52519+ return -EFAULT;
52520+ tmp[len-1] = '\0';
52521+ o_tmp->filename = tmp;
52522+
52523+ insert_acl_obj_label(o_tmp, subj);
52524+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52525+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52526+ return -ENOMEM;
52527+
52528+ ret = copy_user_glob(o_tmp);
52529+ if (ret)
52530+ return ret;
52531+
52532+ if (o_tmp->nested) {
52533+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52534+ if (IS_ERR(o_tmp->nested))
52535+ return PTR_ERR(o_tmp->nested);
52536+
52537+ /* insert into nested subject list */
52538+ o_tmp->nested->next = role->hash->first;
52539+ role->hash->first = o_tmp->nested;
52540+ }
52541+ }
52542+
52543+ return 0;
52544+}
52545+
52546+static __u32
52547+count_user_subjs(struct acl_subject_label *userp)
52548+{
52549+ struct acl_subject_label s_tmp;
52550+ __u32 num = 0;
52551+
52552+ while (userp) {
52553+ if (copy_from_user(&s_tmp, userp,
52554+ sizeof (struct acl_subject_label)))
52555+ break;
52556+
52557+ userp = s_tmp.prev;
52558+ /* do not count nested subjects against this count, since
52559+ they are not included in the hash table, but are
52560+ attached to objects. We have already counted
52561+ the subjects in userspace for the allocation
52562+ stack
52563+ */
52564+ if (!(s_tmp.mode & GR_NESTED))
52565+ num++;
52566+ }
52567+
52568+ return num;
52569+}
52570+
52571+static int
52572+copy_user_allowedips(struct acl_role_label *rolep)
52573+{
52574+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52575+
52576+ ruserip = rolep->allowed_ips;
52577+
52578+ while (ruserip) {
52579+ rlast = rtmp;
52580+
52581+ if ((rtmp = (struct role_allowed_ip *)
52582+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52583+ return -ENOMEM;
52584+
52585+ if (copy_from_user(rtmp, ruserip,
52586+ sizeof (struct role_allowed_ip)))
52587+ return -EFAULT;
52588+
52589+ ruserip = rtmp->prev;
52590+
52591+ if (!rlast) {
52592+ rtmp->prev = NULL;
52593+ rolep->allowed_ips = rtmp;
52594+ } else {
52595+ rlast->next = rtmp;
52596+ rtmp->prev = rlast;
52597+ }
52598+
52599+ if (!ruserip)
52600+ rtmp->next = NULL;
52601+ }
52602+
52603+ return 0;
52604+}
52605+
52606+static int
52607+copy_user_transitions(struct acl_role_label *rolep)
52608+{
52609+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
52610+
52611+ unsigned int len;
52612+ char *tmp;
52613+
52614+ rusertp = rolep->transitions;
52615+
52616+ while (rusertp) {
52617+ rlast = rtmp;
52618+
52619+ if ((rtmp = (struct role_transition *)
52620+ acl_alloc(sizeof (struct role_transition))) == NULL)
52621+ return -ENOMEM;
52622+
52623+ if (copy_from_user(rtmp, rusertp,
52624+ sizeof (struct role_transition)))
52625+ return -EFAULT;
52626+
52627+ rusertp = rtmp->prev;
52628+
52629+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52630+
52631+ if (!len || len >= GR_SPROLE_LEN)
52632+ return -EINVAL;
52633+
52634+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52635+ return -ENOMEM;
52636+
52637+ if (copy_from_user(tmp, rtmp->rolename, len))
52638+ return -EFAULT;
52639+ tmp[len-1] = '\0';
52640+ rtmp->rolename = tmp;
52641+
52642+ if (!rlast) {
52643+ rtmp->prev = NULL;
52644+ rolep->transitions = rtmp;
52645+ } else {
52646+ rlast->next = rtmp;
52647+ rtmp->prev = rlast;
52648+ }
52649+
52650+ if (!rusertp)
52651+ rtmp->next = NULL;
52652+ }
52653+
52654+ return 0;
52655+}
52656+
52657+static struct acl_subject_label *
52658+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52659+{
52660+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52661+ unsigned int len;
52662+ char *tmp;
52663+ __u32 num_objs;
52664+ struct acl_ip_label **i_tmp, *i_utmp2;
52665+ struct gr_hash_struct ghash;
52666+ struct subject_map *subjmap;
52667+ unsigned int i_num;
52668+ int err;
52669+
52670+ s_tmp = lookup_subject_map(userp);
52671+
52672+ /* we've already copied this subject into the kernel, just return
52673+ the reference to it, and don't copy it over again
52674+ */
52675+ if (s_tmp)
52676+ return(s_tmp);
52677+
52678+ if ((s_tmp = (struct acl_subject_label *)
52679+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52680+ return ERR_PTR(-ENOMEM);
52681+
52682+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52683+ if (subjmap == NULL)
52684+ return ERR_PTR(-ENOMEM);
52685+
52686+ subjmap->user = userp;
52687+ subjmap->kernel = s_tmp;
52688+ insert_subj_map_entry(subjmap);
52689+
52690+ if (copy_from_user(s_tmp, userp,
52691+ sizeof (struct acl_subject_label)))
52692+ return ERR_PTR(-EFAULT);
52693+
52694+ len = strnlen_user(s_tmp->filename, PATH_MAX);
52695+
52696+ if (!len || len >= PATH_MAX)
52697+ return ERR_PTR(-EINVAL);
52698+
52699+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52700+ return ERR_PTR(-ENOMEM);
52701+
52702+ if (copy_from_user(tmp, s_tmp->filename, len))
52703+ return ERR_PTR(-EFAULT);
52704+ tmp[len-1] = '\0';
52705+ s_tmp->filename = tmp;
52706+
52707+ if (!strcmp(s_tmp->filename, "/"))
52708+ role->root_label = s_tmp;
52709+
52710+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52711+ return ERR_PTR(-EFAULT);
52712+
52713+ /* copy user and group transition tables */
52714+
52715+ if (s_tmp->user_trans_num) {
52716+ uid_t *uidlist;
52717+
52718+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52719+ if (uidlist == NULL)
52720+ return ERR_PTR(-ENOMEM);
52721+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52722+ return ERR_PTR(-EFAULT);
52723+
52724+ s_tmp->user_transitions = uidlist;
52725+ }
52726+
52727+ if (s_tmp->group_trans_num) {
52728+ gid_t *gidlist;
52729+
52730+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52731+ if (gidlist == NULL)
52732+ return ERR_PTR(-ENOMEM);
52733+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52734+ return ERR_PTR(-EFAULT);
52735+
52736+ s_tmp->group_transitions = gidlist;
52737+ }
52738+
52739+ /* set up object hash table */
52740+ num_objs = count_user_objs(ghash.first);
52741+
52742+ s_tmp->obj_hash_size = num_objs;
52743+ s_tmp->obj_hash =
52744+ (struct acl_object_label **)
52745+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52746+
52747+ if (!s_tmp->obj_hash)
52748+ return ERR_PTR(-ENOMEM);
52749+
52750+ memset(s_tmp->obj_hash, 0,
52751+ s_tmp->obj_hash_size *
52752+ sizeof (struct acl_object_label *));
52753+
52754+ /* add in objects */
52755+ err = copy_user_objs(ghash.first, s_tmp, role);
52756+
52757+ if (err)
52758+ return ERR_PTR(err);
52759+
52760+ /* set pointer for parent subject */
52761+ if (s_tmp->parent_subject) {
52762+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52763+
52764+ if (IS_ERR(s_tmp2))
52765+ return s_tmp2;
52766+
52767+ s_tmp->parent_subject = s_tmp2;
52768+ }
52769+
52770+ /* add in ip acls */
52771+
52772+ if (!s_tmp->ip_num) {
52773+ s_tmp->ips = NULL;
52774+ goto insert;
52775+ }
52776+
52777+ i_tmp =
52778+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52779+ sizeof (struct acl_ip_label *));
52780+
52781+ if (!i_tmp)
52782+ return ERR_PTR(-ENOMEM);
52783+
52784+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52785+ *(i_tmp + i_num) =
52786+ (struct acl_ip_label *)
52787+ acl_alloc(sizeof (struct acl_ip_label));
52788+ if (!*(i_tmp + i_num))
52789+ return ERR_PTR(-ENOMEM);
52790+
52791+ if (copy_from_user
52792+ (&i_utmp2, s_tmp->ips + i_num,
52793+ sizeof (struct acl_ip_label *)))
52794+ return ERR_PTR(-EFAULT);
52795+
52796+ if (copy_from_user
52797+ (*(i_tmp + i_num), i_utmp2,
52798+ sizeof (struct acl_ip_label)))
52799+ return ERR_PTR(-EFAULT);
52800+
52801+ if ((*(i_tmp + i_num))->iface == NULL)
52802+ continue;
52803+
52804+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52805+ if (!len || len >= IFNAMSIZ)
52806+ return ERR_PTR(-EINVAL);
52807+ tmp = acl_alloc(len);
52808+ if (tmp == NULL)
52809+ return ERR_PTR(-ENOMEM);
52810+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52811+ return ERR_PTR(-EFAULT);
52812+ (*(i_tmp + i_num))->iface = tmp;
52813+ }
52814+
52815+ s_tmp->ips = i_tmp;
52816+
52817+insert:
52818+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52819+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52820+ return ERR_PTR(-ENOMEM);
52821+
52822+ return s_tmp;
52823+}
52824+
52825+static int
52826+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52827+{
52828+ struct acl_subject_label s_pre;
52829+ struct acl_subject_label * ret;
52830+ int err;
52831+
52832+ while (userp) {
52833+ if (copy_from_user(&s_pre, userp,
52834+ sizeof (struct acl_subject_label)))
52835+ return -EFAULT;
52836+
52837+ /* do not add nested subjects here, add
52838+ while parsing objects
52839+ */
52840+
52841+ if (s_pre.mode & GR_NESTED) {
52842+ userp = s_pre.prev;
52843+ continue;
52844+ }
52845+
52846+ ret = do_copy_user_subj(userp, role);
52847+
52848+ err = PTR_ERR(ret);
52849+ if (IS_ERR(ret))
52850+ return err;
52851+
52852+ insert_acl_subj_label(ret, role);
52853+
52854+ userp = s_pre.prev;
52855+ }
52856+
52857+ return 0;
52858+}
52859+
52860+static int
52861+copy_user_acl(struct gr_arg *arg)
52862+{
52863+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52864+ struct sprole_pw *sptmp;
52865+ struct gr_hash_struct *ghash;
52866+ uid_t *domainlist;
52867+ unsigned int r_num;
52868+ unsigned int len;
52869+ char *tmp;
52870+ int err = 0;
52871+ __u16 i;
52872+ __u32 num_subjs;
52873+
52874+ /* we need a default and kernel role */
52875+ if (arg->role_db.num_roles < 2)
52876+ return -EINVAL;
52877+
52878+ /* copy special role authentication info from userspace */
52879+
52880+ num_sprole_pws = arg->num_sprole_pws;
52881+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52882+
52883+ if (!acl_special_roles) {
52884+ err = -ENOMEM;
52885+ goto cleanup;
52886+ }
52887+
52888+ for (i = 0; i < num_sprole_pws; i++) {
52889+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52890+ if (!sptmp) {
52891+ err = -ENOMEM;
52892+ goto cleanup;
52893+ }
52894+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52895+ sizeof (struct sprole_pw))) {
52896+ err = -EFAULT;
52897+ goto cleanup;
52898+ }
52899+
52900+ len =
52901+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52902+
52903+ if (!len || len >= GR_SPROLE_LEN) {
52904+ err = -EINVAL;
52905+ goto cleanup;
52906+ }
52907+
52908+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52909+ err = -ENOMEM;
52910+ goto cleanup;
52911+ }
52912+
52913+ if (copy_from_user(tmp, sptmp->rolename, len)) {
52914+ err = -EFAULT;
52915+ goto cleanup;
52916+ }
52917+ tmp[len-1] = '\0';
52918+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52919+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52920+#endif
52921+ sptmp->rolename = tmp;
52922+ acl_special_roles[i] = sptmp;
52923+ }
52924+
52925+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52926+
52927+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52928+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52929+
52930+ if (!r_tmp) {
52931+ err = -ENOMEM;
52932+ goto cleanup;
52933+ }
52934+
52935+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52936+ sizeof (struct acl_role_label *))) {
52937+ err = -EFAULT;
52938+ goto cleanup;
52939+ }
52940+
52941+ if (copy_from_user(r_tmp, r_utmp2,
52942+ sizeof (struct acl_role_label))) {
52943+ err = -EFAULT;
52944+ goto cleanup;
52945+ }
52946+
52947+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52948+
52949+ if (!len || len >= PATH_MAX) {
52950+ err = -EINVAL;
52951+ goto cleanup;
52952+ }
52953+
52954+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52955+ err = -ENOMEM;
52956+ goto cleanup;
52957+ }
52958+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
52959+ err = -EFAULT;
52960+ goto cleanup;
52961+ }
52962+ tmp[len-1] = '\0';
52963+ r_tmp->rolename = tmp;
52964+
52965+ if (!strcmp(r_tmp->rolename, "default")
52966+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52967+ default_role = r_tmp;
52968+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52969+ kernel_role = r_tmp;
52970+ }
52971+
52972+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52973+ err = -ENOMEM;
52974+ goto cleanup;
52975+ }
52976+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52977+ err = -EFAULT;
52978+ goto cleanup;
52979+ }
52980+
52981+ r_tmp->hash = ghash;
52982+
52983+ num_subjs = count_user_subjs(r_tmp->hash->first);
52984+
52985+ r_tmp->subj_hash_size = num_subjs;
52986+ r_tmp->subj_hash =
52987+ (struct acl_subject_label **)
52988+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52989+
52990+ if (!r_tmp->subj_hash) {
52991+ err = -ENOMEM;
52992+ goto cleanup;
52993+ }
52994+
52995+ err = copy_user_allowedips(r_tmp);
52996+ if (err)
52997+ goto cleanup;
52998+
52999+ /* copy domain info */
53000+ if (r_tmp->domain_children != NULL) {
53001+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
53002+ if (domainlist == NULL) {
53003+ err = -ENOMEM;
53004+ goto cleanup;
53005+ }
53006+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
53007+ err = -EFAULT;
53008+ goto cleanup;
53009+ }
53010+ r_tmp->domain_children = domainlist;
53011+ }
53012+
53013+ err = copy_user_transitions(r_tmp);
53014+ if (err)
53015+ goto cleanup;
53016+
53017+ memset(r_tmp->subj_hash, 0,
53018+ r_tmp->subj_hash_size *
53019+ sizeof (struct acl_subject_label *));
53020+
53021+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
53022+
53023+ if (err)
53024+ goto cleanup;
53025+
53026+ /* set nested subject list to null */
53027+ r_tmp->hash->first = NULL;
53028+
53029+ insert_acl_role_label(r_tmp);
53030+ }
53031+
53032+ goto return_err;
53033+ cleanup:
53034+ free_variables();
53035+ return_err:
53036+ return err;
53037+
53038+}
53039+
53040+static int
53041+gracl_init(struct gr_arg *args)
53042+{
53043+ int error = 0;
53044+
53045+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
53046+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
53047+
53048+ if (init_variables(args)) {
53049+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
53050+ error = -ENOMEM;
53051+ free_variables();
53052+ goto out;
53053+ }
53054+
53055+ error = copy_user_acl(args);
53056+ free_init_variables();
53057+ if (error) {
53058+ free_variables();
53059+ goto out;
53060+ }
53061+
53062+ if ((error = gr_set_acls(0))) {
53063+ free_variables();
53064+ goto out;
53065+ }
53066+
53067+ pax_open_kernel();
53068+ gr_status |= GR_READY;
53069+ pax_close_kernel();
53070+
53071+ out:
53072+ return error;
53073+}
53074+
53075+/* derived from glibc fnmatch() 0: match, 1: no match*/
53076+
53077+static int
53078+glob_match(const char *p, const char *n)
53079+{
53080+ char c;
53081+
53082+ while ((c = *p++) != '\0') {
53083+ switch (c) {
53084+ case '?':
53085+ if (*n == '\0')
53086+ return 1;
53087+ else if (*n == '/')
53088+ return 1;
53089+ break;
53090+ case '\\':
53091+ if (*n != c)
53092+ return 1;
53093+ break;
53094+ case '*':
53095+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
53096+ if (*n == '/')
53097+ return 1;
53098+ else if (c == '?') {
53099+ if (*n == '\0')
53100+ return 1;
53101+ else
53102+ ++n;
53103+ }
53104+ }
53105+ if (c == '\0') {
53106+ return 0;
53107+ } else {
53108+ const char *endp;
53109+
53110+ if ((endp = strchr(n, '/')) == NULL)
53111+ endp = n + strlen(n);
53112+
53113+ if (c == '[') {
53114+ for (--p; n < endp; ++n)
53115+ if (!glob_match(p, n))
53116+ return 0;
53117+ } else if (c == '/') {
53118+ while (*n != '\0' && *n != '/')
53119+ ++n;
53120+ if (*n == '/' && !glob_match(p, n + 1))
53121+ return 0;
53122+ } else {
53123+ for (--p; n < endp; ++n)
53124+ if (*n == c && !glob_match(p, n))
53125+ return 0;
53126+ }
53127+
53128+ return 1;
53129+ }
53130+ case '[':
53131+ {
53132+ int not;
53133+ char cold;
53134+
53135+ if (*n == '\0' || *n == '/')
53136+ return 1;
53137+
53138+ not = (*p == '!' || *p == '^');
53139+ if (not)
53140+ ++p;
53141+
53142+ c = *p++;
53143+ for (;;) {
53144+ unsigned char fn = (unsigned char)*n;
53145+
53146+ if (c == '\0')
53147+ return 1;
53148+ else {
53149+ if (c == fn)
53150+ goto matched;
53151+ cold = c;
53152+ c = *p++;
53153+
53154+ if (c == '-' && *p != ']') {
53155+ unsigned char cend = *p++;
53156+
53157+ if (cend == '\0')
53158+ return 1;
53159+
53160+ if (cold <= fn && fn <= cend)
53161+ goto matched;
53162+
53163+ c = *p++;
53164+ }
53165+ }
53166+
53167+ if (c == ']')
53168+ break;
53169+ }
53170+ if (!not)
53171+ return 1;
53172+ break;
53173+ matched:
53174+ while (c != ']') {
53175+ if (c == '\0')
53176+ return 1;
53177+
53178+ c = *p++;
53179+ }
53180+ if (not)
53181+ return 1;
53182+ }
53183+ break;
53184+ default:
53185+ if (c != *n)
53186+ return 1;
53187+ }
53188+
53189+ ++n;
53190+ }
53191+
53192+ if (*n == '\0')
53193+ return 0;
53194+
53195+ if (*n == '/')
53196+ return 0;
53197+
53198+ return 1;
53199+}
53200+
53201+static struct acl_object_label *
53202+chk_glob_label(struct acl_object_label *globbed,
53203+ struct dentry *dentry, struct vfsmount *mnt, char **path)
53204+{
53205+ struct acl_object_label *tmp;
53206+
53207+ if (*path == NULL)
53208+ *path = gr_to_filename_nolock(dentry, mnt);
53209+
53210+ tmp = globbed;
53211+
53212+ while (tmp) {
53213+ if (!glob_match(tmp->filename, *path))
53214+ return tmp;
53215+ tmp = tmp->next;
53216+ }
53217+
53218+ return NULL;
53219+}
53220+
53221+static struct acl_object_label *
53222+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53223+ const ino_t curr_ino, const dev_t curr_dev,
53224+ const struct acl_subject_label *subj, char **path, const int checkglob)
53225+{
53226+ struct acl_subject_label *tmpsubj;
53227+ struct acl_object_label *retval;
53228+ struct acl_object_label *retval2;
53229+
53230+ tmpsubj = (struct acl_subject_label *) subj;
53231+ read_lock(&gr_inode_lock);
53232+ do {
53233+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53234+ if (retval) {
53235+ if (checkglob && retval->globbed) {
53236+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
53237+ (struct vfsmount *)orig_mnt, path);
53238+ if (retval2)
53239+ retval = retval2;
53240+ }
53241+ break;
53242+ }
53243+ } while ((tmpsubj = tmpsubj->parent_subject));
53244+ read_unlock(&gr_inode_lock);
53245+
53246+ return retval;
53247+}
53248+
53249+static __inline__ struct acl_object_label *
53250+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53251+ struct dentry *curr_dentry,
53252+ const struct acl_subject_label *subj, char **path, const int checkglob)
53253+{
53254+ int newglob = checkglob;
53255+ ino_t inode;
53256+ dev_t device;
53257+
53258+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
53259+ as we don't want a / * rule to match instead of the / object
53260+ don't do this for create lookups that call this function though, since they're looking up
53261+ on the parent and thus need globbing checks on all paths
53262+ */
53263+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53264+ newglob = GR_NO_GLOB;
53265+
53266+ spin_lock(&curr_dentry->d_lock);
53267+ inode = curr_dentry->d_inode->i_ino;
53268+ device = __get_dev(curr_dentry);
53269+ spin_unlock(&curr_dentry->d_lock);
53270+
53271+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
53272+}
53273+
53274+static struct acl_object_label *
53275+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53276+ const struct acl_subject_label *subj, char *path, const int checkglob)
53277+{
53278+ struct dentry *dentry = (struct dentry *) l_dentry;
53279+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53280+ struct acl_object_label *retval;
53281+ struct dentry *parent;
53282+
53283+ write_seqlock(&rename_lock);
53284+ br_read_lock(vfsmount_lock);
53285+
53286+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53287+#ifdef CONFIG_NET
53288+ mnt == sock_mnt ||
53289+#endif
53290+#ifdef CONFIG_HUGETLBFS
53291+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
53292+#endif
53293+ /* ignore Eric Biederman */
53294+ IS_PRIVATE(l_dentry->d_inode))) {
53295+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
53296+ goto out;
53297+ }
53298+
53299+ for (;;) {
53300+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53301+ break;
53302+
53303+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53304+ if (mnt->mnt_parent == mnt)
53305+ break;
53306+
53307+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53308+ if (retval != NULL)
53309+ goto out;
53310+
53311+ dentry = mnt->mnt_mountpoint;
53312+ mnt = mnt->mnt_parent;
53313+ continue;
53314+ }
53315+
53316+ parent = dentry->d_parent;
53317+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53318+ if (retval != NULL)
53319+ goto out;
53320+
53321+ dentry = parent;
53322+ }
53323+
53324+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53325+
53326+ /* real_root is pinned so we don't have to hold a reference */
53327+ if (retval == NULL)
53328+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
53329+out:
53330+ br_read_unlock(vfsmount_lock);
53331+ write_sequnlock(&rename_lock);
53332+
53333+ BUG_ON(retval == NULL);
53334+
53335+ return retval;
53336+}
53337+
53338+static __inline__ struct acl_object_label *
53339+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53340+ const struct acl_subject_label *subj)
53341+{
53342+ char *path = NULL;
53343+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
53344+}
53345+
53346+static __inline__ struct acl_object_label *
53347+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53348+ const struct acl_subject_label *subj)
53349+{
53350+ char *path = NULL;
53351+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
53352+}
53353+
53354+static __inline__ struct acl_object_label *
53355+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53356+ const struct acl_subject_label *subj, char *path)
53357+{
53358+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
53359+}
53360+
53361+static struct acl_subject_label *
53362+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53363+ const struct acl_role_label *role)
53364+{
53365+ struct dentry *dentry = (struct dentry *) l_dentry;
53366+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53367+ struct acl_subject_label *retval;
53368+ struct dentry *parent;
53369+
53370+ write_seqlock(&rename_lock);
53371+ br_read_lock(vfsmount_lock);
53372+
53373+ for (;;) {
53374+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53375+ break;
53376+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53377+ if (mnt->mnt_parent == mnt)
53378+ break;
53379+
53380+ spin_lock(&dentry->d_lock);
53381+ read_lock(&gr_inode_lock);
53382+ retval =
53383+ lookup_acl_subj_label(dentry->d_inode->i_ino,
53384+ __get_dev(dentry), role);
53385+ read_unlock(&gr_inode_lock);
53386+ spin_unlock(&dentry->d_lock);
53387+ if (retval != NULL)
53388+ goto out;
53389+
53390+ dentry = mnt->mnt_mountpoint;
53391+ mnt = mnt->mnt_parent;
53392+ continue;
53393+ }
53394+
53395+ spin_lock(&dentry->d_lock);
53396+ read_lock(&gr_inode_lock);
53397+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53398+ __get_dev(dentry), role);
53399+ read_unlock(&gr_inode_lock);
53400+ parent = dentry->d_parent;
53401+ spin_unlock(&dentry->d_lock);
53402+
53403+ if (retval != NULL)
53404+ goto out;
53405+
53406+ dentry = parent;
53407+ }
53408+
53409+ spin_lock(&dentry->d_lock);
53410+ read_lock(&gr_inode_lock);
53411+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53412+ __get_dev(dentry), role);
53413+ read_unlock(&gr_inode_lock);
53414+ spin_unlock(&dentry->d_lock);
53415+
53416+ if (unlikely(retval == NULL)) {
53417+ /* real_root is pinned, we don't need to hold a reference */
53418+ read_lock(&gr_inode_lock);
53419+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53420+ __get_dev(real_root.dentry), role);
53421+ read_unlock(&gr_inode_lock);
53422+ }
53423+out:
53424+ br_read_unlock(vfsmount_lock);
53425+ write_sequnlock(&rename_lock);
53426+
53427+ BUG_ON(retval == NULL);
53428+
53429+ return retval;
53430+}
53431+
53432+static void
53433+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53434+{
53435+ struct task_struct *task = current;
53436+ const struct cred *cred = current_cred();
53437+
53438+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53439+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53440+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53441+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53442+
53443+ return;
53444+}
53445+
53446+static void
53447+gr_log_learn_sysctl(const char *path, const __u32 mode)
53448+{
53449+ struct task_struct *task = current;
53450+ const struct cred *cred = current_cred();
53451+
53452+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53453+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53454+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53455+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
53456+
53457+ return;
53458+}
53459+
53460+static void
53461+gr_log_learn_id_change(const char type, const unsigned int real,
53462+ const unsigned int effective, const unsigned int fs)
53463+{
53464+ struct task_struct *task = current;
53465+ const struct cred *cred = current_cred();
53466+
53467+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53468+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53469+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53470+ type, real, effective, fs, &task->signal->saved_ip);
53471+
53472+ return;
53473+}
53474+
53475+__u32
53476+gr_search_file(const struct dentry * dentry, const __u32 mode,
53477+ const struct vfsmount * mnt)
53478+{
53479+ __u32 retval = mode;
53480+ struct acl_subject_label *curracl;
53481+ struct acl_object_label *currobj;
53482+
53483+ if (unlikely(!(gr_status & GR_READY)))
53484+ return (mode & ~GR_AUDITS);
53485+
53486+ curracl = current->acl;
53487+
53488+ currobj = chk_obj_label(dentry, mnt, curracl);
53489+ retval = currobj->mode & mode;
53490+
53491+ /* if we're opening a specified transfer file for writing
53492+ (e.g. /dev/initctl), then transfer our role to init
53493+ */
53494+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53495+ current->role->roletype & GR_ROLE_PERSIST)) {
53496+ struct task_struct *task = init_pid_ns.child_reaper;
53497+
53498+ if (task->role != current->role) {
53499+ task->acl_sp_role = 0;
53500+ task->acl_role_id = current->acl_role_id;
53501+ task->role = current->role;
53502+ rcu_read_lock();
53503+ read_lock(&grsec_exec_file_lock);
53504+ gr_apply_subject_to_task(task);
53505+ read_unlock(&grsec_exec_file_lock);
53506+ rcu_read_unlock();
53507+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53508+ }
53509+ }
53510+
53511+ if (unlikely
53512+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53513+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53514+ __u32 new_mode = mode;
53515+
53516+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53517+
53518+ retval = new_mode;
53519+
53520+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53521+ new_mode |= GR_INHERIT;
53522+
53523+ if (!(mode & GR_NOLEARN))
53524+ gr_log_learn(dentry, mnt, new_mode);
53525+ }
53526+
53527+ return retval;
53528+}
53529+
53530+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53531+ const struct dentry *parent,
53532+ const struct vfsmount *mnt)
53533+{
53534+ struct name_entry *match;
53535+ struct acl_object_label *matchpo;
53536+ struct acl_subject_label *curracl;
53537+ char *path;
53538+
53539+ if (unlikely(!(gr_status & GR_READY)))
53540+ return NULL;
53541+
53542+ preempt_disable();
53543+ path = gr_to_filename_rbac(new_dentry, mnt);
53544+ match = lookup_name_entry_create(path);
53545+
53546+ curracl = current->acl;
53547+
53548+ if (match) {
53549+ read_lock(&gr_inode_lock);
53550+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53551+ read_unlock(&gr_inode_lock);
53552+
53553+ if (matchpo) {
53554+ preempt_enable();
53555+ return matchpo;
53556+ }
53557+ }
53558+
53559+ // lookup parent
53560+
53561+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53562+
53563+ preempt_enable();
53564+ return matchpo;
53565+}
53566+
53567+__u32
53568+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53569+ const struct vfsmount * mnt, const __u32 mode)
53570+{
53571+ struct acl_object_label *matchpo;
53572+ __u32 retval;
53573+
53574+ if (unlikely(!(gr_status & GR_READY)))
53575+ return (mode & ~GR_AUDITS);
53576+
53577+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
53578+
53579+ retval = matchpo->mode & mode;
53580+
53581+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53582+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53583+ __u32 new_mode = mode;
53584+
53585+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53586+
53587+ gr_log_learn(new_dentry, mnt, new_mode);
53588+ return new_mode;
53589+ }
53590+
53591+ return retval;
53592+}
53593+
53594+__u32
53595+gr_check_link(const struct dentry * new_dentry,
53596+ const struct dentry * parent_dentry,
53597+ const struct vfsmount * parent_mnt,
53598+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53599+{
53600+ struct acl_object_label *obj;
53601+ __u32 oldmode, newmode;
53602+ __u32 needmode;
53603+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53604+ GR_DELETE | GR_INHERIT;
53605+
53606+ if (unlikely(!(gr_status & GR_READY)))
53607+ return (GR_CREATE | GR_LINK);
53608+
53609+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53610+ oldmode = obj->mode;
53611+
53612+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53613+ newmode = obj->mode;
53614+
53615+ needmode = newmode & checkmodes;
53616+
53617+ // old name for hardlink must have at least the permissions of the new name
53618+ if ((oldmode & needmode) != needmode)
53619+ goto bad;
53620+
53621+ // if old name had restrictions/auditing, make sure the new name does as well
53622+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53623+
53624+ // don't allow hardlinking of suid/sgid files without permission
53625+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53626+ needmode |= GR_SETID;
53627+
53628+ if ((newmode & needmode) != needmode)
53629+ goto bad;
53630+
53631+ // enforce minimum permissions
53632+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53633+ return newmode;
53634+bad:
53635+ needmode = oldmode;
53636+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53637+ needmode |= GR_SETID;
53638+
53639+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53640+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53641+ return (GR_CREATE | GR_LINK);
53642+ } else if (newmode & GR_SUPPRESS)
53643+ return GR_SUPPRESS;
53644+ else
53645+ return 0;
53646+}
53647+
53648+int
53649+gr_check_hidden_task(const struct task_struct *task)
53650+{
53651+ if (unlikely(!(gr_status & GR_READY)))
53652+ return 0;
53653+
53654+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53655+ return 1;
53656+
53657+ return 0;
53658+}
53659+
53660+int
53661+gr_check_protected_task(const struct task_struct *task)
53662+{
53663+ if (unlikely(!(gr_status & GR_READY) || !task))
53664+ return 0;
53665+
53666+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53667+ task->acl != current->acl)
53668+ return 1;
53669+
53670+ return 0;
53671+}
53672+
53673+int
53674+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53675+{
53676+ struct task_struct *p;
53677+ int ret = 0;
53678+
53679+ if (unlikely(!(gr_status & GR_READY) || !pid))
53680+ return ret;
53681+
53682+ read_lock(&tasklist_lock);
53683+ do_each_pid_task(pid, type, p) {
53684+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53685+ p->acl != current->acl) {
53686+ ret = 1;
53687+ goto out;
53688+ }
53689+ } while_each_pid_task(pid, type, p);
53690+out:
53691+ read_unlock(&tasklist_lock);
53692+
53693+ return ret;
53694+}
53695+
53696+void
53697+gr_copy_label(struct task_struct *tsk)
53698+{
53699+ tsk->signal->used_accept = 0;
53700+ tsk->acl_sp_role = 0;
53701+ tsk->acl_role_id = current->acl_role_id;
53702+ tsk->acl = current->acl;
53703+ tsk->role = current->role;
53704+ tsk->signal->curr_ip = current->signal->curr_ip;
53705+ tsk->signal->saved_ip = current->signal->saved_ip;
53706+ if (current->exec_file)
53707+ get_file(current->exec_file);
53708+ tsk->exec_file = current->exec_file;
53709+ tsk->is_writable = current->is_writable;
53710+ if (unlikely(current->signal->used_accept)) {
53711+ current->signal->curr_ip = 0;
53712+ current->signal->saved_ip = 0;
53713+ }
53714+
53715+ return;
53716+}
53717+
53718+static void
53719+gr_set_proc_res(struct task_struct *task)
53720+{
53721+ struct acl_subject_label *proc;
53722+ unsigned short i;
53723+
53724+ proc = task->acl;
53725+
53726+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53727+ return;
53728+
53729+ for (i = 0; i < RLIM_NLIMITS; i++) {
53730+ if (!(proc->resmask & (1 << i)))
53731+ continue;
53732+
53733+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53734+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53735+ }
53736+
53737+ return;
53738+}
53739+
53740+extern int __gr_process_user_ban(struct user_struct *user);
53741+
53742+int
53743+gr_check_user_change(int real, int effective, int fs)
53744+{
53745+ unsigned int i;
53746+ __u16 num;
53747+ uid_t *uidlist;
53748+ int curuid;
53749+ int realok = 0;
53750+ int effectiveok = 0;
53751+ int fsok = 0;
53752+
53753+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53754+ struct user_struct *user;
53755+
53756+ if (real == -1)
53757+ goto skipit;
53758+
53759+ user = find_user(real);
53760+ if (user == NULL)
53761+ goto skipit;
53762+
53763+ if (__gr_process_user_ban(user)) {
53764+ /* for find_user */
53765+ free_uid(user);
53766+ return 1;
53767+ }
53768+
53769+ /* for find_user */
53770+ free_uid(user);
53771+
53772+skipit:
53773+#endif
53774+
53775+ if (unlikely(!(gr_status & GR_READY)))
53776+ return 0;
53777+
53778+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53779+ gr_log_learn_id_change('u', real, effective, fs);
53780+
53781+ num = current->acl->user_trans_num;
53782+ uidlist = current->acl->user_transitions;
53783+
53784+ if (uidlist == NULL)
53785+ return 0;
53786+
53787+ if (real == -1)
53788+ realok = 1;
53789+ if (effective == -1)
53790+ effectiveok = 1;
53791+ if (fs == -1)
53792+ fsok = 1;
53793+
53794+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53795+ for (i = 0; i < num; i++) {
53796+ curuid = (int)uidlist[i];
53797+ if (real == curuid)
53798+ realok = 1;
53799+ if (effective == curuid)
53800+ effectiveok = 1;
53801+ if (fs == curuid)
53802+ fsok = 1;
53803+ }
53804+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53805+ for (i = 0; i < num; i++) {
53806+ curuid = (int)uidlist[i];
53807+ if (real == curuid)
53808+ break;
53809+ if (effective == curuid)
53810+ break;
53811+ if (fs == curuid)
53812+ break;
53813+ }
53814+ /* not in deny list */
53815+ if (i == num) {
53816+ realok = 1;
53817+ effectiveok = 1;
53818+ fsok = 1;
53819+ }
53820+ }
53821+
53822+ if (realok && effectiveok && fsok)
53823+ return 0;
53824+ else {
53825+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53826+ return 1;
53827+ }
53828+}
53829+
53830+int
53831+gr_check_group_change(int real, int effective, int fs)
53832+{
53833+ unsigned int i;
53834+ __u16 num;
53835+ gid_t *gidlist;
53836+ int curgid;
53837+ int realok = 0;
53838+ int effectiveok = 0;
53839+ int fsok = 0;
53840+
53841+ if (unlikely(!(gr_status & GR_READY)))
53842+ return 0;
53843+
53844+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53845+ gr_log_learn_id_change('g', real, effective, fs);
53846+
53847+ num = current->acl->group_trans_num;
53848+ gidlist = current->acl->group_transitions;
53849+
53850+ if (gidlist == NULL)
53851+ return 0;
53852+
53853+ if (real == -1)
53854+ realok = 1;
53855+ if (effective == -1)
53856+ effectiveok = 1;
53857+ if (fs == -1)
53858+ fsok = 1;
53859+
53860+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53861+ for (i = 0; i < num; i++) {
53862+ curgid = (int)gidlist[i];
53863+ if (real == curgid)
53864+ realok = 1;
53865+ if (effective == curgid)
53866+ effectiveok = 1;
53867+ if (fs == curgid)
53868+ fsok = 1;
53869+ }
53870+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53871+ for (i = 0; i < num; i++) {
53872+ curgid = (int)gidlist[i];
53873+ if (real == curgid)
53874+ break;
53875+ if (effective == curgid)
53876+ break;
53877+ if (fs == curgid)
53878+ break;
53879+ }
53880+ /* not in deny list */
53881+ if (i == num) {
53882+ realok = 1;
53883+ effectiveok = 1;
53884+ fsok = 1;
53885+ }
53886+ }
53887+
53888+ if (realok && effectiveok && fsok)
53889+ return 0;
53890+ else {
53891+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53892+ return 1;
53893+ }
53894+}
53895+
53896+void
53897+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53898+{
53899+ struct acl_role_label *role = task->role;
53900+ struct acl_subject_label *subj = NULL;
53901+ struct acl_object_label *obj;
53902+ struct file *filp;
53903+
53904+ if (unlikely(!(gr_status & GR_READY)))
53905+ return;
53906+
53907+ filp = task->exec_file;
53908+
53909+ /* kernel process, we'll give them the kernel role */
53910+ if (unlikely(!filp)) {
53911+ task->role = kernel_role;
53912+ task->acl = kernel_role->root_label;
53913+ return;
53914+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53915+ role = lookup_acl_role_label(task, uid, gid);
53916+
53917+ /* perform subject lookup in possibly new role
53918+ we can use this result below in the case where role == task->role
53919+ */
53920+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53921+
53922+ /* if we changed uid/gid, but result in the same role
53923+ and are using inheritance, don't lose the inherited subject
53924+ if current subject is other than what normal lookup
53925+ would result in, we arrived via inheritance, don't
53926+ lose subject
53927+ */
53928+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53929+ (subj == task->acl)))
53930+ task->acl = subj;
53931+
53932+ task->role = role;
53933+
53934+ task->is_writable = 0;
53935+
53936+ /* ignore additional mmap checks for processes that are writable
53937+ by the default ACL */
53938+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53939+ if (unlikely(obj->mode & GR_WRITE))
53940+ task->is_writable = 1;
53941+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53942+ if (unlikely(obj->mode & GR_WRITE))
53943+ task->is_writable = 1;
53944+
53945+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53946+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53947+#endif
53948+
53949+ gr_set_proc_res(task);
53950+
53951+ return;
53952+}
53953+
53954+int
53955+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53956+ const int unsafe_share)
53957+{
53958+ struct task_struct *task = current;
53959+ struct acl_subject_label *newacl;
53960+ struct acl_object_label *obj;
53961+ __u32 retmode;
53962+
53963+ if (unlikely(!(gr_status & GR_READY)))
53964+ return 0;
53965+
53966+ newacl = chk_subj_label(dentry, mnt, task->role);
53967+
53968+ task_lock(task);
53969+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53970+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53971+ !(task->role->roletype & GR_ROLE_GOD) &&
53972+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53973+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
53974+ task_unlock(task);
53975+ if (unsafe_share)
53976+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53977+ else
53978+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53979+ return -EACCES;
53980+ }
53981+ task_unlock(task);
53982+
53983+ obj = chk_obj_label(dentry, mnt, task->acl);
53984+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53985+
53986+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53987+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53988+ if (obj->nested)
53989+ task->acl = obj->nested;
53990+ else
53991+ task->acl = newacl;
53992+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53993+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53994+
53995+ task->is_writable = 0;
53996+
53997+ /* ignore additional mmap checks for processes that are writable
53998+ by the default ACL */
53999+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
54000+ if (unlikely(obj->mode & GR_WRITE))
54001+ task->is_writable = 1;
54002+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
54003+ if (unlikely(obj->mode & GR_WRITE))
54004+ task->is_writable = 1;
54005+
54006+ gr_set_proc_res(task);
54007+
54008+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54009+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54010+#endif
54011+ return 0;
54012+}
54013+
54014+/* always called with valid inodev ptr */
54015+static void
54016+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
54017+{
54018+ struct acl_object_label *matchpo;
54019+ struct acl_subject_label *matchps;
54020+ struct acl_subject_label *subj;
54021+ struct acl_role_label *role;
54022+ unsigned int x;
54023+
54024+ FOR_EACH_ROLE_START(role)
54025+ FOR_EACH_SUBJECT_START(role, subj, x)
54026+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
54027+ matchpo->mode |= GR_DELETED;
54028+ FOR_EACH_SUBJECT_END(subj,x)
54029+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54030+ if (subj->inode == ino && subj->device == dev)
54031+ subj->mode |= GR_DELETED;
54032+ FOR_EACH_NESTED_SUBJECT_END(subj)
54033+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
54034+ matchps->mode |= GR_DELETED;
54035+ FOR_EACH_ROLE_END(role)
54036+
54037+ inodev->nentry->deleted = 1;
54038+
54039+ return;
54040+}
54041+
54042+void
54043+gr_handle_delete(const ino_t ino, const dev_t dev)
54044+{
54045+ struct inodev_entry *inodev;
54046+
54047+ if (unlikely(!(gr_status & GR_READY)))
54048+ return;
54049+
54050+ write_lock(&gr_inode_lock);
54051+ inodev = lookup_inodev_entry(ino, dev);
54052+ if (inodev != NULL)
54053+ do_handle_delete(inodev, ino, dev);
54054+ write_unlock(&gr_inode_lock);
54055+
54056+ return;
54057+}
54058+
54059+static void
54060+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54061+ const ino_t newinode, const dev_t newdevice,
54062+ struct acl_subject_label *subj)
54063+{
54064+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54065+ struct acl_object_label *match;
54066+
54067+ match = subj->obj_hash[index];
54068+
54069+ while (match && (match->inode != oldinode ||
54070+ match->device != olddevice ||
54071+ !(match->mode & GR_DELETED)))
54072+ match = match->next;
54073+
54074+ if (match && (match->inode == oldinode)
54075+ && (match->device == olddevice)
54076+ && (match->mode & GR_DELETED)) {
54077+ if (match->prev == NULL) {
54078+ subj->obj_hash[index] = match->next;
54079+ if (match->next != NULL)
54080+ match->next->prev = NULL;
54081+ } else {
54082+ match->prev->next = match->next;
54083+ if (match->next != NULL)
54084+ match->next->prev = match->prev;
54085+ }
54086+ match->prev = NULL;
54087+ match->next = NULL;
54088+ match->inode = newinode;
54089+ match->device = newdevice;
54090+ match->mode &= ~GR_DELETED;
54091+
54092+ insert_acl_obj_label(match, subj);
54093+ }
54094+
54095+ return;
54096+}
54097+
54098+static void
54099+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54100+ const ino_t newinode, const dev_t newdevice,
54101+ struct acl_role_label *role)
54102+{
54103+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54104+ struct acl_subject_label *match;
54105+
54106+ match = role->subj_hash[index];
54107+
54108+ while (match && (match->inode != oldinode ||
54109+ match->device != olddevice ||
54110+ !(match->mode & GR_DELETED)))
54111+ match = match->next;
54112+
54113+ if (match && (match->inode == oldinode)
54114+ && (match->device == olddevice)
54115+ && (match->mode & GR_DELETED)) {
54116+ if (match->prev == NULL) {
54117+ role->subj_hash[index] = match->next;
54118+ if (match->next != NULL)
54119+ match->next->prev = NULL;
54120+ } else {
54121+ match->prev->next = match->next;
54122+ if (match->next != NULL)
54123+ match->next->prev = match->prev;
54124+ }
54125+ match->prev = NULL;
54126+ match->next = NULL;
54127+ match->inode = newinode;
54128+ match->device = newdevice;
54129+ match->mode &= ~GR_DELETED;
54130+
54131+ insert_acl_subj_label(match, role);
54132+ }
54133+
54134+ return;
54135+}
54136+
54137+static void
54138+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54139+ const ino_t newinode, const dev_t newdevice)
54140+{
54141+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54142+ struct inodev_entry *match;
54143+
54144+ match = inodev_set.i_hash[index];
54145+
54146+ while (match && (match->nentry->inode != oldinode ||
54147+ match->nentry->device != olddevice || !match->nentry->deleted))
54148+ match = match->next;
54149+
54150+ if (match && (match->nentry->inode == oldinode)
54151+ && (match->nentry->device == olddevice) &&
54152+ match->nentry->deleted) {
54153+ if (match->prev == NULL) {
54154+ inodev_set.i_hash[index] = match->next;
54155+ if (match->next != NULL)
54156+ match->next->prev = NULL;
54157+ } else {
54158+ match->prev->next = match->next;
54159+ if (match->next != NULL)
54160+ match->next->prev = match->prev;
54161+ }
54162+ match->prev = NULL;
54163+ match->next = NULL;
54164+ match->nentry->inode = newinode;
54165+ match->nentry->device = newdevice;
54166+ match->nentry->deleted = 0;
54167+
54168+ insert_inodev_entry(match);
54169+ }
54170+
54171+ return;
54172+}
54173+
54174+static void
54175+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
54176+{
54177+ struct acl_subject_label *subj;
54178+ struct acl_role_label *role;
54179+ unsigned int x;
54180+
54181+ FOR_EACH_ROLE_START(role)
54182+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
54183+
54184+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54185+ if ((subj->inode == ino) && (subj->device == dev)) {
54186+ subj->inode = ino;
54187+ subj->device = dev;
54188+ }
54189+ FOR_EACH_NESTED_SUBJECT_END(subj)
54190+ FOR_EACH_SUBJECT_START(role, subj, x)
54191+ update_acl_obj_label(matchn->inode, matchn->device,
54192+ ino, dev, subj);
54193+ FOR_EACH_SUBJECT_END(subj,x)
54194+ FOR_EACH_ROLE_END(role)
54195+
54196+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
54197+
54198+ return;
54199+}
54200+
54201+static void
54202+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54203+ const struct vfsmount *mnt)
54204+{
54205+ ino_t ino = dentry->d_inode->i_ino;
54206+ dev_t dev = __get_dev(dentry);
54207+
54208+ __do_handle_create(matchn, ino, dev);
54209+
54210+ return;
54211+}
54212+
54213+void
54214+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54215+{
54216+ struct name_entry *matchn;
54217+
54218+ if (unlikely(!(gr_status & GR_READY)))
54219+ return;
54220+
54221+ preempt_disable();
54222+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54223+
54224+ if (unlikely((unsigned long)matchn)) {
54225+ write_lock(&gr_inode_lock);
54226+ do_handle_create(matchn, dentry, mnt);
54227+ write_unlock(&gr_inode_lock);
54228+ }
54229+ preempt_enable();
54230+
54231+ return;
54232+}
54233+
54234+void
54235+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54236+{
54237+ struct name_entry *matchn;
54238+
54239+ if (unlikely(!(gr_status & GR_READY)))
54240+ return;
54241+
54242+ preempt_disable();
54243+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54244+
54245+ if (unlikely((unsigned long)matchn)) {
54246+ write_lock(&gr_inode_lock);
54247+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54248+ write_unlock(&gr_inode_lock);
54249+ }
54250+ preempt_enable();
54251+
54252+ return;
54253+}
54254+
54255+void
54256+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54257+ struct dentry *old_dentry,
54258+ struct dentry *new_dentry,
54259+ struct vfsmount *mnt, const __u8 replace)
54260+{
54261+ struct name_entry *matchn;
54262+ struct inodev_entry *inodev;
54263+ struct inode *inode = new_dentry->d_inode;
54264+ ino_t old_ino = old_dentry->d_inode->i_ino;
54265+ dev_t old_dev = __get_dev(old_dentry);
54266+
54267+ /* vfs_rename swaps the name and parent link for old_dentry and
54268+ new_dentry
54269+ at this point, old_dentry has the new name, parent link, and inode
54270+ for the renamed file
54271+ if a file is being replaced by a rename, new_dentry has the inode
54272+ and name for the replaced file
54273+ */
54274+
54275+ if (unlikely(!(gr_status & GR_READY)))
54276+ return;
54277+
54278+ preempt_disable();
54279+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54280+
54281+ /* we wouldn't have to check d_inode if it weren't for
54282+ NFS silly-renaming
54283+ */
54284+
54285+ write_lock(&gr_inode_lock);
54286+ if (unlikely(replace && inode)) {
54287+ ino_t new_ino = inode->i_ino;
54288+ dev_t new_dev = __get_dev(new_dentry);
54289+
54290+ inodev = lookup_inodev_entry(new_ino, new_dev);
54291+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
54292+ do_handle_delete(inodev, new_ino, new_dev);
54293+ }
54294+
54295+ inodev = lookup_inodev_entry(old_ino, old_dev);
54296+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
54297+ do_handle_delete(inodev, old_ino, old_dev);
54298+
54299+ if (unlikely((unsigned long)matchn))
54300+ do_handle_create(matchn, old_dentry, mnt);
54301+
54302+ write_unlock(&gr_inode_lock);
54303+ preempt_enable();
54304+
54305+ return;
54306+}
54307+
54308+static int
54309+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54310+ unsigned char **sum)
54311+{
54312+ struct acl_role_label *r;
54313+ struct role_allowed_ip *ipp;
54314+ struct role_transition *trans;
54315+ unsigned int i;
54316+ int found = 0;
54317+ u32 curr_ip = current->signal->curr_ip;
54318+
54319+ current->signal->saved_ip = curr_ip;
54320+
54321+ /* check transition table */
54322+
54323+ for (trans = current->role->transitions; trans; trans = trans->next) {
54324+ if (!strcmp(rolename, trans->rolename)) {
54325+ found = 1;
54326+ break;
54327+ }
54328+ }
54329+
54330+ if (!found)
54331+ return 0;
54332+
54333+ /* handle special roles that do not require authentication
54334+ and check ip */
54335+
54336+ FOR_EACH_ROLE_START(r)
54337+ if (!strcmp(rolename, r->rolename) &&
54338+ (r->roletype & GR_ROLE_SPECIAL)) {
54339+ found = 0;
54340+ if (r->allowed_ips != NULL) {
54341+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
54342+ if ((ntohl(curr_ip) & ipp->netmask) ==
54343+ (ntohl(ipp->addr) & ipp->netmask))
54344+ found = 1;
54345+ }
54346+ } else
54347+ found = 2;
54348+ if (!found)
54349+ return 0;
54350+
54351+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54352+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54353+ *salt = NULL;
54354+ *sum = NULL;
54355+ return 1;
54356+ }
54357+ }
54358+ FOR_EACH_ROLE_END(r)
54359+
54360+ for (i = 0; i < num_sprole_pws; i++) {
54361+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54362+ *salt = acl_special_roles[i]->salt;
54363+ *sum = acl_special_roles[i]->sum;
54364+ return 1;
54365+ }
54366+ }
54367+
54368+ return 0;
54369+}
54370+
54371+static void
54372+assign_special_role(char *rolename)
54373+{
54374+ struct acl_object_label *obj;
54375+ struct acl_role_label *r;
54376+ struct acl_role_label *assigned = NULL;
54377+ struct task_struct *tsk;
54378+ struct file *filp;
54379+
54380+ FOR_EACH_ROLE_START(r)
54381+ if (!strcmp(rolename, r->rolename) &&
54382+ (r->roletype & GR_ROLE_SPECIAL)) {
54383+ assigned = r;
54384+ break;
54385+ }
54386+ FOR_EACH_ROLE_END(r)
54387+
54388+ if (!assigned)
54389+ return;
54390+
54391+ read_lock(&tasklist_lock);
54392+ read_lock(&grsec_exec_file_lock);
54393+
54394+ tsk = current->real_parent;
54395+ if (tsk == NULL)
54396+ goto out_unlock;
54397+
54398+ filp = tsk->exec_file;
54399+ if (filp == NULL)
54400+ goto out_unlock;
54401+
54402+ tsk->is_writable = 0;
54403+
54404+ tsk->acl_sp_role = 1;
54405+ tsk->acl_role_id = ++acl_sp_role_value;
54406+ tsk->role = assigned;
54407+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54408+
54409+ /* ignore additional mmap checks for processes that are writable
54410+ by the default ACL */
54411+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54412+ if (unlikely(obj->mode & GR_WRITE))
54413+ tsk->is_writable = 1;
54414+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54415+ if (unlikely(obj->mode & GR_WRITE))
54416+ tsk->is_writable = 1;
54417+
54418+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54419+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54420+#endif
54421+
54422+out_unlock:
54423+ read_unlock(&grsec_exec_file_lock);
54424+ read_unlock(&tasklist_lock);
54425+ return;
54426+}
54427+
54428+int gr_check_secure_terminal(struct task_struct *task)
54429+{
54430+ struct task_struct *p, *p2, *p3;
54431+ struct files_struct *files;
54432+ struct fdtable *fdt;
54433+ struct file *our_file = NULL, *file;
54434+ int i;
54435+
54436+ if (task->signal->tty == NULL)
54437+ return 1;
54438+
54439+ files = get_files_struct(task);
54440+ if (files != NULL) {
54441+ rcu_read_lock();
54442+ fdt = files_fdtable(files);
54443+ for (i=0; i < fdt->max_fds; i++) {
54444+ file = fcheck_files(files, i);
54445+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54446+ get_file(file);
54447+ our_file = file;
54448+ }
54449+ }
54450+ rcu_read_unlock();
54451+ put_files_struct(files);
54452+ }
54453+
54454+ if (our_file == NULL)
54455+ return 1;
54456+
54457+ read_lock(&tasklist_lock);
54458+ do_each_thread(p2, p) {
54459+ files = get_files_struct(p);
54460+ if (files == NULL ||
54461+ (p->signal && p->signal->tty == task->signal->tty)) {
54462+ if (files != NULL)
54463+ put_files_struct(files);
54464+ continue;
54465+ }
54466+ rcu_read_lock();
54467+ fdt = files_fdtable(files);
54468+ for (i=0; i < fdt->max_fds; i++) {
54469+ file = fcheck_files(files, i);
54470+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54471+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54472+ p3 = task;
54473+ while (p3->pid > 0) {
54474+ if (p3 == p)
54475+ break;
54476+ p3 = p3->real_parent;
54477+ }
54478+ if (p3 == p)
54479+ break;
54480+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54481+ gr_handle_alertkill(p);
54482+ rcu_read_unlock();
54483+ put_files_struct(files);
54484+ read_unlock(&tasklist_lock);
54485+ fput(our_file);
54486+ return 0;
54487+ }
54488+ }
54489+ rcu_read_unlock();
54490+ put_files_struct(files);
54491+ } while_each_thread(p2, p);
54492+ read_unlock(&tasklist_lock);
54493+
54494+ fput(our_file);
54495+ return 1;
54496+}
54497+
54498+ssize_t
54499+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54500+{
54501+ struct gr_arg_wrapper uwrap;
54502+ unsigned char *sprole_salt = NULL;
54503+ unsigned char *sprole_sum = NULL;
54504+ int error = sizeof (struct gr_arg_wrapper);
54505+ int error2 = 0;
54506+
54507+ mutex_lock(&gr_dev_mutex);
54508+
54509+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54510+ error = -EPERM;
54511+ goto out;
54512+ }
54513+
54514+ if (count != sizeof (struct gr_arg_wrapper)) {
54515+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54516+ error = -EINVAL;
54517+ goto out;
54518+ }
54519+
54520+
54521+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54522+ gr_auth_expires = 0;
54523+ gr_auth_attempts = 0;
54524+ }
54525+
54526+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54527+ error = -EFAULT;
54528+ goto out;
54529+ }
54530+
54531+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54532+ error = -EINVAL;
54533+ goto out;
54534+ }
54535+
54536+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54537+ error = -EFAULT;
54538+ goto out;
54539+ }
54540+
54541+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54542+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54543+ time_after(gr_auth_expires, get_seconds())) {
54544+ error = -EBUSY;
54545+ goto out;
54546+ }
54547+
54548+ /* if non-root trying to do anything other than use a special role,
54549+ do not attempt authentication, do not count towards authentication
54550+ locking
54551+ */
54552+
54553+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54554+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54555+ current_uid()) {
54556+ error = -EPERM;
54557+ goto out;
54558+ }
54559+
54560+ /* ensure pw and special role name are null terminated */
54561+
54562+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54563+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54564+
54565+ /* Okay.
54566+ * We have our enough of the argument structure..(we have yet
54567+ * to copy_from_user the tables themselves) . Copy the tables
54568+ * only if we need them, i.e. for loading operations. */
54569+
54570+ switch (gr_usermode->mode) {
54571+ case GR_STATUS:
54572+ if (gr_status & GR_READY) {
54573+ error = 1;
54574+ if (!gr_check_secure_terminal(current))
54575+ error = 3;
54576+ } else
54577+ error = 2;
54578+ goto out;
54579+ case GR_SHUTDOWN:
54580+ if ((gr_status & GR_READY)
54581+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54582+ pax_open_kernel();
54583+ gr_status &= ~GR_READY;
54584+ pax_close_kernel();
54585+
54586+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54587+ free_variables();
54588+ memset(gr_usermode, 0, sizeof (struct gr_arg));
54589+ memset(gr_system_salt, 0, GR_SALT_LEN);
54590+ memset(gr_system_sum, 0, GR_SHA_LEN);
54591+ } else if (gr_status & GR_READY) {
54592+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54593+ error = -EPERM;
54594+ } else {
54595+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54596+ error = -EAGAIN;
54597+ }
54598+ break;
54599+ case GR_ENABLE:
54600+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54601+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54602+ else {
54603+ if (gr_status & GR_READY)
54604+ error = -EAGAIN;
54605+ else
54606+ error = error2;
54607+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54608+ }
54609+ break;
54610+ case GR_RELOAD:
54611+ if (!(gr_status & GR_READY)) {
54612+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54613+ error = -EAGAIN;
54614+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54615+ preempt_disable();
54616+
54617+ pax_open_kernel();
54618+ gr_status &= ~GR_READY;
54619+ pax_close_kernel();
54620+
54621+ free_variables();
54622+ if (!(error2 = gracl_init(gr_usermode))) {
54623+ preempt_enable();
54624+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54625+ } else {
54626+ preempt_enable();
54627+ error = error2;
54628+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54629+ }
54630+ } else {
54631+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54632+ error = -EPERM;
54633+ }
54634+ break;
54635+ case GR_SEGVMOD:
54636+ if (unlikely(!(gr_status & GR_READY))) {
54637+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54638+ error = -EAGAIN;
54639+ break;
54640+ }
54641+
54642+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54643+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54644+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54645+ struct acl_subject_label *segvacl;
54646+ segvacl =
54647+ lookup_acl_subj_label(gr_usermode->segv_inode,
54648+ gr_usermode->segv_device,
54649+ current->role);
54650+ if (segvacl) {
54651+ segvacl->crashes = 0;
54652+ segvacl->expires = 0;
54653+ }
54654+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54655+ gr_remove_uid(gr_usermode->segv_uid);
54656+ }
54657+ } else {
54658+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54659+ error = -EPERM;
54660+ }
54661+ break;
54662+ case GR_SPROLE:
54663+ case GR_SPROLEPAM:
54664+ if (unlikely(!(gr_status & GR_READY))) {
54665+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54666+ error = -EAGAIN;
54667+ break;
54668+ }
54669+
54670+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54671+ current->role->expires = 0;
54672+ current->role->auth_attempts = 0;
54673+ }
54674+
54675+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54676+ time_after(current->role->expires, get_seconds())) {
54677+ error = -EBUSY;
54678+ goto out;
54679+ }
54680+
54681+ if (lookup_special_role_auth
54682+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54683+ && ((!sprole_salt && !sprole_sum)
54684+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54685+ char *p = "";
54686+ assign_special_role(gr_usermode->sp_role);
54687+ read_lock(&tasklist_lock);
54688+ if (current->real_parent)
54689+ p = current->real_parent->role->rolename;
54690+ read_unlock(&tasklist_lock);
54691+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54692+ p, acl_sp_role_value);
54693+ } else {
54694+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54695+ error = -EPERM;
54696+ if(!(current->role->auth_attempts++))
54697+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54698+
54699+ goto out;
54700+ }
54701+ break;
54702+ case GR_UNSPROLE:
54703+ if (unlikely(!(gr_status & GR_READY))) {
54704+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54705+ error = -EAGAIN;
54706+ break;
54707+ }
54708+
54709+ if (current->role->roletype & GR_ROLE_SPECIAL) {
54710+ char *p = "";
54711+ int i = 0;
54712+
54713+ read_lock(&tasklist_lock);
54714+ if (current->real_parent) {
54715+ p = current->real_parent->role->rolename;
54716+ i = current->real_parent->acl_role_id;
54717+ }
54718+ read_unlock(&tasklist_lock);
54719+
54720+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54721+ gr_set_acls(1);
54722+ } else {
54723+ error = -EPERM;
54724+ goto out;
54725+ }
54726+ break;
54727+ default:
54728+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54729+ error = -EINVAL;
54730+ break;
54731+ }
54732+
54733+ if (error != -EPERM)
54734+ goto out;
54735+
54736+ if(!(gr_auth_attempts++))
54737+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54738+
54739+ out:
54740+ mutex_unlock(&gr_dev_mutex);
54741+ return error;
54742+}
54743+
54744+/* must be called with
54745+ rcu_read_lock();
54746+ read_lock(&tasklist_lock);
54747+ read_lock(&grsec_exec_file_lock);
54748+*/
54749+int gr_apply_subject_to_task(struct task_struct *task)
54750+{
54751+ struct acl_object_label *obj;
54752+ char *tmpname;
54753+ struct acl_subject_label *tmpsubj;
54754+ struct file *filp;
54755+ struct name_entry *nmatch;
54756+
54757+ filp = task->exec_file;
54758+ if (filp == NULL)
54759+ return 0;
54760+
54761+ /* the following is to apply the correct subject
54762+ on binaries running when the RBAC system
54763+ is enabled, when the binaries have been
54764+ replaced or deleted since their execution
54765+ -----
54766+ when the RBAC system starts, the inode/dev
54767+ from exec_file will be one the RBAC system
54768+ is unaware of. It only knows the inode/dev
54769+ of the present file on disk, or the absence
54770+ of it.
54771+ */
54772+ preempt_disable();
54773+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54774+
54775+ nmatch = lookup_name_entry(tmpname);
54776+ preempt_enable();
54777+ tmpsubj = NULL;
54778+ if (nmatch) {
54779+ if (nmatch->deleted)
54780+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54781+ else
54782+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54783+ if (tmpsubj != NULL)
54784+ task->acl = tmpsubj;
54785+ }
54786+ if (tmpsubj == NULL)
54787+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54788+ task->role);
54789+ if (task->acl) {
54790+ task->is_writable = 0;
54791+ /* ignore additional mmap checks for processes that are writable
54792+ by the default ACL */
54793+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54794+ if (unlikely(obj->mode & GR_WRITE))
54795+ task->is_writable = 1;
54796+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54797+ if (unlikely(obj->mode & GR_WRITE))
54798+ task->is_writable = 1;
54799+
54800+ gr_set_proc_res(task);
54801+
54802+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54803+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54804+#endif
54805+ } else {
54806+ return 1;
54807+ }
54808+
54809+ return 0;
54810+}
54811+
54812+int
54813+gr_set_acls(const int type)
54814+{
54815+ struct task_struct *task, *task2;
54816+ struct acl_role_label *role = current->role;
54817+ __u16 acl_role_id = current->acl_role_id;
54818+ const struct cred *cred;
54819+ int ret;
54820+
54821+ rcu_read_lock();
54822+ read_lock(&tasklist_lock);
54823+ read_lock(&grsec_exec_file_lock);
54824+ do_each_thread(task2, task) {
54825+ /* check to see if we're called from the exit handler,
54826+ if so, only replace ACLs that have inherited the admin
54827+ ACL */
54828+
54829+ if (type && (task->role != role ||
54830+ task->acl_role_id != acl_role_id))
54831+ continue;
54832+
54833+ task->acl_role_id = 0;
54834+ task->acl_sp_role = 0;
54835+
54836+ if (task->exec_file) {
54837+ cred = __task_cred(task);
54838+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54839+ ret = gr_apply_subject_to_task(task);
54840+ if (ret) {
54841+ read_unlock(&grsec_exec_file_lock);
54842+ read_unlock(&tasklist_lock);
54843+ rcu_read_unlock();
54844+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54845+ return ret;
54846+ }
54847+ } else {
54848+ // it's a kernel process
54849+ task->role = kernel_role;
54850+ task->acl = kernel_role->root_label;
54851+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54852+ task->acl->mode &= ~GR_PROCFIND;
54853+#endif
54854+ }
54855+ } while_each_thread(task2, task);
54856+ read_unlock(&grsec_exec_file_lock);
54857+ read_unlock(&tasklist_lock);
54858+ rcu_read_unlock();
54859+
54860+ return 0;
54861+}
54862+
54863+void
54864+gr_learn_resource(const struct task_struct *task,
54865+ const int res, const unsigned long wanted, const int gt)
54866+{
54867+ struct acl_subject_label *acl;
54868+ const struct cred *cred;
54869+
54870+ if (unlikely((gr_status & GR_READY) &&
54871+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54872+ goto skip_reslog;
54873+
54874+#ifdef CONFIG_GRKERNSEC_RESLOG
54875+ gr_log_resource(task, res, wanted, gt);
54876+#endif
54877+ skip_reslog:
54878+
54879+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54880+ return;
54881+
54882+ acl = task->acl;
54883+
54884+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54885+ !(acl->resmask & (1 << (unsigned short) res))))
54886+ return;
54887+
54888+ if (wanted >= acl->res[res].rlim_cur) {
54889+ unsigned long res_add;
54890+
54891+ res_add = wanted;
54892+ switch (res) {
54893+ case RLIMIT_CPU:
54894+ res_add += GR_RLIM_CPU_BUMP;
54895+ break;
54896+ case RLIMIT_FSIZE:
54897+ res_add += GR_RLIM_FSIZE_BUMP;
54898+ break;
54899+ case RLIMIT_DATA:
54900+ res_add += GR_RLIM_DATA_BUMP;
54901+ break;
54902+ case RLIMIT_STACK:
54903+ res_add += GR_RLIM_STACK_BUMP;
54904+ break;
54905+ case RLIMIT_CORE:
54906+ res_add += GR_RLIM_CORE_BUMP;
54907+ break;
54908+ case RLIMIT_RSS:
54909+ res_add += GR_RLIM_RSS_BUMP;
54910+ break;
54911+ case RLIMIT_NPROC:
54912+ res_add += GR_RLIM_NPROC_BUMP;
54913+ break;
54914+ case RLIMIT_NOFILE:
54915+ res_add += GR_RLIM_NOFILE_BUMP;
54916+ break;
54917+ case RLIMIT_MEMLOCK:
54918+ res_add += GR_RLIM_MEMLOCK_BUMP;
54919+ break;
54920+ case RLIMIT_AS:
54921+ res_add += GR_RLIM_AS_BUMP;
54922+ break;
54923+ case RLIMIT_LOCKS:
54924+ res_add += GR_RLIM_LOCKS_BUMP;
54925+ break;
54926+ case RLIMIT_SIGPENDING:
54927+ res_add += GR_RLIM_SIGPENDING_BUMP;
54928+ break;
54929+ case RLIMIT_MSGQUEUE:
54930+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54931+ break;
54932+ case RLIMIT_NICE:
54933+ res_add += GR_RLIM_NICE_BUMP;
54934+ break;
54935+ case RLIMIT_RTPRIO:
54936+ res_add += GR_RLIM_RTPRIO_BUMP;
54937+ break;
54938+ case RLIMIT_RTTIME:
54939+ res_add += GR_RLIM_RTTIME_BUMP;
54940+ break;
54941+ }
54942+
54943+ acl->res[res].rlim_cur = res_add;
54944+
54945+ if (wanted > acl->res[res].rlim_max)
54946+ acl->res[res].rlim_max = res_add;
54947+
54948+ /* only log the subject filename, since resource logging is supported for
54949+ single-subject learning only */
54950+ rcu_read_lock();
54951+ cred = __task_cred(task);
54952+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54953+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54954+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54955+ "", (unsigned long) res, &task->signal->saved_ip);
54956+ rcu_read_unlock();
54957+ }
54958+
54959+ return;
54960+}
54961+
54962+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54963+void
54964+pax_set_initial_flags(struct linux_binprm *bprm)
54965+{
54966+ struct task_struct *task = current;
54967+ struct acl_subject_label *proc;
54968+ unsigned long flags;
54969+
54970+ if (unlikely(!(gr_status & GR_READY)))
54971+ return;
54972+
54973+ flags = pax_get_flags(task);
54974+
54975+ proc = task->acl;
54976+
54977+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54978+ flags &= ~MF_PAX_PAGEEXEC;
54979+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54980+ flags &= ~MF_PAX_SEGMEXEC;
54981+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54982+ flags &= ~MF_PAX_RANDMMAP;
54983+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54984+ flags &= ~MF_PAX_EMUTRAMP;
54985+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54986+ flags &= ~MF_PAX_MPROTECT;
54987+
54988+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54989+ flags |= MF_PAX_PAGEEXEC;
54990+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54991+ flags |= MF_PAX_SEGMEXEC;
54992+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54993+ flags |= MF_PAX_RANDMMAP;
54994+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54995+ flags |= MF_PAX_EMUTRAMP;
54996+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54997+ flags |= MF_PAX_MPROTECT;
54998+
54999+ pax_set_flags(task, flags);
55000+
55001+ return;
55002+}
55003+#endif
55004+
55005+#ifdef CONFIG_SYSCTL
55006+/* Eric Biederman likes breaking userland ABI and every inode-based security
55007+ system to save 35kb of memory */
55008+
55009+/* we modify the passed in filename, but adjust it back before returning */
55010+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
55011+{
55012+ struct name_entry *nmatch;
55013+ char *p, *lastp = NULL;
55014+ struct acl_object_label *obj = NULL, *tmp;
55015+ struct acl_subject_label *tmpsubj;
55016+ char c = '\0';
55017+
55018+ read_lock(&gr_inode_lock);
55019+
55020+ p = name + len - 1;
55021+ do {
55022+ nmatch = lookup_name_entry(name);
55023+ if (lastp != NULL)
55024+ *lastp = c;
55025+
55026+ if (nmatch == NULL)
55027+ goto next_component;
55028+ tmpsubj = current->acl;
55029+ do {
55030+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
55031+ if (obj != NULL) {
55032+ tmp = obj->globbed;
55033+ while (tmp) {
55034+ if (!glob_match(tmp->filename, name)) {
55035+ obj = tmp;
55036+ goto found_obj;
55037+ }
55038+ tmp = tmp->next;
55039+ }
55040+ goto found_obj;
55041+ }
55042+ } while ((tmpsubj = tmpsubj->parent_subject));
55043+next_component:
55044+ /* end case */
55045+ if (p == name)
55046+ break;
55047+
55048+ while (*p != '/')
55049+ p--;
55050+ if (p == name)
55051+ lastp = p + 1;
55052+ else {
55053+ lastp = p;
55054+ p--;
55055+ }
55056+ c = *lastp;
55057+ *lastp = '\0';
55058+ } while (1);
55059+found_obj:
55060+ read_unlock(&gr_inode_lock);
55061+ /* obj returned will always be non-null */
55062+ return obj;
55063+}
55064+
55065+/* returns 0 when allowing, non-zero on error
55066+ op of 0 is used for readdir, so we don't log the names of hidden files
55067+*/
55068+__u32
55069+gr_handle_sysctl(const struct ctl_table *table, const int op)
55070+{
55071+ struct ctl_table *tmp;
55072+ const char *proc_sys = "/proc/sys";
55073+ char *path;
55074+ struct acl_object_label *obj;
55075+ unsigned short len = 0, pos = 0, depth = 0, i;
55076+ __u32 err = 0;
55077+ __u32 mode = 0;
55078+
55079+ if (unlikely(!(gr_status & GR_READY)))
55080+ return 0;
55081+
55082+ /* for now, ignore operations on non-sysctl entries if it's not a
55083+ readdir*/
55084+ if (table->child != NULL && op != 0)
55085+ return 0;
55086+
55087+ mode |= GR_FIND;
55088+ /* it's only a read if it's an entry, read on dirs is for readdir */
55089+ if (op & MAY_READ)
55090+ mode |= GR_READ;
55091+ if (op & MAY_WRITE)
55092+ mode |= GR_WRITE;
55093+
55094+ preempt_disable();
55095+
55096+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55097+
55098+ /* it's only a read/write if it's an actual entry, not a dir
55099+ (which are opened for readdir)
55100+ */
55101+
55102+ /* convert the requested sysctl entry into a pathname */
55103+
55104+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
55105+ len += strlen(tmp->procname);
55106+ len++;
55107+ depth++;
55108+ }
55109+
55110+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
55111+ /* deny */
55112+ goto out;
55113+ }
55114+
55115+ memset(path, 0, PAGE_SIZE);
55116+
55117+ memcpy(path, proc_sys, strlen(proc_sys));
55118+
55119+ pos += strlen(proc_sys);
55120+
55121+ for (; depth > 0; depth--) {
55122+ path[pos] = '/';
55123+ pos++;
55124+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
55125+ if (depth == i) {
55126+ memcpy(path + pos, tmp->procname,
55127+ strlen(tmp->procname));
55128+ pos += strlen(tmp->procname);
55129+ }
55130+ i++;
55131+ }
55132+ }
55133+
55134+ obj = gr_lookup_by_name(path, pos);
55135+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
55136+
55137+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
55138+ ((err & mode) != mode))) {
55139+ __u32 new_mode = mode;
55140+
55141+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55142+
55143+ err = 0;
55144+ gr_log_learn_sysctl(path, new_mode);
55145+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
55146+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
55147+ err = -ENOENT;
55148+ } else if (!(err & GR_FIND)) {
55149+ err = -ENOENT;
55150+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
55151+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
55152+ path, (mode & GR_READ) ? " reading" : "",
55153+ (mode & GR_WRITE) ? " writing" : "");
55154+ err = -EACCES;
55155+ } else if ((err & mode) != mode) {
55156+ err = -EACCES;
55157+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
55158+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
55159+ path, (mode & GR_READ) ? " reading" : "",
55160+ (mode & GR_WRITE) ? " writing" : "");
55161+ err = 0;
55162+ } else
55163+ err = 0;
55164+
55165+ out:
55166+ preempt_enable();
55167+
55168+ return err;
55169+}
55170+#endif
55171+
55172+int
55173+gr_handle_proc_ptrace(struct task_struct *task)
55174+{
55175+ struct file *filp;
55176+ struct task_struct *tmp = task;
55177+ struct task_struct *curtemp = current;
55178+ __u32 retmode;
55179+
55180+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55181+ if (unlikely(!(gr_status & GR_READY)))
55182+ return 0;
55183+#endif
55184+
55185+ read_lock(&tasklist_lock);
55186+ read_lock(&grsec_exec_file_lock);
55187+ filp = task->exec_file;
55188+
55189+ while (tmp->pid > 0) {
55190+ if (tmp == curtemp)
55191+ break;
55192+ tmp = tmp->real_parent;
55193+ }
55194+
55195+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55196+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
55197+ read_unlock(&grsec_exec_file_lock);
55198+ read_unlock(&tasklist_lock);
55199+ return 1;
55200+ }
55201+
55202+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55203+ if (!(gr_status & GR_READY)) {
55204+ read_unlock(&grsec_exec_file_lock);
55205+ read_unlock(&tasklist_lock);
55206+ return 0;
55207+ }
55208+#endif
55209+
55210+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
55211+ read_unlock(&grsec_exec_file_lock);
55212+ read_unlock(&tasklist_lock);
55213+
55214+ if (retmode & GR_NOPTRACE)
55215+ return 1;
55216+
55217+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
55218+ && (current->acl != task->acl || (current->acl != current->role->root_label
55219+ && current->pid != task->pid)))
55220+ return 1;
55221+
55222+ return 0;
55223+}
55224+
55225+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
55226+{
55227+ if (unlikely(!(gr_status & GR_READY)))
55228+ return;
55229+
55230+ if (!(current->role->roletype & GR_ROLE_GOD))
55231+ return;
55232+
55233+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55234+ p->role->rolename, gr_task_roletype_to_char(p),
55235+ p->acl->filename);
55236+}
55237+
55238+int
55239+gr_handle_ptrace(struct task_struct *task, const long request)
55240+{
55241+ struct task_struct *tmp = task;
55242+ struct task_struct *curtemp = current;
55243+ __u32 retmode;
55244+
55245+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55246+ if (unlikely(!(gr_status & GR_READY)))
55247+ return 0;
55248+#endif
55249+
55250+ read_lock(&tasklist_lock);
55251+ while (tmp->pid > 0) {
55252+ if (tmp == curtemp)
55253+ break;
55254+ tmp = tmp->real_parent;
55255+ }
55256+
55257+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
55258+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55259+ read_unlock(&tasklist_lock);
55260+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55261+ return 1;
55262+ }
55263+ read_unlock(&tasklist_lock);
55264+
55265+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55266+ if (!(gr_status & GR_READY))
55267+ return 0;
55268+#endif
55269+
55270+ read_lock(&grsec_exec_file_lock);
55271+ if (unlikely(!task->exec_file)) {
55272+ read_unlock(&grsec_exec_file_lock);
55273+ return 0;
55274+ }
55275+
55276+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55277+ read_unlock(&grsec_exec_file_lock);
55278+
55279+ if (retmode & GR_NOPTRACE) {
55280+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55281+ return 1;
55282+ }
55283+
55284+ if (retmode & GR_PTRACERD) {
55285+ switch (request) {
55286+ case PTRACE_SEIZE:
55287+ case PTRACE_POKETEXT:
55288+ case PTRACE_POKEDATA:
55289+ case PTRACE_POKEUSR:
55290+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55291+ case PTRACE_SETREGS:
55292+ case PTRACE_SETFPREGS:
55293+#endif
55294+#ifdef CONFIG_X86
55295+ case PTRACE_SETFPXREGS:
55296+#endif
55297+#ifdef CONFIG_ALTIVEC
55298+ case PTRACE_SETVRREGS:
55299+#endif
55300+ return 1;
55301+ default:
55302+ return 0;
55303+ }
55304+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
55305+ !(current->role->roletype & GR_ROLE_GOD) &&
55306+ (current->acl != task->acl)) {
55307+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55308+ return 1;
55309+ }
55310+
55311+ return 0;
55312+}
55313+
55314+static int is_writable_mmap(const struct file *filp)
55315+{
55316+ struct task_struct *task = current;
55317+ struct acl_object_label *obj, *obj2;
55318+
55319+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
55320+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
55321+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55322+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55323+ task->role->root_label);
55324+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55325+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55326+ return 1;
55327+ }
55328+ }
55329+ return 0;
55330+}
55331+
55332+int
55333+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55334+{
55335+ __u32 mode;
55336+
55337+ if (unlikely(!file || !(prot & PROT_EXEC)))
55338+ return 1;
55339+
55340+ if (is_writable_mmap(file))
55341+ return 0;
55342+
55343+ mode =
55344+ gr_search_file(file->f_path.dentry,
55345+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55346+ file->f_path.mnt);
55347+
55348+ if (!gr_tpe_allow(file))
55349+ return 0;
55350+
55351+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55352+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55353+ return 0;
55354+ } else if (unlikely(!(mode & GR_EXEC))) {
55355+ return 0;
55356+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55357+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55358+ return 1;
55359+ }
55360+
55361+ return 1;
55362+}
55363+
55364+int
55365+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55366+{
55367+ __u32 mode;
55368+
55369+ if (unlikely(!file || !(prot & PROT_EXEC)))
55370+ return 1;
55371+
55372+ if (is_writable_mmap(file))
55373+ return 0;
55374+
55375+ mode =
55376+ gr_search_file(file->f_path.dentry,
55377+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55378+ file->f_path.mnt);
55379+
55380+ if (!gr_tpe_allow(file))
55381+ return 0;
55382+
55383+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55384+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55385+ return 0;
55386+ } else if (unlikely(!(mode & GR_EXEC))) {
55387+ return 0;
55388+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55389+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55390+ return 1;
55391+ }
55392+
55393+ return 1;
55394+}
55395+
55396+void
55397+gr_acl_handle_psacct(struct task_struct *task, const long code)
55398+{
55399+ unsigned long runtime;
55400+ unsigned long cputime;
55401+ unsigned int wday, cday;
55402+ __u8 whr, chr;
55403+ __u8 wmin, cmin;
55404+ __u8 wsec, csec;
55405+ struct timespec timeval;
55406+
55407+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55408+ !(task->acl->mode & GR_PROCACCT)))
55409+ return;
55410+
55411+ do_posix_clock_monotonic_gettime(&timeval);
55412+ runtime = timeval.tv_sec - task->start_time.tv_sec;
55413+ wday = runtime / (3600 * 24);
55414+ runtime -= wday * (3600 * 24);
55415+ whr = runtime / 3600;
55416+ runtime -= whr * 3600;
55417+ wmin = runtime / 60;
55418+ runtime -= wmin * 60;
55419+ wsec = runtime;
55420+
55421+ cputime = (task->utime + task->stime) / HZ;
55422+ cday = cputime / (3600 * 24);
55423+ cputime -= cday * (3600 * 24);
55424+ chr = cputime / 3600;
55425+ cputime -= chr * 3600;
55426+ cmin = cputime / 60;
55427+ cputime -= cmin * 60;
55428+ csec = cputime;
55429+
55430+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55431+
55432+ return;
55433+}
55434+
55435+void gr_set_kernel_label(struct task_struct *task)
55436+{
55437+ if (gr_status & GR_READY) {
55438+ task->role = kernel_role;
55439+ task->acl = kernel_role->root_label;
55440+ }
55441+ return;
55442+}
55443+
55444+#ifdef CONFIG_TASKSTATS
55445+int gr_is_taskstats_denied(int pid)
55446+{
55447+ struct task_struct *task;
55448+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55449+ const struct cred *cred;
55450+#endif
55451+ int ret = 0;
55452+
55453+ /* restrict taskstats viewing to un-chrooted root users
55454+ who have the 'view' subject flag if the RBAC system is enabled
55455+ */
55456+
55457+ rcu_read_lock();
55458+ read_lock(&tasklist_lock);
55459+ task = find_task_by_vpid(pid);
55460+ if (task) {
55461+#ifdef CONFIG_GRKERNSEC_CHROOT
55462+ if (proc_is_chrooted(task))
55463+ ret = -EACCES;
55464+#endif
55465+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55466+ cred = __task_cred(task);
55467+#ifdef CONFIG_GRKERNSEC_PROC_USER
55468+ if (cred->uid != 0)
55469+ ret = -EACCES;
55470+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55471+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55472+ ret = -EACCES;
55473+#endif
55474+#endif
55475+ if (gr_status & GR_READY) {
55476+ if (!(task->acl->mode & GR_VIEW))
55477+ ret = -EACCES;
55478+ }
55479+ } else
55480+ ret = -ENOENT;
55481+
55482+ read_unlock(&tasklist_lock);
55483+ rcu_read_unlock();
55484+
55485+ return ret;
55486+}
55487+#endif
55488+
55489+/* AUXV entries are filled via a descendant of search_binary_handler
55490+ after we've already applied the subject for the target
55491+*/
55492+int gr_acl_enable_at_secure(void)
55493+{
55494+ if (unlikely(!(gr_status & GR_READY)))
55495+ return 0;
55496+
55497+ if (current->acl->mode & GR_ATSECURE)
55498+ return 1;
55499+
55500+ return 0;
55501+}
55502+
55503+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55504+{
55505+ struct task_struct *task = current;
55506+ struct dentry *dentry = file->f_path.dentry;
55507+ struct vfsmount *mnt = file->f_path.mnt;
55508+ struct acl_object_label *obj, *tmp;
55509+ struct acl_subject_label *subj;
55510+ unsigned int bufsize;
55511+ int is_not_root;
55512+ char *path;
55513+ dev_t dev = __get_dev(dentry);
55514+
55515+ if (unlikely(!(gr_status & GR_READY)))
55516+ return 1;
55517+
55518+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55519+ return 1;
55520+
55521+ /* ignore Eric Biederman */
55522+ if (IS_PRIVATE(dentry->d_inode))
55523+ return 1;
55524+
55525+ subj = task->acl;
55526+ do {
55527+ obj = lookup_acl_obj_label(ino, dev, subj);
55528+ if (obj != NULL)
55529+ return (obj->mode & GR_FIND) ? 1 : 0;
55530+ } while ((subj = subj->parent_subject));
55531+
55532+ /* this is purely an optimization since we're looking for an object
55533+ for the directory we're doing a readdir on
55534+ if it's possible for any globbed object to match the entry we're
55535+ filling into the directory, then the object we find here will be
55536+ an anchor point with attached globbed objects
55537+ */
55538+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55539+ if (obj->globbed == NULL)
55540+ return (obj->mode & GR_FIND) ? 1 : 0;
55541+
55542+ is_not_root = ((obj->filename[0] == '/') &&
55543+ (obj->filename[1] == '\0')) ? 0 : 1;
55544+ bufsize = PAGE_SIZE - namelen - is_not_root;
55545+
55546+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
55547+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55548+ return 1;
55549+
55550+ preempt_disable();
55551+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55552+ bufsize);
55553+
55554+ bufsize = strlen(path);
55555+
55556+ /* if base is "/", don't append an additional slash */
55557+ if (is_not_root)
55558+ *(path + bufsize) = '/';
55559+ memcpy(path + bufsize + is_not_root, name, namelen);
55560+ *(path + bufsize + namelen + is_not_root) = '\0';
55561+
55562+ tmp = obj->globbed;
55563+ while (tmp) {
55564+ if (!glob_match(tmp->filename, path)) {
55565+ preempt_enable();
55566+ return (tmp->mode & GR_FIND) ? 1 : 0;
55567+ }
55568+ tmp = tmp->next;
55569+ }
55570+ preempt_enable();
55571+ return (obj->mode & GR_FIND) ? 1 : 0;
55572+}
55573+
55574+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55575+EXPORT_SYMBOL(gr_acl_is_enabled);
55576+#endif
55577+EXPORT_SYMBOL(gr_learn_resource);
55578+EXPORT_SYMBOL(gr_set_kernel_label);
55579+#ifdef CONFIG_SECURITY
55580+EXPORT_SYMBOL(gr_check_user_change);
55581+EXPORT_SYMBOL(gr_check_group_change);
55582+#endif
55583+
55584diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55585new file mode 100644
55586index 0000000..34fefda
55587--- /dev/null
55588+++ b/grsecurity/gracl_alloc.c
55589@@ -0,0 +1,105 @@
55590+#include <linux/kernel.h>
55591+#include <linux/mm.h>
55592+#include <linux/slab.h>
55593+#include <linux/vmalloc.h>
55594+#include <linux/gracl.h>
55595+#include <linux/grsecurity.h>
55596+
55597+static unsigned long alloc_stack_next = 1;
55598+static unsigned long alloc_stack_size = 1;
55599+static void **alloc_stack;
55600+
55601+static __inline__ int
55602+alloc_pop(void)
55603+{
55604+ if (alloc_stack_next == 1)
55605+ return 0;
55606+
55607+ kfree(alloc_stack[alloc_stack_next - 2]);
55608+
55609+ alloc_stack_next--;
55610+
55611+ return 1;
55612+}
55613+
55614+static __inline__ int
55615+alloc_push(void *buf)
55616+{
55617+ if (alloc_stack_next >= alloc_stack_size)
55618+ return 1;
55619+
55620+ alloc_stack[alloc_stack_next - 1] = buf;
55621+
55622+ alloc_stack_next++;
55623+
55624+ return 0;
55625+}
55626+
55627+void *
55628+acl_alloc(unsigned long len)
55629+{
55630+ void *ret = NULL;
55631+
55632+ if (!len || len > PAGE_SIZE)
55633+ goto out;
55634+
55635+ ret = kmalloc(len, GFP_KERNEL);
55636+
55637+ if (ret) {
55638+ if (alloc_push(ret)) {
55639+ kfree(ret);
55640+ ret = NULL;
55641+ }
55642+ }
55643+
55644+out:
55645+ return ret;
55646+}
55647+
55648+void *
55649+acl_alloc_num(unsigned long num, unsigned long len)
55650+{
55651+ if (!len || (num > (PAGE_SIZE / len)))
55652+ return NULL;
55653+
55654+ return acl_alloc(num * len);
55655+}
55656+
55657+void
55658+acl_free_all(void)
55659+{
55660+ if (gr_acl_is_enabled() || !alloc_stack)
55661+ return;
55662+
55663+ while (alloc_pop()) ;
55664+
55665+ if (alloc_stack) {
55666+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55667+ kfree(alloc_stack);
55668+ else
55669+ vfree(alloc_stack);
55670+ }
55671+
55672+ alloc_stack = NULL;
55673+ alloc_stack_size = 1;
55674+ alloc_stack_next = 1;
55675+
55676+ return;
55677+}
55678+
55679+int
55680+acl_alloc_stack_init(unsigned long size)
55681+{
55682+ if ((size * sizeof (void *)) <= PAGE_SIZE)
55683+ alloc_stack =
55684+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55685+ else
55686+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
55687+
55688+ alloc_stack_size = size;
55689+
55690+ if (!alloc_stack)
55691+ return 0;
55692+ else
55693+ return 1;
55694+}
55695diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55696new file mode 100644
55697index 0000000..955ddfb
55698--- /dev/null
55699+++ b/grsecurity/gracl_cap.c
55700@@ -0,0 +1,101 @@
55701+#include <linux/kernel.h>
55702+#include <linux/module.h>
55703+#include <linux/sched.h>
55704+#include <linux/gracl.h>
55705+#include <linux/grsecurity.h>
55706+#include <linux/grinternal.h>
55707+
55708+extern const char *captab_log[];
55709+extern int captab_log_entries;
55710+
55711+int
55712+gr_acl_is_capable(const int cap)
55713+{
55714+ struct task_struct *task = current;
55715+ const struct cred *cred = current_cred();
55716+ struct acl_subject_label *curracl;
55717+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55718+ kernel_cap_t cap_audit = __cap_empty_set;
55719+
55720+ if (!gr_acl_is_enabled())
55721+ return 1;
55722+
55723+ curracl = task->acl;
55724+
55725+ cap_drop = curracl->cap_lower;
55726+ cap_mask = curracl->cap_mask;
55727+ cap_audit = curracl->cap_invert_audit;
55728+
55729+ while ((curracl = curracl->parent_subject)) {
55730+ /* if the cap isn't specified in the current computed mask but is specified in the
55731+ current level subject, and is lowered in the current level subject, then add
55732+ it to the set of dropped capabilities
55733+ otherwise, add the current level subject's mask to the current computed mask
55734+ */
55735+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55736+ cap_raise(cap_mask, cap);
55737+ if (cap_raised(curracl->cap_lower, cap))
55738+ cap_raise(cap_drop, cap);
55739+ if (cap_raised(curracl->cap_invert_audit, cap))
55740+ cap_raise(cap_audit, cap);
55741+ }
55742+ }
55743+
55744+ if (!cap_raised(cap_drop, cap)) {
55745+ if (cap_raised(cap_audit, cap))
55746+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55747+ return 1;
55748+ }
55749+
55750+ curracl = task->acl;
55751+
55752+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55753+ && cap_raised(cred->cap_effective, cap)) {
55754+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55755+ task->role->roletype, cred->uid,
55756+ cred->gid, task->exec_file ?
55757+ gr_to_filename(task->exec_file->f_path.dentry,
55758+ task->exec_file->f_path.mnt) : curracl->filename,
55759+ curracl->filename, 0UL,
55760+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55761+ return 1;
55762+ }
55763+
55764+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55765+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55766+ return 0;
55767+}
55768+
55769+int
55770+gr_acl_is_capable_nolog(const int cap)
55771+{
55772+ struct acl_subject_label *curracl;
55773+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55774+
55775+ if (!gr_acl_is_enabled())
55776+ return 1;
55777+
55778+ curracl = current->acl;
55779+
55780+ cap_drop = curracl->cap_lower;
55781+ cap_mask = curracl->cap_mask;
55782+
55783+ while ((curracl = curracl->parent_subject)) {
55784+ /* if the cap isn't specified in the current computed mask but is specified in the
55785+ current level subject, and is lowered in the current level subject, then add
55786+ it to the set of dropped capabilities
55787+ otherwise, add the current level subject's mask to the current computed mask
55788+ */
55789+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55790+ cap_raise(cap_mask, cap);
55791+ if (cap_raised(curracl->cap_lower, cap))
55792+ cap_raise(cap_drop, cap);
55793+ }
55794+ }
55795+
55796+ if (!cap_raised(cap_drop, cap))
55797+ return 1;
55798+
55799+ return 0;
55800+}
55801+
55802diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55803new file mode 100644
55804index 0000000..4eda5c3
55805--- /dev/null
55806+++ b/grsecurity/gracl_fs.c
55807@@ -0,0 +1,433 @@
55808+#include <linux/kernel.h>
55809+#include <linux/sched.h>
55810+#include <linux/types.h>
55811+#include <linux/fs.h>
55812+#include <linux/file.h>
55813+#include <linux/stat.h>
55814+#include <linux/grsecurity.h>
55815+#include <linux/grinternal.h>
55816+#include <linux/gracl.h>
55817+
55818+__u32
55819+gr_acl_handle_hidden_file(const struct dentry * dentry,
55820+ const struct vfsmount * mnt)
55821+{
55822+ __u32 mode;
55823+
55824+ if (unlikely(!dentry->d_inode))
55825+ return GR_FIND;
55826+
55827+ mode =
55828+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55829+
55830+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55831+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55832+ return mode;
55833+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55834+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55835+ return 0;
55836+ } else if (unlikely(!(mode & GR_FIND)))
55837+ return 0;
55838+
55839+ return GR_FIND;
55840+}
55841+
55842+__u32
55843+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55844+ int acc_mode)
55845+{
55846+ __u32 reqmode = GR_FIND;
55847+ __u32 mode;
55848+
55849+ if (unlikely(!dentry->d_inode))
55850+ return reqmode;
55851+
55852+ if (acc_mode & MAY_APPEND)
55853+ reqmode |= GR_APPEND;
55854+ else if (acc_mode & MAY_WRITE)
55855+ reqmode |= GR_WRITE;
55856+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55857+ reqmode |= GR_READ;
55858+
55859+ mode =
55860+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55861+ mnt);
55862+
55863+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55864+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55865+ reqmode & GR_READ ? " reading" : "",
55866+ reqmode & GR_WRITE ? " writing" : reqmode &
55867+ GR_APPEND ? " appending" : "");
55868+ return reqmode;
55869+ } else
55870+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55871+ {
55872+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55873+ reqmode & GR_READ ? " reading" : "",
55874+ reqmode & GR_WRITE ? " writing" : reqmode &
55875+ GR_APPEND ? " appending" : "");
55876+ return 0;
55877+ } else if (unlikely((mode & reqmode) != reqmode))
55878+ return 0;
55879+
55880+ return reqmode;
55881+}
55882+
55883+__u32
55884+gr_acl_handle_creat(const struct dentry * dentry,
55885+ const struct dentry * p_dentry,
55886+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55887+ const int imode)
55888+{
55889+ __u32 reqmode = GR_WRITE | GR_CREATE;
55890+ __u32 mode;
55891+
55892+ if (acc_mode & MAY_APPEND)
55893+ reqmode |= GR_APPEND;
55894+ // if a directory was required or the directory already exists, then
55895+ // don't count this open as a read
55896+ if ((acc_mode & MAY_READ) &&
55897+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55898+ reqmode |= GR_READ;
55899+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55900+ reqmode |= GR_SETID;
55901+
55902+ mode =
55903+ gr_check_create(dentry, p_dentry, p_mnt,
55904+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55905+
55906+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55907+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55908+ reqmode & GR_READ ? " reading" : "",
55909+ reqmode & GR_WRITE ? " writing" : reqmode &
55910+ GR_APPEND ? " appending" : "");
55911+ return reqmode;
55912+ } else
55913+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55914+ {
55915+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55916+ reqmode & GR_READ ? " reading" : "",
55917+ reqmode & GR_WRITE ? " writing" : reqmode &
55918+ GR_APPEND ? " appending" : "");
55919+ return 0;
55920+ } else if (unlikely((mode & reqmode) != reqmode))
55921+ return 0;
55922+
55923+ return reqmode;
55924+}
55925+
55926+__u32
55927+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55928+ const int fmode)
55929+{
55930+ __u32 mode, reqmode = GR_FIND;
55931+
55932+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55933+ reqmode |= GR_EXEC;
55934+ if (fmode & S_IWOTH)
55935+ reqmode |= GR_WRITE;
55936+ if (fmode & S_IROTH)
55937+ reqmode |= GR_READ;
55938+
55939+ mode =
55940+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55941+ mnt);
55942+
55943+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55944+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55945+ reqmode & GR_READ ? " reading" : "",
55946+ reqmode & GR_WRITE ? " writing" : "",
55947+ reqmode & GR_EXEC ? " executing" : "");
55948+ return reqmode;
55949+ } else
55950+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55951+ {
55952+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55953+ reqmode & GR_READ ? " reading" : "",
55954+ reqmode & GR_WRITE ? " writing" : "",
55955+ reqmode & GR_EXEC ? " executing" : "");
55956+ return 0;
55957+ } else if (unlikely((mode & reqmode) != reqmode))
55958+ return 0;
55959+
55960+ return reqmode;
55961+}
55962+
55963+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55964+{
55965+ __u32 mode;
55966+
55967+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55968+
55969+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55970+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55971+ return mode;
55972+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55973+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55974+ return 0;
55975+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55976+ return 0;
55977+
55978+ return (reqmode);
55979+}
55980+
55981+__u32
55982+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55983+{
55984+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55985+}
55986+
55987+__u32
55988+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55989+{
55990+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55991+}
55992+
55993+__u32
55994+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55995+{
55996+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55997+}
55998+
55999+__u32
56000+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
56001+{
56002+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
56003+}
56004+
56005+__u32
56006+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
56007+ mode_t mode)
56008+{
56009+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
56010+ return 1;
56011+
56012+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
56013+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56014+ GR_FCHMOD_ACL_MSG);
56015+ } else {
56016+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
56017+ }
56018+}
56019+
56020+__u32
56021+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
56022+ mode_t mode)
56023+{
56024+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
56025+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
56026+ GR_CHMOD_ACL_MSG);
56027+ } else {
56028+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
56029+ }
56030+}
56031+
56032+__u32
56033+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
56034+{
56035+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
56036+}
56037+
56038+__u32
56039+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
56040+{
56041+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
56042+}
56043+
56044+__u32
56045+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
56046+{
56047+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
56048+}
56049+
56050+__u32
56051+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
56052+{
56053+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
56054+ GR_UNIXCONNECT_ACL_MSG);
56055+}
56056+
56057+/* hardlinks require at minimum create and link permission,
56058+ any additional privilege required is based on the
56059+ privilege of the file being linked to
56060+*/
56061+__u32
56062+gr_acl_handle_link(const struct dentry * new_dentry,
56063+ const struct dentry * parent_dentry,
56064+ const struct vfsmount * parent_mnt,
56065+ const struct dentry * old_dentry,
56066+ const struct vfsmount * old_mnt, const char *to)
56067+{
56068+ __u32 mode;
56069+ __u32 needmode = GR_CREATE | GR_LINK;
56070+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
56071+
56072+ mode =
56073+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
56074+ old_mnt);
56075+
56076+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
56077+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56078+ return mode;
56079+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56080+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
56081+ return 0;
56082+ } else if (unlikely((mode & needmode) != needmode))
56083+ return 0;
56084+
56085+ return 1;
56086+}
56087+
56088+__u32
56089+gr_acl_handle_symlink(const struct dentry * new_dentry,
56090+ const struct dentry * parent_dentry,
56091+ const struct vfsmount * parent_mnt, const char *from)
56092+{
56093+ __u32 needmode = GR_WRITE | GR_CREATE;
56094+ __u32 mode;
56095+
56096+ mode =
56097+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
56098+ GR_CREATE | GR_AUDIT_CREATE |
56099+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
56100+
56101+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
56102+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56103+ return mode;
56104+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
56105+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
56106+ return 0;
56107+ } else if (unlikely((mode & needmode) != needmode))
56108+ return 0;
56109+
56110+ return (GR_WRITE | GR_CREATE);
56111+}
56112+
56113+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
56114+{
56115+ __u32 mode;
56116+
56117+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
56118+
56119+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
56120+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
56121+ return mode;
56122+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
56123+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
56124+ return 0;
56125+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
56126+ return 0;
56127+
56128+ return (reqmode);
56129+}
56130+
56131+__u32
56132+gr_acl_handle_mknod(const struct dentry * new_dentry,
56133+ const struct dentry * parent_dentry,
56134+ const struct vfsmount * parent_mnt,
56135+ const int mode)
56136+{
56137+ __u32 reqmode = GR_WRITE | GR_CREATE;
56138+ if (unlikely(mode & (S_ISUID | S_ISGID)))
56139+ reqmode |= GR_SETID;
56140+
56141+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56142+ reqmode, GR_MKNOD_ACL_MSG);
56143+}
56144+
56145+__u32
56146+gr_acl_handle_mkdir(const struct dentry *new_dentry,
56147+ const struct dentry *parent_dentry,
56148+ const struct vfsmount *parent_mnt)
56149+{
56150+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
56151+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
56152+}
56153+
56154+#define RENAME_CHECK_SUCCESS(old, new) \
56155+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
56156+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
56157+
56158+int
56159+gr_acl_handle_rename(struct dentry *new_dentry,
56160+ struct dentry *parent_dentry,
56161+ const struct vfsmount *parent_mnt,
56162+ struct dentry *old_dentry,
56163+ struct inode *old_parent_inode,
56164+ struct vfsmount *old_mnt, const char *newname)
56165+{
56166+ __u32 comp1, comp2;
56167+ int error = 0;
56168+
56169+ if (unlikely(!gr_acl_is_enabled()))
56170+ return 0;
56171+
56172+ if (!new_dentry->d_inode) {
56173+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
56174+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
56175+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
56176+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
56177+ GR_DELETE | GR_AUDIT_DELETE |
56178+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56179+ GR_SUPPRESS, old_mnt);
56180+ } else {
56181+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
56182+ GR_CREATE | GR_DELETE |
56183+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
56184+ GR_AUDIT_READ | GR_AUDIT_WRITE |
56185+ GR_SUPPRESS, parent_mnt);
56186+ comp2 =
56187+ gr_search_file(old_dentry,
56188+ GR_READ | GR_WRITE | GR_AUDIT_READ |
56189+ GR_DELETE | GR_AUDIT_DELETE |
56190+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
56191+ }
56192+
56193+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
56194+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
56195+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56196+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
56197+ && !(comp2 & GR_SUPPRESS)) {
56198+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
56199+ error = -EACCES;
56200+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
56201+ error = -EACCES;
56202+
56203+ return error;
56204+}
56205+
56206+void
56207+gr_acl_handle_exit(void)
56208+{
56209+ u16 id;
56210+ char *rolename;
56211+ struct file *exec_file;
56212+
56213+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56214+ !(current->role->roletype & GR_ROLE_PERSIST))) {
56215+ id = current->acl_role_id;
56216+ rolename = current->role->rolename;
56217+ gr_set_acls(1);
56218+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56219+ }
56220+
56221+ write_lock(&grsec_exec_file_lock);
56222+ exec_file = current->exec_file;
56223+ current->exec_file = NULL;
56224+ write_unlock(&grsec_exec_file_lock);
56225+
56226+ if (exec_file)
56227+ fput(exec_file);
56228+}
56229+
56230+int
56231+gr_acl_handle_procpidmem(const struct task_struct *task)
56232+{
56233+ if (unlikely(!gr_acl_is_enabled()))
56234+ return 0;
56235+
56236+ if (task != current && task->acl->mode & GR_PROTPROCFD)
56237+ return -EACCES;
56238+
56239+ return 0;
56240+}
56241diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56242new file mode 100644
56243index 0000000..17050ca
56244--- /dev/null
56245+++ b/grsecurity/gracl_ip.c
56246@@ -0,0 +1,381 @@
56247+#include <linux/kernel.h>
56248+#include <asm/uaccess.h>
56249+#include <asm/errno.h>
56250+#include <net/sock.h>
56251+#include <linux/file.h>
56252+#include <linux/fs.h>
56253+#include <linux/net.h>
56254+#include <linux/in.h>
56255+#include <linux/skbuff.h>
56256+#include <linux/ip.h>
56257+#include <linux/udp.h>
56258+#include <linux/types.h>
56259+#include <linux/sched.h>
56260+#include <linux/netdevice.h>
56261+#include <linux/inetdevice.h>
56262+#include <linux/gracl.h>
56263+#include <linux/grsecurity.h>
56264+#include <linux/grinternal.h>
56265+
56266+#define GR_BIND 0x01
56267+#define GR_CONNECT 0x02
56268+#define GR_INVERT 0x04
56269+#define GR_BINDOVERRIDE 0x08
56270+#define GR_CONNECTOVERRIDE 0x10
56271+#define GR_SOCK_FAMILY 0x20
56272+
56273+static const char * gr_protocols[IPPROTO_MAX] = {
56274+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56275+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56276+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56277+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56278+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56279+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56280+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56281+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56282+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56283+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56284+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56285+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56286+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56287+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56288+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56289+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56290+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56291+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56292+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56293+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56294+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56295+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56296+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56297+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56298+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56299+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56300+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56301+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56302+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56303+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56304+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56305+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56306+ };
56307+
56308+static const char * gr_socktypes[SOCK_MAX] = {
56309+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56310+ "unknown:7", "unknown:8", "unknown:9", "packet"
56311+ };
56312+
56313+static const char * gr_sockfamilies[AF_MAX+1] = {
56314+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56315+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
56316+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56317+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
56318+ };
56319+
56320+const char *
56321+gr_proto_to_name(unsigned char proto)
56322+{
56323+ return gr_protocols[proto];
56324+}
56325+
56326+const char *
56327+gr_socktype_to_name(unsigned char type)
56328+{
56329+ return gr_socktypes[type];
56330+}
56331+
56332+const char *
56333+gr_sockfamily_to_name(unsigned char family)
56334+{
56335+ return gr_sockfamilies[family];
56336+}
56337+
56338+int
56339+gr_search_socket(const int domain, const int type, const int protocol)
56340+{
56341+ struct acl_subject_label *curr;
56342+ const struct cred *cred = current_cred();
56343+
56344+ if (unlikely(!gr_acl_is_enabled()))
56345+ goto exit;
56346+
56347+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
56348+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56349+ goto exit; // let the kernel handle it
56350+
56351+ curr = current->acl;
56352+
56353+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56354+ /* the family is allowed, if this is PF_INET allow it only if
56355+ the extra sock type/protocol checks pass */
56356+ if (domain == PF_INET)
56357+ goto inet_check;
56358+ goto exit;
56359+ } else {
56360+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56361+ __u32 fakeip = 0;
56362+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56363+ current->role->roletype, cred->uid,
56364+ cred->gid, current->exec_file ?
56365+ gr_to_filename(current->exec_file->f_path.dentry,
56366+ current->exec_file->f_path.mnt) :
56367+ curr->filename, curr->filename,
56368+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56369+ &current->signal->saved_ip);
56370+ goto exit;
56371+ }
56372+ goto exit_fail;
56373+ }
56374+
56375+inet_check:
56376+ /* the rest of this checking is for IPv4 only */
56377+ if (!curr->ips)
56378+ goto exit;
56379+
56380+ if ((curr->ip_type & (1 << type)) &&
56381+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56382+ goto exit;
56383+
56384+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56385+ /* we don't place acls on raw sockets , and sometimes
56386+ dgram/ip sockets are opened for ioctl and not
56387+ bind/connect, so we'll fake a bind learn log */
56388+ if (type == SOCK_RAW || type == SOCK_PACKET) {
56389+ __u32 fakeip = 0;
56390+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56391+ current->role->roletype, cred->uid,
56392+ cred->gid, current->exec_file ?
56393+ gr_to_filename(current->exec_file->f_path.dentry,
56394+ current->exec_file->f_path.mnt) :
56395+ curr->filename, curr->filename,
56396+ &fakeip, 0, type,
56397+ protocol, GR_CONNECT, &current->signal->saved_ip);
56398+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56399+ __u32 fakeip = 0;
56400+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56401+ current->role->roletype, cred->uid,
56402+ cred->gid, current->exec_file ?
56403+ gr_to_filename(current->exec_file->f_path.dentry,
56404+ current->exec_file->f_path.mnt) :
56405+ curr->filename, curr->filename,
56406+ &fakeip, 0, type,
56407+ protocol, GR_BIND, &current->signal->saved_ip);
56408+ }
56409+ /* we'll log when they use connect or bind */
56410+ goto exit;
56411+ }
56412+
56413+exit_fail:
56414+ if (domain == PF_INET)
56415+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56416+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
56417+ else
56418+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56419+ gr_socktype_to_name(type), protocol);
56420+
56421+ return 0;
56422+exit:
56423+ return 1;
56424+}
56425+
56426+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56427+{
56428+ if ((ip->mode & mode) &&
56429+ (ip_port >= ip->low) &&
56430+ (ip_port <= ip->high) &&
56431+ ((ntohl(ip_addr) & our_netmask) ==
56432+ (ntohl(our_addr) & our_netmask))
56433+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56434+ && (ip->type & (1 << type))) {
56435+ if (ip->mode & GR_INVERT)
56436+ return 2; // specifically denied
56437+ else
56438+ return 1; // allowed
56439+ }
56440+
56441+ return 0; // not specifically allowed, may continue parsing
56442+}
56443+
56444+static int
56445+gr_search_connectbind(const int full_mode, struct sock *sk,
56446+ struct sockaddr_in *addr, const int type)
56447+{
56448+ char iface[IFNAMSIZ] = {0};
56449+ struct acl_subject_label *curr;
56450+ struct acl_ip_label *ip;
56451+ struct inet_sock *isk;
56452+ struct net_device *dev;
56453+ struct in_device *idev;
56454+ unsigned long i;
56455+ int ret;
56456+ int mode = full_mode & (GR_BIND | GR_CONNECT);
56457+ __u32 ip_addr = 0;
56458+ __u32 our_addr;
56459+ __u32 our_netmask;
56460+ char *p;
56461+ __u16 ip_port = 0;
56462+ const struct cred *cred = current_cred();
56463+
56464+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56465+ return 0;
56466+
56467+ curr = current->acl;
56468+ isk = inet_sk(sk);
56469+
56470+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56471+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56472+ addr->sin_addr.s_addr = curr->inaddr_any_override;
56473+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56474+ struct sockaddr_in saddr;
56475+ int err;
56476+
56477+ saddr.sin_family = AF_INET;
56478+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
56479+ saddr.sin_port = isk->inet_sport;
56480+
56481+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56482+ if (err)
56483+ return err;
56484+
56485+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56486+ if (err)
56487+ return err;
56488+ }
56489+
56490+ if (!curr->ips)
56491+ return 0;
56492+
56493+ ip_addr = addr->sin_addr.s_addr;
56494+ ip_port = ntohs(addr->sin_port);
56495+
56496+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56497+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56498+ current->role->roletype, cred->uid,
56499+ cred->gid, current->exec_file ?
56500+ gr_to_filename(current->exec_file->f_path.dentry,
56501+ current->exec_file->f_path.mnt) :
56502+ curr->filename, curr->filename,
56503+ &ip_addr, ip_port, type,
56504+ sk->sk_protocol, mode, &current->signal->saved_ip);
56505+ return 0;
56506+ }
56507+
56508+ for (i = 0; i < curr->ip_num; i++) {
56509+ ip = *(curr->ips + i);
56510+ if (ip->iface != NULL) {
56511+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
56512+ p = strchr(iface, ':');
56513+ if (p != NULL)
56514+ *p = '\0';
56515+ dev = dev_get_by_name(sock_net(sk), iface);
56516+ if (dev == NULL)
56517+ continue;
56518+ idev = in_dev_get(dev);
56519+ if (idev == NULL) {
56520+ dev_put(dev);
56521+ continue;
56522+ }
56523+ rcu_read_lock();
56524+ for_ifa(idev) {
56525+ if (!strcmp(ip->iface, ifa->ifa_label)) {
56526+ our_addr = ifa->ifa_address;
56527+ our_netmask = 0xffffffff;
56528+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56529+ if (ret == 1) {
56530+ rcu_read_unlock();
56531+ in_dev_put(idev);
56532+ dev_put(dev);
56533+ return 0;
56534+ } else if (ret == 2) {
56535+ rcu_read_unlock();
56536+ in_dev_put(idev);
56537+ dev_put(dev);
56538+ goto denied;
56539+ }
56540+ }
56541+ } endfor_ifa(idev);
56542+ rcu_read_unlock();
56543+ in_dev_put(idev);
56544+ dev_put(dev);
56545+ } else {
56546+ our_addr = ip->addr;
56547+ our_netmask = ip->netmask;
56548+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56549+ if (ret == 1)
56550+ return 0;
56551+ else if (ret == 2)
56552+ goto denied;
56553+ }
56554+ }
56555+
56556+denied:
56557+ if (mode == GR_BIND)
56558+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56559+ else if (mode == GR_CONNECT)
56560+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56561+
56562+ return -EACCES;
56563+}
56564+
56565+int
56566+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56567+{
56568+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56569+}
56570+
56571+int
56572+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56573+{
56574+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56575+}
56576+
56577+int gr_search_listen(struct socket *sock)
56578+{
56579+ struct sock *sk = sock->sk;
56580+ struct sockaddr_in addr;
56581+
56582+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56583+ addr.sin_port = inet_sk(sk)->inet_sport;
56584+
56585+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56586+}
56587+
56588+int gr_search_accept(struct socket *sock)
56589+{
56590+ struct sock *sk = sock->sk;
56591+ struct sockaddr_in addr;
56592+
56593+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56594+ addr.sin_port = inet_sk(sk)->inet_sport;
56595+
56596+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56597+}
56598+
56599+int
56600+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56601+{
56602+ if (addr)
56603+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56604+ else {
56605+ struct sockaddr_in sin;
56606+ const struct inet_sock *inet = inet_sk(sk);
56607+
56608+ sin.sin_addr.s_addr = inet->inet_daddr;
56609+ sin.sin_port = inet->inet_dport;
56610+
56611+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56612+ }
56613+}
56614+
56615+int
56616+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56617+{
56618+ struct sockaddr_in sin;
56619+
56620+ if (unlikely(skb->len < sizeof (struct udphdr)))
56621+ return 0; // skip this packet
56622+
56623+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56624+ sin.sin_port = udp_hdr(skb)->source;
56625+
56626+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56627+}
56628diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56629new file mode 100644
56630index 0000000..25f54ef
56631--- /dev/null
56632+++ b/grsecurity/gracl_learn.c
56633@@ -0,0 +1,207 @@
56634+#include <linux/kernel.h>
56635+#include <linux/mm.h>
56636+#include <linux/sched.h>
56637+#include <linux/poll.h>
56638+#include <linux/string.h>
56639+#include <linux/file.h>
56640+#include <linux/types.h>
56641+#include <linux/vmalloc.h>
56642+#include <linux/grinternal.h>
56643+
56644+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56645+ size_t count, loff_t *ppos);
56646+extern int gr_acl_is_enabled(void);
56647+
56648+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56649+static int gr_learn_attached;
56650+
56651+/* use a 512k buffer */
56652+#define LEARN_BUFFER_SIZE (512 * 1024)
56653+
56654+static DEFINE_SPINLOCK(gr_learn_lock);
56655+static DEFINE_MUTEX(gr_learn_user_mutex);
56656+
56657+/* we need to maintain two buffers, so that the kernel context of grlearn
56658+ uses a semaphore around the userspace copying, and the other kernel contexts
56659+ use a spinlock when copying into the buffer, since they cannot sleep
56660+*/
56661+static char *learn_buffer;
56662+static char *learn_buffer_user;
56663+static int learn_buffer_len;
56664+static int learn_buffer_user_len;
56665+
56666+static ssize_t
56667+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56668+{
56669+ DECLARE_WAITQUEUE(wait, current);
56670+ ssize_t retval = 0;
56671+
56672+ add_wait_queue(&learn_wait, &wait);
56673+ set_current_state(TASK_INTERRUPTIBLE);
56674+ do {
56675+ mutex_lock(&gr_learn_user_mutex);
56676+ spin_lock(&gr_learn_lock);
56677+ if (learn_buffer_len)
56678+ break;
56679+ spin_unlock(&gr_learn_lock);
56680+ mutex_unlock(&gr_learn_user_mutex);
56681+ if (file->f_flags & O_NONBLOCK) {
56682+ retval = -EAGAIN;
56683+ goto out;
56684+ }
56685+ if (signal_pending(current)) {
56686+ retval = -ERESTARTSYS;
56687+ goto out;
56688+ }
56689+
56690+ schedule();
56691+ } while (1);
56692+
56693+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56694+ learn_buffer_user_len = learn_buffer_len;
56695+ retval = learn_buffer_len;
56696+ learn_buffer_len = 0;
56697+
56698+ spin_unlock(&gr_learn_lock);
56699+
56700+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56701+ retval = -EFAULT;
56702+
56703+ mutex_unlock(&gr_learn_user_mutex);
56704+out:
56705+ set_current_state(TASK_RUNNING);
56706+ remove_wait_queue(&learn_wait, &wait);
56707+ return retval;
56708+}
56709+
56710+static unsigned int
56711+poll_learn(struct file * file, poll_table * wait)
56712+{
56713+ poll_wait(file, &learn_wait, wait);
56714+
56715+ if (learn_buffer_len)
56716+ return (POLLIN | POLLRDNORM);
56717+
56718+ return 0;
56719+}
56720+
56721+void
56722+gr_clear_learn_entries(void)
56723+{
56724+ char *tmp;
56725+
56726+ mutex_lock(&gr_learn_user_mutex);
56727+ spin_lock(&gr_learn_lock);
56728+ tmp = learn_buffer;
56729+ learn_buffer = NULL;
56730+ spin_unlock(&gr_learn_lock);
56731+ if (tmp)
56732+ vfree(tmp);
56733+ if (learn_buffer_user != NULL) {
56734+ vfree(learn_buffer_user);
56735+ learn_buffer_user = NULL;
56736+ }
56737+ learn_buffer_len = 0;
56738+ mutex_unlock(&gr_learn_user_mutex);
56739+
56740+ return;
56741+}
56742+
56743+void
56744+gr_add_learn_entry(const char *fmt, ...)
56745+{
56746+ va_list args;
56747+ unsigned int len;
56748+
56749+ if (!gr_learn_attached)
56750+ return;
56751+
56752+ spin_lock(&gr_learn_lock);
56753+
56754+ /* leave a gap at the end so we know when it's "full" but don't have to
56755+ compute the exact length of the string we're trying to append
56756+ */
56757+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56758+ spin_unlock(&gr_learn_lock);
56759+ wake_up_interruptible(&learn_wait);
56760+ return;
56761+ }
56762+ if (learn_buffer == NULL) {
56763+ spin_unlock(&gr_learn_lock);
56764+ return;
56765+ }
56766+
56767+ va_start(args, fmt);
56768+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56769+ va_end(args);
56770+
56771+ learn_buffer_len += len + 1;
56772+
56773+ spin_unlock(&gr_learn_lock);
56774+ wake_up_interruptible(&learn_wait);
56775+
56776+ return;
56777+}
56778+
56779+static int
56780+open_learn(struct inode *inode, struct file *file)
56781+{
56782+ if (file->f_mode & FMODE_READ && gr_learn_attached)
56783+ return -EBUSY;
56784+ if (file->f_mode & FMODE_READ) {
56785+ int retval = 0;
56786+ mutex_lock(&gr_learn_user_mutex);
56787+ if (learn_buffer == NULL)
56788+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56789+ if (learn_buffer_user == NULL)
56790+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56791+ if (learn_buffer == NULL) {
56792+ retval = -ENOMEM;
56793+ goto out_error;
56794+ }
56795+ if (learn_buffer_user == NULL) {
56796+ retval = -ENOMEM;
56797+ goto out_error;
56798+ }
56799+ learn_buffer_len = 0;
56800+ learn_buffer_user_len = 0;
56801+ gr_learn_attached = 1;
56802+out_error:
56803+ mutex_unlock(&gr_learn_user_mutex);
56804+ return retval;
56805+ }
56806+ return 0;
56807+}
56808+
56809+static int
56810+close_learn(struct inode *inode, struct file *file)
56811+{
56812+ if (file->f_mode & FMODE_READ) {
56813+ char *tmp = NULL;
56814+ mutex_lock(&gr_learn_user_mutex);
56815+ spin_lock(&gr_learn_lock);
56816+ tmp = learn_buffer;
56817+ learn_buffer = NULL;
56818+ spin_unlock(&gr_learn_lock);
56819+ if (tmp)
56820+ vfree(tmp);
56821+ if (learn_buffer_user != NULL) {
56822+ vfree(learn_buffer_user);
56823+ learn_buffer_user = NULL;
56824+ }
56825+ learn_buffer_len = 0;
56826+ learn_buffer_user_len = 0;
56827+ gr_learn_attached = 0;
56828+ mutex_unlock(&gr_learn_user_mutex);
56829+ }
56830+
56831+ return 0;
56832+}
56833+
56834+const struct file_operations grsec_fops = {
56835+ .read = read_learn,
56836+ .write = write_grsec_handler,
56837+ .open = open_learn,
56838+ .release = close_learn,
56839+ .poll = poll_learn,
56840+};
56841diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56842new file mode 100644
56843index 0000000..39645c9
56844--- /dev/null
56845+++ b/grsecurity/gracl_res.c
56846@@ -0,0 +1,68 @@
56847+#include <linux/kernel.h>
56848+#include <linux/sched.h>
56849+#include <linux/gracl.h>
56850+#include <linux/grinternal.h>
56851+
56852+static const char *restab_log[] = {
56853+ [RLIMIT_CPU] = "RLIMIT_CPU",
56854+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56855+ [RLIMIT_DATA] = "RLIMIT_DATA",
56856+ [RLIMIT_STACK] = "RLIMIT_STACK",
56857+ [RLIMIT_CORE] = "RLIMIT_CORE",
56858+ [RLIMIT_RSS] = "RLIMIT_RSS",
56859+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56860+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56861+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56862+ [RLIMIT_AS] = "RLIMIT_AS",
56863+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56864+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56865+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56866+ [RLIMIT_NICE] = "RLIMIT_NICE",
56867+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56868+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56869+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56870+};
56871+
56872+void
56873+gr_log_resource(const struct task_struct *task,
56874+ const int res, const unsigned long wanted, const int gt)
56875+{
56876+ const struct cred *cred;
56877+ unsigned long rlim;
56878+
56879+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56880+ return;
56881+
56882+ // not yet supported resource
56883+ if (unlikely(!restab_log[res]))
56884+ return;
56885+
56886+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56887+ rlim = task_rlimit_max(task, res);
56888+ else
56889+ rlim = task_rlimit(task, res);
56890+
56891+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56892+ return;
56893+
56894+ rcu_read_lock();
56895+ cred = __task_cred(task);
56896+
56897+ if (res == RLIMIT_NPROC &&
56898+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56899+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56900+ goto out_rcu_unlock;
56901+ else if (res == RLIMIT_MEMLOCK &&
56902+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56903+ goto out_rcu_unlock;
56904+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56905+ goto out_rcu_unlock;
56906+ rcu_read_unlock();
56907+
56908+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56909+
56910+ return;
56911+out_rcu_unlock:
56912+ rcu_read_unlock();
56913+ return;
56914+}
56915diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56916new file mode 100644
56917index 0000000..5556be3
56918--- /dev/null
56919+++ b/grsecurity/gracl_segv.c
56920@@ -0,0 +1,299 @@
56921+#include <linux/kernel.h>
56922+#include <linux/mm.h>
56923+#include <asm/uaccess.h>
56924+#include <asm/errno.h>
56925+#include <asm/mman.h>
56926+#include <net/sock.h>
56927+#include <linux/file.h>
56928+#include <linux/fs.h>
56929+#include <linux/net.h>
56930+#include <linux/in.h>
56931+#include <linux/slab.h>
56932+#include <linux/types.h>
56933+#include <linux/sched.h>
56934+#include <linux/timer.h>
56935+#include <linux/gracl.h>
56936+#include <linux/grsecurity.h>
56937+#include <linux/grinternal.h>
56938+
56939+static struct crash_uid *uid_set;
56940+static unsigned short uid_used;
56941+static DEFINE_SPINLOCK(gr_uid_lock);
56942+extern rwlock_t gr_inode_lock;
56943+extern struct acl_subject_label *
56944+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56945+ struct acl_role_label *role);
56946+
56947+#ifdef CONFIG_BTRFS_FS
56948+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56949+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56950+#endif
56951+
56952+static inline dev_t __get_dev(const struct dentry *dentry)
56953+{
56954+#ifdef CONFIG_BTRFS_FS
56955+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56956+ return get_btrfs_dev_from_inode(dentry->d_inode);
56957+ else
56958+#endif
56959+ return dentry->d_inode->i_sb->s_dev;
56960+}
56961+
56962+int
56963+gr_init_uidset(void)
56964+{
56965+ uid_set =
56966+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56967+ uid_used = 0;
56968+
56969+ return uid_set ? 1 : 0;
56970+}
56971+
56972+void
56973+gr_free_uidset(void)
56974+{
56975+ if (uid_set)
56976+ kfree(uid_set);
56977+
56978+ return;
56979+}
56980+
56981+int
56982+gr_find_uid(const uid_t uid)
56983+{
56984+ struct crash_uid *tmp = uid_set;
56985+ uid_t buid;
56986+ int low = 0, high = uid_used - 1, mid;
56987+
56988+ while (high >= low) {
56989+ mid = (low + high) >> 1;
56990+ buid = tmp[mid].uid;
56991+ if (buid == uid)
56992+ return mid;
56993+ if (buid > uid)
56994+ high = mid - 1;
56995+ if (buid < uid)
56996+ low = mid + 1;
56997+ }
56998+
56999+ return -1;
57000+}
57001+
57002+static __inline__ void
57003+gr_insertsort(void)
57004+{
57005+ unsigned short i, j;
57006+ struct crash_uid index;
57007+
57008+ for (i = 1; i < uid_used; i++) {
57009+ index = uid_set[i];
57010+ j = i;
57011+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
57012+ uid_set[j] = uid_set[j - 1];
57013+ j--;
57014+ }
57015+ uid_set[j] = index;
57016+ }
57017+
57018+ return;
57019+}
57020+
57021+static __inline__ void
57022+gr_insert_uid(const uid_t uid, const unsigned long expires)
57023+{
57024+ int loc;
57025+
57026+ if (uid_used == GR_UIDTABLE_MAX)
57027+ return;
57028+
57029+ loc = gr_find_uid(uid);
57030+
57031+ if (loc >= 0) {
57032+ uid_set[loc].expires = expires;
57033+ return;
57034+ }
57035+
57036+ uid_set[uid_used].uid = uid;
57037+ uid_set[uid_used].expires = expires;
57038+ uid_used++;
57039+
57040+ gr_insertsort();
57041+
57042+ return;
57043+}
57044+
57045+void
57046+gr_remove_uid(const unsigned short loc)
57047+{
57048+ unsigned short i;
57049+
57050+ for (i = loc + 1; i < uid_used; i++)
57051+ uid_set[i - 1] = uid_set[i];
57052+
57053+ uid_used--;
57054+
57055+ return;
57056+}
57057+
57058+int
57059+gr_check_crash_uid(const uid_t uid)
57060+{
57061+ int loc;
57062+ int ret = 0;
57063+
57064+ if (unlikely(!gr_acl_is_enabled()))
57065+ return 0;
57066+
57067+ spin_lock(&gr_uid_lock);
57068+ loc = gr_find_uid(uid);
57069+
57070+ if (loc < 0)
57071+ goto out_unlock;
57072+
57073+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
57074+ gr_remove_uid(loc);
57075+ else
57076+ ret = 1;
57077+
57078+out_unlock:
57079+ spin_unlock(&gr_uid_lock);
57080+ return ret;
57081+}
57082+
57083+static __inline__ int
57084+proc_is_setxid(const struct cred *cred)
57085+{
57086+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
57087+ cred->uid != cred->fsuid)
57088+ return 1;
57089+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
57090+ cred->gid != cred->fsgid)
57091+ return 1;
57092+
57093+ return 0;
57094+}
57095+
57096+extern int gr_fake_force_sig(int sig, struct task_struct *t);
57097+
57098+void
57099+gr_handle_crash(struct task_struct *task, const int sig)
57100+{
57101+ struct acl_subject_label *curr;
57102+ struct task_struct *tsk, *tsk2;
57103+ const struct cred *cred;
57104+ const struct cred *cred2;
57105+
57106+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
57107+ return;
57108+
57109+ if (unlikely(!gr_acl_is_enabled()))
57110+ return;
57111+
57112+ curr = task->acl;
57113+
57114+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
57115+ return;
57116+
57117+ if (time_before_eq(curr->expires, get_seconds())) {
57118+ curr->expires = 0;
57119+ curr->crashes = 0;
57120+ }
57121+
57122+ curr->crashes++;
57123+
57124+ if (!curr->expires)
57125+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
57126+
57127+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57128+ time_after(curr->expires, get_seconds())) {
57129+ rcu_read_lock();
57130+ cred = __task_cred(task);
57131+ if (cred->uid && proc_is_setxid(cred)) {
57132+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57133+ spin_lock(&gr_uid_lock);
57134+ gr_insert_uid(cred->uid, curr->expires);
57135+ spin_unlock(&gr_uid_lock);
57136+ curr->expires = 0;
57137+ curr->crashes = 0;
57138+ read_lock(&tasklist_lock);
57139+ do_each_thread(tsk2, tsk) {
57140+ cred2 = __task_cred(tsk);
57141+ if (tsk != task && cred2->uid == cred->uid)
57142+ gr_fake_force_sig(SIGKILL, tsk);
57143+ } while_each_thread(tsk2, tsk);
57144+ read_unlock(&tasklist_lock);
57145+ } else {
57146+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
57147+ read_lock(&tasklist_lock);
57148+ read_lock(&grsec_exec_file_lock);
57149+ do_each_thread(tsk2, tsk) {
57150+ if (likely(tsk != task)) {
57151+ // if this thread has the same subject as the one that triggered
57152+ // RES_CRASH and it's the same binary, kill it
57153+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
57154+ gr_fake_force_sig(SIGKILL, tsk);
57155+ }
57156+ } while_each_thread(tsk2, tsk);
57157+ read_unlock(&grsec_exec_file_lock);
57158+ read_unlock(&tasklist_lock);
57159+ }
57160+ rcu_read_unlock();
57161+ }
57162+
57163+ return;
57164+}
57165+
57166+int
57167+gr_check_crash_exec(const struct file *filp)
57168+{
57169+ struct acl_subject_label *curr;
57170+
57171+ if (unlikely(!gr_acl_is_enabled()))
57172+ return 0;
57173+
57174+ read_lock(&gr_inode_lock);
57175+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
57176+ __get_dev(filp->f_path.dentry),
57177+ current->role);
57178+ read_unlock(&gr_inode_lock);
57179+
57180+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
57181+ (!curr->crashes && !curr->expires))
57182+ return 0;
57183+
57184+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
57185+ time_after(curr->expires, get_seconds()))
57186+ return 1;
57187+ else if (time_before_eq(curr->expires, get_seconds())) {
57188+ curr->crashes = 0;
57189+ curr->expires = 0;
57190+ }
57191+
57192+ return 0;
57193+}
57194+
57195+void
57196+gr_handle_alertkill(struct task_struct *task)
57197+{
57198+ struct acl_subject_label *curracl;
57199+ __u32 curr_ip;
57200+ struct task_struct *p, *p2;
57201+
57202+ if (unlikely(!gr_acl_is_enabled()))
57203+ return;
57204+
57205+ curracl = task->acl;
57206+ curr_ip = task->signal->curr_ip;
57207+
57208+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57209+ read_lock(&tasklist_lock);
57210+ do_each_thread(p2, p) {
57211+ if (p->signal->curr_ip == curr_ip)
57212+ gr_fake_force_sig(SIGKILL, p);
57213+ } while_each_thread(p2, p);
57214+ read_unlock(&tasklist_lock);
57215+ } else if (curracl->mode & GR_KILLPROC)
57216+ gr_fake_force_sig(SIGKILL, task);
57217+
57218+ return;
57219+}
57220diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57221new file mode 100644
57222index 0000000..9d83a69
57223--- /dev/null
57224+++ b/grsecurity/gracl_shm.c
57225@@ -0,0 +1,40 @@
57226+#include <linux/kernel.h>
57227+#include <linux/mm.h>
57228+#include <linux/sched.h>
57229+#include <linux/file.h>
57230+#include <linux/ipc.h>
57231+#include <linux/gracl.h>
57232+#include <linux/grsecurity.h>
57233+#include <linux/grinternal.h>
57234+
57235+int
57236+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57237+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57238+{
57239+ struct task_struct *task;
57240+
57241+ if (!gr_acl_is_enabled())
57242+ return 1;
57243+
57244+ rcu_read_lock();
57245+ read_lock(&tasklist_lock);
57246+
57247+ task = find_task_by_vpid(shm_cprid);
57248+
57249+ if (unlikely(!task))
57250+ task = find_task_by_vpid(shm_lapid);
57251+
57252+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57253+ (task->pid == shm_lapid)) &&
57254+ (task->acl->mode & GR_PROTSHM) &&
57255+ (task->acl != current->acl))) {
57256+ read_unlock(&tasklist_lock);
57257+ rcu_read_unlock();
57258+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57259+ return 0;
57260+ }
57261+ read_unlock(&tasklist_lock);
57262+ rcu_read_unlock();
57263+
57264+ return 1;
57265+}
57266diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57267new file mode 100644
57268index 0000000..bc0be01
57269--- /dev/null
57270+++ b/grsecurity/grsec_chdir.c
57271@@ -0,0 +1,19 @@
57272+#include <linux/kernel.h>
57273+#include <linux/sched.h>
57274+#include <linux/fs.h>
57275+#include <linux/file.h>
57276+#include <linux/grsecurity.h>
57277+#include <linux/grinternal.h>
57278+
57279+void
57280+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57281+{
57282+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57283+ if ((grsec_enable_chdir && grsec_enable_group &&
57284+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57285+ !grsec_enable_group)) {
57286+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57287+ }
57288+#endif
57289+ return;
57290+}
57291diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57292new file mode 100644
57293index 0000000..a2dc675
57294--- /dev/null
57295+++ b/grsecurity/grsec_chroot.c
57296@@ -0,0 +1,351 @@
57297+#include <linux/kernel.h>
57298+#include <linux/module.h>
57299+#include <linux/sched.h>
57300+#include <linux/file.h>
57301+#include <linux/fs.h>
57302+#include <linux/mount.h>
57303+#include <linux/types.h>
57304+#include <linux/pid_namespace.h>
57305+#include <linux/grsecurity.h>
57306+#include <linux/grinternal.h>
57307+
57308+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57309+{
57310+#ifdef CONFIG_GRKERNSEC
57311+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
57312+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
57313+ task->gr_is_chrooted = 1;
57314+ else
57315+ task->gr_is_chrooted = 0;
57316+
57317+ task->gr_chroot_dentry = path->dentry;
57318+#endif
57319+ return;
57320+}
57321+
57322+void gr_clear_chroot_entries(struct task_struct *task)
57323+{
57324+#ifdef CONFIG_GRKERNSEC
57325+ task->gr_is_chrooted = 0;
57326+ task->gr_chroot_dentry = NULL;
57327+#endif
57328+ return;
57329+}
57330+
57331+int
57332+gr_handle_chroot_unix(const pid_t pid)
57333+{
57334+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57335+ struct task_struct *p;
57336+
57337+ if (unlikely(!grsec_enable_chroot_unix))
57338+ return 1;
57339+
57340+ if (likely(!proc_is_chrooted(current)))
57341+ return 1;
57342+
57343+ rcu_read_lock();
57344+ read_lock(&tasklist_lock);
57345+ p = find_task_by_vpid_unrestricted(pid);
57346+ if (unlikely(p && !have_same_root(current, p))) {
57347+ read_unlock(&tasklist_lock);
57348+ rcu_read_unlock();
57349+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57350+ return 0;
57351+ }
57352+ read_unlock(&tasklist_lock);
57353+ rcu_read_unlock();
57354+#endif
57355+ return 1;
57356+}
57357+
57358+int
57359+gr_handle_chroot_nice(void)
57360+{
57361+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57362+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57363+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57364+ return -EPERM;
57365+ }
57366+#endif
57367+ return 0;
57368+}
57369+
57370+int
57371+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57372+{
57373+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57374+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57375+ && proc_is_chrooted(current)) {
57376+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57377+ return -EACCES;
57378+ }
57379+#endif
57380+ return 0;
57381+}
57382+
57383+int
57384+gr_handle_chroot_rawio(const struct inode *inode)
57385+{
57386+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57387+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57388+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57389+ return 1;
57390+#endif
57391+ return 0;
57392+}
57393+
57394+int
57395+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57396+{
57397+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57398+ struct task_struct *p;
57399+ int ret = 0;
57400+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57401+ return ret;
57402+
57403+ read_lock(&tasklist_lock);
57404+ do_each_pid_task(pid, type, p) {
57405+ if (!have_same_root(current, p)) {
57406+ ret = 1;
57407+ goto out;
57408+ }
57409+ } while_each_pid_task(pid, type, p);
57410+out:
57411+ read_unlock(&tasklist_lock);
57412+ return ret;
57413+#endif
57414+ return 0;
57415+}
57416+
57417+int
57418+gr_pid_is_chrooted(struct task_struct *p)
57419+{
57420+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57421+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57422+ return 0;
57423+
57424+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57425+ !have_same_root(current, p)) {
57426+ return 1;
57427+ }
57428+#endif
57429+ return 0;
57430+}
57431+
57432+EXPORT_SYMBOL(gr_pid_is_chrooted);
57433+
57434+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57435+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57436+{
57437+ struct path path, currentroot;
57438+ int ret = 0;
57439+
57440+ path.dentry = (struct dentry *)u_dentry;
57441+ path.mnt = (struct vfsmount *)u_mnt;
57442+ get_fs_root(current->fs, &currentroot);
57443+ if (path_is_under(&path, &currentroot))
57444+ ret = 1;
57445+ path_put(&currentroot);
57446+
57447+ return ret;
57448+}
57449+#endif
57450+
57451+int
57452+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57453+{
57454+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57455+ if (!grsec_enable_chroot_fchdir)
57456+ return 1;
57457+
57458+ if (!proc_is_chrooted(current))
57459+ return 1;
57460+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57461+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57462+ return 0;
57463+ }
57464+#endif
57465+ return 1;
57466+}
57467+
57468+int
57469+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57470+ const time_t shm_createtime)
57471+{
57472+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57473+ struct task_struct *p;
57474+ time_t starttime;
57475+
57476+ if (unlikely(!grsec_enable_chroot_shmat))
57477+ return 1;
57478+
57479+ if (likely(!proc_is_chrooted(current)))
57480+ return 1;
57481+
57482+ rcu_read_lock();
57483+ read_lock(&tasklist_lock);
57484+
57485+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57486+ starttime = p->start_time.tv_sec;
57487+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57488+ if (have_same_root(current, p)) {
57489+ goto allow;
57490+ } else {
57491+ read_unlock(&tasklist_lock);
57492+ rcu_read_unlock();
57493+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57494+ return 0;
57495+ }
57496+ }
57497+ /* creator exited, pid reuse, fall through to next check */
57498+ }
57499+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57500+ if (unlikely(!have_same_root(current, p))) {
57501+ read_unlock(&tasklist_lock);
57502+ rcu_read_unlock();
57503+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57504+ return 0;
57505+ }
57506+ }
57507+
57508+allow:
57509+ read_unlock(&tasklist_lock);
57510+ rcu_read_unlock();
57511+#endif
57512+ return 1;
57513+}
57514+
57515+void
57516+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57517+{
57518+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57519+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57520+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57521+#endif
57522+ return;
57523+}
57524+
57525+int
57526+gr_handle_chroot_mknod(const struct dentry *dentry,
57527+ const struct vfsmount *mnt, const int mode)
57528+{
57529+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57530+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57531+ proc_is_chrooted(current)) {
57532+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57533+ return -EPERM;
57534+ }
57535+#endif
57536+ return 0;
57537+}
57538+
57539+int
57540+gr_handle_chroot_mount(const struct dentry *dentry,
57541+ const struct vfsmount *mnt, const char *dev_name)
57542+{
57543+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57544+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57545+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57546+ return -EPERM;
57547+ }
57548+#endif
57549+ return 0;
57550+}
57551+
57552+int
57553+gr_handle_chroot_pivot(void)
57554+{
57555+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57556+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57557+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57558+ return -EPERM;
57559+ }
57560+#endif
57561+ return 0;
57562+}
57563+
57564+int
57565+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57566+{
57567+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57568+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57569+ !gr_is_outside_chroot(dentry, mnt)) {
57570+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57571+ return -EPERM;
57572+ }
57573+#endif
57574+ return 0;
57575+}
57576+
57577+extern const char *captab_log[];
57578+extern int captab_log_entries;
57579+
57580+int
57581+gr_chroot_is_capable(const int cap)
57582+{
57583+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57584+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
57585+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57586+ if (cap_raised(chroot_caps, cap)) {
57587+ const struct cred *creds = current_cred();
57588+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
57589+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
57590+ }
57591+ return 0;
57592+ }
57593+ }
57594+#endif
57595+ return 1;
57596+}
57597+
57598+int
57599+gr_chroot_is_capable_nolog(const int cap)
57600+{
57601+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57602+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
57603+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57604+ if (cap_raised(chroot_caps, cap)) {
57605+ return 0;
57606+ }
57607+ }
57608+#endif
57609+ return 1;
57610+}
57611+
57612+int
57613+gr_handle_chroot_sysctl(const int op)
57614+{
57615+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57616+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57617+ proc_is_chrooted(current))
57618+ return -EACCES;
57619+#endif
57620+ return 0;
57621+}
57622+
57623+void
57624+gr_handle_chroot_chdir(struct path *path)
57625+{
57626+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57627+ if (grsec_enable_chroot_chdir)
57628+ set_fs_pwd(current->fs, path);
57629+#endif
57630+ return;
57631+}
57632+
57633+int
57634+gr_handle_chroot_chmod(const struct dentry *dentry,
57635+ const struct vfsmount *mnt, const int mode)
57636+{
57637+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57638+ /* allow chmod +s on directories, but not files */
57639+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57640+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57641+ proc_is_chrooted(current)) {
57642+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57643+ return -EPERM;
57644+ }
57645+#endif
57646+ return 0;
57647+}
57648diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57649new file mode 100644
57650index 0000000..d81a586
57651--- /dev/null
57652+++ b/grsecurity/grsec_disabled.c
57653@@ -0,0 +1,439 @@
57654+#include <linux/kernel.h>
57655+#include <linux/module.h>
57656+#include <linux/sched.h>
57657+#include <linux/file.h>
57658+#include <linux/fs.h>
57659+#include <linux/kdev_t.h>
57660+#include <linux/net.h>
57661+#include <linux/in.h>
57662+#include <linux/ip.h>
57663+#include <linux/skbuff.h>
57664+#include <linux/sysctl.h>
57665+
57666+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57667+void
57668+pax_set_initial_flags(struct linux_binprm *bprm)
57669+{
57670+ return;
57671+}
57672+#endif
57673+
57674+#ifdef CONFIG_SYSCTL
57675+__u32
57676+gr_handle_sysctl(const struct ctl_table * table, const int op)
57677+{
57678+ return 0;
57679+}
57680+#endif
57681+
57682+#ifdef CONFIG_TASKSTATS
57683+int gr_is_taskstats_denied(int pid)
57684+{
57685+ return 0;
57686+}
57687+#endif
57688+
57689+int
57690+gr_acl_is_enabled(void)
57691+{
57692+ return 0;
57693+}
57694+
57695+void
57696+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57697+{
57698+ return;
57699+}
57700+
57701+int
57702+gr_handle_rawio(const struct inode *inode)
57703+{
57704+ return 0;
57705+}
57706+
57707+void
57708+gr_acl_handle_psacct(struct task_struct *task, const long code)
57709+{
57710+ return;
57711+}
57712+
57713+int
57714+gr_handle_ptrace(struct task_struct *task, const long request)
57715+{
57716+ return 0;
57717+}
57718+
57719+int
57720+gr_handle_proc_ptrace(struct task_struct *task)
57721+{
57722+ return 0;
57723+}
57724+
57725+void
57726+gr_learn_resource(const struct task_struct *task,
57727+ const int res, const unsigned long wanted, const int gt)
57728+{
57729+ return;
57730+}
57731+
57732+int
57733+gr_set_acls(const int type)
57734+{
57735+ return 0;
57736+}
57737+
57738+int
57739+gr_check_hidden_task(const struct task_struct *tsk)
57740+{
57741+ return 0;
57742+}
57743+
57744+int
57745+gr_check_protected_task(const struct task_struct *task)
57746+{
57747+ return 0;
57748+}
57749+
57750+int
57751+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57752+{
57753+ return 0;
57754+}
57755+
57756+void
57757+gr_copy_label(struct task_struct *tsk)
57758+{
57759+ return;
57760+}
57761+
57762+void
57763+gr_set_pax_flags(struct task_struct *task)
57764+{
57765+ return;
57766+}
57767+
57768+int
57769+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57770+ const int unsafe_share)
57771+{
57772+ return 0;
57773+}
57774+
57775+void
57776+gr_handle_delete(const ino_t ino, const dev_t dev)
57777+{
57778+ return;
57779+}
57780+
57781+void
57782+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57783+{
57784+ return;
57785+}
57786+
57787+void
57788+gr_handle_crash(struct task_struct *task, const int sig)
57789+{
57790+ return;
57791+}
57792+
57793+int
57794+gr_check_crash_exec(const struct file *filp)
57795+{
57796+ return 0;
57797+}
57798+
57799+int
57800+gr_check_crash_uid(const uid_t uid)
57801+{
57802+ return 0;
57803+}
57804+
57805+void
57806+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57807+ struct dentry *old_dentry,
57808+ struct dentry *new_dentry,
57809+ struct vfsmount *mnt, const __u8 replace)
57810+{
57811+ return;
57812+}
57813+
57814+int
57815+gr_search_socket(const int family, const int type, const int protocol)
57816+{
57817+ return 1;
57818+}
57819+
57820+int
57821+gr_search_connectbind(const int mode, const struct socket *sock,
57822+ const struct sockaddr_in *addr)
57823+{
57824+ return 0;
57825+}
57826+
57827+void
57828+gr_handle_alertkill(struct task_struct *task)
57829+{
57830+ return;
57831+}
57832+
57833+__u32
57834+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57835+{
57836+ return 1;
57837+}
57838+
57839+__u32
57840+gr_acl_handle_hidden_file(const struct dentry * dentry,
57841+ const struct vfsmount * mnt)
57842+{
57843+ return 1;
57844+}
57845+
57846+__u32
57847+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57848+ int acc_mode)
57849+{
57850+ return 1;
57851+}
57852+
57853+__u32
57854+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57855+{
57856+ return 1;
57857+}
57858+
57859+__u32
57860+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57861+{
57862+ return 1;
57863+}
57864+
57865+int
57866+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57867+ unsigned int *vm_flags)
57868+{
57869+ return 1;
57870+}
57871+
57872+__u32
57873+gr_acl_handle_truncate(const struct dentry * dentry,
57874+ const struct vfsmount * mnt)
57875+{
57876+ return 1;
57877+}
57878+
57879+__u32
57880+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57881+{
57882+ return 1;
57883+}
57884+
57885+__u32
57886+gr_acl_handle_access(const struct dentry * dentry,
57887+ const struct vfsmount * mnt, const int fmode)
57888+{
57889+ return 1;
57890+}
57891+
57892+__u32
57893+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
57894+ mode_t mode)
57895+{
57896+ return 1;
57897+}
57898+
57899+__u32
57900+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57901+ mode_t mode)
57902+{
57903+ return 1;
57904+}
57905+
57906+__u32
57907+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57908+{
57909+ return 1;
57910+}
57911+
57912+__u32
57913+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57914+{
57915+ return 1;
57916+}
57917+
57918+void
57919+grsecurity_init(void)
57920+{
57921+ return;
57922+}
57923+
57924+__u32
57925+gr_acl_handle_mknod(const struct dentry * new_dentry,
57926+ const struct dentry * parent_dentry,
57927+ const struct vfsmount * parent_mnt,
57928+ const int mode)
57929+{
57930+ return 1;
57931+}
57932+
57933+__u32
57934+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57935+ const struct dentry * parent_dentry,
57936+ const struct vfsmount * parent_mnt)
57937+{
57938+ return 1;
57939+}
57940+
57941+__u32
57942+gr_acl_handle_symlink(const struct dentry * new_dentry,
57943+ const struct dentry * parent_dentry,
57944+ const struct vfsmount * parent_mnt, const char *from)
57945+{
57946+ return 1;
57947+}
57948+
57949+__u32
57950+gr_acl_handle_link(const struct dentry * new_dentry,
57951+ const struct dentry * parent_dentry,
57952+ const struct vfsmount * parent_mnt,
57953+ const struct dentry * old_dentry,
57954+ const struct vfsmount * old_mnt, const char *to)
57955+{
57956+ return 1;
57957+}
57958+
57959+int
57960+gr_acl_handle_rename(const struct dentry *new_dentry,
57961+ const struct dentry *parent_dentry,
57962+ const struct vfsmount *parent_mnt,
57963+ const struct dentry *old_dentry,
57964+ const struct inode *old_parent_inode,
57965+ const struct vfsmount *old_mnt, const char *newname)
57966+{
57967+ return 0;
57968+}
57969+
57970+int
57971+gr_acl_handle_filldir(const struct file *file, const char *name,
57972+ const int namelen, const ino_t ino)
57973+{
57974+ return 1;
57975+}
57976+
57977+int
57978+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57979+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57980+{
57981+ return 1;
57982+}
57983+
57984+int
57985+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57986+{
57987+ return 0;
57988+}
57989+
57990+int
57991+gr_search_accept(const struct socket *sock)
57992+{
57993+ return 0;
57994+}
57995+
57996+int
57997+gr_search_listen(const struct socket *sock)
57998+{
57999+ return 0;
58000+}
58001+
58002+int
58003+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
58004+{
58005+ return 0;
58006+}
58007+
58008+__u32
58009+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
58010+{
58011+ return 1;
58012+}
58013+
58014+__u32
58015+gr_acl_handle_creat(const struct dentry * dentry,
58016+ const struct dentry * p_dentry,
58017+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58018+ const int imode)
58019+{
58020+ return 1;
58021+}
58022+
58023+void
58024+gr_acl_handle_exit(void)
58025+{
58026+ return;
58027+}
58028+
58029+int
58030+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58031+{
58032+ return 1;
58033+}
58034+
58035+void
58036+gr_set_role_label(const uid_t uid, const gid_t gid)
58037+{
58038+ return;
58039+}
58040+
58041+int
58042+gr_acl_handle_procpidmem(const struct task_struct *task)
58043+{
58044+ return 0;
58045+}
58046+
58047+int
58048+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
58049+{
58050+ return 0;
58051+}
58052+
58053+int
58054+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
58055+{
58056+ return 0;
58057+}
58058+
58059+void
58060+gr_set_kernel_label(struct task_struct *task)
58061+{
58062+ return;
58063+}
58064+
58065+int
58066+gr_check_user_change(int real, int effective, int fs)
58067+{
58068+ return 0;
58069+}
58070+
58071+int
58072+gr_check_group_change(int real, int effective, int fs)
58073+{
58074+ return 0;
58075+}
58076+
58077+int gr_acl_enable_at_secure(void)
58078+{
58079+ return 0;
58080+}
58081+
58082+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58083+{
58084+ return dentry->d_inode->i_sb->s_dev;
58085+}
58086+
58087+EXPORT_SYMBOL(gr_learn_resource);
58088+EXPORT_SYMBOL(gr_set_kernel_label);
58089+#ifdef CONFIG_SECURITY
58090+EXPORT_SYMBOL(gr_check_user_change);
58091+EXPORT_SYMBOL(gr_check_group_change);
58092+#endif
58093diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
58094new file mode 100644
58095index 0000000..2b05ada
58096--- /dev/null
58097+++ b/grsecurity/grsec_exec.c
58098@@ -0,0 +1,146 @@
58099+#include <linux/kernel.h>
58100+#include <linux/sched.h>
58101+#include <linux/file.h>
58102+#include <linux/binfmts.h>
58103+#include <linux/fs.h>
58104+#include <linux/types.h>
58105+#include <linux/grdefs.h>
58106+#include <linux/grsecurity.h>
58107+#include <linux/grinternal.h>
58108+#include <linux/capability.h>
58109+#include <linux/module.h>
58110+
58111+#include <asm/uaccess.h>
58112+
58113+#ifdef CONFIG_GRKERNSEC_EXECLOG
58114+static char gr_exec_arg_buf[132];
58115+static DEFINE_MUTEX(gr_exec_arg_mutex);
58116+#endif
58117+
58118+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58119+
58120+void
58121+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58122+{
58123+#ifdef CONFIG_GRKERNSEC_EXECLOG
58124+ char *grarg = gr_exec_arg_buf;
58125+ unsigned int i, x, execlen = 0;
58126+ char c;
58127+
58128+ if (!((grsec_enable_execlog && grsec_enable_group &&
58129+ in_group_p(grsec_audit_gid))
58130+ || (grsec_enable_execlog && !grsec_enable_group)))
58131+ return;
58132+
58133+ mutex_lock(&gr_exec_arg_mutex);
58134+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
58135+
58136+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
58137+ const char __user *p;
58138+ unsigned int len;
58139+
58140+ p = get_user_arg_ptr(argv, i);
58141+ if (IS_ERR(p))
58142+ goto log;
58143+
58144+ len = strnlen_user(p, 128 - execlen);
58145+ if (len > 128 - execlen)
58146+ len = 128 - execlen;
58147+ else if (len > 0)
58148+ len--;
58149+ if (copy_from_user(grarg + execlen, p, len))
58150+ goto log;
58151+
58152+ /* rewrite unprintable characters */
58153+ for (x = 0; x < len; x++) {
58154+ c = *(grarg + execlen + x);
58155+ if (c < 32 || c > 126)
58156+ *(grarg + execlen + x) = ' ';
58157+ }
58158+
58159+ execlen += len;
58160+ *(grarg + execlen) = ' ';
58161+ *(grarg + execlen + 1) = '\0';
58162+ execlen++;
58163+ }
58164+
58165+ log:
58166+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
58167+ bprm->file->f_path.mnt, grarg);
58168+ mutex_unlock(&gr_exec_arg_mutex);
58169+#endif
58170+ return;
58171+}
58172+
58173+#ifdef CONFIG_GRKERNSEC
58174+extern int gr_acl_is_capable(const int cap);
58175+extern int gr_acl_is_capable_nolog(const int cap);
58176+extern int gr_chroot_is_capable(const int cap);
58177+extern int gr_chroot_is_capable_nolog(const int cap);
58178+#endif
58179+
58180+const char *captab_log[] = {
58181+ "CAP_CHOWN",
58182+ "CAP_DAC_OVERRIDE",
58183+ "CAP_DAC_READ_SEARCH",
58184+ "CAP_FOWNER",
58185+ "CAP_FSETID",
58186+ "CAP_KILL",
58187+ "CAP_SETGID",
58188+ "CAP_SETUID",
58189+ "CAP_SETPCAP",
58190+ "CAP_LINUX_IMMUTABLE",
58191+ "CAP_NET_BIND_SERVICE",
58192+ "CAP_NET_BROADCAST",
58193+ "CAP_NET_ADMIN",
58194+ "CAP_NET_RAW",
58195+ "CAP_IPC_LOCK",
58196+ "CAP_IPC_OWNER",
58197+ "CAP_SYS_MODULE",
58198+ "CAP_SYS_RAWIO",
58199+ "CAP_SYS_CHROOT",
58200+ "CAP_SYS_PTRACE",
58201+ "CAP_SYS_PACCT",
58202+ "CAP_SYS_ADMIN",
58203+ "CAP_SYS_BOOT",
58204+ "CAP_SYS_NICE",
58205+ "CAP_SYS_RESOURCE",
58206+ "CAP_SYS_TIME",
58207+ "CAP_SYS_TTY_CONFIG",
58208+ "CAP_MKNOD",
58209+ "CAP_LEASE",
58210+ "CAP_AUDIT_WRITE",
58211+ "CAP_AUDIT_CONTROL",
58212+ "CAP_SETFCAP",
58213+ "CAP_MAC_OVERRIDE",
58214+ "CAP_MAC_ADMIN",
58215+ "CAP_SYSLOG",
58216+ "CAP_WAKE_ALARM"
58217+};
58218+
58219+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
58220+
58221+int gr_is_capable(const int cap)
58222+{
58223+#ifdef CONFIG_GRKERNSEC
58224+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58225+ return 1;
58226+ return 0;
58227+#else
58228+ return 1;
58229+#endif
58230+}
58231+
58232+int gr_is_capable_nolog(const int cap)
58233+{
58234+#ifdef CONFIG_GRKERNSEC
58235+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58236+ return 1;
58237+ return 0;
58238+#else
58239+ return 1;
58240+#endif
58241+}
58242+
58243+EXPORT_SYMBOL(gr_is_capable);
58244+EXPORT_SYMBOL(gr_is_capable_nolog);
58245diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58246new file mode 100644
58247index 0000000..d3ee748
58248--- /dev/null
58249+++ b/grsecurity/grsec_fifo.c
58250@@ -0,0 +1,24 @@
58251+#include <linux/kernel.h>
58252+#include <linux/sched.h>
58253+#include <linux/fs.h>
58254+#include <linux/file.h>
58255+#include <linux/grinternal.h>
58256+
58257+int
58258+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58259+ const struct dentry *dir, const int flag, const int acc_mode)
58260+{
58261+#ifdef CONFIG_GRKERNSEC_FIFO
58262+ const struct cred *cred = current_cred();
58263+
58264+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58265+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58266+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58267+ (cred->fsuid != dentry->d_inode->i_uid)) {
58268+ if (!inode_permission(dentry->d_inode, acc_mode))
58269+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58270+ return -EACCES;
58271+ }
58272+#endif
58273+ return 0;
58274+}
58275diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58276new file mode 100644
58277index 0000000..8ca18bf
58278--- /dev/null
58279+++ b/grsecurity/grsec_fork.c
58280@@ -0,0 +1,23 @@
58281+#include <linux/kernel.h>
58282+#include <linux/sched.h>
58283+#include <linux/grsecurity.h>
58284+#include <linux/grinternal.h>
58285+#include <linux/errno.h>
58286+
58287+void
58288+gr_log_forkfail(const int retval)
58289+{
58290+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58291+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58292+ switch (retval) {
58293+ case -EAGAIN:
58294+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58295+ break;
58296+ case -ENOMEM:
58297+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58298+ break;
58299+ }
58300+ }
58301+#endif
58302+ return;
58303+}
58304diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58305new file mode 100644
58306index 0000000..cb8e5a1
58307--- /dev/null
58308+++ b/grsecurity/grsec_init.c
58309@@ -0,0 +1,273 @@
58310+#include <linux/kernel.h>
58311+#include <linux/sched.h>
58312+#include <linux/mm.h>
58313+#include <linux/gracl.h>
58314+#include <linux/slab.h>
58315+#include <linux/vmalloc.h>
58316+#include <linux/percpu.h>
58317+#include <linux/module.h>
58318+
58319+int grsec_enable_setxid;
58320+int grsec_enable_brute;
58321+int grsec_enable_link;
58322+int grsec_enable_dmesg;
58323+int grsec_enable_harden_ptrace;
58324+int grsec_enable_fifo;
58325+int grsec_enable_execlog;
58326+int grsec_enable_signal;
58327+int grsec_enable_forkfail;
58328+int grsec_enable_audit_ptrace;
58329+int grsec_enable_time;
58330+int grsec_enable_audit_textrel;
58331+int grsec_enable_group;
58332+int grsec_audit_gid;
58333+int grsec_enable_chdir;
58334+int grsec_enable_mount;
58335+int grsec_enable_rofs;
58336+int grsec_enable_chroot_findtask;
58337+int grsec_enable_chroot_mount;
58338+int grsec_enable_chroot_shmat;
58339+int grsec_enable_chroot_fchdir;
58340+int grsec_enable_chroot_double;
58341+int grsec_enable_chroot_pivot;
58342+int grsec_enable_chroot_chdir;
58343+int grsec_enable_chroot_chmod;
58344+int grsec_enable_chroot_mknod;
58345+int grsec_enable_chroot_nice;
58346+int grsec_enable_chroot_execlog;
58347+int grsec_enable_chroot_caps;
58348+int grsec_enable_chroot_sysctl;
58349+int grsec_enable_chroot_unix;
58350+int grsec_enable_tpe;
58351+int grsec_tpe_gid;
58352+int grsec_enable_blackhole;
58353+#ifdef CONFIG_IPV6_MODULE
58354+EXPORT_SYMBOL(grsec_enable_blackhole);
58355+#endif
58356+int grsec_lastack_retries;
58357+int grsec_enable_tpe_all;
58358+int grsec_enable_tpe_invert;
58359+int grsec_enable_socket_all;
58360+int grsec_socket_all_gid;
58361+int grsec_enable_socket_client;
58362+int grsec_socket_client_gid;
58363+int grsec_enable_socket_server;
58364+int grsec_socket_server_gid;
58365+int grsec_resource_logging;
58366+int grsec_disable_privio;
58367+int grsec_enable_log_rwxmaps;
58368+int grsec_lock;
58369+
58370+DEFINE_SPINLOCK(grsec_alert_lock);
58371+unsigned long grsec_alert_wtime = 0;
58372+unsigned long grsec_alert_fyet = 0;
58373+
58374+DEFINE_SPINLOCK(grsec_audit_lock);
58375+
58376+DEFINE_RWLOCK(grsec_exec_file_lock);
58377+
58378+char *gr_shared_page[4];
58379+
58380+char *gr_alert_log_fmt;
58381+char *gr_audit_log_fmt;
58382+char *gr_alert_log_buf;
58383+char *gr_audit_log_buf;
58384+
58385+extern struct gr_arg *gr_usermode;
58386+extern unsigned char *gr_system_salt;
58387+extern unsigned char *gr_system_sum;
58388+
58389+void __init
58390+grsecurity_init(void)
58391+{
58392+ int j;
58393+ /* create the per-cpu shared pages */
58394+
58395+#ifdef CONFIG_X86
58396+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58397+#endif
58398+
58399+ for (j = 0; j < 4; j++) {
58400+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58401+ if (gr_shared_page[j] == NULL) {
58402+ panic("Unable to allocate grsecurity shared page");
58403+ return;
58404+ }
58405+ }
58406+
58407+ /* allocate log buffers */
58408+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58409+ if (!gr_alert_log_fmt) {
58410+ panic("Unable to allocate grsecurity alert log format buffer");
58411+ return;
58412+ }
58413+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58414+ if (!gr_audit_log_fmt) {
58415+ panic("Unable to allocate grsecurity audit log format buffer");
58416+ return;
58417+ }
58418+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58419+ if (!gr_alert_log_buf) {
58420+ panic("Unable to allocate grsecurity alert log buffer");
58421+ return;
58422+ }
58423+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58424+ if (!gr_audit_log_buf) {
58425+ panic("Unable to allocate grsecurity audit log buffer");
58426+ return;
58427+ }
58428+
58429+ /* allocate memory for authentication structure */
58430+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58431+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58432+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58433+
58434+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58435+ panic("Unable to allocate grsecurity authentication structure");
58436+ return;
58437+ }
58438+
58439+
58440+#ifdef CONFIG_GRKERNSEC_IO
58441+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58442+ grsec_disable_privio = 1;
58443+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58444+ grsec_disable_privio = 1;
58445+#else
58446+ grsec_disable_privio = 0;
58447+#endif
58448+#endif
58449+
58450+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58451+ /* for backward compatibility, tpe_invert always defaults to on if
58452+ enabled in the kernel
58453+ */
58454+ grsec_enable_tpe_invert = 1;
58455+#endif
58456+
58457+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58458+#ifndef CONFIG_GRKERNSEC_SYSCTL
58459+ grsec_lock = 1;
58460+#endif
58461+
58462+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58463+ grsec_enable_audit_textrel = 1;
58464+#endif
58465+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58466+ grsec_enable_log_rwxmaps = 1;
58467+#endif
58468+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58469+ grsec_enable_group = 1;
58470+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58471+#endif
58472+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58473+ grsec_enable_chdir = 1;
58474+#endif
58475+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58476+ grsec_enable_harden_ptrace = 1;
58477+#endif
58478+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58479+ grsec_enable_mount = 1;
58480+#endif
58481+#ifdef CONFIG_GRKERNSEC_LINK
58482+ grsec_enable_link = 1;
58483+#endif
58484+#ifdef CONFIG_GRKERNSEC_BRUTE
58485+ grsec_enable_brute = 1;
58486+#endif
58487+#ifdef CONFIG_GRKERNSEC_DMESG
58488+ grsec_enable_dmesg = 1;
58489+#endif
58490+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58491+ grsec_enable_blackhole = 1;
58492+ grsec_lastack_retries = 4;
58493+#endif
58494+#ifdef CONFIG_GRKERNSEC_FIFO
58495+ grsec_enable_fifo = 1;
58496+#endif
58497+#ifdef CONFIG_GRKERNSEC_EXECLOG
58498+ grsec_enable_execlog = 1;
58499+#endif
58500+#ifdef CONFIG_GRKERNSEC_SETXID
58501+ grsec_enable_setxid = 1;
58502+#endif
58503+#ifdef CONFIG_GRKERNSEC_SIGNAL
58504+ grsec_enable_signal = 1;
58505+#endif
58506+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58507+ grsec_enable_forkfail = 1;
58508+#endif
58509+#ifdef CONFIG_GRKERNSEC_TIME
58510+ grsec_enable_time = 1;
58511+#endif
58512+#ifdef CONFIG_GRKERNSEC_RESLOG
58513+ grsec_resource_logging = 1;
58514+#endif
58515+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58516+ grsec_enable_chroot_findtask = 1;
58517+#endif
58518+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58519+ grsec_enable_chroot_unix = 1;
58520+#endif
58521+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58522+ grsec_enable_chroot_mount = 1;
58523+#endif
58524+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58525+ grsec_enable_chroot_fchdir = 1;
58526+#endif
58527+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58528+ grsec_enable_chroot_shmat = 1;
58529+#endif
58530+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58531+ grsec_enable_audit_ptrace = 1;
58532+#endif
58533+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58534+ grsec_enable_chroot_double = 1;
58535+#endif
58536+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58537+ grsec_enable_chroot_pivot = 1;
58538+#endif
58539+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58540+ grsec_enable_chroot_chdir = 1;
58541+#endif
58542+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58543+ grsec_enable_chroot_chmod = 1;
58544+#endif
58545+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58546+ grsec_enable_chroot_mknod = 1;
58547+#endif
58548+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58549+ grsec_enable_chroot_nice = 1;
58550+#endif
58551+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58552+ grsec_enable_chroot_execlog = 1;
58553+#endif
58554+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58555+ grsec_enable_chroot_caps = 1;
58556+#endif
58557+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58558+ grsec_enable_chroot_sysctl = 1;
58559+#endif
58560+#ifdef CONFIG_GRKERNSEC_TPE
58561+ grsec_enable_tpe = 1;
58562+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58563+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58564+ grsec_enable_tpe_all = 1;
58565+#endif
58566+#endif
58567+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58568+ grsec_enable_socket_all = 1;
58569+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58570+#endif
58571+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58572+ grsec_enable_socket_client = 1;
58573+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58574+#endif
58575+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58576+ grsec_enable_socket_server = 1;
58577+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58578+#endif
58579+#endif
58580+
58581+ return;
58582+}
58583diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58584new file mode 100644
58585index 0000000..3efe141
58586--- /dev/null
58587+++ b/grsecurity/grsec_link.c
58588@@ -0,0 +1,43 @@
58589+#include <linux/kernel.h>
58590+#include <linux/sched.h>
58591+#include <linux/fs.h>
58592+#include <linux/file.h>
58593+#include <linux/grinternal.h>
58594+
58595+int
58596+gr_handle_follow_link(const struct inode *parent,
58597+ const struct inode *inode,
58598+ const struct dentry *dentry, const struct vfsmount *mnt)
58599+{
58600+#ifdef CONFIG_GRKERNSEC_LINK
58601+ const struct cred *cred = current_cred();
58602+
58603+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58604+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
58605+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
58606+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58607+ return -EACCES;
58608+ }
58609+#endif
58610+ return 0;
58611+}
58612+
58613+int
58614+gr_handle_hardlink(const struct dentry *dentry,
58615+ const struct vfsmount *mnt,
58616+ struct inode *inode, const int mode, const char *to)
58617+{
58618+#ifdef CONFIG_GRKERNSEC_LINK
58619+ const struct cred *cred = current_cred();
58620+
58621+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
58622+ (!S_ISREG(mode) || (mode & S_ISUID) ||
58623+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
58624+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58625+ !capable(CAP_FOWNER) && cred->uid) {
58626+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58627+ return -EPERM;
58628+ }
58629+#endif
58630+ return 0;
58631+}
58632diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58633new file mode 100644
58634index 0000000..a45d2e9
58635--- /dev/null
58636+++ b/grsecurity/grsec_log.c
58637@@ -0,0 +1,322 @@
58638+#include <linux/kernel.h>
58639+#include <linux/sched.h>
58640+#include <linux/file.h>
58641+#include <linux/tty.h>
58642+#include <linux/fs.h>
58643+#include <linux/grinternal.h>
58644+
58645+#ifdef CONFIG_TREE_PREEMPT_RCU
58646+#define DISABLE_PREEMPT() preempt_disable()
58647+#define ENABLE_PREEMPT() preempt_enable()
58648+#else
58649+#define DISABLE_PREEMPT()
58650+#define ENABLE_PREEMPT()
58651+#endif
58652+
58653+#define BEGIN_LOCKS(x) \
58654+ DISABLE_PREEMPT(); \
58655+ rcu_read_lock(); \
58656+ read_lock(&tasklist_lock); \
58657+ read_lock(&grsec_exec_file_lock); \
58658+ if (x != GR_DO_AUDIT) \
58659+ spin_lock(&grsec_alert_lock); \
58660+ else \
58661+ spin_lock(&grsec_audit_lock)
58662+
58663+#define END_LOCKS(x) \
58664+ if (x != GR_DO_AUDIT) \
58665+ spin_unlock(&grsec_alert_lock); \
58666+ else \
58667+ spin_unlock(&grsec_audit_lock); \
58668+ read_unlock(&grsec_exec_file_lock); \
58669+ read_unlock(&tasklist_lock); \
58670+ rcu_read_unlock(); \
58671+ ENABLE_PREEMPT(); \
58672+ if (x == GR_DONT_AUDIT) \
58673+ gr_handle_alertkill(current)
58674+
58675+enum {
58676+ FLOODING,
58677+ NO_FLOODING
58678+};
58679+
58680+extern char *gr_alert_log_fmt;
58681+extern char *gr_audit_log_fmt;
58682+extern char *gr_alert_log_buf;
58683+extern char *gr_audit_log_buf;
58684+
58685+static int gr_log_start(int audit)
58686+{
58687+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58688+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58689+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58690+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58691+ unsigned long curr_secs = get_seconds();
58692+
58693+ if (audit == GR_DO_AUDIT)
58694+ goto set_fmt;
58695+
58696+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58697+ grsec_alert_wtime = curr_secs;
58698+ grsec_alert_fyet = 0;
58699+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58700+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58701+ grsec_alert_fyet++;
58702+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58703+ grsec_alert_wtime = curr_secs;
58704+ grsec_alert_fyet++;
58705+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58706+ return FLOODING;
58707+ }
58708+ else return FLOODING;
58709+
58710+set_fmt:
58711+#endif
58712+ memset(buf, 0, PAGE_SIZE);
58713+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
58714+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58715+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58716+ } else if (current->signal->curr_ip) {
58717+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58718+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58719+ } else if (gr_acl_is_enabled()) {
58720+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58721+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58722+ } else {
58723+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
58724+ strcpy(buf, fmt);
58725+ }
58726+
58727+ return NO_FLOODING;
58728+}
58729+
58730+static void gr_log_middle(int audit, const char *msg, va_list ap)
58731+ __attribute__ ((format (printf, 2, 0)));
58732+
58733+static void gr_log_middle(int audit, const char *msg, va_list ap)
58734+{
58735+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58736+ unsigned int len = strlen(buf);
58737+
58738+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58739+
58740+ return;
58741+}
58742+
58743+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58744+ __attribute__ ((format (printf, 2, 3)));
58745+
58746+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58747+{
58748+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58749+ unsigned int len = strlen(buf);
58750+ va_list ap;
58751+
58752+ va_start(ap, msg);
58753+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58754+ va_end(ap);
58755+
58756+ return;
58757+}
58758+
58759+static void gr_log_end(int audit, int append_default)
58760+{
58761+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58762+
58763+ if (append_default) {
58764+ unsigned int len = strlen(buf);
58765+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58766+ }
58767+
58768+ printk("%s\n", buf);
58769+
58770+ return;
58771+}
58772+
58773+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58774+{
58775+ int logtype;
58776+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58777+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58778+ void *voidptr = NULL;
58779+ int num1 = 0, num2 = 0;
58780+ unsigned long ulong1 = 0, ulong2 = 0;
58781+ struct dentry *dentry = NULL;
58782+ struct vfsmount *mnt = NULL;
58783+ struct file *file = NULL;
58784+ struct task_struct *task = NULL;
58785+ const struct cred *cred, *pcred;
58786+ va_list ap;
58787+
58788+ BEGIN_LOCKS(audit);
58789+ logtype = gr_log_start(audit);
58790+ if (logtype == FLOODING) {
58791+ END_LOCKS(audit);
58792+ return;
58793+ }
58794+ va_start(ap, argtypes);
58795+ switch (argtypes) {
58796+ case GR_TTYSNIFF:
58797+ task = va_arg(ap, struct task_struct *);
58798+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58799+ break;
58800+ case GR_SYSCTL_HIDDEN:
58801+ str1 = va_arg(ap, char *);
58802+ gr_log_middle_varargs(audit, msg, result, str1);
58803+ break;
58804+ case GR_RBAC:
58805+ dentry = va_arg(ap, struct dentry *);
58806+ mnt = va_arg(ap, struct vfsmount *);
58807+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58808+ break;
58809+ case GR_RBAC_STR:
58810+ dentry = va_arg(ap, struct dentry *);
58811+ mnt = va_arg(ap, struct vfsmount *);
58812+ str1 = va_arg(ap, char *);
58813+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58814+ break;
58815+ case GR_STR_RBAC:
58816+ str1 = va_arg(ap, char *);
58817+ dentry = va_arg(ap, struct dentry *);
58818+ mnt = va_arg(ap, struct vfsmount *);
58819+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58820+ break;
58821+ case GR_RBAC_MODE2:
58822+ dentry = va_arg(ap, struct dentry *);
58823+ mnt = va_arg(ap, struct vfsmount *);
58824+ str1 = va_arg(ap, char *);
58825+ str2 = va_arg(ap, char *);
58826+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58827+ break;
58828+ case GR_RBAC_MODE3:
58829+ dentry = va_arg(ap, struct dentry *);
58830+ mnt = va_arg(ap, struct vfsmount *);
58831+ str1 = va_arg(ap, char *);
58832+ str2 = va_arg(ap, char *);
58833+ str3 = va_arg(ap, char *);
58834+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58835+ break;
58836+ case GR_FILENAME:
58837+ dentry = va_arg(ap, struct dentry *);
58838+ mnt = va_arg(ap, struct vfsmount *);
58839+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58840+ break;
58841+ case GR_STR_FILENAME:
58842+ str1 = va_arg(ap, char *);
58843+ dentry = va_arg(ap, struct dentry *);
58844+ mnt = va_arg(ap, struct vfsmount *);
58845+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58846+ break;
58847+ case GR_FILENAME_STR:
58848+ dentry = va_arg(ap, struct dentry *);
58849+ mnt = va_arg(ap, struct vfsmount *);
58850+ str1 = va_arg(ap, char *);
58851+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58852+ break;
58853+ case GR_FILENAME_TWO_INT:
58854+ dentry = va_arg(ap, struct dentry *);
58855+ mnt = va_arg(ap, struct vfsmount *);
58856+ num1 = va_arg(ap, int);
58857+ num2 = va_arg(ap, int);
58858+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58859+ break;
58860+ case GR_FILENAME_TWO_INT_STR:
58861+ dentry = va_arg(ap, struct dentry *);
58862+ mnt = va_arg(ap, struct vfsmount *);
58863+ num1 = va_arg(ap, int);
58864+ num2 = va_arg(ap, int);
58865+ str1 = va_arg(ap, char *);
58866+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58867+ break;
58868+ case GR_TEXTREL:
58869+ file = va_arg(ap, struct file *);
58870+ ulong1 = va_arg(ap, unsigned long);
58871+ ulong2 = va_arg(ap, unsigned long);
58872+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58873+ break;
58874+ case GR_PTRACE:
58875+ task = va_arg(ap, struct task_struct *);
58876+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58877+ break;
58878+ case GR_RESOURCE:
58879+ task = va_arg(ap, struct task_struct *);
58880+ cred = __task_cred(task);
58881+ pcred = __task_cred(task->real_parent);
58882+ ulong1 = va_arg(ap, unsigned long);
58883+ str1 = va_arg(ap, char *);
58884+ ulong2 = va_arg(ap, unsigned long);
58885+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58886+ break;
58887+ case GR_CAP:
58888+ task = va_arg(ap, struct task_struct *);
58889+ cred = __task_cred(task);
58890+ pcred = __task_cred(task->real_parent);
58891+ str1 = va_arg(ap, char *);
58892+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58893+ break;
58894+ case GR_SIG:
58895+ str1 = va_arg(ap, char *);
58896+ voidptr = va_arg(ap, void *);
58897+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58898+ break;
58899+ case GR_SIG2:
58900+ task = va_arg(ap, struct task_struct *);
58901+ cred = __task_cred(task);
58902+ pcred = __task_cred(task->real_parent);
58903+ num1 = va_arg(ap, int);
58904+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58905+ break;
58906+ case GR_CRASH1:
58907+ task = va_arg(ap, struct task_struct *);
58908+ cred = __task_cred(task);
58909+ pcred = __task_cred(task->real_parent);
58910+ ulong1 = va_arg(ap, unsigned long);
58911+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58912+ break;
58913+ case GR_CRASH2:
58914+ task = va_arg(ap, struct task_struct *);
58915+ cred = __task_cred(task);
58916+ pcred = __task_cred(task->real_parent);
58917+ ulong1 = va_arg(ap, unsigned long);
58918+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58919+ break;
58920+ case GR_RWXMAP:
58921+ file = va_arg(ap, struct file *);
58922+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58923+ break;
58924+ case GR_PSACCT:
58925+ {
58926+ unsigned int wday, cday;
58927+ __u8 whr, chr;
58928+ __u8 wmin, cmin;
58929+ __u8 wsec, csec;
58930+ char cur_tty[64] = { 0 };
58931+ char parent_tty[64] = { 0 };
58932+
58933+ task = va_arg(ap, struct task_struct *);
58934+ wday = va_arg(ap, unsigned int);
58935+ cday = va_arg(ap, unsigned int);
58936+ whr = va_arg(ap, int);
58937+ chr = va_arg(ap, int);
58938+ wmin = va_arg(ap, int);
58939+ cmin = va_arg(ap, int);
58940+ wsec = va_arg(ap, int);
58941+ csec = va_arg(ap, int);
58942+ ulong1 = va_arg(ap, unsigned long);
58943+ cred = __task_cred(task);
58944+ pcred = __task_cred(task->real_parent);
58945+
58946+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58947+ }
58948+ break;
58949+ default:
58950+ gr_log_middle(audit, msg, ap);
58951+ }
58952+ va_end(ap);
58953+ // these don't need DEFAULTSECARGS printed on the end
58954+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58955+ gr_log_end(audit, 0);
58956+ else
58957+ gr_log_end(audit, 1);
58958+ END_LOCKS(audit);
58959+}
58960diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58961new file mode 100644
58962index 0000000..6c0416b
58963--- /dev/null
58964+++ b/grsecurity/grsec_mem.c
58965@@ -0,0 +1,33 @@
58966+#include <linux/kernel.h>
58967+#include <linux/sched.h>
58968+#include <linux/mm.h>
58969+#include <linux/mman.h>
58970+#include <linux/grinternal.h>
58971+
58972+void
58973+gr_handle_ioperm(void)
58974+{
58975+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58976+ return;
58977+}
58978+
58979+void
58980+gr_handle_iopl(void)
58981+{
58982+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58983+ return;
58984+}
58985+
58986+void
58987+gr_handle_mem_readwrite(u64 from, u64 to)
58988+{
58989+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58990+ return;
58991+}
58992+
58993+void
58994+gr_handle_vm86(void)
58995+{
58996+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58997+ return;
58998+}
58999diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
59000new file mode 100644
59001index 0000000..2131422
59002--- /dev/null
59003+++ b/grsecurity/grsec_mount.c
59004@@ -0,0 +1,62 @@
59005+#include <linux/kernel.h>
59006+#include <linux/sched.h>
59007+#include <linux/mount.h>
59008+#include <linux/grsecurity.h>
59009+#include <linux/grinternal.h>
59010+
59011+void
59012+gr_log_remount(const char *devname, const int retval)
59013+{
59014+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59015+ if (grsec_enable_mount && (retval >= 0))
59016+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
59017+#endif
59018+ return;
59019+}
59020+
59021+void
59022+gr_log_unmount(const char *devname, const int retval)
59023+{
59024+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59025+ if (grsec_enable_mount && (retval >= 0))
59026+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
59027+#endif
59028+ return;
59029+}
59030+
59031+void
59032+gr_log_mount(const char *from, const char *to, const int retval)
59033+{
59034+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59035+ if (grsec_enable_mount && (retval >= 0))
59036+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
59037+#endif
59038+ return;
59039+}
59040+
59041+int
59042+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
59043+{
59044+#ifdef CONFIG_GRKERNSEC_ROFS
59045+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
59046+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
59047+ return -EPERM;
59048+ } else
59049+ return 0;
59050+#endif
59051+ return 0;
59052+}
59053+
59054+int
59055+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
59056+{
59057+#ifdef CONFIG_GRKERNSEC_ROFS
59058+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
59059+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
59060+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
59061+ return -EPERM;
59062+ } else
59063+ return 0;
59064+#endif
59065+ return 0;
59066+}
59067diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
59068new file mode 100644
59069index 0000000..a3b12a0
59070--- /dev/null
59071+++ b/grsecurity/grsec_pax.c
59072@@ -0,0 +1,36 @@
59073+#include <linux/kernel.h>
59074+#include <linux/sched.h>
59075+#include <linux/mm.h>
59076+#include <linux/file.h>
59077+#include <linux/grinternal.h>
59078+#include <linux/grsecurity.h>
59079+
59080+void
59081+gr_log_textrel(struct vm_area_struct * vma)
59082+{
59083+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59084+ if (grsec_enable_audit_textrel)
59085+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
59086+#endif
59087+ return;
59088+}
59089+
59090+void
59091+gr_log_rwxmmap(struct file *file)
59092+{
59093+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59094+ if (grsec_enable_log_rwxmaps)
59095+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
59096+#endif
59097+ return;
59098+}
59099+
59100+void
59101+gr_log_rwxmprotect(struct file *file)
59102+{
59103+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59104+ if (grsec_enable_log_rwxmaps)
59105+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
59106+#endif
59107+ return;
59108+}
59109diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
59110new file mode 100644
59111index 0000000..472c1d6
59112--- /dev/null
59113+++ b/grsecurity/grsec_ptrace.c
59114@@ -0,0 +1,14 @@
59115+#include <linux/kernel.h>
59116+#include <linux/sched.h>
59117+#include <linux/grinternal.h>
59118+#include <linux/grsecurity.h>
59119+
59120+void
59121+gr_audit_ptrace(struct task_struct *task)
59122+{
59123+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59124+ if (grsec_enable_audit_ptrace)
59125+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
59126+#endif
59127+ return;
59128+}
59129diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59130new file mode 100644
59131index 0000000..7a5b2de
59132--- /dev/null
59133+++ b/grsecurity/grsec_sig.c
59134@@ -0,0 +1,207 @@
59135+#include <linux/kernel.h>
59136+#include <linux/sched.h>
59137+#include <linux/delay.h>
59138+#include <linux/grsecurity.h>
59139+#include <linux/grinternal.h>
59140+#include <linux/hardirq.h>
59141+
59142+char *signames[] = {
59143+ [SIGSEGV] = "Segmentation fault",
59144+ [SIGILL] = "Illegal instruction",
59145+ [SIGABRT] = "Abort",
59146+ [SIGBUS] = "Invalid alignment/Bus error"
59147+};
59148+
59149+void
59150+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59151+{
59152+#ifdef CONFIG_GRKERNSEC_SIGNAL
59153+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59154+ (sig == SIGABRT) || (sig == SIGBUS))) {
59155+ if (t->pid == current->pid) {
59156+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59157+ } else {
59158+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59159+ }
59160+ }
59161+#endif
59162+ return;
59163+}
59164+
59165+int
59166+gr_handle_signal(const struct task_struct *p, const int sig)
59167+{
59168+#ifdef CONFIG_GRKERNSEC
59169+ /* ignore the 0 signal for protected task checks */
59170+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
59171+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59172+ return -EPERM;
59173+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59174+ return -EPERM;
59175+ }
59176+#endif
59177+ return 0;
59178+}
59179+
59180+#ifdef CONFIG_GRKERNSEC
59181+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59182+
59183+int gr_fake_force_sig(int sig, struct task_struct *t)
59184+{
59185+ unsigned long int flags;
59186+ int ret, blocked, ignored;
59187+ struct k_sigaction *action;
59188+
59189+ spin_lock_irqsave(&t->sighand->siglock, flags);
59190+ action = &t->sighand->action[sig-1];
59191+ ignored = action->sa.sa_handler == SIG_IGN;
59192+ blocked = sigismember(&t->blocked, sig);
59193+ if (blocked || ignored) {
59194+ action->sa.sa_handler = SIG_DFL;
59195+ if (blocked) {
59196+ sigdelset(&t->blocked, sig);
59197+ recalc_sigpending_and_wake(t);
59198+ }
59199+ }
59200+ if (action->sa.sa_handler == SIG_DFL)
59201+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
59202+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59203+
59204+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
59205+
59206+ return ret;
59207+}
59208+#endif
59209+
59210+#ifdef CONFIG_GRKERNSEC_BRUTE
59211+#define GR_USER_BAN_TIME (15 * 60)
59212+
59213+static int __get_dumpable(unsigned long mm_flags)
59214+{
59215+ int ret;
59216+
59217+ ret = mm_flags & MMF_DUMPABLE_MASK;
59218+ return (ret >= 2) ? 2 : ret;
59219+}
59220+#endif
59221+
59222+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
59223+{
59224+#ifdef CONFIG_GRKERNSEC_BRUTE
59225+ uid_t uid = 0;
59226+
59227+ if (!grsec_enable_brute)
59228+ return;
59229+
59230+ rcu_read_lock();
59231+ read_lock(&tasklist_lock);
59232+ read_lock(&grsec_exec_file_lock);
59233+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59234+ p->real_parent->brute = 1;
59235+ else {
59236+ const struct cred *cred = __task_cred(p), *cred2;
59237+ struct task_struct *tsk, *tsk2;
59238+
59239+ if (!__get_dumpable(mm_flags) && cred->uid) {
59240+ struct user_struct *user;
59241+
59242+ uid = cred->uid;
59243+
59244+ /* this is put upon execution past expiration */
59245+ user = find_user(uid);
59246+ if (user == NULL)
59247+ goto unlock;
59248+ user->banned = 1;
59249+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59250+ if (user->ban_expires == ~0UL)
59251+ user->ban_expires--;
59252+
59253+ do_each_thread(tsk2, tsk) {
59254+ cred2 = __task_cred(tsk);
59255+ if (tsk != p && cred2->uid == uid)
59256+ gr_fake_force_sig(SIGKILL, tsk);
59257+ } while_each_thread(tsk2, tsk);
59258+ }
59259+ }
59260+unlock:
59261+ read_unlock(&grsec_exec_file_lock);
59262+ read_unlock(&tasklist_lock);
59263+ rcu_read_unlock();
59264+
59265+ if (uid)
59266+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
59267+
59268+#endif
59269+ return;
59270+}
59271+
59272+void gr_handle_brute_check(void)
59273+{
59274+#ifdef CONFIG_GRKERNSEC_BRUTE
59275+ if (current->brute)
59276+ msleep(30 * 1000);
59277+#endif
59278+ return;
59279+}
59280+
59281+void gr_handle_kernel_exploit(void)
59282+{
59283+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59284+ const struct cred *cred;
59285+ struct task_struct *tsk, *tsk2;
59286+ struct user_struct *user;
59287+ uid_t uid;
59288+
59289+ if (in_irq() || in_serving_softirq() || in_nmi())
59290+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59291+
59292+ uid = current_uid();
59293+
59294+ if (uid == 0)
59295+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
59296+ else {
59297+ /* kill all the processes of this user, hold a reference
59298+ to their creds struct, and prevent them from creating
59299+ another process until system reset
59300+ */
59301+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
59302+ /* we intentionally leak this ref */
59303+ user = get_uid(current->cred->user);
59304+ if (user) {
59305+ user->banned = 1;
59306+ user->ban_expires = ~0UL;
59307+ }
59308+
59309+ read_lock(&tasklist_lock);
59310+ do_each_thread(tsk2, tsk) {
59311+ cred = __task_cred(tsk);
59312+ if (cred->uid == uid)
59313+ gr_fake_force_sig(SIGKILL, tsk);
59314+ } while_each_thread(tsk2, tsk);
59315+ read_unlock(&tasklist_lock);
59316+ }
59317+#endif
59318+}
59319+
59320+int __gr_process_user_ban(struct user_struct *user)
59321+{
59322+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59323+ if (unlikely(user->banned)) {
59324+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59325+ user->banned = 0;
59326+ user->ban_expires = 0;
59327+ free_uid(user);
59328+ } else
59329+ return -EPERM;
59330+ }
59331+#endif
59332+ return 0;
59333+}
59334+
59335+int gr_process_user_ban(void)
59336+{
59337+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59338+ return __gr_process_user_ban(current->cred->user);
59339+#endif
59340+ return 0;
59341+}
59342diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59343new file mode 100644
59344index 0000000..4030d57
59345--- /dev/null
59346+++ b/grsecurity/grsec_sock.c
59347@@ -0,0 +1,244 @@
59348+#include <linux/kernel.h>
59349+#include <linux/module.h>
59350+#include <linux/sched.h>
59351+#include <linux/file.h>
59352+#include <linux/net.h>
59353+#include <linux/in.h>
59354+#include <linux/ip.h>
59355+#include <net/sock.h>
59356+#include <net/inet_sock.h>
59357+#include <linux/grsecurity.h>
59358+#include <linux/grinternal.h>
59359+#include <linux/gracl.h>
59360+
59361+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59362+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59363+
59364+EXPORT_SYMBOL(gr_search_udp_recvmsg);
59365+EXPORT_SYMBOL(gr_search_udp_sendmsg);
59366+
59367+#ifdef CONFIG_UNIX_MODULE
59368+EXPORT_SYMBOL(gr_acl_handle_unix);
59369+EXPORT_SYMBOL(gr_acl_handle_mknod);
59370+EXPORT_SYMBOL(gr_handle_chroot_unix);
59371+EXPORT_SYMBOL(gr_handle_create);
59372+#endif
59373+
59374+#ifdef CONFIG_GRKERNSEC
59375+#define gr_conn_table_size 32749
59376+struct conn_table_entry {
59377+ struct conn_table_entry *next;
59378+ struct signal_struct *sig;
59379+};
59380+
59381+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59382+DEFINE_SPINLOCK(gr_conn_table_lock);
59383+
59384+extern const char * gr_socktype_to_name(unsigned char type);
59385+extern const char * gr_proto_to_name(unsigned char proto);
59386+extern const char * gr_sockfamily_to_name(unsigned char family);
59387+
59388+static __inline__ int
59389+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59390+{
59391+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59392+}
59393+
59394+static __inline__ int
59395+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59396+ __u16 sport, __u16 dport)
59397+{
59398+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59399+ sig->gr_sport == sport && sig->gr_dport == dport))
59400+ return 1;
59401+ else
59402+ return 0;
59403+}
59404+
59405+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59406+{
59407+ struct conn_table_entry **match;
59408+ unsigned int index;
59409+
59410+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59411+ sig->gr_sport, sig->gr_dport,
59412+ gr_conn_table_size);
59413+
59414+ newent->sig = sig;
59415+
59416+ match = &gr_conn_table[index];
59417+ newent->next = *match;
59418+ *match = newent;
59419+
59420+ return;
59421+}
59422+
59423+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59424+{
59425+ struct conn_table_entry *match, *last = NULL;
59426+ unsigned int index;
59427+
59428+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59429+ sig->gr_sport, sig->gr_dport,
59430+ gr_conn_table_size);
59431+
59432+ match = gr_conn_table[index];
59433+ while (match && !conn_match(match->sig,
59434+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59435+ sig->gr_dport)) {
59436+ last = match;
59437+ match = match->next;
59438+ }
59439+
59440+ if (match) {
59441+ if (last)
59442+ last->next = match->next;
59443+ else
59444+ gr_conn_table[index] = NULL;
59445+ kfree(match);
59446+ }
59447+
59448+ return;
59449+}
59450+
59451+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59452+ __u16 sport, __u16 dport)
59453+{
59454+ struct conn_table_entry *match;
59455+ unsigned int index;
59456+
59457+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59458+
59459+ match = gr_conn_table[index];
59460+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59461+ match = match->next;
59462+
59463+ if (match)
59464+ return match->sig;
59465+ else
59466+ return NULL;
59467+}
59468+
59469+#endif
59470+
59471+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59472+{
59473+#ifdef CONFIG_GRKERNSEC
59474+ struct signal_struct *sig = task->signal;
59475+ struct conn_table_entry *newent;
59476+
59477+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59478+ if (newent == NULL)
59479+ return;
59480+ /* no bh lock needed since we are called with bh disabled */
59481+ spin_lock(&gr_conn_table_lock);
59482+ gr_del_task_from_ip_table_nolock(sig);
59483+ sig->gr_saddr = inet->inet_rcv_saddr;
59484+ sig->gr_daddr = inet->inet_daddr;
59485+ sig->gr_sport = inet->inet_sport;
59486+ sig->gr_dport = inet->inet_dport;
59487+ gr_add_to_task_ip_table_nolock(sig, newent);
59488+ spin_unlock(&gr_conn_table_lock);
59489+#endif
59490+ return;
59491+}
59492+
59493+void gr_del_task_from_ip_table(struct task_struct *task)
59494+{
59495+#ifdef CONFIG_GRKERNSEC
59496+ spin_lock_bh(&gr_conn_table_lock);
59497+ gr_del_task_from_ip_table_nolock(task->signal);
59498+ spin_unlock_bh(&gr_conn_table_lock);
59499+#endif
59500+ return;
59501+}
59502+
59503+void
59504+gr_attach_curr_ip(const struct sock *sk)
59505+{
59506+#ifdef CONFIG_GRKERNSEC
59507+ struct signal_struct *p, *set;
59508+ const struct inet_sock *inet = inet_sk(sk);
59509+
59510+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59511+ return;
59512+
59513+ set = current->signal;
59514+
59515+ spin_lock_bh(&gr_conn_table_lock);
59516+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59517+ inet->inet_dport, inet->inet_sport);
59518+ if (unlikely(p != NULL)) {
59519+ set->curr_ip = p->curr_ip;
59520+ set->used_accept = 1;
59521+ gr_del_task_from_ip_table_nolock(p);
59522+ spin_unlock_bh(&gr_conn_table_lock);
59523+ return;
59524+ }
59525+ spin_unlock_bh(&gr_conn_table_lock);
59526+
59527+ set->curr_ip = inet->inet_daddr;
59528+ set->used_accept = 1;
59529+#endif
59530+ return;
59531+}
59532+
59533+int
59534+gr_handle_sock_all(const int family, const int type, const int protocol)
59535+{
59536+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59537+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59538+ (family != AF_UNIX)) {
59539+ if (family == AF_INET)
59540+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59541+ else
59542+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59543+ return -EACCES;
59544+ }
59545+#endif
59546+ return 0;
59547+}
59548+
59549+int
59550+gr_handle_sock_server(const struct sockaddr *sck)
59551+{
59552+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59553+ if (grsec_enable_socket_server &&
59554+ in_group_p(grsec_socket_server_gid) &&
59555+ sck && (sck->sa_family != AF_UNIX) &&
59556+ (sck->sa_family != AF_LOCAL)) {
59557+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59558+ return -EACCES;
59559+ }
59560+#endif
59561+ return 0;
59562+}
59563+
59564+int
59565+gr_handle_sock_server_other(const struct sock *sck)
59566+{
59567+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59568+ if (grsec_enable_socket_server &&
59569+ in_group_p(grsec_socket_server_gid) &&
59570+ sck && (sck->sk_family != AF_UNIX) &&
59571+ (sck->sk_family != AF_LOCAL)) {
59572+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59573+ return -EACCES;
59574+ }
59575+#endif
59576+ return 0;
59577+}
59578+
59579+int
59580+gr_handle_sock_client(const struct sockaddr *sck)
59581+{
59582+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59583+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59584+ sck && (sck->sa_family != AF_UNIX) &&
59585+ (sck->sa_family != AF_LOCAL)) {
59586+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59587+ return -EACCES;
59588+ }
59589+#endif
59590+ return 0;
59591+}
59592diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59593new file mode 100644
59594index 0000000..bceef2f
59595--- /dev/null
59596+++ b/grsecurity/grsec_sysctl.c
59597@@ -0,0 +1,442 @@
59598+#include <linux/kernel.h>
59599+#include <linux/sched.h>
59600+#include <linux/sysctl.h>
59601+#include <linux/grsecurity.h>
59602+#include <linux/grinternal.h>
59603+
59604+int
59605+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59606+{
59607+#ifdef CONFIG_GRKERNSEC_SYSCTL
59608+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59609+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59610+ return -EACCES;
59611+ }
59612+#endif
59613+ return 0;
59614+}
59615+
59616+#ifdef CONFIG_GRKERNSEC_ROFS
59617+static int __maybe_unused one = 1;
59618+#endif
59619+
59620+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59621+struct ctl_table grsecurity_table[] = {
59622+#ifdef CONFIG_GRKERNSEC_SYSCTL
59623+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59624+#ifdef CONFIG_GRKERNSEC_IO
59625+ {
59626+ .procname = "disable_priv_io",
59627+ .data = &grsec_disable_privio,
59628+ .maxlen = sizeof(int),
59629+ .mode = 0600,
59630+ .proc_handler = &proc_dointvec,
59631+ },
59632+#endif
59633+#endif
59634+#ifdef CONFIG_GRKERNSEC_LINK
59635+ {
59636+ .procname = "linking_restrictions",
59637+ .data = &grsec_enable_link,
59638+ .maxlen = sizeof(int),
59639+ .mode = 0600,
59640+ .proc_handler = &proc_dointvec,
59641+ },
59642+#endif
59643+#ifdef CONFIG_GRKERNSEC_BRUTE
59644+ {
59645+ .procname = "deter_bruteforce",
59646+ .data = &grsec_enable_brute,
59647+ .maxlen = sizeof(int),
59648+ .mode = 0600,
59649+ .proc_handler = &proc_dointvec,
59650+ },
59651+#endif
59652+#ifdef CONFIG_GRKERNSEC_FIFO
59653+ {
59654+ .procname = "fifo_restrictions",
59655+ .data = &grsec_enable_fifo,
59656+ .maxlen = sizeof(int),
59657+ .mode = 0600,
59658+ .proc_handler = &proc_dointvec,
59659+ },
59660+#endif
59661+#ifdef CONFIG_GRKERNSEC_SETXID
59662+ {
59663+ .procname = "consistent_setxid",
59664+ .data = &grsec_enable_setxid,
59665+ .maxlen = sizeof(int),
59666+ .mode = 0600,
59667+ .proc_handler = &proc_dointvec,
59668+ },
59669+#endif
59670+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59671+ {
59672+ .procname = "ip_blackhole",
59673+ .data = &grsec_enable_blackhole,
59674+ .maxlen = sizeof(int),
59675+ .mode = 0600,
59676+ .proc_handler = &proc_dointvec,
59677+ },
59678+ {
59679+ .procname = "lastack_retries",
59680+ .data = &grsec_lastack_retries,
59681+ .maxlen = sizeof(int),
59682+ .mode = 0600,
59683+ .proc_handler = &proc_dointvec,
59684+ },
59685+#endif
59686+#ifdef CONFIG_GRKERNSEC_EXECLOG
59687+ {
59688+ .procname = "exec_logging",
59689+ .data = &grsec_enable_execlog,
59690+ .maxlen = sizeof(int),
59691+ .mode = 0600,
59692+ .proc_handler = &proc_dointvec,
59693+ },
59694+#endif
59695+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59696+ {
59697+ .procname = "rwxmap_logging",
59698+ .data = &grsec_enable_log_rwxmaps,
59699+ .maxlen = sizeof(int),
59700+ .mode = 0600,
59701+ .proc_handler = &proc_dointvec,
59702+ },
59703+#endif
59704+#ifdef CONFIG_GRKERNSEC_SIGNAL
59705+ {
59706+ .procname = "signal_logging",
59707+ .data = &grsec_enable_signal,
59708+ .maxlen = sizeof(int),
59709+ .mode = 0600,
59710+ .proc_handler = &proc_dointvec,
59711+ },
59712+#endif
59713+#ifdef CONFIG_GRKERNSEC_FORKFAIL
59714+ {
59715+ .procname = "forkfail_logging",
59716+ .data = &grsec_enable_forkfail,
59717+ .maxlen = sizeof(int),
59718+ .mode = 0600,
59719+ .proc_handler = &proc_dointvec,
59720+ },
59721+#endif
59722+#ifdef CONFIG_GRKERNSEC_TIME
59723+ {
59724+ .procname = "timechange_logging",
59725+ .data = &grsec_enable_time,
59726+ .maxlen = sizeof(int),
59727+ .mode = 0600,
59728+ .proc_handler = &proc_dointvec,
59729+ },
59730+#endif
59731+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59732+ {
59733+ .procname = "chroot_deny_shmat",
59734+ .data = &grsec_enable_chroot_shmat,
59735+ .maxlen = sizeof(int),
59736+ .mode = 0600,
59737+ .proc_handler = &proc_dointvec,
59738+ },
59739+#endif
59740+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59741+ {
59742+ .procname = "chroot_deny_unix",
59743+ .data = &grsec_enable_chroot_unix,
59744+ .maxlen = sizeof(int),
59745+ .mode = 0600,
59746+ .proc_handler = &proc_dointvec,
59747+ },
59748+#endif
59749+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59750+ {
59751+ .procname = "chroot_deny_mount",
59752+ .data = &grsec_enable_chroot_mount,
59753+ .maxlen = sizeof(int),
59754+ .mode = 0600,
59755+ .proc_handler = &proc_dointvec,
59756+ },
59757+#endif
59758+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59759+ {
59760+ .procname = "chroot_deny_fchdir",
59761+ .data = &grsec_enable_chroot_fchdir,
59762+ .maxlen = sizeof(int),
59763+ .mode = 0600,
59764+ .proc_handler = &proc_dointvec,
59765+ },
59766+#endif
59767+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59768+ {
59769+ .procname = "chroot_deny_chroot",
59770+ .data = &grsec_enable_chroot_double,
59771+ .maxlen = sizeof(int),
59772+ .mode = 0600,
59773+ .proc_handler = &proc_dointvec,
59774+ },
59775+#endif
59776+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59777+ {
59778+ .procname = "chroot_deny_pivot",
59779+ .data = &grsec_enable_chroot_pivot,
59780+ .maxlen = sizeof(int),
59781+ .mode = 0600,
59782+ .proc_handler = &proc_dointvec,
59783+ },
59784+#endif
59785+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59786+ {
59787+ .procname = "chroot_enforce_chdir",
59788+ .data = &grsec_enable_chroot_chdir,
59789+ .maxlen = sizeof(int),
59790+ .mode = 0600,
59791+ .proc_handler = &proc_dointvec,
59792+ },
59793+#endif
59794+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59795+ {
59796+ .procname = "chroot_deny_chmod",
59797+ .data = &grsec_enable_chroot_chmod,
59798+ .maxlen = sizeof(int),
59799+ .mode = 0600,
59800+ .proc_handler = &proc_dointvec,
59801+ },
59802+#endif
59803+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59804+ {
59805+ .procname = "chroot_deny_mknod",
59806+ .data = &grsec_enable_chroot_mknod,
59807+ .maxlen = sizeof(int),
59808+ .mode = 0600,
59809+ .proc_handler = &proc_dointvec,
59810+ },
59811+#endif
59812+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59813+ {
59814+ .procname = "chroot_restrict_nice",
59815+ .data = &grsec_enable_chroot_nice,
59816+ .maxlen = sizeof(int),
59817+ .mode = 0600,
59818+ .proc_handler = &proc_dointvec,
59819+ },
59820+#endif
59821+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59822+ {
59823+ .procname = "chroot_execlog",
59824+ .data = &grsec_enable_chroot_execlog,
59825+ .maxlen = sizeof(int),
59826+ .mode = 0600,
59827+ .proc_handler = &proc_dointvec,
59828+ },
59829+#endif
59830+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59831+ {
59832+ .procname = "chroot_caps",
59833+ .data = &grsec_enable_chroot_caps,
59834+ .maxlen = sizeof(int),
59835+ .mode = 0600,
59836+ .proc_handler = &proc_dointvec,
59837+ },
59838+#endif
59839+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59840+ {
59841+ .procname = "chroot_deny_sysctl",
59842+ .data = &grsec_enable_chroot_sysctl,
59843+ .maxlen = sizeof(int),
59844+ .mode = 0600,
59845+ .proc_handler = &proc_dointvec,
59846+ },
59847+#endif
59848+#ifdef CONFIG_GRKERNSEC_TPE
59849+ {
59850+ .procname = "tpe",
59851+ .data = &grsec_enable_tpe,
59852+ .maxlen = sizeof(int),
59853+ .mode = 0600,
59854+ .proc_handler = &proc_dointvec,
59855+ },
59856+ {
59857+ .procname = "tpe_gid",
59858+ .data = &grsec_tpe_gid,
59859+ .maxlen = sizeof(int),
59860+ .mode = 0600,
59861+ .proc_handler = &proc_dointvec,
59862+ },
59863+#endif
59864+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59865+ {
59866+ .procname = "tpe_invert",
59867+ .data = &grsec_enable_tpe_invert,
59868+ .maxlen = sizeof(int),
59869+ .mode = 0600,
59870+ .proc_handler = &proc_dointvec,
59871+ },
59872+#endif
59873+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59874+ {
59875+ .procname = "tpe_restrict_all",
59876+ .data = &grsec_enable_tpe_all,
59877+ .maxlen = sizeof(int),
59878+ .mode = 0600,
59879+ .proc_handler = &proc_dointvec,
59880+ },
59881+#endif
59882+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59883+ {
59884+ .procname = "socket_all",
59885+ .data = &grsec_enable_socket_all,
59886+ .maxlen = sizeof(int),
59887+ .mode = 0600,
59888+ .proc_handler = &proc_dointvec,
59889+ },
59890+ {
59891+ .procname = "socket_all_gid",
59892+ .data = &grsec_socket_all_gid,
59893+ .maxlen = sizeof(int),
59894+ .mode = 0600,
59895+ .proc_handler = &proc_dointvec,
59896+ },
59897+#endif
59898+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59899+ {
59900+ .procname = "socket_client",
59901+ .data = &grsec_enable_socket_client,
59902+ .maxlen = sizeof(int),
59903+ .mode = 0600,
59904+ .proc_handler = &proc_dointvec,
59905+ },
59906+ {
59907+ .procname = "socket_client_gid",
59908+ .data = &grsec_socket_client_gid,
59909+ .maxlen = sizeof(int),
59910+ .mode = 0600,
59911+ .proc_handler = &proc_dointvec,
59912+ },
59913+#endif
59914+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59915+ {
59916+ .procname = "socket_server",
59917+ .data = &grsec_enable_socket_server,
59918+ .maxlen = sizeof(int),
59919+ .mode = 0600,
59920+ .proc_handler = &proc_dointvec,
59921+ },
59922+ {
59923+ .procname = "socket_server_gid",
59924+ .data = &grsec_socket_server_gid,
59925+ .maxlen = sizeof(int),
59926+ .mode = 0600,
59927+ .proc_handler = &proc_dointvec,
59928+ },
59929+#endif
59930+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59931+ {
59932+ .procname = "audit_group",
59933+ .data = &grsec_enable_group,
59934+ .maxlen = sizeof(int),
59935+ .mode = 0600,
59936+ .proc_handler = &proc_dointvec,
59937+ },
59938+ {
59939+ .procname = "audit_gid",
59940+ .data = &grsec_audit_gid,
59941+ .maxlen = sizeof(int),
59942+ .mode = 0600,
59943+ .proc_handler = &proc_dointvec,
59944+ },
59945+#endif
59946+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59947+ {
59948+ .procname = "audit_chdir",
59949+ .data = &grsec_enable_chdir,
59950+ .maxlen = sizeof(int),
59951+ .mode = 0600,
59952+ .proc_handler = &proc_dointvec,
59953+ },
59954+#endif
59955+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59956+ {
59957+ .procname = "audit_mount",
59958+ .data = &grsec_enable_mount,
59959+ .maxlen = sizeof(int),
59960+ .mode = 0600,
59961+ .proc_handler = &proc_dointvec,
59962+ },
59963+#endif
59964+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59965+ {
59966+ .procname = "audit_textrel",
59967+ .data = &grsec_enable_audit_textrel,
59968+ .maxlen = sizeof(int),
59969+ .mode = 0600,
59970+ .proc_handler = &proc_dointvec,
59971+ },
59972+#endif
59973+#ifdef CONFIG_GRKERNSEC_DMESG
59974+ {
59975+ .procname = "dmesg",
59976+ .data = &grsec_enable_dmesg,
59977+ .maxlen = sizeof(int),
59978+ .mode = 0600,
59979+ .proc_handler = &proc_dointvec,
59980+ },
59981+#endif
59982+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59983+ {
59984+ .procname = "chroot_findtask",
59985+ .data = &grsec_enable_chroot_findtask,
59986+ .maxlen = sizeof(int),
59987+ .mode = 0600,
59988+ .proc_handler = &proc_dointvec,
59989+ },
59990+#endif
59991+#ifdef CONFIG_GRKERNSEC_RESLOG
59992+ {
59993+ .procname = "resource_logging",
59994+ .data = &grsec_resource_logging,
59995+ .maxlen = sizeof(int),
59996+ .mode = 0600,
59997+ .proc_handler = &proc_dointvec,
59998+ },
59999+#endif
60000+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60001+ {
60002+ .procname = "audit_ptrace",
60003+ .data = &grsec_enable_audit_ptrace,
60004+ .maxlen = sizeof(int),
60005+ .mode = 0600,
60006+ .proc_handler = &proc_dointvec,
60007+ },
60008+#endif
60009+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60010+ {
60011+ .procname = "harden_ptrace",
60012+ .data = &grsec_enable_harden_ptrace,
60013+ .maxlen = sizeof(int),
60014+ .mode = 0600,
60015+ .proc_handler = &proc_dointvec,
60016+ },
60017+#endif
60018+ {
60019+ .procname = "grsec_lock",
60020+ .data = &grsec_lock,
60021+ .maxlen = sizeof(int),
60022+ .mode = 0600,
60023+ .proc_handler = &proc_dointvec,
60024+ },
60025+#endif
60026+#ifdef CONFIG_GRKERNSEC_ROFS
60027+ {
60028+ .procname = "romount_protect",
60029+ .data = &grsec_enable_rofs,
60030+ .maxlen = sizeof(int),
60031+ .mode = 0600,
60032+ .proc_handler = &proc_dointvec_minmax,
60033+ .extra1 = &one,
60034+ .extra2 = &one,
60035+ },
60036+#endif
60037+ { }
60038+};
60039+#endif
60040diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
60041new file mode 100644
60042index 0000000..0dc13c3
60043--- /dev/null
60044+++ b/grsecurity/grsec_time.c
60045@@ -0,0 +1,16 @@
60046+#include <linux/kernel.h>
60047+#include <linux/sched.h>
60048+#include <linux/grinternal.h>
60049+#include <linux/module.h>
60050+
60051+void
60052+gr_log_timechange(void)
60053+{
60054+#ifdef CONFIG_GRKERNSEC_TIME
60055+ if (grsec_enable_time)
60056+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
60057+#endif
60058+ return;
60059+}
60060+
60061+EXPORT_SYMBOL(gr_log_timechange);
60062diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
60063new file mode 100644
60064index 0000000..4a78774
60065--- /dev/null
60066+++ b/grsecurity/grsec_tpe.c
60067@@ -0,0 +1,39 @@
60068+#include <linux/kernel.h>
60069+#include <linux/sched.h>
60070+#include <linux/file.h>
60071+#include <linux/fs.h>
60072+#include <linux/grinternal.h>
60073+
60074+extern int gr_acl_tpe_check(void);
60075+
60076+int
60077+gr_tpe_allow(const struct file *file)
60078+{
60079+#ifdef CONFIG_GRKERNSEC
60080+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
60081+ const struct cred *cred = current_cred();
60082+
60083+ if (cred->uid && ((grsec_enable_tpe &&
60084+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60085+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
60086+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
60087+#else
60088+ in_group_p(grsec_tpe_gid)
60089+#endif
60090+ ) || gr_acl_tpe_check()) &&
60091+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
60092+ (inode->i_mode & S_IWOTH))))) {
60093+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
60094+ return 0;
60095+ }
60096+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60097+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
60098+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
60099+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
60100+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
60101+ return 0;
60102+ }
60103+#endif
60104+#endif
60105+ return 1;
60106+}
60107diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60108new file mode 100644
60109index 0000000..9f7b1ac
60110--- /dev/null
60111+++ b/grsecurity/grsum.c
60112@@ -0,0 +1,61 @@
60113+#include <linux/err.h>
60114+#include <linux/kernel.h>
60115+#include <linux/sched.h>
60116+#include <linux/mm.h>
60117+#include <linux/scatterlist.h>
60118+#include <linux/crypto.h>
60119+#include <linux/gracl.h>
60120+
60121+
60122+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60123+#error "crypto and sha256 must be built into the kernel"
60124+#endif
60125+
60126+int
60127+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60128+{
60129+ char *p;
60130+ struct crypto_hash *tfm;
60131+ struct hash_desc desc;
60132+ struct scatterlist sg;
60133+ unsigned char temp_sum[GR_SHA_LEN];
60134+ volatile int retval = 0;
60135+ volatile int dummy = 0;
60136+ unsigned int i;
60137+
60138+ sg_init_table(&sg, 1);
60139+
60140+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60141+ if (IS_ERR(tfm)) {
60142+ /* should never happen, since sha256 should be built in */
60143+ return 1;
60144+ }
60145+
60146+ desc.tfm = tfm;
60147+ desc.flags = 0;
60148+
60149+ crypto_hash_init(&desc);
60150+
60151+ p = salt;
60152+ sg_set_buf(&sg, p, GR_SALT_LEN);
60153+ crypto_hash_update(&desc, &sg, sg.length);
60154+
60155+ p = entry->pw;
60156+ sg_set_buf(&sg, p, strlen(p));
60157+
60158+ crypto_hash_update(&desc, &sg, sg.length);
60159+
60160+ crypto_hash_final(&desc, temp_sum);
60161+
60162+ memset(entry->pw, 0, GR_PW_LEN);
60163+
60164+ for (i = 0; i < GR_SHA_LEN; i++)
60165+ if (sum[i] != temp_sum[i])
60166+ retval = 1;
60167+ else
60168+ dummy = 1; // waste a cycle
60169+
60170+ crypto_free_hash(tfm);
60171+
60172+ return retval;
60173+}
60174diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60175index 6cd5b64..f620d2d 100644
60176--- a/include/acpi/acpi_bus.h
60177+++ b/include/acpi/acpi_bus.h
60178@@ -107,7 +107,7 @@ struct acpi_device_ops {
60179 acpi_op_bind bind;
60180 acpi_op_unbind unbind;
60181 acpi_op_notify notify;
60182-};
60183+} __no_const;
60184
60185 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60186
60187diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60188index b7babf0..71e4e74 100644
60189--- a/include/asm-generic/atomic-long.h
60190+++ b/include/asm-generic/atomic-long.h
60191@@ -22,6 +22,12 @@
60192
60193 typedef atomic64_t atomic_long_t;
60194
60195+#ifdef CONFIG_PAX_REFCOUNT
60196+typedef atomic64_unchecked_t atomic_long_unchecked_t;
60197+#else
60198+typedef atomic64_t atomic_long_unchecked_t;
60199+#endif
60200+
60201 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60202
60203 static inline long atomic_long_read(atomic_long_t *l)
60204@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60205 return (long)atomic64_read(v);
60206 }
60207
60208+#ifdef CONFIG_PAX_REFCOUNT
60209+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60210+{
60211+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60212+
60213+ return (long)atomic64_read_unchecked(v);
60214+}
60215+#endif
60216+
60217 static inline void atomic_long_set(atomic_long_t *l, long i)
60218 {
60219 atomic64_t *v = (atomic64_t *)l;
60220@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60221 atomic64_set(v, i);
60222 }
60223
60224+#ifdef CONFIG_PAX_REFCOUNT
60225+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60226+{
60227+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60228+
60229+ atomic64_set_unchecked(v, i);
60230+}
60231+#endif
60232+
60233 static inline void atomic_long_inc(atomic_long_t *l)
60234 {
60235 atomic64_t *v = (atomic64_t *)l;
60236@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60237 atomic64_inc(v);
60238 }
60239
60240+#ifdef CONFIG_PAX_REFCOUNT
60241+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60242+{
60243+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60244+
60245+ atomic64_inc_unchecked(v);
60246+}
60247+#endif
60248+
60249 static inline void atomic_long_dec(atomic_long_t *l)
60250 {
60251 atomic64_t *v = (atomic64_t *)l;
60252@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60253 atomic64_dec(v);
60254 }
60255
60256+#ifdef CONFIG_PAX_REFCOUNT
60257+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60258+{
60259+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60260+
60261+ atomic64_dec_unchecked(v);
60262+}
60263+#endif
60264+
60265 static inline void atomic_long_add(long i, atomic_long_t *l)
60266 {
60267 atomic64_t *v = (atomic64_t *)l;
60268@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60269 atomic64_add(i, v);
60270 }
60271
60272+#ifdef CONFIG_PAX_REFCOUNT
60273+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60274+{
60275+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60276+
60277+ atomic64_add_unchecked(i, v);
60278+}
60279+#endif
60280+
60281 static inline void atomic_long_sub(long i, atomic_long_t *l)
60282 {
60283 atomic64_t *v = (atomic64_t *)l;
60284@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60285 atomic64_sub(i, v);
60286 }
60287
60288+#ifdef CONFIG_PAX_REFCOUNT
60289+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60290+{
60291+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60292+
60293+ atomic64_sub_unchecked(i, v);
60294+}
60295+#endif
60296+
60297 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60298 {
60299 atomic64_t *v = (atomic64_t *)l;
60300@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60301 return (long)atomic64_inc_return(v);
60302 }
60303
60304+#ifdef CONFIG_PAX_REFCOUNT
60305+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60306+{
60307+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60308+
60309+ return (long)atomic64_inc_return_unchecked(v);
60310+}
60311+#endif
60312+
60313 static inline long atomic_long_dec_return(atomic_long_t *l)
60314 {
60315 atomic64_t *v = (atomic64_t *)l;
60316@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60317
60318 typedef atomic_t atomic_long_t;
60319
60320+#ifdef CONFIG_PAX_REFCOUNT
60321+typedef atomic_unchecked_t atomic_long_unchecked_t;
60322+#else
60323+typedef atomic_t atomic_long_unchecked_t;
60324+#endif
60325+
60326 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60327 static inline long atomic_long_read(atomic_long_t *l)
60328 {
60329@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60330 return (long)atomic_read(v);
60331 }
60332
60333+#ifdef CONFIG_PAX_REFCOUNT
60334+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60335+{
60336+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60337+
60338+ return (long)atomic_read_unchecked(v);
60339+}
60340+#endif
60341+
60342 static inline void atomic_long_set(atomic_long_t *l, long i)
60343 {
60344 atomic_t *v = (atomic_t *)l;
60345@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60346 atomic_set(v, i);
60347 }
60348
60349+#ifdef CONFIG_PAX_REFCOUNT
60350+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60351+{
60352+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60353+
60354+ atomic_set_unchecked(v, i);
60355+}
60356+#endif
60357+
60358 static inline void atomic_long_inc(atomic_long_t *l)
60359 {
60360 atomic_t *v = (atomic_t *)l;
60361@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60362 atomic_inc(v);
60363 }
60364
60365+#ifdef CONFIG_PAX_REFCOUNT
60366+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60367+{
60368+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60369+
60370+ atomic_inc_unchecked(v);
60371+}
60372+#endif
60373+
60374 static inline void atomic_long_dec(atomic_long_t *l)
60375 {
60376 atomic_t *v = (atomic_t *)l;
60377@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60378 atomic_dec(v);
60379 }
60380
60381+#ifdef CONFIG_PAX_REFCOUNT
60382+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60383+{
60384+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60385+
60386+ atomic_dec_unchecked(v);
60387+}
60388+#endif
60389+
60390 static inline void atomic_long_add(long i, atomic_long_t *l)
60391 {
60392 atomic_t *v = (atomic_t *)l;
60393@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60394 atomic_add(i, v);
60395 }
60396
60397+#ifdef CONFIG_PAX_REFCOUNT
60398+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60399+{
60400+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60401+
60402+ atomic_add_unchecked(i, v);
60403+}
60404+#endif
60405+
60406 static inline void atomic_long_sub(long i, atomic_long_t *l)
60407 {
60408 atomic_t *v = (atomic_t *)l;
60409@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60410 atomic_sub(i, v);
60411 }
60412
60413+#ifdef CONFIG_PAX_REFCOUNT
60414+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60415+{
60416+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60417+
60418+ atomic_sub_unchecked(i, v);
60419+}
60420+#endif
60421+
60422 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60423 {
60424 atomic_t *v = (atomic_t *)l;
60425@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60426 return (long)atomic_inc_return(v);
60427 }
60428
60429+#ifdef CONFIG_PAX_REFCOUNT
60430+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60431+{
60432+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60433+
60434+ return (long)atomic_inc_return_unchecked(v);
60435+}
60436+#endif
60437+
60438 static inline long atomic_long_dec_return(atomic_long_t *l)
60439 {
60440 atomic_t *v = (atomic_t *)l;
60441@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60442
60443 #endif /* BITS_PER_LONG == 64 */
60444
60445+#ifdef CONFIG_PAX_REFCOUNT
60446+static inline void pax_refcount_needs_these_functions(void)
60447+{
60448+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
60449+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60450+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60451+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60452+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60453+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60454+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60455+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60456+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60457+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60458+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60459+
60460+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60461+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60462+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60463+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60464+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60465+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60466+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60467+}
60468+#else
60469+#define atomic_read_unchecked(v) atomic_read(v)
60470+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60471+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60472+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60473+#define atomic_inc_unchecked(v) atomic_inc(v)
60474+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60475+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60476+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60477+#define atomic_dec_unchecked(v) atomic_dec(v)
60478+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60479+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60480+
60481+#define atomic_long_read_unchecked(v) atomic_long_read(v)
60482+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60483+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60484+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60485+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60486+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60487+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60488+#endif
60489+
60490 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60491diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60492index b18ce4f..2ee2843 100644
60493--- a/include/asm-generic/atomic64.h
60494+++ b/include/asm-generic/atomic64.h
60495@@ -16,6 +16,8 @@ typedef struct {
60496 long long counter;
60497 } atomic64_t;
60498
60499+typedef atomic64_t atomic64_unchecked_t;
60500+
60501 #define ATOMIC64_INIT(i) { (i) }
60502
60503 extern long long atomic64_read(const atomic64_t *v);
60504@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60505 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60506 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60507
60508+#define atomic64_read_unchecked(v) atomic64_read(v)
60509+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60510+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60511+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60512+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60513+#define atomic64_inc_unchecked(v) atomic64_inc(v)
60514+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60515+#define atomic64_dec_unchecked(v) atomic64_dec(v)
60516+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60517+
60518 #endif /* _ASM_GENERIC_ATOMIC64_H */
60519diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60520index 1bfcfe5..e04c5c9 100644
60521--- a/include/asm-generic/cache.h
60522+++ b/include/asm-generic/cache.h
60523@@ -6,7 +6,7 @@
60524 * cache lines need to provide their own cache.h.
60525 */
60526
60527-#define L1_CACHE_SHIFT 5
60528-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60529+#define L1_CACHE_SHIFT 5UL
60530+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60531
60532 #endif /* __ASM_GENERIC_CACHE_H */
60533diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
60534index 1ca3efc..e3dc852 100644
60535--- a/include/asm-generic/int-l64.h
60536+++ b/include/asm-generic/int-l64.h
60537@@ -46,6 +46,8 @@ typedef unsigned int u32;
60538 typedef signed long s64;
60539 typedef unsigned long u64;
60540
60541+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
60542+
60543 #define S8_C(x) x
60544 #define U8_C(x) x ## U
60545 #define S16_C(x) x
60546diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
60547index f394147..b6152b9 100644
60548--- a/include/asm-generic/int-ll64.h
60549+++ b/include/asm-generic/int-ll64.h
60550@@ -51,6 +51,8 @@ typedef unsigned int u32;
60551 typedef signed long long s64;
60552 typedef unsigned long long u64;
60553
60554+typedef unsigned long long intoverflow_t;
60555+
60556 #define S8_C(x) x
60557 #define U8_C(x) x ## U
60558 #define S16_C(x) x
60559diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60560index 0232ccb..13d9165 100644
60561--- a/include/asm-generic/kmap_types.h
60562+++ b/include/asm-generic/kmap_types.h
60563@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60564 KMAP_D(17) KM_NMI,
60565 KMAP_D(18) KM_NMI_PTE,
60566 KMAP_D(19) KM_KDB,
60567+KMAP_D(20) KM_CLEARPAGE,
60568 /*
60569 * Remember to update debug_kmap_atomic() when adding new kmap types!
60570 */
60571-KMAP_D(20) KM_TYPE_NR
60572+KMAP_D(21) KM_TYPE_NR
60573 };
60574
60575 #undef KMAP_D
60576diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60577index 725612b..9cc513a 100644
60578--- a/include/asm-generic/pgtable-nopmd.h
60579+++ b/include/asm-generic/pgtable-nopmd.h
60580@@ -1,14 +1,19 @@
60581 #ifndef _PGTABLE_NOPMD_H
60582 #define _PGTABLE_NOPMD_H
60583
60584-#ifndef __ASSEMBLY__
60585-
60586 #include <asm-generic/pgtable-nopud.h>
60587
60588-struct mm_struct;
60589-
60590 #define __PAGETABLE_PMD_FOLDED
60591
60592+#define PMD_SHIFT PUD_SHIFT
60593+#define PTRS_PER_PMD 1
60594+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60595+#define PMD_MASK (~(PMD_SIZE-1))
60596+
60597+#ifndef __ASSEMBLY__
60598+
60599+struct mm_struct;
60600+
60601 /*
60602 * Having the pmd type consist of a pud gets the size right, and allows
60603 * us to conceptually access the pud entry that this pmd is folded into
60604@@ -16,11 +21,6 @@ struct mm_struct;
60605 */
60606 typedef struct { pud_t pud; } pmd_t;
60607
60608-#define PMD_SHIFT PUD_SHIFT
60609-#define PTRS_PER_PMD 1
60610-#define PMD_SIZE (1UL << PMD_SHIFT)
60611-#define PMD_MASK (~(PMD_SIZE-1))
60612-
60613 /*
60614 * The "pud_xxx()" functions here are trivial for a folded two-level
60615 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60616diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60617index 810431d..ccc3638 100644
60618--- a/include/asm-generic/pgtable-nopud.h
60619+++ b/include/asm-generic/pgtable-nopud.h
60620@@ -1,10 +1,15 @@
60621 #ifndef _PGTABLE_NOPUD_H
60622 #define _PGTABLE_NOPUD_H
60623
60624-#ifndef __ASSEMBLY__
60625-
60626 #define __PAGETABLE_PUD_FOLDED
60627
60628+#define PUD_SHIFT PGDIR_SHIFT
60629+#define PTRS_PER_PUD 1
60630+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60631+#define PUD_MASK (~(PUD_SIZE-1))
60632+
60633+#ifndef __ASSEMBLY__
60634+
60635 /*
60636 * Having the pud type consist of a pgd gets the size right, and allows
60637 * us to conceptually access the pgd entry that this pud is folded into
60638@@ -12,11 +17,6 @@
60639 */
60640 typedef struct { pgd_t pgd; } pud_t;
60641
60642-#define PUD_SHIFT PGDIR_SHIFT
60643-#define PTRS_PER_PUD 1
60644-#define PUD_SIZE (1UL << PUD_SHIFT)
60645-#define PUD_MASK (~(PUD_SIZE-1))
60646-
60647 /*
60648 * The "pgd_xxx()" functions here are trivial for a folded two-level
60649 * setup: the pud is never bad, and a pud always exists (as it's folded
60650diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60651index 76bff2b..c7a14e2 100644
60652--- a/include/asm-generic/pgtable.h
60653+++ b/include/asm-generic/pgtable.h
60654@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
60655 #endif /* __HAVE_ARCH_PMD_WRITE */
60656 #endif
60657
60658+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60659+static inline unsigned long pax_open_kernel(void) { return 0; }
60660+#endif
60661+
60662+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60663+static inline unsigned long pax_close_kernel(void) { return 0; }
60664+#endif
60665+
60666 #endif /* !__ASSEMBLY__ */
60667
60668 #endif /* _ASM_GENERIC_PGTABLE_H */
60669diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60670index db22d13..1f2e3e1 100644
60671--- a/include/asm-generic/vmlinux.lds.h
60672+++ b/include/asm-generic/vmlinux.lds.h
60673@@ -217,6 +217,7 @@
60674 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60675 VMLINUX_SYMBOL(__start_rodata) = .; \
60676 *(.rodata) *(.rodata.*) \
60677+ *(.data..read_only) \
60678 *(__vermagic) /* Kernel version magic */ \
60679 . = ALIGN(8); \
60680 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60681@@ -723,17 +724,18 @@
60682 * section in the linker script will go there too. @phdr should have
60683 * a leading colon.
60684 *
60685- * Note that this macros defines __per_cpu_load as an absolute symbol.
60686+ * Note that this macros defines per_cpu_load as an absolute symbol.
60687 * If there is no need to put the percpu section at a predetermined
60688 * address, use PERCPU_SECTION.
60689 */
60690 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60691- VMLINUX_SYMBOL(__per_cpu_load) = .; \
60692- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60693+ per_cpu_load = .; \
60694+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60695 - LOAD_OFFSET) { \
60696+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60697 PERCPU_INPUT(cacheline) \
60698 } phdr \
60699- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60700+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60701
60702 /**
60703 * PERCPU_SECTION - define output section for percpu area, simple version
60704diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60705index 9b7c2bb..76b7d1e 100644
60706--- a/include/drm/drmP.h
60707+++ b/include/drm/drmP.h
60708@@ -73,6 +73,7 @@
60709 #include <linux/workqueue.h>
60710 #include <linux/poll.h>
60711 #include <asm/pgalloc.h>
60712+#include <asm/local.h>
60713 #include "drm.h"
60714
60715 #include <linux/idr.h>
60716@@ -1035,7 +1036,7 @@ struct drm_device {
60717
60718 /** \name Usage Counters */
60719 /*@{ */
60720- int open_count; /**< Outstanding files open */
60721+ local_t open_count; /**< Outstanding files open */
60722 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60723 atomic_t vma_count; /**< Outstanding vma areas open */
60724 int buf_use; /**< Buffers in use -- cannot alloc */
60725@@ -1046,7 +1047,7 @@ struct drm_device {
60726 /*@{ */
60727 unsigned long counters;
60728 enum drm_stat_type types[15];
60729- atomic_t counts[15];
60730+ atomic_unchecked_t counts[15];
60731 /*@} */
60732
60733 struct list_head filelist;
60734diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60735index 73b0712..0b7ef2f 100644
60736--- a/include/drm/drm_crtc_helper.h
60737+++ b/include/drm/drm_crtc_helper.h
60738@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60739
60740 /* disable crtc when not in use - more explicit than dpms off */
60741 void (*disable)(struct drm_crtc *crtc);
60742-};
60743+} __no_const;
60744
60745 struct drm_encoder_helper_funcs {
60746 void (*dpms)(struct drm_encoder *encoder, int mode);
60747@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60748 struct drm_connector *connector);
60749 /* disable encoder when not in use - more explicit than dpms off */
60750 void (*disable)(struct drm_encoder *encoder);
60751-};
60752+} __no_const;
60753
60754 struct drm_connector_helper_funcs {
60755 int (*get_modes)(struct drm_connector *connector);
60756diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60757index 26c1f78..6722682 100644
60758--- a/include/drm/ttm/ttm_memory.h
60759+++ b/include/drm/ttm/ttm_memory.h
60760@@ -47,7 +47,7 @@
60761
60762 struct ttm_mem_shrink {
60763 int (*do_shrink) (struct ttm_mem_shrink *);
60764-};
60765+} __no_const;
60766
60767 /**
60768 * struct ttm_mem_global - Global memory accounting structure.
60769diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60770index e86dfca..40cc55f 100644
60771--- a/include/linux/a.out.h
60772+++ b/include/linux/a.out.h
60773@@ -39,6 +39,14 @@ enum machine_type {
60774 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60775 };
60776
60777+/* Constants for the N_FLAGS field */
60778+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60779+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60780+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60781+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60782+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60783+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60784+
60785 #if !defined (N_MAGIC)
60786 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60787 #endif
60788diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60789index 49a83ca..df96b54 100644
60790--- a/include/linux/atmdev.h
60791+++ b/include/linux/atmdev.h
60792@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60793 #endif
60794
60795 struct k_atm_aal_stats {
60796-#define __HANDLE_ITEM(i) atomic_t i
60797+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60798 __AAL_STAT_ITEMS
60799 #undef __HANDLE_ITEM
60800 };
60801diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60802index fd88a39..f4d0bad 100644
60803--- a/include/linux/binfmts.h
60804+++ b/include/linux/binfmts.h
60805@@ -88,6 +88,7 @@ struct linux_binfmt {
60806 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60807 int (*load_shlib)(struct file *);
60808 int (*core_dump)(struct coredump_params *cprm);
60809+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60810 unsigned long min_coredump; /* minimal dump size */
60811 };
60812
60813diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60814index 5e30b45..5b41b49 100644
60815--- a/include/linux/blkdev.h
60816+++ b/include/linux/blkdev.h
60817@@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
60818 struct request *rq);
60819 extern void blk_delay_queue(struct request_queue *, unsigned long);
60820 extern void blk_recount_segments(struct request_queue *, struct bio *);
60821+extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
60822+extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
60823+ unsigned int, void __user *);
60824 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
60825 unsigned int, void __user *);
60826 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
60827@@ -1318,7 +1321,7 @@ struct block_device_operations {
60828 /* this callback is with swap_lock and sometimes page table lock held */
60829 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60830 struct module *owner;
60831-};
60832+} __do_const;
60833
60834 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60835 unsigned long);
60836diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60837index 8e9e4bc..88bd457 100644
60838--- a/include/linux/blktrace_api.h
60839+++ b/include/linux/blktrace_api.h
60840@@ -162,7 +162,7 @@ struct blk_trace {
60841 struct dentry *dir;
60842 struct dentry *dropped_file;
60843 struct dentry *msg_file;
60844- atomic_t dropped;
60845+ atomic_unchecked_t dropped;
60846 };
60847
60848 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60849diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60850index 83195fb..0b0f77d 100644
60851--- a/include/linux/byteorder/little_endian.h
60852+++ b/include/linux/byteorder/little_endian.h
60853@@ -42,51 +42,51 @@
60854
60855 static inline __le64 __cpu_to_le64p(const __u64 *p)
60856 {
60857- return (__force __le64)*p;
60858+ return (__force const __le64)*p;
60859 }
60860 static inline __u64 __le64_to_cpup(const __le64 *p)
60861 {
60862- return (__force __u64)*p;
60863+ return (__force const __u64)*p;
60864 }
60865 static inline __le32 __cpu_to_le32p(const __u32 *p)
60866 {
60867- return (__force __le32)*p;
60868+ return (__force const __le32)*p;
60869 }
60870 static inline __u32 __le32_to_cpup(const __le32 *p)
60871 {
60872- return (__force __u32)*p;
60873+ return (__force const __u32)*p;
60874 }
60875 static inline __le16 __cpu_to_le16p(const __u16 *p)
60876 {
60877- return (__force __le16)*p;
60878+ return (__force const __le16)*p;
60879 }
60880 static inline __u16 __le16_to_cpup(const __le16 *p)
60881 {
60882- return (__force __u16)*p;
60883+ return (__force const __u16)*p;
60884 }
60885 static inline __be64 __cpu_to_be64p(const __u64 *p)
60886 {
60887- return (__force __be64)__swab64p(p);
60888+ return (__force const __be64)__swab64p(p);
60889 }
60890 static inline __u64 __be64_to_cpup(const __be64 *p)
60891 {
60892- return __swab64p((__u64 *)p);
60893+ return __swab64p((const __u64 *)p);
60894 }
60895 static inline __be32 __cpu_to_be32p(const __u32 *p)
60896 {
60897- return (__force __be32)__swab32p(p);
60898+ return (__force const __be32)__swab32p(p);
60899 }
60900 static inline __u32 __be32_to_cpup(const __be32 *p)
60901 {
60902- return __swab32p((__u32 *)p);
60903+ return __swab32p((const __u32 *)p);
60904 }
60905 static inline __be16 __cpu_to_be16p(const __u16 *p)
60906 {
60907- return (__force __be16)__swab16p(p);
60908+ return (__force const __be16)__swab16p(p);
60909 }
60910 static inline __u16 __be16_to_cpup(const __be16 *p)
60911 {
60912- return __swab16p((__u16 *)p);
60913+ return __swab16p((const __u16 *)p);
60914 }
60915 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60916 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60917diff --git a/include/linux/cache.h b/include/linux/cache.h
60918index 4c57065..4307975 100644
60919--- a/include/linux/cache.h
60920+++ b/include/linux/cache.h
60921@@ -16,6 +16,10 @@
60922 #define __read_mostly
60923 #endif
60924
60925+#ifndef __read_only
60926+#define __read_only __read_mostly
60927+#endif
60928+
60929 #ifndef ____cacheline_aligned
60930 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60931 #endif
60932diff --git a/include/linux/capability.h b/include/linux/capability.h
60933index c421123..e343179 100644
60934--- a/include/linux/capability.h
60935+++ b/include/linux/capability.h
60936@@ -547,6 +547,9 @@ extern bool capable(int cap);
60937 extern bool ns_capable(struct user_namespace *ns, int cap);
60938 extern bool task_ns_capable(struct task_struct *t, int cap);
60939 extern bool nsown_capable(int cap);
60940+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
60941+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60942+extern bool capable_nolog(int cap);
60943
60944 /* audit system wants to get cap info from files as well */
60945 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60946diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60947index 04ffb2e..6799180 100644
60948--- a/include/linux/cleancache.h
60949+++ b/include/linux/cleancache.h
60950@@ -31,7 +31,7 @@ struct cleancache_ops {
60951 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60952 void (*flush_inode)(int, struct cleancache_filekey);
60953 void (*flush_fs)(int);
60954-};
60955+} __no_const;
60956
60957 extern struct cleancache_ops
60958 cleancache_register_ops(struct cleancache_ops *ops);
60959diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60960index dfadc96..c0e70c1 100644
60961--- a/include/linux/compiler-gcc4.h
60962+++ b/include/linux/compiler-gcc4.h
60963@@ -31,6 +31,12 @@
60964
60965
60966 #if __GNUC_MINOR__ >= 5
60967+
60968+#ifdef CONSTIFY_PLUGIN
60969+#define __no_const __attribute__((no_const))
60970+#define __do_const __attribute__((do_const))
60971+#endif
60972+
60973 /*
60974 * Mark a position in code as unreachable. This can be used to
60975 * suppress control flow warnings after asm blocks that transfer
60976@@ -46,6 +52,11 @@
60977 #define __noclone __attribute__((__noclone__))
60978
60979 #endif
60980+
60981+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60982+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60983+#define __bos0(ptr) __bos((ptr), 0)
60984+#define __bos1(ptr) __bos((ptr), 1)
60985 #endif
60986
60987 #if __GNUC_MINOR__ > 0
60988diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60989index 320d6c9..8573a1c 100644
60990--- a/include/linux/compiler.h
60991+++ b/include/linux/compiler.h
60992@@ -5,31 +5,62 @@
60993
60994 #ifdef __CHECKER__
60995 # define __user __attribute__((noderef, address_space(1)))
60996+# define __force_user __force __user
60997 # define __kernel __attribute__((address_space(0)))
60998+# define __force_kernel __force __kernel
60999 # define __safe __attribute__((safe))
61000 # define __force __attribute__((force))
61001 # define __nocast __attribute__((nocast))
61002 # define __iomem __attribute__((noderef, address_space(2)))
61003+# define __force_iomem __force __iomem
61004 # define __acquires(x) __attribute__((context(x,0,1)))
61005 # define __releases(x) __attribute__((context(x,1,0)))
61006 # define __acquire(x) __context__(x,1)
61007 # define __release(x) __context__(x,-1)
61008 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61009 # define __percpu __attribute__((noderef, address_space(3)))
61010+# define __force_percpu __force __percpu
61011 #ifdef CONFIG_SPARSE_RCU_POINTER
61012 # define __rcu __attribute__((noderef, address_space(4)))
61013+# define __force_rcu __force __rcu
61014 #else
61015 # define __rcu
61016+# define __force_rcu
61017 #endif
61018 extern void __chk_user_ptr(const volatile void __user *);
61019 extern void __chk_io_ptr(const volatile void __iomem *);
61020+#elif defined(CHECKER_PLUGIN)
61021+//# define __user
61022+//# define __force_user
61023+//# define __kernel
61024+//# define __force_kernel
61025+# define __safe
61026+# define __force
61027+# define __nocast
61028+# define __iomem
61029+# define __force_iomem
61030+# define __chk_user_ptr(x) (void)0
61031+# define __chk_io_ptr(x) (void)0
61032+# define __builtin_warning(x, y...) (1)
61033+# define __acquires(x)
61034+# define __releases(x)
61035+# define __acquire(x) (void)0
61036+# define __release(x) (void)0
61037+# define __cond_lock(x,c) (c)
61038+# define __percpu
61039+# define __force_percpu
61040+# define __rcu
61041+# define __force_rcu
61042 #else
61043 # define __user
61044+# define __force_user
61045 # define __kernel
61046+# define __force_kernel
61047 # define __safe
61048 # define __force
61049 # define __nocast
61050 # define __iomem
61051+# define __force_iomem
61052 # define __chk_user_ptr(x) (void)0
61053 # define __chk_io_ptr(x) (void)0
61054 # define __builtin_warning(x, y...) (1)
61055@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
61056 # define __release(x) (void)0
61057 # define __cond_lock(x,c) (c)
61058 # define __percpu
61059+# define __force_percpu
61060 # define __rcu
61061+# define __force_rcu
61062 #endif
61063
61064 #ifdef __KERNEL__
61065@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61066 # define __attribute_const__ /* unimplemented */
61067 #endif
61068
61069+#ifndef __no_const
61070+# define __no_const
61071+#endif
61072+
61073+#ifndef __do_const
61074+# define __do_const
61075+#endif
61076+
61077 /*
61078 * Tell gcc if a function is cold. The compiler will assume any path
61079 * directly leading to the call is unlikely.
61080@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61081 #define __cold
61082 #endif
61083
61084+#ifndef __alloc_size
61085+#define __alloc_size(...)
61086+#endif
61087+
61088+#ifndef __bos
61089+#define __bos(ptr, arg)
61090+#endif
61091+
61092+#ifndef __bos0
61093+#define __bos0(ptr)
61094+#endif
61095+
61096+#ifndef __bos1
61097+#define __bos1(ptr)
61098+#endif
61099+
61100 /* Simple shorthand for a section definition */
61101 #ifndef __section
61102 # define __section(S) __attribute__ ((__section__(#S)))
61103@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61104 * use is to mediate communication between process-level code and irq/NMI
61105 * handlers, all running on the same CPU.
61106 */
61107-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61108+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61109+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61110
61111 #endif /* __LINUX_COMPILER_H */
61112diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
61113index e9eaec5..bfeb9bb 100644
61114--- a/include/linux/cpuset.h
61115+++ b/include/linux/cpuset.h
61116@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
61117 * nodemask.
61118 */
61119 smp_mb();
61120- --ACCESS_ONCE(current->mems_allowed_change_disable);
61121+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
61122 }
61123
61124 static inline void set_mems_allowed(nodemask_t nodemask)
61125diff --git a/include/linux/cred.h b/include/linux/cred.h
61126index 4030896..8d6f342 100644
61127--- a/include/linux/cred.h
61128+++ b/include/linux/cred.h
61129@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61130 static inline void validate_process_creds(void)
61131 {
61132 }
61133+static inline void validate_task_creds(struct task_struct *task)
61134+{
61135+}
61136 #endif
61137
61138 /**
61139diff --git a/include/linux/crypto.h b/include/linux/crypto.h
61140index e5e468e..f079672 100644
61141--- a/include/linux/crypto.h
61142+++ b/include/linux/crypto.h
61143@@ -361,7 +361,7 @@ struct cipher_tfm {
61144 const u8 *key, unsigned int keylen);
61145 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61146 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61147-};
61148+} __no_const;
61149
61150 struct hash_tfm {
61151 int (*init)(struct hash_desc *desc);
61152@@ -382,13 +382,13 @@ struct compress_tfm {
61153 int (*cot_decompress)(struct crypto_tfm *tfm,
61154 const u8 *src, unsigned int slen,
61155 u8 *dst, unsigned int *dlen);
61156-};
61157+} __no_const;
61158
61159 struct rng_tfm {
61160 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61161 unsigned int dlen);
61162 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61163-};
61164+} __no_const;
61165
61166 #define crt_ablkcipher crt_u.ablkcipher
61167 #define crt_aead crt_u.aead
61168diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61169index 7925bf0..d5143d2 100644
61170--- a/include/linux/decompress/mm.h
61171+++ b/include/linux/decompress/mm.h
61172@@ -77,7 +77,7 @@ static void free(void *where)
61173 * warnings when not needed (indeed large_malloc / large_free are not
61174 * needed by inflate */
61175
61176-#define malloc(a) kmalloc(a, GFP_KERNEL)
61177+#define malloc(a) kmalloc((a), GFP_KERNEL)
61178 #define free(a) kfree(a)
61179
61180 #define large_malloc(a) vmalloc(a)
61181diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61182index 347fdc3..cd01657 100644
61183--- a/include/linux/dma-mapping.h
61184+++ b/include/linux/dma-mapping.h
61185@@ -42,7 +42,7 @@ struct dma_map_ops {
61186 int (*dma_supported)(struct device *dev, u64 mask);
61187 int (*set_dma_mask)(struct device *dev, u64 mask);
61188 int is_phys;
61189-};
61190+} __do_const;
61191
61192 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61193
61194diff --git a/include/linux/efi.h b/include/linux/efi.h
61195index 2362a0b..cfaf8fcc 100644
61196--- a/include/linux/efi.h
61197+++ b/include/linux/efi.h
61198@@ -446,7 +446,7 @@ struct efivar_operations {
61199 efi_get_variable_t *get_variable;
61200 efi_get_next_variable_t *get_next_variable;
61201 efi_set_variable_t *set_variable;
61202-};
61203+} __no_const;
61204
61205 struct efivars {
61206 /*
61207diff --git a/include/linux/elf.h b/include/linux/elf.h
61208index 110821c..cb14c08 100644
61209--- a/include/linux/elf.h
61210+++ b/include/linux/elf.h
61211@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
61212 #define PT_GNU_EH_FRAME 0x6474e550
61213
61214 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61215+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61216+
61217+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61218+
61219+/* Constants for the e_flags field */
61220+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61221+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61222+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61223+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61224+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61225+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61226
61227 /*
61228 * Extended Numbering
61229@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
61230 #define DT_DEBUG 21
61231 #define DT_TEXTREL 22
61232 #define DT_JMPREL 23
61233+#define DT_FLAGS 30
61234+ #define DF_TEXTREL 0x00000004
61235 #define DT_ENCODING 32
61236 #define OLD_DT_LOOS 0x60000000
61237 #define DT_LOOS 0x6000000d
61238@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
61239 #define PF_W 0x2
61240 #define PF_X 0x1
61241
61242+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61243+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61244+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61245+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61246+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61247+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61248+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61249+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61250+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61251+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61252+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61253+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61254+
61255 typedef struct elf32_phdr{
61256 Elf32_Word p_type;
61257 Elf32_Off p_offset;
61258@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
61259 #define EI_OSABI 7
61260 #define EI_PAD 8
61261
61262+#define EI_PAX 14
61263+
61264 #define ELFMAG0 0x7f /* EI_MAG */
61265 #define ELFMAG1 'E'
61266 #define ELFMAG2 'L'
61267@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
61268 #define elf_note elf32_note
61269 #define elf_addr_t Elf32_Off
61270 #define Elf_Half Elf32_Half
61271+#define elf_dyn Elf32_Dyn
61272
61273 #else
61274
61275@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
61276 #define elf_note elf64_note
61277 #define elf_addr_t Elf64_Off
61278 #define Elf_Half Elf64_Half
61279+#define elf_dyn Elf64_Dyn
61280
61281 #endif
61282
61283diff --git a/include/linux/filter.h b/include/linux/filter.h
61284index 741956f..f02f482 100644
61285--- a/include/linux/filter.h
61286+++ b/include/linux/filter.h
61287@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
61288
61289 struct sk_buff;
61290 struct sock;
61291+struct bpf_jit_work;
61292
61293 struct sk_filter
61294 {
61295@@ -141,6 +142,9 @@ struct sk_filter
61296 unsigned int len; /* Number of filter blocks */
61297 unsigned int (*bpf_func)(const struct sk_buff *skb,
61298 const struct sock_filter *filter);
61299+#ifdef CONFIG_BPF_JIT
61300+ struct bpf_jit_work *work;
61301+#endif
61302 struct rcu_head rcu;
61303 struct sock_filter insns[0];
61304 };
61305diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61306index 84ccf8e..2e9b14c 100644
61307--- a/include/linux/firewire.h
61308+++ b/include/linux/firewire.h
61309@@ -428,7 +428,7 @@ struct fw_iso_context {
61310 union {
61311 fw_iso_callback_t sc;
61312 fw_iso_mc_callback_t mc;
61313- } callback;
61314+ } __no_const callback;
61315 void *callback_data;
61316 };
61317
61318diff --git a/include/linux/fs.h b/include/linux/fs.h
61319index cf7bc25..0d2babf 100644
61320--- a/include/linux/fs.h
61321+++ b/include/linux/fs.h
61322@@ -1588,7 +1588,8 @@ struct file_operations {
61323 int (*setlease)(struct file *, long, struct file_lock **);
61324 long (*fallocate)(struct file *file, int mode, loff_t offset,
61325 loff_t len);
61326-};
61327+} __do_const;
61328+typedef struct file_operations __no_const file_operations_no_const;
61329
61330 struct inode_operations {
61331 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61332diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61333index 003dc0f..3c4ea97 100644
61334--- a/include/linux/fs_struct.h
61335+++ b/include/linux/fs_struct.h
61336@@ -6,7 +6,7 @@
61337 #include <linux/seqlock.h>
61338
61339 struct fs_struct {
61340- int users;
61341+ atomic_t users;
61342 spinlock_t lock;
61343 seqcount_t seq;
61344 int umask;
61345diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61346index af095b5..cf1220c 100644
61347--- a/include/linux/fscache-cache.h
61348+++ b/include/linux/fscache-cache.h
61349@@ -102,7 +102,7 @@ struct fscache_operation {
61350 fscache_operation_release_t release;
61351 };
61352
61353-extern atomic_t fscache_op_debug_id;
61354+extern atomic_unchecked_t fscache_op_debug_id;
61355 extern void fscache_op_work_func(struct work_struct *work);
61356
61357 extern void fscache_enqueue_operation(struct fscache_operation *);
61358@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61359 {
61360 INIT_WORK(&op->work, fscache_op_work_func);
61361 atomic_set(&op->usage, 1);
61362- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61363+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61364 op->processor = processor;
61365 op->release = release;
61366 INIT_LIST_HEAD(&op->pend_link);
61367diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61368index 2a53f10..0187fdf 100644
61369--- a/include/linux/fsnotify.h
61370+++ b/include/linux/fsnotify.h
61371@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61372 */
61373 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61374 {
61375- return kstrdup(name, GFP_KERNEL);
61376+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61377 }
61378
61379 /*
61380diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61381index 91d0e0a3..035666b 100644
61382--- a/include/linux/fsnotify_backend.h
61383+++ b/include/linux/fsnotify_backend.h
61384@@ -105,6 +105,7 @@ struct fsnotify_ops {
61385 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61386 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61387 };
61388+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61389
61390 /*
61391 * A group is a "thing" that wants to receive notification about filesystem
61392diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61393index 96efa67..1261547 100644
61394--- a/include/linux/ftrace_event.h
61395+++ b/include/linux/ftrace_event.h
61396@@ -97,7 +97,7 @@ struct trace_event_functions {
61397 trace_print_func raw;
61398 trace_print_func hex;
61399 trace_print_func binary;
61400-};
61401+} __no_const;
61402
61403 struct trace_event {
61404 struct hlist_node node;
61405@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61406 extern int trace_add_event_call(struct ftrace_event_call *call);
61407 extern void trace_remove_event_call(struct ftrace_event_call *call);
61408
61409-#define is_signed_type(type) (((type)(-1)) < 0)
61410+#define is_signed_type(type) (((type)(-1)) < (type)1)
61411
61412 int trace_set_clr_event(const char *system, const char *event, int set);
61413
61414diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61415index 02fa469..a15f279 100644
61416--- a/include/linux/genhd.h
61417+++ b/include/linux/genhd.h
61418@@ -184,7 +184,7 @@ struct gendisk {
61419 struct kobject *slave_dir;
61420
61421 struct timer_rand_state *random;
61422- atomic_t sync_io; /* RAID */
61423+ atomic_unchecked_t sync_io; /* RAID */
61424 struct disk_events *ev;
61425 #ifdef CONFIG_BLK_DEV_INTEGRITY
61426 struct blk_integrity *integrity;
61427diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61428new file mode 100644
61429index 0000000..0dc3943
61430--- /dev/null
61431+++ b/include/linux/gracl.h
61432@@ -0,0 +1,317 @@
61433+#ifndef GR_ACL_H
61434+#define GR_ACL_H
61435+
61436+#include <linux/grdefs.h>
61437+#include <linux/resource.h>
61438+#include <linux/capability.h>
61439+#include <linux/dcache.h>
61440+#include <asm/resource.h>
61441+
61442+/* Major status information */
61443+
61444+#define GR_VERSION "grsecurity 2.2.2"
61445+#define GRSECURITY_VERSION 0x2202
61446+
61447+enum {
61448+ GR_SHUTDOWN = 0,
61449+ GR_ENABLE = 1,
61450+ GR_SPROLE = 2,
61451+ GR_RELOAD = 3,
61452+ GR_SEGVMOD = 4,
61453+ GR_STATUS = 5,
61454+ GR_UNSPROLE = 6,
61455+ GR_PASSSET = 7,
61456+ GR_SPROLEPAM = 8,
61457+};
61458+
61459+/* Password setup definitions
61460+ * kernel/grhash.c */
61461+enum {
61462+ GR_PW_LEN = 128,
61463+ GR_SALT_LEN = 16,
61464+ GR_SHA_LEN = 32,
61465+};
61466+
61467+enum {
61468+ GR_SPROLE_LEN = 64,
61469+};
61470+
61471+enum {
61472+ GR_NO_GLOB = 0,
61473+ GR_REG_GLOB,
61474+ GR_CREATE_GLOB
61475+};
61476+
61477+#define GR_NLIMITS 32
61478+
61479+/* Begin Data Structures */
61480+
61481+struct sprole_pw {
61482+ unsigned char *rolename;
61483+ unsigned char salt[GR_SALT_LEN];
61484+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61485+};
61486+
61487+struct name_entry {
61488+ __u32 key;
61489+ ino_t inode;
61490+ dev_t device;
61491+ char *name;
61492+ __u16 len;
61493+ __u8 deleted;
61494+ struct name_entry *prev;
61495+ struct name_entry *next;
61496+};
61497+
61498+struct inodev_entry {
61499+ struct name_entry *nentry;
61500+ struct inodev_entry *prev;
61501+ struct inodev_entry *next;
61502+};
61503+
61504+struct acl_role_db {
61505+ struct acl_role_label **r_hash;
61506+ __u32 r_size;
61507+};
61508+
61509+struct inodev_db {
61510+ struct inodev_entry **i_hash;
61511+ __u32 i_size;
61512+};
61513+
61514+struct name_db {
61515+ struct name_entry **n_hash;
61516+ __u32 n_size;
61517+};
61518+
61519+struct crash_uid {
61520+ uid_t uid;
61521+ unsigned long expires;
61522+};
61523+
61524+struct gr_hash_struct {
61525+ void **table;
61526+ void **nametable;
61527+ void *first;
61528+ __u32 table_size;
61529+ __u32 used_size;
61530+ int type;
61531+};
61532+
61533+/* Userspace Grsecurity ACL data structures */
61534+
61535+struct acl_subject_label {
61536+ char *filename;
61537+ ino_t inode;
61538+ dev_t device;
61539+ __u32 mode;
61540+ kernel_cap_t cap_mask;
61541+ kernel_cap_t cap_lower;
61542+ kernel_cap_t cap_invert_audit;
61543+
61544+ struct rlimit res[GR_NLIMITS];
61545+ __u32 resmask;
61546+
61547+ __u8 user_trans_type;
61548+ __u8 group_trans_type;
61549+ uid_t *user_transitions;
61550+ gid_t *group_transitions;
61551+ __u16 user_trans_num;
61552+ __u16 group_trans_num;
61553+
61554+ __u32 sock_families[2];
61555+ __u32 ip_proto[8];
61556+ __u32 ip_type;
61557+ struct acl_ip_label **ips;
61558+ __u32 ip_num;
61559+ __u32 inaddr_any_override;
61560+
61561+ __u32 crashes;
61562+ unsigned long expires;
61563+
61564+ struct acl_subject_label *parent_subject;
61565+ struct gr_hash_struct *hash;
61566+ struct acl_subject_label *prev;
61567+ struct acl_subject_label *next;
61568+
61569+ struct acl_object_label **obj_hash;
61570+ __u32 obj_hash_size;
61571+ __u16 pax_flags;
61572+};
61573+
61574+struct role_allowed_ip {
61575+ __u32 addr;
61576+ __u32 netmask;
61577+
61578+ struct role_allowed_ip *prev;
61579+ struct role_allowed_ip *next;
61580+};
61581+
61582+struct role_transition {
61583+ char *rolename;
61584+
61585+ struct role_transition *prev;
61586+ struct role_transition *next;
61587+};
61588+
61589+struct acl_role_label {
61590+ char *rolename;
61591+ uid_t uidgid;
61592+ __u16 roletype;
61593+
61594+ __u16 auth_attempts;
61595+ unsigned long expires;
61596+
61597+ struct acl_subject_label *root_label;
61598+ struct gr_hash_struct *hash;
61599+
61600+ struct acl_role_label *prev;
61601+ struct acl_role_label *next;
61602+
61603+ struct role_transition *transitions;
61604+ struct role_allowed_ip *allowed_ips;
61605+ uid_t *domain_children;
61606+ __u16 domain_child_num;
61607+
61608+ struct acl_subject_label **subj_hash;
61609+ __u32 subj_hash_size;
61610+};
61611+
61612+struct user_acl_role_db {
61613+ struct acl_role_label **r_table;
61614+ __u32 num_pointers; /* Number of allocations to track */
61615+ __u32 num_roles; /* Number of roles */
61616+ __u32 num_domain_children; /* Number of domain children */
61617+ __u32 num_subjects; /* Number of subjects */
61618+ __u32 num_objects; /* Number of objects */
61619+};
61620+
61621+struct acl_object_label {
61622+ char *filename;
61623+ ino_t inode;
61624+ dev_t device;
61625+ __u32 mode;
61626+
61627+ struct acl_subject_label *nested;
61628+ struct acl_object_label *globbed;
61629+
61630+ /* next two structures not used */
61631+
61632+ struct acl_object_label *prev;
61633+ struct acl_object_label *next;
61634+};
61635+
61636+struct acl_ip_label {
61637+ char *iface;
61638+ __u32 addr;
61639+ __u32 netmask;
61640+ __u16 low, high;
61641+ __u8 mode;
61642+ __u32 type;
61643+ __u32 proto[8];
61644+
61645+ /* next two structures not used */
61646+
61647+ struct acl_ip_label *prev;
61648+ struct acl_ip_label *next;
61649+};
61650+
61651+struct gr_arg {
61652+ struct user_acl_role_db role_db;
61653+ unsigned char pw[GR_PW_LEN];
61654+ unsigned char salt[GR_SALT_LEN];
61655+ unsigned char sum[GR_SHA_LEN];
61656+ unsigned char sp_role[GR_SPROLE_LEN];
61657+ struct sprole_pw *sprole_pws;
61658+ dev_t segv_device;
61659+ ino_t segv_inode;
61660+ uid_t segv_uid;
61661+ __u16 num_sprole_pws;
61662+ __u16 mode;
61663+};
61664+
61665+struct gr_arg_wrapper {
61666+ struct gr_arg *arg;
61667+ __u32 version;
61668+ __u32 size;
61669+};
61670+
61671+struct subject_map {
61672+ struct acl_subject_label *user;
61673+ struct acl_subject_label *kernel;
61674+ struct subject_map *prev;
61675+ struct subject_map *next;
61676+};
61677+
61678+struct acl_subj_map_db {
61679+ struct subject_map **s_hash;
61680+ __u32 s_size;
61681+};
61682+
61683+/* End Data Structures Section */
61684+
61685+/* Hash functions generated by empirical testing by Brad Spengler
61686+ Makes good use of the low bits of the inode. Generally 0-1 times
61687+ in loop for successful match. 0-3 for unsuccessful match.
61688+ Shift/add algorithm with modulus of table size and an XOR*/
61689+
61690+static __inline__ unsigned int
61691+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61692+{
61693+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
61694+}
61695+
61696+ static __inline__ unsigned int
61697+shash(const struct acl_subject_label *userp, const unsigned int sz)
61698+{
61699+ return ((const unsigned long)userp % sz);
61700+}
61701+
61702+static __inline__ unsigned int
61703+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61704+{
61705+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61706+}
61707+
61708+static __inline__ unsigned int
61709+nhash(const char *name, const __u16 len, const unsigned int sz)
61710+{
61711+ return full_name_hash((const unsigned char *)name, len) % sz;
61712+}
61713+
61714+#define FOR_EACH_ROLE_START(role) \
61715+ role = role_list; \
61716+ while (role) {
61717+
61718+#define FOR_EACH_ROLE_END(role) \
61719+ role = role->prev; \
61720+ }
61721+
61722+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61723+ subj = NULL; \
61724+ iter = 0; \
61725+ while (iter < role->subj_hash_size) { \
61726+ if (subj == NULL) \
61727+ subj = role->subj_hash[iter]; \
61728+ if (subj == NULL) { \
61729+ iter++; \
61730+ continue; \
61731+ }
61732+
61733+#define FOR_EACH_SUBJECT_END(subj,iter) \
61734+ subj = subj->next; \
61735+ if (subj == NULL) \
61736+ iter++; \
61737+ }
61738+
61739+
61740+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61741+ subj = role->hash->first; \
61742+ while (subj != NULL) {
61743+
61744+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61745+ subj = subj->next; \
61746+ }
61747+
61748+#endif
61749+
61750diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61751new file mode 100644
61752index 0000000..323ecf2
61753--- /dev/null
61754+++ b/include/linux/gralloc.h
61755@@ -0,0 +1,9 @@
61756+#ifndef __GRALLOC_H
61757+#define __GRALLOC_H
61758+
61759+void acl_free_all(void);
61760+int acl_alloc_stack_init(unsigned long size);
61761+void *acl_alloc(unsigned long len);
61762+void *acl_alloc_num(unsigned long num, unsigned long len);
61763+
61764+#endif
61765diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61766new file mode 100644
61767index 0000000..b30e9bc
61768--- /dev/null
61769+++ b/include/linux/grdefs.h
61770@@ -0,0 +1,140 @@
61771+#ifndef GRDEFS_H
61772+#define GRDEFS_H
61773+
61774+/* Begin grsecurity status declarations */
61775+
61776+enum {
61777+ GR_READY = 0x01,
61778+ GR_STATUS_INIT = 0x00 // disabled state
61779+};
61780+
61781+/* Begin ACL declarations */
61782+
61783+/* Role flags */
61784+
61785+enum {
61786+ GR_ROLE_USER = 0x0001,
61787+ GR_ROLE_GROUP = 0x0002,
61788+ GR_ROLE_DEFAULT = 0x0004,
61789+ GR_ROLE_SPECIAL = 0x0008,
61790+ GR_ROLE_AUTH = 0x0010,
61791+ GR_ROLE_NOPW = 0x0020,
61792+ GR_ROLE_GOD = 0x0040,
61793+ GR_ROLE_LEARN = 0x0080,
61794+ GR_ROLE_TPE = 0x0100,
61795+ GR_ROLE_DOMAIN = 0x0200,
61796+ GR_ROLE_PAM = 0x0400,
61797+ GR_ROLE_PERSIST = 0x0800
61798+};
61799+
61800+/* ACL Subject and Object mode flags */
61801+enum {
61802+ GR_DELETED = 0x80000000
61803+};
61804+
61805+/* ACL Object-only mode flags */
61806+enum {
61807+ GR_READ = 0x00000001,
61808+ GR_APPEND = 0x00000002,
61809+ GR_WRITE = 0x00000004,
61810+ GR_EXEC = 0x00000008,
61811+ GR_FIND = 0x00000010,
61812+ GR_INHERIT = 0x00000020,
61813+ GR_SETID = 0x00000040,
61814+ GR_CREATE = 0x00000080,
61815+ GR_DELETE = 0x00000100,
61816+ GR_LINK = 0x00000200,
61817+ GR_AUDIT_READ = 0x00000400,
61818+ GR_AUDIT_APPEND = 0x00000800,
61819+ GR_AUDIT_WRITE = 0x00001000,
61820+ GR_AUDIT_EXEC = 0x00002000,
61821+ GR_AUDIT_FIND = 0x00004000,
61822+ GR_AUDIT_INHERIT= 0x00008000,
61823+ GR_AUDIT_SETID = 0x00010000,
61824+ GR_AUDIT_CREATE = 0x00020000,
61825+ GR_AUDIT_DELETE = 0x00040000,
61826+ GR_AUDIT_LINK = 0x00080000,
61827+ GR_PTRACERD = 0x00100000,
61828+ GR_NOPTRACE = 0x00200000,
61829+ GR_SUPPRESS = 0x00400000,
61830+ GR_NOLEARN = 0x00800000,
61831+ GR_INIT_TRANSFER= 0x01000000
61832+};
61833+
61834+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61835+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61836+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61837+
61838+/* ACL subject-only mode flags */
61839+enum {
61840+ GR_KILL = 0x00000001,
61841+ GR_VIEW = 0x00000002,
61842+ GR_PROTECTED = 0x00000004,
61843+ GR_LEARN = 0x00000008,
61844+ GR_OVERRIDE = 0x00000010,
61845+ /* just a placeholder, this mode is only used in userspace */
61846+ GR_DUMMY = 0x00000020,
61847+ GR_PROTSHM = 0x00000040,
61848+ GR_KILLPROC = 0x00000080,
61849+ GR_KILLIPPROC = 0x00000100,
61850+ /* just a placeholder, this mode is only used in userspace */
61851+ GR_NOTROJAN = 0x00000200,
61852+ GR_PROTPROCFD = 0x00000400,
61853+ GR_PROCACCT = 0x00000800,
61854+ GR_RELAXPTRACE = 0x00001000,
61855+ GR_NESTED = 0x00002000,
61856+ GR_INHERITLEARN = 0x00004000,
61857+ GR_PROCFIND = 0x00008000,
61858+ GR_POVERRIDE = 0x00010000,
61859+ GR_KERNELAUTH = 0x00020000,
61860+ GR_ATSECURE = 0x00040000,
61861+ GR_SHMEXEC = 0x00080000
61862+};
61863+
61864+enum {
61865+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61866+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61867+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61868+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61869+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61870+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61871+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61872+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61873+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61874+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61875+};
61876+
61877+enum {
61878+ GR_ID_USER = 0x01,
61879+ GR_ID_GROUP = 0x02,
61880+};
61881+
61882+enum {
61883+ GR_ID_ALLOW = 0x01,
61884+ GR_ID_DENY = 0x02,
61885+};
61886+
61887+#define GR_CRASH_RES 31
61888+#define GR_UIDTABLE_MAX 500
61889+
61890+/* begin resource learning section */
61891+enum {
61892+ GR_RLIM_CPU_BUMP = 60,
61893+ GR_RLIM_FSIZE_BUMP = 50000,
61894+ GR_RLIM_DATA_BUMP = 10000,
61895+ GR_RLIM_STACK_BUMP = 1000,
61896+ GR_RLIM_CORE_BUMP = 10000,
61897+ GR_RLIM_RSS_BUMP = 500000,
61898+ GR_RLIM_NPROC_BUMP = 1,
61899+ GR_RLIM_NOFILE_BUMP = 5,
61900+ GR_RLIM_MEMLOCK_BUMP = 50000,
61901+ GR_RLIM_AS_BUMP = 500000,
61902+ GR_RLIM_LOCKS_BUMP = 2,
61903+ GR_RLIM_SIGPENDING_BUMP = 5,
61904+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61905+ GR_RLIM_NICE_BUMP = 1,
61906+ GR_RLIM_RTPRIO_BUMP = 1,
61907+ GR_RLIM_RTTIME_BUMP = 1000000
61908+};
61909+
61910+#endif
61911diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61912new file mode 100644
61913index 0000000..60cda84
61914--- /dev/null
61915+++ b/include/linux/grinternal.h
61916@@ -0,0 +1,220 @@
61917+#ifndef __GRINTERNAL_H
61918+#define __GRINTERNAL_H
61919+
61920+#ifdef CONFIG_GRKERNSEC
61921+
61922+#include <linux/fs.h>
61923+#include <linux/mnt_namespace.h>
61924+#include <linux/nsproxy.h>
61925+#include <linux/gracl.h>
61926+#include <linux/grdefs.h>
61927+#include <linux/grmsg.h>
61928+
61929+void gr_add_learn_entry(const char *fmt, ...)
61930+ __attribute__ ((format (printf, 1, 2)));
61931+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61932+ const struct vfsmount *mnt);
61933+__u32 gr_check_create(const struct dentry *new_dentry,
61934+ const struct dentry *parent,
61935+ const struct vfsmount *mnt, const __u32 mode);
61936+int gr_check_protected_task(const struct task_struct *task);
61937+__u32 to_gr_audit(const __u32 reqmode);
61938+int gr_set_acls(const int type);
61939+int gr_apply_subject_to_task(struct task_struct *task);
61940+int gr_acl_is_enabled(void);
61941+char gr_roletype_to_char(void);
61942+
61943+void gr_handle_alertkill(struct task_struct *task);
61944+char *gr_to_filename(const struct dentry *dentry,
61945+ const struct vfsmount *mnt);
61946+char *gr_to_filename1(const struct dentry *dentry,
61947+ const struct vfsmount *mnt);
61948+char *gr_to_filename2(const struct dentry *dentry,
61949+ const struct vfsmount *mnt);
61950+char *gr_to_filename3(const struct dentry *dentry,
61951+ const struct vfsmount *mnt);
61952+
61953+extern int grsec_enable_harden_ptrace;
61954+extern int grsec_enable_link;
61955+extern int grsec_enable_fifo;
61956+extern int grsec_enable_execve;
61957+extern int grsec_enable_shm;
61958+extern int grsec_enable_execlog;
61959+extern int grsec_enable_signal;
61960+extern int grsec_enable_audit_ptrace;
61961+extern int grsec_enable_forkfail;
61962+extern int grsec_enable_time;
61963+extern int grsec_enable_rofs;
61964+extern int grsec_enable_chroot_shmat;
61965+extern int grsec_enable_chroot_mount;
61966+extern int grsec_enable_chroot_double;
61967+extern int grsec_enable_chroot_pivot;
61968+extern int grsec_enable_chroot_chdir;
61969+extern int grsec_enable_chroot_chmod;
61970+extern int grsec_enable_chroot_mknod;
61971+extern int grsec_enable_chroot_fchdir;
61972+extern int grsec_enable_chroot_nice;
61973+extern int grsec_enable_chroot_execlog;
61974+extern int grsec_enable_chroot_caps;
61975+extern int grsec_enable_chroot_sysctl;
61976+extern int grsec_enable_chroot_unix;
61977+extern int grsec_enable_tpe;
61978+extern int grsec_tpe_gid;
61979+extern int grsec_enable_tpe_all;
61980+extern int grsec_enable_tpe_invert;
61981+extern int grsec_enable_socket_all;
61982+extern int grsec_socket_all_gid;
61983+extern int grsec_enable_socket_client;
61984+extern int grsec_socket_client_gid;
61985+extern int grsec_enable_socket_server;
61986+extern int grsec_socket_server_gid;
61987+extern int grsec_audit_gid;
61988+extern int grsec_enable_group;
61989+extern int grsec_enable_audit_textrel;
61990+extern int grsec_enable_log_rwxmaps;
61991+extern int grsec_enable_mount;
61992+extern int grsec_enable_chdir;
61993+extern int grsec_resource_logging;
61994+extern int grsec_enable_blackhole;
61995+extern int grsec_lastack_retries;
61996+extern int grsec_enable_brute;
61997+extern int grsec_lock;
61998+
61999+extern spinlock_t grsec_alert_lock;
62000+extern unsigned long grsec_alert_wtime;
62001+extern unsigned long grsec_alert_fyet;
62002+
62003+extern spinlock_t grsec_audit_lock;
62004+
62005+extern rwlock_t grsec_exec_file_lock;
62006+
62007+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62008+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62009+ (tsk)->exec_file->f_vfsmnt) : "/")
62010+
62011+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62012+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62013+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62014+
62015+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62016+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
62017+ (tsk)->exec_file->f_vfsmnt) : "/")
62018+
62019+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62020+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62021+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62022+
62023+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62024+
62025+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62026+
62027+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62028+ (task)->pid, (cred)->uid, \
62029+ (cred)->euid, (cred)->gid, (cred)->egid, \
62030+ gr_parent_task_fullpath(task), \
62031+ (task)->real_parent->comm, (task)->real_parent->pid, \
62032+ (pcred)->uid, (pcred)->euid, \
62033+ (pcred)->gid, (pcred)->egid
62034+
62035+#define GR_CHROOT_CAPS {{ \
62036+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62037+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62038+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62039+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62040+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62041+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62042+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
62043+
62044+#define security_learn(normal_msg,args...) \
62045+({ \
62046+ read_lock(&grsec_exec_file_lock); \
62047+ gr_add_learn_entry(normal_msg "\n", ## args); \
62048+ read_unlock(&grsec_exec_file_lock); \
62049+})
62050+
62051+enum {
62052+ GR_DO_AUDIT,
62053+ GR_DONT_AUDIT,
62054+ /* used for non-audit messages that we shouldn't kill the task on */
62055+ GR_DONT_AUDIT_GOOD
62056+};
62057+
62058+enum {
62059+ GR_TTYSNIFF,
62060+ GR_RBAC,
62061+ GR_RBAC_STR,
62062+ GR_STR_RBAC,
62063+ GR_RBAC_MODE2,
62064+ GR_RBAC_MODE3,
62065+ GR_FILENAME,
62066+ GR_SYSCTL_HIDDEN,
62067+ GR_NOARGS,
62068+ GR_ONE_INT,
62069+ GR_ONE_INT_TWO_STR,
62070+ GR_ONE_STR,
62071+ GR_STR_INT,
62072+ GR_TWO_STR_INT,
62073+ GR_TWO_INT,
62074+ GR_TWO_U64,
62075+ GR_THREE_INT,
62076+ GR_FIVE_INT_TWO_STR,
62077+ GR_TWO_STR,
62078+ GR_THREE_STR,
62079+ GR_FOUR_STR,
62080+ GR_STR_FILENAME,
62081+ GR_FILENAME_STR,
62082+ GR_FILENAME_TWO_INT,
62083+ GR_FILENAME_TWO_INT_STR,
62084+ GR_TEXTREL,
62085+ GR_PTRACE,
62086+ GR_RESOURCE,
62087+ GR_CAP,
62088+ GR_SIG,
62089+ GR_SIG2,
62090+ GR_CRASH1,
62091+ GR_CRASH2,
62092+ GR_PSACCT,
62093+ GR_RWXMAP
62094+};
62095+
62096+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62097+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62098+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62099+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62100+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62101+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62102+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62103+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62104+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62105+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62106+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62107+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62108+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62109+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62110+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62111+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62112+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62113+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62114+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62115+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62116+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62117+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62118+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62119+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62120+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62121+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62122+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62123+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62124+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62125+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62126+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62127+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62128+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62129+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62130+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62131+
62132+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62133+
62134+#endif
62135+
62136+#endif
62137diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62138new file mode 100644
62139index 0000000..9d5fd4a
62140--- /dev/null
62141+++ b/include/linux/grmsg.h
62142@@ -0,0 +1,108 @@
62143+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62144+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62145+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62146+#define GR_STOPMOD_MSG "denied modification of module state by "
62147+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62148+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62149+#define GR_IOPERM_MSG "denied use of ioperm() by "
62150+#define GR_IOPL_MSG "denied use of iopl() by "
62151+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62152+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62153+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62154+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62155+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62156+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62157+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62158+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62159+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62160+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62161+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62162+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62163+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62164+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62165+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62166+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62167+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62168+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62169+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62170+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62171+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62172+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62173+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62174+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62175+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62176+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62177+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
62178+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62179+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62180+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62181+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62182+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62183+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62184+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62185+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62186+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
62187+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62188+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62189+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62190+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62191+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62192+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62193+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62194+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62195+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
62196+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62197+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62198+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62199+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62200+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62201+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62202+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62203+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62204+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62205+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62206+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62207+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62208+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62209+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62210+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62211+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62212+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62213+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62214+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62215+#define GR_FAILFORK_MSG "failed fork with errno %s by "
62216+#define GR_NICE_CHROOT_MSG "denied priority change by "
62217+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62218+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62219+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62220+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62221+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62222+#define GR_TIME_MSG "time set by "
62223+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62224+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62225+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62226+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62227+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62228+#define GR_BIND_MSG "denied bind() by "
62229+#define GR_CONNECT_MSG "denied connect() by "
62230+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62231+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62232+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62233+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62234+#define GR_CAP_ACL_MSG "use of %s denied for "
62235+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62236+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62237+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62238+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62239+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62240+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62241+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62242+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62243+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62244+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62245+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62246+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62247+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62248+#define GR_VM86_MSG "denied use of vm86 by "
62249+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62250+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62251diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62252new file mode 100644
62253index 0000000..4620f36
62254--- /dev/null
62255+++ b/include/linux/grsecurity.h
62256@@ -0,0 +1,231 @@
62257+#ifndef GR_SECURITY_H
62258+#define GR_SECURITY_H
62259+#include <linux/fs.h>
62260+#include <linux/fs_struct.h>
62261+#include <linux/binfmts.h>
62262+#include <linux/gracl.h>
62263+
62264+/* notify of brain-dead configs */
62265+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62266+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62267+#endif
62268+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62269+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62270+#endif
62271+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
62272+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
62273+#endif
62274+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
62275+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
62276+#endif
62277+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62278+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62279+#endif
62280+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62281+#error "CONFIG_PAX enabled, but no PaX options are enabled."
62282+#endif
62283+
62284+#include <linux/compat.h>
62285+
62286+struct user_arg_ptr {
62287+#ifdef CONFIG_COMPAT
62288+ bool is_compat;
62289+#endif
62290+ union {
62291+ const char __user *const __user *native;
62292+#ifdef CONFIG_COMPAT
62293+ compat_uptr_t __user *compat;
62294+#endif
62295+ } ptr;
62296+};
62297+
62298+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62299+void gr_handle_brute_check(void);
62300+void gr_handle_kernel_exploit(void);
62301+int gr_process_user_ban(void);
62302+
62303+char gr_roletype_to_char(void);
62304+
62305+int gr_acl_enable_at_secure(void);
62306+
62307+int gr_check_user_change(int real, int effective, int fs);
62308+int gr_check_group_change(int real, int effective, int fs);
62309+
62310+void gr_del_task_from_ip_table(struct task_struct *p);
62311+
62312+int gr_pid_is_chrooted(struct task_struct *p);
62313+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62314+int gr_handle_chroot_nice(void);
62315+int gr_handle_chroot_sysctl(const int op);
62316+int gr_handle_chroot_setpriority(struct task_struct *p,
62317+ const int niceval);
62318+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62319+int gr_handle_chroot_chroot(const struct dentry *dentry,
62320+ const struct vfsmount *mnt);
62321+void gr_handle_chroot_chdir(struct path *path);
62322+int gr_handle_chroot_chmod(const struct dentry *dentry,
62323+ const struct vfsmount *mnt, const int mode);
62324+int gr_handle_chroot_mknod(const struct dentry *dentry,
62325+ const struct vfsmount *mnt, const int mode);
62326+int gr_handle_chroot_mount(const struct dentry *dentry,
62327+ const struct vfsmount *mnt,
62328+ const char *dev_name);
62329+int gr_handle_chroot_pivot(void);
62330+int gr_handle_chroot_unix(const pid_t pid);
62331+
62332+int gr_handle_rawio(const struct inode *inode);
62333+
62334+void gr_handle_ioperm(void);
62335+void gr_handle_iopl(void);
62336+
62337+int gr_tpe_allow(const struct file *file);
62338+
62339+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62340+void gr_clear_chroot_entries(struct task_struct *task);
62341+
62342+void gr_log_forkfail(const int retval);
62343+void gr_log_timechange(void);
62344+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62345+void gr_log_chdir(const struct dentry *dentry,
62346+ const struct vfsmount *mnt);
62347+void gr_log_chroot_exec(const struct dentry *dentry,
62348+ const struct vfsmount *mnt);
62349+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62350+void gr_log_remount(const char *devname, const int retval);
62351+void gr_log_unmount(const char *devname, const int retval);
62352+void gr_log_mount(const char *from, const char *to, const int retval);
62353+void gr_log_textrel(struct vm_area_struct *vma);
62354+void gr_log_rwxmmap(struct file *file);
62355+void gr_log_rwxmprotect(struct file *file);
62356+
62357+int gr_handle_follow_link(const struct inode *parent,
62358+ const struct inode *inode,
62359+ const struct dentry *dentry,
62360+ const struct vfsmount *mnt);
62361+int gr_handle_fifo(const struct dentry *dentry,
62362+ const struct vfsmount *mnt,
62363+ const struct dentry *dir, const int flag,
62364+ const int acc_mode);
62365+int gr_handle_hardlink(const struct dentry *dentry,
62366+ const struct vfsmount *mnt,
62367+ struct inode *inode,
62368+ const int mode, const char *to);
62369+
62370+int gr_is_capable(const int cap);
62371+int gr_is_capable_nolog(const int cap);
62372+void gr_learn_resource(const struct task_struct *task, const int limit,
62373+ const unsigned long wanted, const int gt);
62374+void gr_copy_label(struct task_struct *tsk);
62375+void gr_handle_crash(struct task_struct *task, const int sig);
62376+int gr_handle_signal(const struct task_struct *p, const int sig);
62377+int gr_check_crash_uid(const uid_t uid);
62378+int gr_check_protected_task(const struct task_struct *task);
62379+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62380+int gr_acl_handle_mmap(const struct file *file,
62381+ const unsigned long prot);
62382+int gr_acl_handle_mprotect(const struct file *file,
62383+ const unsigned long prot);
62384+int gr_check_hidden_task(const struct task_struct *tsk);
62385+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62386+ const struct vfsmount *mnt);
62387+__u32 gr_acl_handle_utime(const struct dentry *dentry,
62388+ const struct vfsmount *mnt);
62389+__u32 gr_acl_handle_access(const struct dentry *dentry,
62390+ const struct vfsmount *mnt, const int fmode);
62391+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
62392+ const struct vfsmount *mnt, mode_t mode);
62393+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62394+ const struct vfsmount *mnt, mode_t mode);
62395+__u32 gr_acl_handle_chown(const struct dentry *dentry,
62396+ const struct vfsmount *mnt);
62397+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62398+ const struct vfsmount *mnt);
62399+int gr_handle_ptrace(struct task_struct *task, const long request);
62400+int gr_handle_proc_ptrace(struct task_struct *task);
62401+__u32 gr_acl_handle_execve(const struct dentry *dentry,
62402+ const struct vfsmount *mnt);
62403+int gr_check_crash_exec(const struct file *filp);
62404+int gr_acl_is_enabled(void);
62405+void gr_set_kernel_label(struct task_struct *task);
62406+void gr_set_role_label(struct task_struct *task, const uid_t uid,
62407+ const gid_t gid);
62408+int gr_set_proc_label(const struct dentry *dentry,
62409+ const struct vfsmount *mnt,
62410+ const int unsafe_share);
62411+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62412+ const struct vfsmount *mnt);
62413+__u32 gr_acl_handle_open(const struct dentry *dentry,
62414+ const struct vfsmount *mnt, int acc_mode);
62415+__u32 gr_acl_handle_creat(const struct dentry *dentry,
62416+ const struct dentry *p_dentry,
62417+ const struct vfsmount *p_mnt,
62418+ int open_flags, int acc_mode, const int imode);
62419+void gr_handle_create(const struct dentry *dentry,
62420+ const struct vfsmount *mnt);
62421+void gr_handle_proc_create(const struct dentry *dentry,
62422+ const struct inode *inode);
62423+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62424+ const struct dentry *parent_dentry,
62425+ const struct vfsmount *parent_mnt,
62426+ const int mode);
62427+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62428+ const struct dentry *parent_dentry,
62429+ const struct vfsmount *parent_mnt);
62430+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62431+ const struct vfsmount *mnt);
62432+void gr_handle_delete(const ino_t ino, const dev_t dev);
62433+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62434+ const struct vfsmount *mnt);
62435+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62436+ const struct dentry *parent_dentry,
62437+ const struct vfsmount *parent_mnt,
62438+ const char *from);
62439+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62440+ const struct dentry *parent_dentry,
62441+ const struct vfsmount *parent_mnt,
62442+ const struct dentry *old_dentry,
62443+ const struct vfsmount *old_mnt, const char *to);
62444+int gr_acl_handle_rename(struct dentry *new_dentry,
62445+ struct dentry *parent_dentry,
62446+ const struct vfsmount *parent_mnt,
62447+ struct dentry *old_dentry,
62448+ struct inode *old_parent_inode,
62449+ struct vfsmount *old_mnt, const char *newname);
62450+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62451+ struct dentry *old_dentry,
62452+ struct dentry *new_dentry,
62453+ struct vfsmount *mnt, const __u8 replace);
62454+__u32 gr_check_link(const struct dentry *new_dentry,
62455+ const struct dentry *parent_dentry,
62456+ const struct vfsmount *parent_mnt,
62457+ const struct dentry *old_dentry,
62458+ const struct vfsmount *old_mnt);
62459+int gr_acl_handle_filldir(const struct file *file, const char *name,
62460+ const unsigned int namelen, const ino_t ino);
62461+
62462+__u32 gr_acl_handle_unix(const struct dentry *dentry,
62463+ const struct vfsmount *mnt);
62464+void gr_acl_handle_exit(void);
62465+void gr_acl_handle_psacct(struct task_struct *task, const long code);
62466+int gr_acl_handle_procpidmem(const struct task_struct *task);
62467+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62468+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62469+void gr_audit_ptrace(struct task_struct *task);
62470+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62471+
62472+#ifdef CONFIG_GRKERNSEC
62473+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62474+void gr_handle_vm86(void);
62475+void gr_handle_mem_readwrite(u64 from, u64 to);
62476+
62477+extern int grsec_enable_dmesg;
62478+extern int grsec_disable_privio;
62479+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62480+extern int grsec_enable_chroot_findtask;
62481+#endif
62482+#ifdef CONFIG_GRKERNSEC_SETXID
62483+extern int grsec_enable_setxid;
62484+#endif
62485+#endif
62486+
62487+#endif
62488diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62489new file mode 100644
62490index 0000000..e7ffaaf
62491--- /dev/null
62492+++ b/include/linux/grsock.h
62493@@ -0,0 +1,19 @@
62494+#ifndef __GRSOCK_H
62495+#define __GRSOCK_H
62496+
62497+extern void gr_attach_curr_ip(const struct sock *sk);
62498+extern int gr_handle_sock_all(const int family, const int type,
62499+ const int protocol);
62500+extern int gr_handle_sock_server(const struct sockaddr *sck);
62501+extern int gr_handle_sock_server_other(const struct sock *sck);
62502+extern int gr_handle_sock_client(const struct sockaddr *sck);
62503+extern int gr_search_connect(struct socket * sock,
62504+ struct sockaddr_in * addr);
62505+extern int gr_search_bind(struct socket * sock,
62506+ struct sockaddr_in * addr);
62507+extern int gr_search_listen(struct socket * sock);
62508+extern int gr_search_accept(struct socket * sock);
62509+extern int gr_search_socket(const int domain, const int type,
62510+ const int protocol);
62511+
62512+#endif
62513diff --git a/include/linux/hid.h b/include/linux/hid.h
62514index 9cf8e7a..5ec94d0 100644
62515--- a/include/linux/hid.h
62516+++ b/include/linux/hid.h
62517@@ -676,7 +676,7 @@ struct hid_ll_driver {
62518 unsigned int code, int value);
62519
62520 int (*parse)(struct hid_device *hdev);
62521-};
62522+} __no_const;
62523
62524 #define PM_HINT_FULLON 1<<5
62525 #define PM_HINT_NORMAL 1<<1
62526diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62527index 3a93f73..b19d0b3 100644
62528--- a/include/linux/highmem.h
62529+++ b/include/linux/highmem.h
62530@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
62531 kunmap_atomic(kaddr, KM_USER0);
62532 }
62533
62534+static inline void sanitize_highpage(struct page *page)
62535+{
62536+ void *kaddr;
62537+ unsigned long flags;
62538+
62539+ local_irq_save(flags);
62540+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
62541+ clear_page(kaddr);
62542+ kunmap_atomic(kaddr, KM_CLEARPAGE);
62543+ local_irq_restore(flags);
62544+}
62545+
62546 static inline void zero_user_segments(struct page *page,
62547 unsigned start1, unsigned end1,
62548 unsigned start2, unsigned end2)
62549diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62550index a6c652e..1f5878f 100644
62551--- a/include/linux/i2c.h
62552+++ b/include/linux/i2c.h
62553@@ -346,6 +346,7 @@ struct i2c_algorithm {
62554 /* To determine what the adapter supports */
62555 u32 (*functionality) (struct i2c_adapter *);
62556 };
62557+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62558
62559 /*
62560 * i2c_adapter is the structure used to identify a physical i2c bus along
62561diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62562index a6deef4..c56a7f2 100644
62563--- a/include/linux/i2o.h
62564+++ b/include/linux/i2o.h
62565@@ -564,7 +564,7 @@ struct i2o_controller {
62566 struct i2o_device *exec; /* Executive */
62567 #if BITS_PER_LONG == 64
62568 spinlock_t context_list_lock; /* lock for context_list */
62569- atomic_t context_list_counter; /* needed for unique contexts */
62570+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62571 struct list_head context_list; /* list of context id's
62572 and pointers */
62573 #endif
62574diff --git a/include/linux/init.h b/include/linux/init.h
62575index 9146f39..885354d 100644
62576--- a/include/linux/init.h
62577+++ b/include/linux/init.h
62578@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
62579
62580 /* Each module must use one module_init(). */
62581 #define module_init(initfn) \
62582- static inline initcall_t __inittest(void) \
62583+ static inline __used initcall_t __inittest(void) \
62584 { return initfn; } \
62585 int init_module(void) __attribute__((alias(#initfn)));
62586
62587 /* This is only required if you want to be unloadable. */
62588 #define module_exit(exitfn) \
62589- static inline exitcall_t __exittest(void) \
62590+ static inline __used exitcall_t __exittest(void) \
62591 { return exitfn; } \
62592 void cleanup_module(void) __attribute__((alias(#exitfn)));
62593
62594diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62595index d14e058..4162929 100644
62596--- a/include/linux/init_task.h
62597+++ b/include/linux/init_task.h
62598@@ -126,6 +126,12 @@ extern struct cred init_cred;
62599 # define INIT_PERF_EVENTS(tsk)
62600 #endif
62601
62602+#ifdef CONFIG_X86
62603+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62604+#else
62605+#define INIT_TASK_THREAD_INFO
62606+#endif
62607+
62608 /*
62609 * INIT_TASK is used to set up the first task table, touch at
62610 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62611@@ -164,6 +170,7 @@ extern struct cred init_cred;
62612 RCU_INIT_POINTER(.cred, &init_cred), \
62613 .comm = "swapper", \
62614 .thread = INIT_THREAD, \
62615+ INIT_TASK_THREAD_INFO \
62616 .fs = &init_fs, \
62617 .files = &init_files, \
62618 .signal = &init_signals, \
62619diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62620index 9310c69..6ebb244 100644
62621--- a/include/linux/intel-iommu.h
62622+++ b/include/linux/intel-iommu.h
62623@@ -296,7 +296,7 @@ struct iommu_flush {
62624 u8 fm, u64 type);
62625 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62626 unsigned int size_order, u64 type);
62627-};
62628+} __no_const;
62629
62630 enum {
62631 SR_DMAR_FECTL_REG,
62632diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62633index f51a81b..adfcb44 100644
62634--- a/include/linux/interrupt.h
62635+++ b/include/linux/interrupt.h
62636@@ -425,7 +425,7 @@ enum
62637 /* map softirq index to softirq name. update 'softirq_to_name' in
62638 * kernel/softirq.c when adding a new softirq.
62639 */
62640-extern char *softirq_to_name[NR_SOFTIRQS];
62641+extern const char * const softirq_to_name[NR_SOFTIRQS];
62642
62643 /* softirq mask and active fields moved to irq_cpustat_t in
62644 * asm/hardirq.h to get better cache usage. KAO
62645@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62646
62647 struct softirq_action
62648 {
62649- void (*action)(struct softirq_action *);
62650+ void (*action)(void);
62651 };
62652
62653 asmlinkage void do_softirq(void);
62654 asmlinkage void __do_softirq(void);
62655-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62656+extern void open_softirq(int nr, void (*action)(void));
62657 extern void softirq_init(void);
62658 static inline void __raise_softirq_irqoff(unsigned int nr)
62659 {
62660diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62661index 0df513b..fe901a2 100644
62662--- a/include/linux/kallsyms.h
62663+++ b/include/linux/kallsyms.h
62664@@ -15,7 +15,8 @@
62665
62666 struct module;
62667
62668-#ifdef CONFIG_KALLSYMS
62669+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62670+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62671 /* Lookup the address for a symbol. Returns 0 if not found. */
62672 unsigned long kallsyms_lookup_name(const char *name);
62673
62674@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62675 /* Stupid that this does nothing, but I didn't create this mess. */
62676 #define __print_symbol(fmt, addr)
62677 #endif /*CONFIG_KALLSYMS*/
62678+#else /* when included by kallsyms.c, vsnprintf.c, or
62679+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62680+extern void __print_symbol(const char *fmt, unsigned long address);
62681+extern int sprint_backtrace(char *buffer, unsigned long address);
62682+extern int sprint_symbol(char *buffer, unsigned long address);
62683+const char *kallsyms_lookup(unsigned long addr,
62684+ unsigned long *symbolsize,
62685+ unsigned long *offset,
62686+ char **modname, char *namebuf);
62687+#endif
62688
62689 /* This macro allows us to keep printk typechecking */
62690 static void __check_printsym_format(const char *fmt, ...)
62691diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62692index fa39183..40160be 100644
62693--- a/include/linux/kgdb.h
62694+++ b/include/linux/kgdb.h
62695@@ -53,7 +53,7 @@ extern int kgdb_connected;
62696 extern int kgdb_io_module_registered;
62697
62698 extern atomic_t kgdb_setting_breakpoint;
62699-extern atomic_t kgdb_cpu_doing_single_step;
62700+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62701
62702 extern struct task_struct *kgdb_usethread;
62703 extern struct task_struct *kgdb_contthread;
62704@@ -251,7 +251,7 @@ struct kgdb_arch {
62705 void (*disable_hw_break)(struct pt_regs *regs);
62706 void (*remove_all_hw_break)(void);
62707 void (*correct_hw_break)(void);
62708-};
62709+} __do_const;
62710
62711 /**
62712 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62713@@ -276,7 +276,7 @@ struct kgdb_io {
62714 void (*pre_exception) (void);
62715 void (*post_exception) (void);
62716 int is_console;
62717-};
62718+} __do_const;
62719
62720 extern struct kgdb_arch arch_kgdb_ops;
62721
62722diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62723index 0da38cf..d23f05f 100644
62724--- a/include/linux/kmod.h
62725+++ b/include/linux/kmod.h
62726@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62727 * usually useless though. */
62728 extern int __request_module(bool wait, const char *name, ...) \
62729 __attribute__((format(printf, 2, 3)));
62730+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
62731+ __attribute__((format(printf, 3, 4)));
62732 #define request_module(mod...) __request_module(true, mod)
62733 #define request_module_nowait(mod...) __request_module(false, mod)
62734 #define try_then_request_module(x, mod...) \
62735diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62736index eabb21a..3f030f4 100644
62737--- a/include/linux/kvm_host.h
62738+++ b/include/linux/kvm_host.h
62739@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62740 void vcpu_load(struct kvm_vcpu *vcpu);
62741 void vcpu_put(struct kvm_vcpu *vcpu);
62742
62743-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62744+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62745 struct module *module);
62746 void kvm_exit(void);
62747
62748@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62749 struct kvm_guest_debug *dbg);
62750 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62751
62752-int kvm_arch_init(void *opaque);
62753+int kvm_arch_init(const void *opaque);
62754 void kvm_arch_exit(void);
62755
62756 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62757diff --git a/include/linux/libata.h b/include/linux/libata.h
62758index efd6f98..5f5fd37 100644
62759--- a/include/linux/libata.h
62760+++ b/include/linux/libata.h
62761@@ -909,7 +909,7 @@ struct ata_port_operations {
62762 * fields must be pointers.
62763 */
62764 const struct ata_port_operations *inherits;
62765-};
62766+} __do_const;
62767
62768 struct ata_port_info {
62769 unsigned long flags;
62770diff --git a/include/linux/mca.h b/include/linux/mca.h
62771index 3797270..7765ede 100644
62772--- a/include/linux/mca.h
62773+++ b/include/linux/mca.h
62774@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62775 int region);
62776 void * (*mca_transform_memory)(struct mca_device *,
62777 void *memory);
62778-};
62779+} __no_const;
62780
62781 struct mca_bus {
62782 u64 default_dma_mask;
62783diff --git a/include/linux/memory.h b/include/linux/memory.h
62784index 935699b..11042cc 100644
62785--- a/include/linux/memory.h
62786+++ b/include/linux/memory.h
62787@@ -144,7 +144,7 @@ struct memory_accessor {
62788 size_t count);
62789 ssize_t (*write)(struct memory_accessor *, const char *buf,
62790 off_t offset, size_t count);
62791-};
62792+} __no_const;
62793
62794 /*
62795 * Kernel text modification mutex, used for code patching. Users of this lock
62796diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62797index 896b5e4..1159ad0 100644
62798--- a/include/linux/mfd/abx500.h
62799+++ b/include/linux/mfd/abx500.h
62800@@ -234,6 +234,7 @@ struct abx500_ops {
62801 int (*event_registers_startup_state_get) (struct device *, u8 *);
62802 int (*startup_irq_enabled) (struct device *, unsigned int);
62803 };
62804+typedef struct abx500_ops __no_const abx500_ops_no_const;
62805
62806 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62807 void abx500_remove_ops(struct device *dev);
62808diff --git a/include/linux/mm.h b/include/linux/mm.h
62809index fedc5f0..7cedb6d 100644
62810--- a/include/linux/mm.h
62811+++ b/include/linux/mm.h
62812@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp);
62813
62814 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62815 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62816+
62817+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62818+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62819+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62820+#else
62821 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62822+#endif
62823+
62824 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62825 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62826
62827@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
62828 int set_page_dirty_lock(struct page *page);
62829 int clear_page_dirty_for_io(struct page *page);
62830
62831-/* Is the vma a continuation of the stack vma above it? */
62832-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62833-{
62834- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62835-}
62836-
62837-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62838- unsigned long addr)
62839-{
62840- return (vma->vm_flags & VM_GROWSDOWN) &&
62841- (vma->vm_start == addr) &&
62842- !vma_growsdown(vma->vm_prev, addr);
62843-}
62844-
62845-/* Is the vma a continuation of the stack vma below it? */
62846-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62847-{
62848- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62849-}
62850-
62851-static inline int stack_guard_page_end(struct vm_area_struct *vma,
62852- unsigned long addr)
62853-{
62854- return (vma->vm_flags & VM_GROWSUP) &&
62855- (vma->vm_end == addr) &&
62856- !vma_growsup(vma->vm_next, addr);
62857-}
62858-
62859 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62860 unsigned long old_addr, struct vm_area_struct *new_vma,
62861 unsigned long new_addr, unsigned long len);
62862@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62863 }
62864 #endif
62865
62866+#ifdef CONFIG_MMU
62867+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62868+#else
62869+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62870+{
62871+ return __pgprot(0);
62872+}
62873+#endif
62874+
62875 int vma_wants_writenotify(struct vm_area_struct *vma);
62876
62877 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62878@@ -1417,6 +1405,7 @@ out:
62879 }
62880
62881 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62882+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62883
62884 extern unsigned long do_brk(unsigned long, unsigned long);
62885
62886@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62887 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62888 struct vm_area_struct **pprev);
62889
62890+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62891+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62892+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62893+
62894 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62895 NULL if none. Assume start_addr < end_addr. */
62896 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62897@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
62898 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
62899 }
62900
62901-#ifdef CONFIG_MMU
62902-pgprot_t vm_get_page_prot(unsigned long vm_flags);
62903-#else
62904-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62905-{
62906- return __pgprot(0);
62907-}
62908-#endif
62909-
62910 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62911 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62912 unsigned long pfn, unsigned long size, pgprot_t);
62913@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn);
62914 extern int sysctl_memory_failure_early_kill;
62915 extern int sysctl_memory_failure_recovery;
62916 extern void shake_page(struct page *p, int access);
62917-extern atomic_long_t mce_bad_pages;
62918+extern atomic_long_unchecked_t mce_bad_pages;
62919 extern int soft_offline_page(struct page *page, int flags);
62920
62921 extern void dump_page(struct page *page);
62922@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
62923 unsigned int pages_per_huge_page);
62924 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
62925
62926+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62927+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62928+#else
62929+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62930+#endif
62931+
62932 #endif /* __KERNEL__ */
62933 #endif /* _LINUX_MM_H */
62934diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62935index 10a2f62..d655142 100644
62936--- a/include/linux/mm_types.h
62937+++ b/include/linux/mm_types.h
62938@@ -230,6 +230,8 @@ struct vm_area_struct {
62939 #ifdef CONFIG_NUMA
62940 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62941 #endif
62942+
62943+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62944 };
62945
62946 struct core_thread {
62947@@ -362,6 +364,24 @@ struct mm_struct {
62948 #ifdef CONFIG_CPUMASK_OFFSTACK
62949 struct cpumask cpumask_allocation;
62950 #endif
62951+
62952+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62953+ unsigned long pax_flags;
62954+#endif
62955+
62956+#ifdef CONFIG_PAX_DLRESOLVE
62957+ unsigned long call_dl_resolve;
62958+#endif
62959+
62960+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62961+ unsigned long call_syscall;
62962+#endif
62963+
62964+#ifdef CONFIG_PAX_ASLR
62965+ unsigned long delta_mmap; /* randomized offset */
62966+ unsigned long delta_stack; /* randomized offset */
62967+#endif
62968+
62969 };
62970
62971 static inline void mm_init_cpumask(struct mm_struct *mm)
62972diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62973index 1d1b1e1..2a13c78 100644
62974--- a/include/linux/mmu_notifier.h
62975+++ b/include/linux/mmu_notifier.h
62976@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62977 */
62978 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62979 ({ \
62980- pte_t __pte; \
62981+ pte_t ___pte; \
62982 struct vm_area_struct *___vma = __vma; \
62983 unsigned long ___address = __address; \
62984- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62985+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62986 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62987- __pte; \
62988+ ___pte; \
62989 })
62990
62991 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62992diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62993index be1ac8d..26868ce 100644
62994--- a/include/linux/mmzone.h
62995+++ b/include/linux/mmzone.h
62996@@ -356,7 +356,7 @@ struct zone {
62997 unsigned long flags; /* zone flags, see below */
62998
62999 /* Zone statistics */
63000- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63001+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63002
63003 /*
63004 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
63005diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
63006index ae28e93..1ac2233 100644
63007--- a/include/linux/mod_devicetable.h
63008+++ b/include/linux/mod_devicetable.h
63009@@ -12,7 +12,7 @@
63010 typedef unsigned long kernel_ulong_t;
63011 #endif
63012
63013-#define PCI_ANY_ID (~0)
63014+#define PCI_ANY_ID ((__u16)~0)
63015
63016 struct pci_device_id {
63017 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63018@@ -131,7 +131,7 @@ struct usb_device_id {
63019 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63020 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63021
63022-#define HID_ANY_ID (~0)
63023+#define HID_ANY_ID (~0U)
63024
63025 struct hid_device_id {
63026 __u16 bus;
63027diff --git a/include/linux/module.h b/include/linux/module.h
63028index 1c30087..fc2a442 100644
63029--- a/include/linux/module.h
63030+++ b/include/linux/module.h
63031@@ -16,6 +16,7 @@
63032 #include <linux/kobject.h>
63033 #include <linux/moduleparam.h>
63034 #include <linux/tracepoint.h>
63035+#include <linux/fs.h>
63036
63037 #include <linux/percpu.h>
63038 #include <asm/module.h>
63039@@ -327,19 +328,16 @@ struct module
63040 int (*init)(void);
63041
63042 /* If this is non-NULL, vfree after init() returns */
63043- void *module_init;
63044+ void *module_init_rx, *module_init_rw;
63045
63046 /* Here is the actual code + data, vfree'd on unload. */
63047- void *module_core;
63048+ void *module_core_rx, *module_core_rw;
63049
63050 /* Here are the sizes of the init and core sections */
63051- unsigned int init_size, core_size;
63052+ unsigned int init_size_rw, core_size_rw;
63053
63054 /* The size of the executable code in each section. */
63055- unsigned int init_text_size, core_text_size;
63056-
63057- /* Size of RO sections of the module (text+rodata) */
63058- unsigned int init_ro_size, core_ro_size;
63059+ unsigned int init_size_rx, core_size_rx;
63060
63061 /* Arch-specific module values */
63062 struct mod_arch_specific arch;
63063@@ -395,6 +393,10 @@ struct module
63064 #ifdef CONFIG_EVENT_TRACING
63065 struct ftrace_event_call **trace_events;
63066 unsigned int num_trace_events;
63067+ struct file_operations trace_id;
63068+ struct file_operations trace_enable;
63069+ struct file_operations trace_format;
63070+ struct file_operations trace_filter;
63071 #endif
63072 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63073 unsigned int num_ftrace_callsites;
63074@@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr);
63075 bool is_module_percpu_address(unsigned long addr);
63076 bool is_module_text_address(unsigned long addr);
63077
63078+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63079+{
63080+
63081+#ifdef CONFIG_PAX_KERNEXEC
63082+ if (ktla_ktva(addr) >= (unsigned long)start &&
63083+ ktla_ktva(addr) < (unsigned long)start + size)
63084+ return 1;
63085+#endif
63086+
63087+ return ((void *)addr >= start && (void *)addr < start + size);
63088+}
63089+
63090+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63091+{
63092+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63093+}
63094+
63095+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63096+{
63097+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63098+}
63099+
63100+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63101+{
63102+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63103+}
63104+
63105+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63106+{
63107+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63108+}
63109+
63110 static inline int within_module_core(unsigned long addr, struct module *mod)
63111 {
63112- return (unsigned long)mod->module_core <= addr &&
63113- addr < (unsigned long)mod->module_core + mod->core_size;
63114+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63115 }
63116
63117 static inline int within_module_init(unsigned long addr, struct module *mod)
63118 {
63119- return (unsigned long)mod->module_init <= addr &&
63120- addr < (unsigned long)mod->module_init + mod->init_size;
63121+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63122 }
63123
63124 /* Search for module by name: must hold module_mutex. */
63125diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63126index b2be02e..6a9fdb1 100644
63127--- a/include/linux/moduleloader.h
63128+++ b/include/linux/moduleloader.h
63129@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63130 sections. Returns NULL on failure. */
63131 void *module_alloc(unsigned long size);
63132
63133+#ifdef CONFIG_PAX_KERNEXEC
63134+void *module_alloc_exec(unsigned long size);
63135+#else
63136+#define module_alloc_exec(x) module_alloc(x)
63137+#endif
63138+
63139 /* Free memory returned from module_alloc. */
63140 void module_free(struct module *mod, void *module_region);
63141
63142+#ifdef CONFIG_PAX_KERNEXEC
63143+void module_free_exec(struct module *mod, void *module_region);
63144+#else
63145+#define module_free_exec(x, y) module_free((x), (y))
63146+#endif
63147+
63148 /* Apply the given relocation to the (simplified) ELF. Return -error
63149 or 0. */
63150 int apply_relocate(Elf_Shdr *sechdrs,
63151diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63152index ddaae98..3c70938 100644
63153--- a/include/linux/moduleparam.h
63154+++ b/include/linux/moduleparam.h
63155@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void)
63156 * @len is usually just sizeof(string).
63157 */
63158 #define module_param_string(name, string, len, perm) \
63159- static const struct kparam_string __param_string_##name \
63160+ static const struct kparam_string __param_string_##name __used \
63161 = { len, string }; \
63162 __module_param_call(MODULE_PARAM_PREFIX, name, \
63163 &param_ops_string, \
63164@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
63165 * module_param_named() for why this might be necessary.
63166 */
63167 #define module_param_array_named(name, array, type, nump, perm) \
63168- static const struct kparam_array __param_arr_##name \
63169+ static const struct kparam_array __param_arr_##name __used \
63170 = { .max = ARRAY_SIZE(array), .num = nump, \
63171 .ops = &param_ops_##type, \
63172 .elemsize = sizeof(array[0]), .elem = array }; \
63173diff --git a/include/linux/namei.h b/include/linux/namei.h
63174index ffc0213..2c1f2cb 100644
63175--- a/include/linux/namei.h
63176+++ b/include/linux/namei.h
63177@@ -24,7 +24,7 @@ struct nameidata {
63178 unsigned seq;
63179 int last_type;
63180 unsigned depth;
63181- char *saved_names[MAX_NESTED_LINKS + 1];
63182+ const char *saved_names[MAX_NESTED_LINKS + 1];
63183
63184 /* Intent data */
63185 union {
63186@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63187 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63188 extern void unlock_rename(struct dentry *, struct dentry *);
63189
63190-static inline void nd_set_link(struct nameidata *nd, char *path)
63191+static inline void nd_set_link(struct nameidata *nd, const char *path)
63192 {
63193 nd->saved_names[nd->depth] = path;
63194 }
63195
63196-static inline char *nd_get_link(struct nameidata *nd)
63197+static inline const char *nd_get_link(const struct nameidata *nd)
63198 {
63199 return nd->saved_names[nd->depth];
63200 }
63201diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63202index ddee79b..67af106 100644
63203--- a/include/linux/netdevice.h
63204+++ b/include/linux/netdevice.h
63205@@ -944,6 +944,7 @@ struct net_device_ops {
63206 int (*ndo_set_features)(struct net_device *dev,
63207 u32 features);
63208 };
63209+typedef struct net_device_ops __no_const net_device_ops_no_const;
63210
63211 /*
63212 * The DEVICE structure.
63213diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63214new file mode 100644
63215index 0000000..33f4af8
63216--- /dev/null
63217+++ b/include/linux/netfilter/xt_gradm.h
63218@@ -0,0 +1,9 @@
63219+#ifndef _LINUX_NETFILTER_XT_GRADM_H
63220+#define _LINUX_NETFILTER_XT_GRADM_H 1
63221+
63222+struct xt_gradm_mtinfo {
63223+ __u16 flags;
63224+ __u16 invflags;
63225+};
63226+
63227+#endif
63228diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63229index c65a18a..0c05f3a 100644
63230--- a/include/linux/of_pdt.h
63231+++ b/include/linux/of_pdt.h
63232@@ -32,7 +32,7 @@ struct of_pdt_ops {
63233
63234 /* return 0 on success; fill in 'len' with number of bytes in path */
63235 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63236-};
63237+} __no_const;
63238
63239 extern void *prom_early_alloc(unsigned long size);
63240
63241diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63242index 49c8727..34d2ae1 100644
63243--- a/include/linux/oprofile.h
63244+++ b/include/linux/oprofile.h
63245@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63246 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63247 char const * name, ulong * val);
63248
63249-/** Create a file for read-only access to an atomic_t. */
63250+/** Create a file for read-only access to an atomic_unchecked_t. */
63251 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63252- char const * name, atomic_t * val);
63253+ char const * name, atomic_unchecked_t * val);
63254
63255 /** create a directory */
63256 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63257diff --git a/include/linux/padata.h b/include/linux/padata.h
63258index 4633b2f..988bc08 100644
63259--- a/include/linux/padata.h
63260+++ b/include/linux/padata.h
63261@@ -129,7 +129,7 @@ struct parallel_data {
63262 struct padata_instance *pinst;
63263 struct padata_parallel_queue __percpu *pqueue;
63264 struct padata_serial_queue __percpu *squeue;
63265- atomic_t seq_nr;
63266+ atomic_unchecked_t seq_nr;
63267 atomic_t reorder_objects;
63268 atomic_t refcnt;
63269 unsigned int max_seq_nr;
63270diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63271index c816075..cd28c4d 100644
63272--- a/include/linux/perf_event.h
63273+++ b/include/linux/perf_event.h
63274@@ -745,8 +745,8 @@ struct perf_event {
63275
63276 enum perf_event_active_state state;
63277 unsigned int attach_state;
63278- local64_t count;
63279- atomic64_t child_count;
63280+ local64_t count; /* PaX: fix it one day */
63281+ atomic64_unchecked_t child_count;
63282
63283 /*
63284 * These are the total time in nanoseconds that the event
63285@@ -797,8 +797,8 @@ struct perf_event {
63286 * These accumulate total time (in nanoseconds) that children
63287 * events have been enabled and running, respectively.
63288 */
63289- atomic64_t child_total_time_enabled;
63290- atomic64_t child_total_time_running;
63291+ atomic64_unchecked_t child_total_time_enabled;
63292+ atomic64_unchecked_t child_total_time_running;
63293
63294 /*
63295 * Protect attach/detach and child_list:
63296diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63297index 77257c9..51d473a 100644
63298--- a/include/linux/pipe_fs_i.h
63299+++ b/include/linux/pipe_fs_i.h
63300@@ -46,9 +46,9 @@ struct pipe_buffer {
63301 struct pipe_inode_info {
63302 wait_queue_head_t wait;
63303 unsigned int nrbufs, curbuf, buffers;
63304- unsigned int readers;
63305- unsigned int writers;
63306- unsigned int waiting_writers;
63307+ atomic_t readers;
63308+ atomic_t writers;
63309+ atomic_t waiting_writers;
63310 unsigned int r_counter;
63311 unsigned int w_counter;
63312 struct page *tmp_page;
63313diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63314index daac05d..c6802ce 100644
63315--- a/include/linux/pm_runtime.h
63316+++ b/include/linux/pm_runtime.h
63317@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63318
63319 static inline void pm_runtime_mark_last_busy(struct device *dev)
63320 {
63321- ACCESS_ONCE(dev->power.last_busy) = jiffies;
63322+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63323 }
63324
63325 #else /* !CONFIG_PM_RUNTIME */
63326diff --git a/include/linux/poison.h b/include/linux/poison.h
63327index 79159de..f1233a9 100644
63328--- a/include/linux/poison.h
63329+++ b/include/linux/poison.h
63330@@ -19,8 +19,8 @@
63331 * under normal circumstances, used to verify that nobody uses
63332 * non-initialized list entries.
63333 */
63334-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63335-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63336+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63337+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63338
63339 /********** include/linux/timer.h **********/
63340 /*
63341diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63342index 58969b2..ead129b 100644
63343--- a/include/linux/preempt.h
63344+++ b/include/linux/preempt.h
63345@@ -123,7 +123,7 @@ struct preempt_ops {
63346 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63347 void (*sched_out)(struct preempt_notifier *notifier,
63348 struct task_struct *next);
63349-};
63350+} __no_const;
63351
63352 /**
63353 * preempt_notifier - key for installing preemption notifiers
63354diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63355index 643b96c..ef55a9c 100644
63356--- a/include/linux/proc_fs.h
63357+++ b/include/linux/proc_fs.h
63358@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
63359 return proc_create_data(name, mode, parent, proc_fops, NULL);
63360 }
63361
63362+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
63363+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63364+{
63365+#ifdef CONFIG_GRKERNSEC_PROC_USER
63366+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63367+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63368+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63369+#else
63370+ return proc_create_data(name, mode, parent, proc_fops, NULL);
63371+#endif
63372+}
63373+
63374+
63375 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63376 mode_t mode, struct proc_dir_entry *base,
63377 read_proc_t *read_proc, void * data)
63378@@ -258,7 +271,7 @@ union proc_op {
63379 int (*proc_show)(struct seq_file *m,
63380 struct pid_namespace *ns, struct pid *pid,
63381 struct task_struct *task);
63382-};
63383+} __no_const;
63384
63385 struct ctl_table_header;
63386 struct ctl_table;
63387diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
63388index 800f113..af90cc8 100644
63389--- a/include/linux/ptrace.h
63390+++ b/include/linux/ptrace.h
63391@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child);
63392 extern void exit_ptrace(struct task_struct *tracer);
63393 #define PTRACE_MODE_READ 1
63394 #define PTRACE_MODE_ATTACH 2
63395-/* Returns 0 on success, -errno on denial. */
63396-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
63397 /* Returns true on success, false on denial. */
63398 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
63399+/* Returns true on success, false on denial. */
63400+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
63401
63402 static inline int ptrace_reparented(struct task_struct *child)
63403 {
63404diff --git a/include/linux/random.h b/include/linux/random.h
63405index d13059f..2eaafaa 100644
63406--- a/include/linux/random.h
63407+++ b/include/linux/random.h
63408@@ -69,12 +69,17 @@ void srandom32(u32 seed);
63409
63410 u32 prandom32(struct rnd_state *);
63411
63412+static inline unsigned long pax_get_random_long(void)
63413+{
63414+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63415+}
63416+
63417 /*
63418 * Handle minimum values for seeds
63419 */
63420 static inline u32 __seed(u32 x, u32 m)
63421 {
63422- return (x < m) ? x + m : x;
63423+ return (x <= m) ? x + m + 1 : x;
63424 }
63425
63426 /**
63427diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63428index e0879a7..a12f962 100644
63429--- a/include/linux/reboot.h
63430+++ b/include/linux/reboot.h
63431@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
63432 * Architecture-specific implementations of sys_reboot commands.
63433 */
63434
63435-extern void machine_restart(char *cmd);
63436-extern void machine_halt(void);
63437-extern void machine_power_off(void);
63438+extern void machine_restart(char *cmd) __noreturn;
63439+extern void machine_halt(void) __noreturn;
63440+extern void machine_power_off(void) __noreturn;
63441
63442 extern void machine_shutdown(void);
63443 struct pt_regs;
63444@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63445 */
63446
63447 extern void kernel_restart_prepare(char *cmd);
63448-extern void kernel_restart(char *cmd);
63449-extern void kernel_halt(void);
63450-extern void kernel_power_off(void);
63451+extern void kernel_restart(char *cmd) __noreturn;
63452+extern void kernel_halt(void) __noreturn;
63453+extern void kernel_power_off(void) __noreturn;
63454
63455 extern int C_A_D; /* for sysctl */
63456 void ctrl_alt_del(void);
63457@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63458 * Emergency restart, callable from an interrupt handler.
63459 */
63460
63461-extern void emergency_restart(void);
63462+extern void emergency_restart(void) __noreturn;
63463 #include <asm/emergency-restart.h>
63464
63465 #endif
63466diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
63467index 96d465f..b084e05 100644
63468--- a/include/linux/reiserfs_fs.h
63469+++ b/include/linux/reiserfs_fs.h
63470@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63471 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63472
63473 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63474-#define get_generation(s) atomic_read (&fs_generation(s))
63475+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63476 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63477 #define __fs_changed(gen,s) (gen != get_generation (s))
63478 #define fs_changed(gen,s) \
63479diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
63480index 52c83b6..18ed7eb 100644
63481--- a/include/linux/reiserfs_fs_sb.h
63482+++ b/include/linux/reiserfs_fs_sb.h
63483@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
63484 /* Comment? -Hans */
63485 wait_queue_head_t s_wait;
63486 /* To be obsoleted soon by per buffer seals.. -Hans */
63487- atomic_t s_generation_counter; // increased by one every time the
63488+ atomic_unchecked_t s_generation_counter; // increased by one every time the
63489 // tree gets re-balanced
63490 unsigned long s_properties; /* File system properties. Currently holds
63491 on-disk FS format */
63492diff --git a/include/linux/relay.h b/include/linux/relay.h
63493index 14a86bc..17d0700 100644
63494--- a/include/linux/relay.h
63495+++ b/include/linux/relay.h
63496@@ -159,7 +159,7 @@ struct rchan_callbacks
63497 * The callback should return 0 if successful, negative if not.
63498 */
63499 int (*remove_buf_file)(struct dentry *dentry);
63500-};
63501+} __no_const;
63502
63503 /*
63504 * CONFIG_RELAY kernel API, kernel/relay.c
63505diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63506index c6c6084..5bf1212 100644
63507--- a/include/linux/rfkill.h
63508+++ b/include/linux/rfkill.h
63509@@ -147,6 +147,7 @@ struct rfkill_ops {
63510 void (*query)(struct rfkill *rfkill, void *data);
63511 int (*set_block)(void *data, bool blocked);
63512 };
63513+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63514
63515 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63516 /**
63517diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63518index 2148b12..519b820 100644
63519--- a/include/linux/rmap.h
63520+++ b/include/linux/rmap.h
63521@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63522 void anon_vma_init(void); /* create anon_vma_cachep */
63523 int anon_vma_prepare(struct vm_area_struct *);
63524 void unlink_anon_vmas(struct vm_area_struct *);
63525-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63526-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63527+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63528+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63529 void __anon_vma_link(struct vm_area_struct *);
63530
63531 static inline void anon_vma_merge(struct vm_area_struct *vma,
63532diff --git a/include/linux/sched.h b/include/linux/sched.h
63533index 41d0237..51dd96c 100644
63534--- a/include/linux/sched.h
63535+++ b/include/linux/sched.h
63536@@ -100,6 +100,7 @@ struct bio_list;
63537 struct fs_struct;
63538 struct perf_event_context;
63539 struct blk_plug;
63540+struct linux_binprm;
63541
63542 /*
63543 * List of flags we want to share for kernel threads,
63544@@ -380,10 +381,13 @@ struct user_namespace;
63545 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63546
63547 extern int sysctl_max_map_count;
63548+extern unsigned long sysctl_heap_stack_gap;
63549
63550 #include <linux/aio.h>
63551
63552 #ifdef CONFIG_MMU
63553+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63554+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63555 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63556 extern unsigned long
63557 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63558@@ -629,6 +633,17 @@ struct signal_struct {
63559 #ifdef CONFIG_TASKSTATS
63560 struct taskstats *stats;
63561 #endif
63562+
63563+#ifdef CONFIG_GRKERNSEC
63564+ u32 curr_ip;
63565+ u32 saved_ip;
63566+ u32 gr_saddr;
63567+ u32 gr_daddr;
63568+ u16 gr_sport;
63569+ u16 gr_dport;
63570+ u8 used_accept:1;
63571+#endif
63572+
63573 #ifdef CONFIG_AUDIT
63574 unsigned audit_tty;
63575 struct tty_audit_buf *tty_audit_buf;
63576@@ -710,6 +725,11 @@ struct user_struct {
63577 struct key *session_keyring; /* UID's default session keyring */
63578 #endif
63579
63580+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63581+ unsigned int banned;
63582+ unsigned long ban_expires;
63583+#endif
63584+
63585 /* Hash table maintenance information */
63586 struct hlist_node uidhash_node;
63587 uid_t uid;
63588@@ -1340,8 +1360,8 @@ struct task_struct {
63589 struct list_head thread_group;
63590
63591 struct completion *vfork_done; /* for vfork() */
63592- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63593- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63594+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63595+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63596
63597 cputime_t utime, stime, utimescaled, stimescaled;
63598 cputime_t gtime;
63599@@ -1357,13 +1377,6 @@ struct task_struct {
63600 struct task_cputime cputime_expires;
63601 struct list_head cpu_timers[3];
63602
63603-/* process credentials */
63604- const struct cred __rcu *real_cred; /* objective and real subjective task
63605- * credentials (COW) */
63606- const struct cred __rcu *cred; /* effective (overridable) subjective task
63607- * credentials (COW) */
63608- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63609-
63610 char comm[TASK_COMM_LEN]; /* executable name excluding path
63611 - access with [gs]et_task_comm (which lock
63612 it with task_lock())
63613@@ -1380,8 +1393,16 @@ struct task_struct {
63614 #endif
63615 /* CPU-specific state of this task */
63616 struct thread_struct thread;
63617+/* thread_info moved to task_struct */
63618+#ifdef CONFIG_X86
63619+ struct thread_info tinfo;
63620+#endif
63621 /* filesystem information */
63622 struct fs_struct *fs;
63623+
63624+ const struct cred __rcu *cred; /* effective (overridable) subjective task
63625+ * credentials (COW) */
63626+
63627 /* open file information */
63628 struct files_struct *files;
63629 /* namespaces */
63630@@ -1428,6 +1449,11 @@ struct task_struct {
63631 struct rt_mutex_waiter *pi_blocked_on;
63632 #endif
63633
63634+/* process credentials */
63635+ const struct cred __rcu *real_cred; /* objective and real subjective task
63636+ * credentials (COW) */
63637+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63638+
63639 #ifdef CONFIG_DEBUG_MUTEXES
63640 /* mutex deadlock detection */
63641 struct mutex_waiter *blocked_on;
63642@@ -1537,6 +1563,22 @@ struct task_struct {
63643 unsigned long default_timer_slack_ns;
63644
63645 struct list_head *scm_work_list;
63646+
63647+#ifdef CONFIG_GRKERNSEC
63648+ /* grsecurity */
63649+ const struct cred *delayed_cred;
63650+ struct dentry *gr_chroot_dentry;
63651+ struct acl_subject_label *acl;
63652+ struct acl_role_label *role;
63653+ struct file *exec_file;
63654+ u16 acl_role_id;
63655+ /* is this the task that authenticated to the special role */
63656+ u8 acl_sp_role;
63657+ u8 is_writable;
63658+ u8 brute;
63659+ u8 gr_is_chrooted;
63660+#endif
63661+
63662 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63663 /* Index of current stored address in ret_stack */
63664 int curr_ret_stack;
63665@@ -1571,6 +1613,57 @@ struct task_struct {
63666 #endif
63667 };
63668
63669+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63670+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63671+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63672+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63673+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63674+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63675+
63676+#ifdef CONFIG_PAX_SOFTMODE
63677+extern int pax_softmode;
63678+#endif
63679+
63680+extern int pax_check_flags(unsigned long *);
63681+
63682+/* if tsk != current then task_lock must be held on it */
63683+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63684+static inline unsigned long pax_get_flags(struct task_struct *tsk)
63685+{
63686+ if (likely(tsk->mm))
63687+ return tsk->mm->pax_flags;
63688+ else
63689+ return 0UL;
63690+}
63691+
63692+/* if tsk != current then task_lock must be held on it */
63693+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63694+{
63695+ if (likely(tsk->mm)) {
63696+ tsk->mm->pax_flags = flags;
63697+ return 0;
63698+ }
63699+ return -EINVAL;
63700+}
63701+#endif
63702+
63703+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63704+extern void pax_set_initial_flags(struct linux_binprm *bprm);
63705+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63706+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63707+#endif
63708+
63709+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63710+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63711+extern void pax_report_refcount_overflow(struct pt_regs *regs);
63712+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
63713+
63714+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
63715+extern void pax_track_stack(void);
63716+#else
63717+static inline void pax_track_stack(void) {}
63718+#endif
63719+
63720 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63721 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63722
63723@@ -2074,7 +2167,9 @@ void yield(void);
63724 extern struct exec_domain default_exec_domain;
63725
63726 union thread_union {
63727+#ifndef CONFIG_X86
63728 struct thread_info thread_info;
63729+#endif
63730 unsigned long stack[THREAD_SIZE/sizeof(long)];
63731 };
63732
63733@@ -2107,6 +2202,7 @@ extern struct pid_namespace init_pid_ns;
63734 */
63735
63736 extern struct task_struct *find_task_by_vpid(pid_t nr);
63737+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63738 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63739 struct pid_namespace *ns);
63740
63741@@ -2243,7 +2339,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63742 extern void exit_itimers(struct signal_struct *);
63743 extern void flush_itimer_signals(void);
63744
63745-extern NORET_TYPE void do_group_exit(int);
63746+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
63747
63748 extern void daemonize(const char *, ...);
63749 extern int allow_signal(int);
63750@@ -2408,13 +2504,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63751
63752 #endif
63753
63754-static inline int object_is_on_stack(void *obj)
63755+static inline int object_starts_on_stack(void *obj)
63756 {
63757- void *stack = task_stack_page(current);
63758+ const void *stack = task_stack_page(current);
63759
63760 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63761 }
63762
63763+#ifdef CONFIG_PAX_USERCOPY
63764+extern int object_is_on_stack(const void *obj, unsigned long len);
63765+#endif
63766+
63767 extern void thread_info_cache_init(void);
63768
63769 #ifdef CONFIG_DEBUG_STACK_USAGE
63770diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63771index 899fbb4..1cb4138 100644
63772--- a/include/linux/screen_info.h
63773+++ b/include/linux/screen_info.h
63774@@ -43,7 +43,8 @@ struct screen_info {
63775 __u16 pages; /* 0x32 */
63776 __u16 vesa_attributes; /* 0x34 */
63777 __u32 capabilities; /* 0x36 */
63778- __u8 _reserved[6]; /* 0x3a */
63779+ __u16 vesapm_size; /* 0x3a */
63780+ __u8 _reserved[4]; /* 0x3c */
63781 } __attribute__((packed));
63782
63783 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63784diff --git a/include/linux/security.h b/include/linux/security.h
63785index ebd2a53..2d949ae 100644
63786--- a/include/linux/security.h
63787+++ b/include/linux/security.h
63788@@ -36,6 +36,7 @@
63789 #include <linux/key.h>
63790 #include <linux/xfrm.h>
63791 #include <linux/slab.h>
63792+#include <linux/grsecurity.h>
63793 #include <net/flow.h>
63794
63795 /* Maximum number of letters for an LSM name string */
63796diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63797index be720cd..a0e1b94 100644
63798--- a/include/linux/seq_file.h
63799+++ b/include/linux/seq_file.h
63800@@ -33,6 +33,7 @@ struct seq_operations {
63801 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63802 int (*show) (struct seq_file *m, void *v);
63803 };
63804+typedef struct seq_operations __no_const seq_operations_no_const;
63805
63806 #define SEQ_SKIP 1
63807
63808diff --git a/include/linux/shm.h b/include/linux/shm.h
63809index 92808b8..c28cac4 100644
63810--- a/include/linux/shm.h
63811+++ b/include/linux/shm.h
63812@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63813
63814 /* The task created the shm object. NULL if the task is dead. */
63815 struct task_struct *shm_creator;
63816+#ifdef CONFIG_GRKERNSEC
63817+ time_t shm_createtime;
63818+ pid_t shm_lapid;
63819+#endif
63820 };
63821
63822 /* shm_mode upper byte flags */
63823diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63824index 0f96646..cfb757a 100644
63825--- a/include/linux/skbuff.h
63826+++ b/include/linux/skbuff.h
63827@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63828 */
63829 static inline int skb_queue_empty(const struct sk_buff_head *list)
63830 {
63831- return list->next == (struct sk_buff *)list;
63832+ return list->next == (const struct sk_buff *)list;
63833 }
63834
63835 /**
63836@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63837 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63838 const struct sk_buff *skb)
63839 {
63840- return skb->next == (struct sk_buff *)list;
63841+ return skb->next == (const struct sk_buff *)list;
63842 }
63843
63844 /**
63845@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63846 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63847 const struct sk_buff *skb)
63848 {
63849- return skb->prev == (struct sk_buff *)list;
63850+ return skb->prev == (const struct sk_buff *)list;
63851 }
63852
63853 /**
63854@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63855 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63856 */
63857 #ifndef NET_SKB_PAD
63858-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63859+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63860 #endif
63861
63862 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63863diff --git a/include/linux/slab.h b/include/linux/slab.h
63864index 573c809..e84c132 100644
63865--- a/include/linux/slab.h
63866+++ b/include/linux/slab.h
63867@@ -11,12 +11,20 @@
63868
63869 #include <linux/gfp.h>
63870 #include <linux/types.h>
63871+#include <linux/err.h>
63872
63873 /*
63874 * Flags to pass to kmem_cache_create().
63875 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63876 */
63877 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63878+
63879+#ifdef CONFIG_PAX_USERCOPY
63880+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63881+#else
63882+#define SLAB_USERCOPY 0x00000000UL
63883+#endif
63884+
63885 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63886 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63887 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63888@@ -87,10 +95,13 @@
63889 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63890 * Both make kfree a no-op.
63891 */
63892-#define ZERO_SIZE_PTR ((void *)16)
63893+#define ZERO_SIZE_PTR \
63894+({ \
63895+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63896+ (void *)(-MAX_ERRNO-1L); \
63897+})
63898
63899-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63900- (unsigned long)ZERO_SIZE_PTR)
63901+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63902
63903 /*
63904 * struct kmem_cache related prototypes
63905@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63906 void kfree(const void *);
63907 void kzfree(const void *);
63908 size_t ksize(const void *);
63909+void check_object_size(const void *ptr, unsigned long n, bool to);
63910
63911 /*
63912 * Allocator specific definitions. These are mainly used to establish optimized
63913@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
63914
63915 void __init kmem_cache_init_late(void);
63916
63917+#define kmalloc(x, y) \
63918+({ \
63919+ void *___retval; \
63920+ intoverflow_t ___x = (intoverflow_t)x; \
63921+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
63922+ ___retval = NULL; \
63923+ else \
63924+ ___retval = kmalloc((size_t)___x, (y)); \
63925+ ___retval; \
63926+})
63927+
63928+#define kmalloc_node(x, y, z) \
63929+({ \
63930+ void *___retval; \
63931+ intoverflow_t ___x = (intoverflow_t)x; \
63932+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
63933+ ___retval = NULL; \
63934+ else \
63935+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
63936+ ___retval; \
63937+})
63938+
63939+#define kzalloc(x, y) \
63940+({ \
63941+ void *___retval; \
63942+ intoverflow_t ___x = (intoverflow_t)x; \
63943+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
63944+ ___retval = NULL; \
63945+ else \
63946+ ___retval = kzalloc((size_t)___x, (y)); \
63947+ ___retval; \
63948+})
63949+
63950+#define __krealloc(x, y, z) \
63951+({ \
63952+ void *___retval; \
63953+ intoverflow_t ___y = (intoverflow_t)y; \
63954+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
63955+ ___retval = NULL; \
63956+ else \
63957+ ___retval = __krealloc((x), (size_t)___y, (z)); \
63958+ ___retval; \
63959+})
63960+
63961+#define krealloc(x, y, z) \
63962+({ \
63963+ void *___retval; \
63964+ intoverflow_t ___y = (intoverflow_t)y; \
63965+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
63966+ ___retval = NULL; \
63967+ else \
63968+ ___retval = krealloc((x), (size_t)___y, (z)); \
63969+ ___retval; \
63970+})
63971+
63972 #endif /* _LINUX_SLAB_H */
63973diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63974index d00e0ba..1b3bf7b 100644
63975--- a/include/linux/slab_def.h
63976+++ b/include/linux/slab_def.h
63977@@ -68,10 +68,10 @@ struct kmem_cache {
63978 unsigned long node_allocs;
63979 unsigned long node_frees;
63980 unsigned long node_overflow;
63981- atomic_t allochit;
63982- atomic_t allocmiss;
63983- atomic_t freehit;
63984- atomic_t freemiss;
63985+ atomic_unchecked_t allochit;
63986+ atomic_unchecked_t allocmiss;
63987+ atomic_unchecked_t freehit;
63988+ atomic_unchecked_t freemiss;
63989
63990 /*
63991 * If debugging is enabled, then the allocator can add additional
63992diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63993index f58d641..c56bf9c 100644
63994--- a/include/linux/slub_def.h
63995+++ b/include/linux/slub_def.h
63996@@ -85,7 +85,7 @@ struct kmem_cache {
63997 struct kmem_cache_order_objects max;
63998 struct kmem_cache_order_objects min;
63999 gfp_t allocflags; /* gfp flags to use on each alloc */
64000- int refcount; /* Refcount for slab cache destroy */
64001+ atomic_t refcount; /* Refcount for slab cache destroy */
64002 void (*ctor)(void *);
64003 int inuse; /* Offset to metadata */
64004 int align; /* Alignment */
64005@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64006 }
64007
64008 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64009-void *__kmalloc(size_t size, gfp_t flags);
64010+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
64011
64012 static __always_inline void *
64013 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64014diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64015index de8832d..0147b46 100644
64016--- a/include/linux/sonet.h
64017+++ b/include/linux/sonet.h
64018@@ -61,7 +61,7 @@ struct sonet_stats {
64019 #include <linux/atomic.h>
64020
64021 struct k_sonet_stats {
64022-#define __HANDLE_ITEM(i) atomic_t i
64023+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64024 __SONET_ITEMS
64025 #undef __HANDLE_ITEM
64026 };
64027diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64028index db7bcaf..1aca77e 100644
64029--- a/include/linux/sunrpc/clnt.h
64030+++ b/include/linux/sunrpc/clnt.h
64031@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64032 {
64033 switch (sap->sa_family) {
64034 case AF_INET:
64035- return ntohs(((struct sockaddr_in *)sap)->sin_port);
64036+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64037 case AF_INET6:
64038- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64039+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64040 }
64041 return 0;
64042 }
64043@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64044 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64045 const struct sockaddr *src)
64046 {
64047- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64048+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64049 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64050
64051 dsin->sin_family = ssin->sin_family;
64052@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64053 if (sa->sa_family != AF_INET6)
64054 return 0;
64055
64056- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64057+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64058 }
64059
64060 #endif /* __KERNEL__ */
64061diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64062index e775689..9e206d9 100644
64063--- a/include/linux/sunrpc/sched.h
64064+++ b/include/linux/sunrpc/sched.h
64065@@ -105,6 +105,7 @@ struct rpc_call_ops {
64066 void (*rpc_call_done)(struct rpc_task *, void *);
64067 void (*rpc_release)(void *);
64068 };
64069+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64070
64071 struct rpc_task_setup {
64072 struct rpc_task *task;
64073diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64074index c14fe86..393245e 100644
64075--- a/include/linux/sunrpc/svc_rdma.h
64076+++ b/include/linux/sunrpc/svc_rdma.h
64077@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64078 extern unsigned int svcrdma_max_requests;
64079 extern unsigned int svcrdma_max_req_size;
64080
64081-extern atomic_t rdma_stat_recv;
64082-extern atomic_t rdma_stat_read;
64083-extern atomic_t rdma_stat_write;
64084-extern atomic_t rdma_stat_sq_starve;
64085-extern atomic_t rdma_stat_rq_starve;
64086-extern atomic_t rdma_stat_rq_poll;
64087-extern atomic_t rdma_stat_rq_prod;
64088-extern atomic_t rdma_stat_sq_poll;
64089-extern atomic_t rdma_stat_sq_prod;
64090+extern atomic_unchecked_t rdma_stat_recv;
64091+extern atomic_unchecked_t rdma_stat_read;
64092+extern atomic_unchecked_t rdma_stat_write;
64093+extern atomic_unchecked_t rdma_stat_sq_starve;
64094+extern atomic_unchecked_t rdma_stat_rq_starve;
64095+extern atomic_unchecked_t rdma_stat_rq_poll;
64096+extern atomic_unchecked_t rdma_stat_rq_prod;
64097+extern atomic_unchecked_t rdma_stat_sq_poll;
64098+extern atomic_unchecked_t rdma_stat_sq_prod;
64099
64100 #define RPCRDMA_VERSION 1
64101
64102diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64103index 11684d9..0d245eb 100644
64104--- a/include/linux/sysctl.h
64105+++ b/include/linux/sysctl.h
64106@@ -155,7 +155,11 @@ enum
64107 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64108 };
64109
64110-
64111+#ifdef CONFIG_PAX_SOFTMODE
64112+enum {
64113+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64114+};
64115+#endif
64116
64117 /* CTL_VM names: */
64118 enum
64119@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64120
64121 extern int proc_dostring(struct ctl_table *, int,
64122 void __user *, size_t *, loff_t *);
64123+extern int proc_dostring_modpriv(struct ctl_table *, int,
64124+ void __user *, size_t *, loff_t *);
64125 extern int proc_dointvec(struct ctl_table *, int,
64126 void __user *, size_t *, loff_t *);
64127 extern int proc_dointvec_minmax(struct ctl_table *, int,
64128diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64129index ff7dc08..893e1bd 100644
64130--- a/include/linux/tty_ldisc.h
64131+++ b/include/linux/tty_ldisc.h
64132@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
64133
64134 struct module *owner;
64135
64136- int refcount;
64137+ atomic_t refcount;
64138 };
64139
64140 struct tty_ldisc {
64141diff --git a/include/linux/types.h b/include/linux/types.h
64142index 176da8c..e45e473 100644
64143--- a/include/linux/types.h
64144+++ b/include/linux/types.h
64145@@ -213,10 +213,26 @@ typedef struct {
64146 int counter;
64147 } atomic_t;
64148
64149+#ifdef CONFIG_PAX_REFCOUNT
64150+typedef struct {
64151+ int counter;
64152+} atomic_unchecked_t;
64153+#else
64154+typedef atomic_t atomic_unchecked_t;
64155+#endif
64156+
64157 #ifdef CONFIG_64BIT
64158 typedef struct {
64159 long counter;
64160 } atomic64_t;
64161+
64162+#ifdef CONFIG_PAX_REFCOUNT
64163+typedef struct {
64164+ long counter;
64165+} atomic64_unchecked_t;
64166+#else
64167+typedef atomic64_t atomic64_unchecked_t;
64168+#endif
64169 #endif
64170
64171 struct list_head {
64172diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64173index 5ca0951..ab496a5 100644
64174--- a/include/linux/uaccess.h
64175+++ b/include/linux/uaccess.h
64176@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64177 long ret; \
64178 mm_segment_t old_fs = get_fs(); \
64179 \
64180- set_fs(KERNEL_DS); \
64181 pagefault_disable(); \
64182- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64183- pagefault_enable(); \
64184+ set_fs(KERNEL_DS); \
64185+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64186 set_fs(old_fs); \
64187+ pagefault_enable(); \
64188 ret; \
64189 })
64190
64191diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64192index 99c1b4d..bb94261 100644
64193--- a/include/linux/unaligned/access_ok.h
64194+++ b/include/linux/unaligned/access_ok.h
64195@@ -6,32 +6,32 @@
64196
64197 static inline u16 get_unaligned_le16(const void *p)
64198 {
64199- return le16_to_cpup((__le16 *)p);
64200+ return le16_to_cpup((const __le16 *)p);
64201 }
64202
64203 static inline u32 get_unaligned_le32(const void *p)
64204 {
64205- return le32_to_cpup((__le32 *)p);
64206+ return le32_to_cpup((const __le32 *)p);
64207 }
64208
64209 static inline u64 get_unaligned_le64(const void *p)
64210 {
64211- return le64_to_cpup((__le64 *)p);
64212+ return le64_to_cpup((const __le64 *)p);
64213 }
64214
64215 static inline u16 get_unaligned_be16(const void *p)
64216 {
64217- return be16_to_cpup((__be16 *)p);
64218+ return be16_to_cpup((const __be16 *)p);
64219 }
64220
64221 static inline u32 get_unaligned_be32(const void *p)
64222 {
64223- return be32_to_cpup((__be32 *)p);
64224+ return be32_to_cpup((const __be32 *)p);
64225 }
64226
64227 static inline u64 get_unaligned_be64(const void *p)
64228 {
64229- return be64_to_cpup((__be64 *)p);
64230+ return be64_to_cpup((const __be64 *)p);
64231 }
64232
64233 static inline void put_unaligned_le16(u16 val, void *p)
64234diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64235index cf97b5b..40ebc87 100644
64236--- a/include/linux/vermagic.h
64237+++ b/include/linux/vermagic.h
64238@@ -26,9 +26,35 @@
64239 #define MODULE_ARCH_VERMAGIC ""
64240 #endif
64241
64242+#ifdef CONFIG_PAX_REFCOUNT
64243+#define MODULE_PAX_REFCOUNT "REFCOUNT "
64244+#else
64245+#define MODULE_PAX_REFCOUNT ""
64246+#endif
64247+
64248+#ifdef CONSTIFY_PLUGIN
64249+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64250+#else
64251+#define MODULE_CONSTIFY_PLUGIN ""
64252+#endif
64253+
64254+#ifdef STACKLEAK_PLUGIN
64255+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64256+#else
64257+#define MODULE_STACKLEAK_PLUGIN ""
64258+#endif
64259+
64260+#ifdef CONFIG_GRKERNSEC
64261+#define MODULE_GRSEC "GRSEC "
64262+#else
64263+#define MODULE_GRSEC ""
64264+#endif
64265+
64266 #define VERMAGIC_STRING \
64267 UTS_RELEASE " " \
64268 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64269 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64270- MODULE_ARCH_VERMAGIC
64271+ MODULE_ARCH_VERMAGIC \
64272+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64273+ MODULE_GRSEC
64274
64275diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64276index 687fb11..b342358 100644
64277--- a/include/linux/vmalloc.h
64278+++ b/include/linux/vmalloc.h
64279@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64280 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64281 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64282 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64283+
64284+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64285+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64286+#endif
64287+
64288 /* bits [20..32] reserved for arch specific ioremap internals */
64289
64290 /*
64291@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
64292 # endif
64293 #endif
64294
64295+#define vmalloc(x) \
64296+({ \
64297+ void *___retval; \
64298+ intoverflow_t ___x = (intoverflow_t)x; \
64299+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
64300+ ___retval = NULL; \
64301+ else \
64302+ ___retval = vmalloc((unsigned long)___x); \
64303+ ___retval; \
64304+})
64305+
64306+#define vzalloc(x) \
64307+({ \
64308+ void *___retval; \
64309+ intoverflow_t ___x = (intoverflow_t)x; \
64310+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
64311+ ___retval = NULL; \
64312+ else \
64313+ ___retval = vzalloc((unsigned long)___x); \
64314+ ___retval; \
64315+})
64316+
64317+#define __vmalloc(x, y, z) \
64318+({ \
64319+ void *___retval; \
64320+ intoverflow_t ___x = (intoverflow_t)x; \
64321+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
64322+ ___retval = NULL; \
64323+ else \
64324+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
64325+ ___retval; \
64326+})
64327+
64328+#define vmalloc_user(x) \
64329+({ \
64330+ void *___retval; \
64331+ intoverflow_t ___x = (intoverflow_t)x; \
64332+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
64333+ ___retval = NULL; \
64334+ else \
64335+ ___retval = vmalloc_user((unsigned long)___x); \
64336+ ___retval; \
64337+})
64338+
64339+#define vmalloc_exec(x) \
64340+({ \
64341+ void *___retval; \
64342+ intoverflow_t ___x = (intoverflow_t)x; \
64343+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
64344+ ___retval = NULL; \
64345+ else \
64346+ ___retval = vmalloc_exec((unsigned long)___x); \
64347+ ___retval; \
64348+})
64349+
64350+#define vmalloc_node(x, y) \
64351+({ \
64352+ void *___retval; \
64353+ intoverflow_t ___x = (intoverflow_t)x; \
64354+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
64355+ ___retval = NULL; \
64356+ else \
64357+ ___retval = vmalloc_node((unsigned long)___x, (y));\
64358+ ___retval; \
64359+})
64360+
64361+#define vzalloc_node(x, y) \
64362+({ \
64363+ void *___retval; \
64364+ intoverflow_t ___x = (intoverflow_t)x; \
64365+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
64366+ ___retval = NULL; \
64367+ else \
64368+ ___retval = vzalloc_node((unsigned long)___x, (y));\
64369+ ___retval; \
64370+})
64371+
64372+#define vmalloc_32(x) \
64373+({ \
64374+ void *___retval; \
64375+ intoverflow_t ___x = (intoverflow_t)x; \
64376+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
64377+ ___retval = NULL; \
64378+ else \
64379+ ___retval = vmalloc_32((unsigned long)___x); \
64380+ ___retval; \
64381+})
64382+
64383+#define vmalloc_32_user(x) \
64384+({ \
64385+void *___retval; \
64386+ intoverflow_t ___x = (intoverflow_t)x; \
64387+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
64388+ ___retval = NULL; \
64389+ else \
64390+ ___retval = vmalloc_32_user((unsigned long)___x);\
64391+ ___retval; \
64392+})
64393+
64394 #endif /* _LINUX_VMALLOC_H */
64395diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64396index 65efb92..137adbb 100644
64397--- a/include/linux/vmstat.h
64398+++ b/include/linux/vmstat.h
64399@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64400 /*
64401 * Zone based page accounting with per cpu differentials.
64402 */
64403-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64404+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64405
64406 static inline void zone_page_state_add(long x, struct zone *zone,
64407 enum zone_stat_item item)
64408 {
64409- atomic_long_add(x, &zone->vm_stat[item]);
64410- atomic_long_add(x, &vm_stat[item]);
64411+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64412+ atomic_long_add_unchecked(x, &vm_stat[item]);
64413 }
64414
64415 static inline unsigned long global_page_state(enum zone_stat_item item)
64416 {
64417- long x = atomic_long_read(&vm_stat[item]);
64418+ long x = atomic_long_read_unchecked(&vm_stat[item]);
64419 #ifdef CONFIG_SMP
64420 if (x < 0)
64421 x = 0;
64422@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64423 static inline unsigned long zone_page_state(struct zone *zone,
64424 enum zone_stat_item item)
64425 {
64426- long x = atomic_long_read(&zone->vm_stat[item]);
64427+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64428 #ifdef CONFIG_SMP
64429 if (x < 0)
64430 x = 0;
64431@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64432 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64433 enum zone_stat_item item)
64434 {
64435- long x = atomic_long_read(&zone->vm_stat[item]);
64436+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64437
64438 #ifdef CONFIG_SMP
64439 int cpu;
64440@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64441
64442 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64443 {
64444- atomic_long_inc(&zone->vm_stat[item]);
64445- atomic_long_inc(&vm_stat[item]);
64446+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
64447+ atomic_long_inc_unchecked(&vm_stat[item]);
64448 }
64449
64450 static inline void __inc_zone_page_state(struct page *page,
64451@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64452
64453 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64454 {
64455- atomic_long_dec(&zone->vm_stat[item]);
64456- atomic_long_dec(&vm_stat[item]);
64457+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
64458+ atomic_long_dec_unchecked(&vm_stat[item]);
64459 }
64460
64461 static inline void __dec_zone_page_state(struct page *page,
64462diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64463index aed54c5..3e07f7a 100644
64464--- a/include/linux/xattr.h
64465+++ b/include/linux/xattr.h
64466@@ -49,6 +49,11 @@
64467 #define XATTR_CAPS_SUFFIX "capability"
64468 #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
64469
64470+/* User namespace */
64471+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64472+#define XATTR_PAX_FLAGS_SUFFIX "flags"
64473+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64474+
64475 #ifdef __KERNEL__
64476
64477 #include <linux/types.h>
64478diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64479index 4aeff96..b378cdc 100644
64480--- a/include/media/saa7146_vv.h
64481+++ b/include/media/saa7146_vv.h
64482@@ -163,7 +163,7 @@ struct saa7146_ext_vv
64483 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64484
64485 /* the extension can override this */
64486- struct v4l2_ioctl_ops ops;
64487+ v4l2_ioctl_ops_no_const ops;
64488 /* pointer to the saa7146 core ops */
64489 const struct v4l2_ioctl_ops *core_ops;
64490
64491diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64492index c7c40f1..4f01585 100644
64493--- a/include/media/v4l2-dev.h
64494+++ b/include/media/v4l2-dev.h
64495@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
64496
64497
64498 struct v4l2_file_operations {
64499- struct module *owner;
64500+ struct module * const owner;
64501 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
64502 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
64503 unsigned int (*poll) (struct file *, struct poll_table_struct *);
64504@@ -68,6 +68,7 @@ struct v4l2_file_operations {
64505 int (*open) (struct file *);
64506 int (*release) (struct file *);
64507 };
64508+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64509
64510 /*
64511 * Newer version of video_device, handled by videodev2.c
64512diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
64513index dd9f1e7..8c4dd86 100644
64514--- a/include/media/v4l2-ioctl.h
64515+++ b/include/media/v4l2-ioctl.h
64516@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
64517 long (*vidioc_default) (struct file *file, void *fh,
64518 bool valid_prio, int cmd, void *arg);
64519 };
64520-
64521+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64522
64523 /* v4l debugging and diagnostics */
64524
64525diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64526index c5dedd8..a93b07b 100644
64527--- a/include/net/caif/caif_hsi.h
64528+++ b/include/net/caif/caif_hsi.h
64529@@ -94,7 +94,7 @@ struct cfhsi_drv {
64530 void (*rx_done_cb) (struct cfhsi_drv *drv);
64531 void (*wake_up_cb) (struct cfhsi_drv *drv);
64532 void (*wake_down_cb) (struct cfhsi_drv *drv);
64533-};
64534+} __no_const;
64535
64536 /* Structure implemented by HSI device. */
64537 struct cfhsi_dev {
64538diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64539index 9e5425b..8136ffc 100644
64540--- a/include/net/caif/cfctrl.h
64541+++ b/include/net/caif/cfctrl.h
64542@@ -52,7 +52,7 @@ struct cfctrl_rsp {
64543 void (*radioset_rsp)(void);
64544 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64545 struct cflayer *client_layer);
64546-};
64547+} __no_const;
64548
64549 /* Link Setup Parameters for CAIF-Links. */
64550 struct cfctrl_link_param {
64551@@ -101,8 +101,8 @@ struct cfctrl_request_info {
64552 struct cfctrl {
64553 struct cfsrvl serv;
64554 struct cfctrl_rsp res;
64555- atomic_t req_seq_no;
64556- atomic_t rsp_seq_no;
64557+ atomic_unchecked_t req_seq_no;
64558+ atomic_unchecked_t rsp_seq_no;
64559 struct list_head list;
64560 /* Protects from simultaneous access to first_req list */
64561 spinlock_t info_list_lock;
64562diff --git a/include/net/flow.h b/include/net/flow.h
64563index 57f15a7..0de26c6 100644
64564--- a/include/net/flow.h
64565+++ b/include/net/flow.h
64566@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64567
64568 extern void flow_cache_flush(void);
64569 extern void flow_cache_flush_deferred(void);
64570-extern atomic_t flow_cache_genid;
64571+extern atomic_unchecked_t flow_cache_genid;
64572
64573 #endif
64574diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64575index e9ff3fc..9d3e5c7 100644
64576--- a/include/net/inetpeer.h
64577+++ b/include/net/inetpeer.h
64578@@ -48,8 +48,8 @@ struct inet_peer {
64579 */
64580 union {
64581 struct {
64582- atomic_t rid; /* Frag reception counter */
64583- atomic_t ip_id_count; /* IP ID for the next packet */
64584+ atomic_unchecked_t rid; /* Frag reception counter */
64585+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64586 __u32 tcp_ts;
64587 __u32 tcp_ts_stamp;
64588 };
64589@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64590 more++;
64591 inet_peer_refcheck(p);
64592 do {
64593- old = atomic_read(&p->ip_id_count);
64594+ old = atomic_read_unchecked(&p->ip_id_count);
64595 new = old + more;
64596 if (!new)
64597 new = 1;
64598- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64599+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64600 return new;
64601 }
64602
64603diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64604index 10422ef..662570f 100644
64605--- a/include/net/ip_fib.h
64606+++ b/include/net/ip_fib.h
64607@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64608
64609 #define FIB_RES_SADDR(net, res) \
64610 ((FIB_RES_NH(res).nh_saddr_genid == \
64611- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64612+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64613 FIB_RES_NH(res).nh_saddr : \
64614 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64615 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64616diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64617index 8fa4430..05dd772 100644
64618--- a/include/net/ip_vs.h
64619+++ b/include/net/ip_vs.h
64620@@ -509,7 +509,7 @@ struct ip_vs_conn {
64621 struct ip_vs_conn *control; /* Master control connection */
64622 atomic_t n_control; /* Number of controlled ones */
64623 struct ip_vs_dest *dest; /* real server */
64624- atomic_t in_pkts; /* incoming packet counter */
64625+ atomic_unchecked_t in_pkts; /* incoming packet counter */
64626
64627 /* packet transmitter for different forwarding methods. If it
64628 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64629@@ -647,7 +647,7 @@ struct ip_vs_dest {
64630 __be16 port; /* port number of the server */
64631 union nf_inet_addr addr; /* IP address of the server */
64632 volatile unsigned flags; /* dest status flags */
64633- atomic_t conn_flags; /* flags to copy to conn */
64634+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
64635 atomic_t weight; /* server weight */
64636
64637 atomic_t refcnt; /* reference counter */
64638diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64639index 69b610a..fe3962c 100644
64640--- a/include/net/irda/ircomm_core.h
64641+++ b/include/net/irda/ircomm_core.h
64642@@ -51,7 +51,7 @@ typedef struct {
64643 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64644 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64645 struct ircomm_info *);
64646-} call_t;
64647+} __no_const call_t;
64648
64649 struct ircomm_cb {
64650 irda_queue_t queue;
64651diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64652index 59ba38bc..d515662 100644
64653--- a/include/net/irda/ircomm_tty.h
64654+++ b/include/net/irda/ircomm_tty.h
64655@@ -35,6 +35,7 @@
64656 #include <linux/termios.h>
64657 #include <linux/timer.h>
64658 #include <linux/tty.h> /* struct tty_struct */
64659+#include <asm/local.h>
64660
64661 #include <net/irda/irias_object.h>
64662 #include <net/irda/ircomm_core.h>
64663@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64664 unsigned short close_delay;
64665 unsigned short closing_wait; /* time to wait before closing */
64666
64667- int open_count;
64668- int blocked_open; /* # of blocked opens */
64669+ local_t open_count;
64670+ local_t blocked_open; /* # of blocked opens */
64671
64672 /* Protect concurent access to :
64673 * o self->open_count
64674diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64675index f82a1e8..82d81e8 100644
64676--- a/include/net/iucv/af_iucv.h
64677+++ b/include/net/iucv/af_iucv.h
64678@@ -87,7 +87,7 @@ struct iucv_sock {
64679 struct iucv_sock_list {
64680 struct hlist_head head;
64681 rwlock_t lock;
64682- atomic_t autobind_name;
64683+ atomic_unchecked_t autobind_name;
64684 };
64685
64686 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64687diff --git a/include/net/lapb.h b/include/net/lapb.h
64688index 96cb5dd..25e8d4f 100644
64689--- a/include/net/lapb.h
64690+++ b/include/net/lapb.h
64691@@ -95,7 +95,7 @@ struct lapb_cb {
64692 struct sk_buff_head write_queue;
64693 struct sk_buff_head ack_queue;
64694 unsigned char window;
64695- struct lapb_register_struct callbacks;
64696+ struct lapb_register_struct *callbacks;
64697
64698 /* FRMR control information */
64699 struct lapb_frame frmr_data;
64700diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64701index 2720884..3aa5c25 100644
64702--- a/include/net/neighbour.h
64703+++ b/include/net/neighbour.h
64704@@ -122,7 +122,7 @@ struct neigh_ops {
64705 void (*error_report)(struct neighbour *, struct sk_buff *);
64706 int (*output)(struct neighbour *, struct sk_buff *);
64707 int (*connected_output)(struct neighbour *, struct sk_buff *);
64708-};
64709+} __do_const;
64710
64711 struct pneigh_entry {
64712 struct pneigh_entry *next;
64713diff --git a/include/net/netlink.h b/include/net/netlink.h
64714index 98c1854..d4add7b 100644
64715--- a/include/net/netlink.h
64716+++ b/include/net/netlink.h
64717@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64718 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64719 {
64720 if (mark)
64721- skb_trim(skb, (unsigned char *) mark - skb->data);
64722+ skb_trim(skb, (const unsigned char *) mark - skb->data);
64723 }
64724
64725 /**
64726diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64727index d786b4f..4c3dd41 100644
64728--- a/include/net/netns/ipv4.h
64729+++ b/include/net/netns/ipv4.h
64730@@ -56,8 +56,8 @@ struct netns_ipv4 {
64731
64732 unsigned int sysctl_ping_group_range[2];
64733
64734- atomic_t rt_genid;
64735- atomic_t dev_addr_genid;
64736+ atomic_unchecked_t rt_genid;
64737+ atomic_unchecked_t dev_addr_genid;
64738
64739 #ifdef CONFIG_IP_MROUTE
64740 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64741diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64742index 6a72a58..e6a127d 100644
64743--- a/include/net/sctp/sctp.h
64744+++ b/include/net/sctp/sctp.h
64745@@ -318,9 +318,9 @@ do { \
64746
64747 #else /* SCTP_DEBUG */
64748
64749-#define SCTP_DEBUG_PRINTK(whatever...)
64750-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64751-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64752+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64753+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64754+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64755 #define SCTP_ENABLE_DEBUG
64756 #define SCTP_DISABLE_DEBUG
64757 #define SCTP_ASSERT(expr, str, func)
64758diff --git a/include/net/sock.h b/include/net/sock.h
64759index 8e4062f..77b041e 100644
64760--- a/include/net/sock.h
64761+++ b/include/net/sock.h
64762@@ -278,7 +278,7 @@ struct sock {
64763 #ifdef CONFIG_RPS
64764 __u32 sk_rxhash;
64765 #endif
64766- atomic_t sk_drops;
64767+ atomic_unchecked_t sk_drops;
64768 int sk_rcvbuf;
64769
64770 struct sk_filter __rcu *sk_filter;
64771@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
64772 }
64773
64774 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64775- char __user *from, char *to,
64776+ char __user *from, unsigned char *to,
64777 int copy, int offset)
64778 {
64779 if (skb->ip_summed == CHECKSUM_NONE) {
64780diff --git a/include/net/tcp.h b/include/net/tcp.h
64781index acc620a..f4d99c6 100644
64782--- a/include/net/tcp.h
64783+++ b/include/net/tcp.h
64784@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
64785 struct tcp_seq_afinfo {
64786 char *name;
64787 sa_family_t family;
64788- struct file_operations seq_fops;
64789- struct seq_operations seq_ops;
64790+ file_operations_no_const seq_fops;
64791+ seq_operations_no_const seq_ops;
64792 };
64793
64794 struct tcp_iter_state {
64795diff --git a/include/net/udp.h b/include/net/udp.h
64796index 67ea6fc..e42aee8 100644
64797--- a/include/net/udp.h
64798+++ b/include/net/udp.h
64799@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
64800 char *name;
64801 sa_family_t family;
64802 struct udp_table *udp_table;
64803- struct file_operations seq_fops;
64804- struct seq_operations seq_ops;
64805+ file_operations_no_const seq_fops;
64806+ seq_operations_no_const seq_ops;
64807 };
64808
64809 struct udp_iter_state {
64810diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64811index b203e14..1df3991 100644
64812--- a/include/net/xfrm.h
64813+++ b/include/net/xfrm.h
64814@@ -505,7 +505,7 @@ struct xfrm_policy {
64815 struct timer_list timer;
64816
64817 struct flow_cache_object flo;
64818- atomic_t genid;
64819+ atomic_unchecked_t genid;
64820 u32 priority;
64821 u32 index;
64822 struct xfrm_mark mark;
64823diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64824index 2d0191c..a55797d 100644
64825--- a/include/rdma/iw_cm.h
64826+++ b/include/rdma/iw_cm.h
64827@@ -120,7 +120,7 @@ struct iw_cm_verbs {
64828 int backlog);
64829
64830 int (*destroy_listen)(struct iw_cm_id *cm_id);
64831-};
64832+} __no_const;
64833
64834 /**
64835 * iw_create_cm_id - Create an IW CM identifier.
64836diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64837index 7d96829..4ba78d3 100644
64838--- a/include/scsi/libfc.h
64839+++ b/include/scsi/libfc.h
64840@@ -758,6 +758,7 @@ struct libfc_function_template {
64841 */
64842 void (*disc_stop_final) (struct fc_lport *);
64843 };
64844+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64845
64846 /**
64847 * struct fc_disc - Discovery context
64848@@ -861,7 +862,7 @@ struct fc_lport {
64849 struct fc_vport *vport;
64850
64851 /* Operational Information */
64852- struct libfc_function_template tt;
64853+ libfc_function_template_no_const tt;
64854 u8 link_up;
64855 u8 qfull;
64856 enum fc_lport_state state;
64857diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64858index d371c3c..e228a8c 100644
64859--- a/include/scsi/scsi_device.h
64860+++ b/include/scsi/scsi_device.h
64861@@ -161,9 +161,9 @@ struct scsi_device {
64862 unsigned int max_device_blocked; /* what device_blocked counts down from */
64863 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64864
64865- atomic_t iorequest_cnt;
64866- atomic_t iodone_cnt;
64867- atomic_t ioerr_cnt;
64868+ atomic_unchecked_t iorequest_cnt;
64869+ atomic_unchecked_t iodone_cnt;
64870+ atomic_unchecked_t ioerr_cnt;
64871
64872 struct device sdev_gendev,
64873 sdev_dev;
64874diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64875index 2a65167..91e01f8 100644
64876--- a/include/scsi/scsi_transport_fc.h
64877+++ b/include/scsi/scsi_transport_fc.h
64878@@ -711,7 +711,7 @@ struct fc_function_template {
64879 unsigned long show_host_system_hostname:1;
64880
64881 unsigned long disable_target_scan:1;
64882-};
64883+} __do_const;
64884
64885
64886 /**
64887diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64888index 030b87c..98a6954 100644
64889--- a/include/sound/ak4xxx-adda.h
64890+++ b/include/sound/ak4xxx-adda.h
64891@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64892 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64893 unsigned char val);
64894 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64895-};
64896+} __no_const;
64897
64898 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64899
64900diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64901index 8c05e47..2b5df97 100644
64902--- a/include/sound/hwdep.h
64903+++ b/include/sound/hwdep.h
64904@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64905 struct snd_hwdep_dsp_status *status);
64906 int (*dsp_load)(struct snd_hwdep *hw,
64907 struct snd_hwdep_dsp_image *image);
64908-};
64909+} __no_const;
64910
64911 struct snd_hwdep {
64912 struct snd_card *card;
64913diff --git a/include/sound/info.h b/include/sound/info.h
64914index 4e94cf1..76748b1 100644
64915--- a/include/sound/info.h
64916+++ b/include/sound/info.h
64917@@ -44,7 +44,7 @@ struct snd_info_entry_text {
64918 struct snd_info_buffer *buffer);
64919 void (*write)(struct snd_info_entry *entry,
64920 struct snd_info_buffer *buffer);
64921-};
64922+} __no_const;
64923
64924 struct snd_info_entry_ops {
64925 int (*open)(struct snd_info_entry *entry,
64926diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64927index 57e71fa..a2c7534 100644
64928--- a/include/sound/pcm.h
64929+++ b/include/sound/pcm.h
64930@@ -81,6 +81,7 @@ struct snd_pcm_ops {
64931 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64932 int (*ack)(struct snd_pcm_substream *substream);
64933 };
64934+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64935
64936 /*
64937 *
64938diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64939index af1b49e..a5d55a5 100644
64940--- a/include/sound/sb16_csp.h
64941+++ b/include/sound/sb16_csp.h
64942@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64943 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64944 int (*csp_stop) (struct snd_sb_csp * p);
64945 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64946-};
64947+} __no_const;
64948
64949 /*
64950 * CSP private data
64951diff --git a/include/sound/soc.h b/include/sound/soc.h
64952index aa19f5a..a5b8208 100644
64953--- a/include/sound/soc.h
64954+++ b/include/sound/soc.h
64955@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
64956 /* platform IO - used for platform DAPM */
64957 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64958 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64959-};
64960+} __do_const;
64961
64962 struct snd_soc_platform {
64963 const char *name;
64964diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64965index 444cd6b..3327cc5 100644
64966--- a/include/sound/ymfpci.h
64967+++ b/include/sound/ymfpci.h
64968@@ -358,7 +358,7 @@ struct snd_ymfpci {
64969 spinlock_t reg_lock;
64970 spinlock_t voice_lock;
64971 wait_queue_head_t interrupt_sleep;
64972- atomic_t interrupt_sleep_count;
64973+ atomic_unchecked_t interrupt_sleep_count;
64974 struct snd_info_entry *proc_entry;
64975 const struct firmware *dsp_microcode;
64976 const struct firmware *controller_microcode;
64977diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64978index 2704065..e10f3ef 100644
64979--- a/include/target/target_core_base.h
64980+++ b/include/target/target_core_base.h
64981@@ -356,7 +356,7 @@ struct t10_reservation_ops {
64982 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64983 int (*t10_pr_register)(struct se_cmd *);
64984 int (*t10_pr_clear)(struct se_cmd *);
64985-};
64986+} __no_const;
64987
64988 struct t10_reservation {
64989 /* Reservation effects all target ports */
64990@@ -496,8 +496,8 @@ struct se_cmd {
64991 atomic_t t_task_cdbs_left;
64992 atomic_t t_task_cdbs_ex_left;
64993 atomic_t t_task_cdbs_timeout_left;
64994- atomic_t t_task_cdbs_sent;
64995- atomic_t t_transport_aborted;
64996+ atomic_unchecked_t t_task_cdbs_sent;
64997+ atomic_unchecked_t t_transport_aborted;
64998 atomic_t t_transport_active;
64999 atomic_t t_transport_complete;
65000 atomic_t t_transport_queue_active;
65001@@ -744,7 +744,7 @@ struct se_device {
65002 atomic_t active_cmds;
65003 atomic_t simple_cmds;
65004 atomic_t depth_left;
65005- atomic_t dev_ordered_id;
65006+ atomic_unchecked_t dev_ordered_id;
65007 atomic_t dev_tur_active;
65008 atomic_t execute_tasks;
65009 atomic_t dev_status_thr_count;
65010diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65011index 1c09820..7f5ec79 100644
65012--- a/include/trace/events/irq.h
65013+++ b/include/trace/events/irq.h
65014@@ -36,7 +36,7 @@ struct softirq_action;
65015 */
65016 TRACE_EVENT(irq_handler_entry,
65017
65018- TP_PROTO(int irq, struct irqaction *action),
65019+ TP_PROTO(int irq, const struct irqaction *action),
65020
65021 TP_ARGS(irq, action),
65022
65023@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65024 */
65025 TRACE_EVENT(irq_handler_exit,
65026
65027- TP_PROTO(int irq, struct irqaction *action, int ret),
65028+ TP_PROTO(int irq, const struct irqaction *action, int ret),
65029
65030 TP_ARGS(irq, action, ret),
65031
65032diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65033index 69d485a..dd0bee7 100644
65034--- a/include/video/udlfb.h
65035+++ b/include/video/udlfb.h
65036@@ -51,10 +51,10 @@ struct dlfb_data {
65037 int base8;
65038 u32 pseudo_palette[256];
65039 /* blit-only rendering path metrics, exposed through sysfs */
65040- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65041- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65042- atomic_t bytes_sent; /* to usb, after compression including overhead */
65043- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65044+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65045+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65046+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65047+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65048 };
65049
65050 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65051diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65052index 0993a22..32ba2fe 100644
65053--- a/include/video/uvesafb.h
65054+++ b/include/video/uvesafb.h
65055@@ -177,6 +177,7 @@ struct uvesafb_par {
65056 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65057 u8 pmi_setpal; /* PMI for palette changes */
65058 u16 *pmi_base; /* protected mode interface location */
65059+ u8 *pmi_code; /* protected mode code location */
65060 void *pmi_start;
65061 void *pmi_pal;
65062 u8 *vbe_state_orig; /*
65063diff --git a/init/Kconfig b/init/Kconfig
65064index d627783..693a9f3 100644
65065--- a/init/Kconfig
65066+++ b/init/Kconfig
65067@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
65068
65069 config COMPAT_BRK
65070 bool "Disable heap randomization"
65071- default y
65072+ default n
65073 help
65074 Randomizing heap placement makes heap exploits harder, but it
65075 also breaks ancient binaries (including anything libc5 based).
65076diff --git a/init/do_mounts.c b/init/do_mounts.c
65077index ef6478f..fdb0d8a 100644
65078--- a/init/do_mounts.c
65079+++ b/init/do_mounts.c
65080@@ -287,11 +287,11 @@ static void __init get_fs_names(char *page)
65081
65082 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65083 {
65084- int err = sys_mount(name, "/root", fs, flags, data);
65085+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65086 if (err)
65087 return err;
65088
65089- sys_chdir((const char __user __force *)"/root");
65090+ sys_chdir((const char __force_user*)"/root");
65091 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
65092 printk(KERN_INFO
65093 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
65094@@ -410,18 +410,18 @@ void __init change_floppy(char *fmt, ...)
65095 va_start(args, fmt);
65096 vsprintf(buf, fmt, args);
65097 va_end(args);
65098- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65099+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65100 if (fd >= 0) {
65101 sys_ioctl(fd, FDEJECT, 0);
65102 sys_close(fd);
65103 }
65104 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65105- fd = sys_open("/dev/console", O_RDWR, 0);
65106+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65107 if (fd >= 0) {
65108 sys_ioctl(fd, TCGETS, (long)&termios);
65109 termios.c_lflag &= ~ICANON;
65110 sys_ioctl(fd, TCSETSF, (long)&termios);
65111- sys_read(fd, &c, 1);
65112+ sys_read(fd, (char __user *)&c, 1);
65113 termios.c_lflag |= ICANON;
65114 sys_ioctl(fd, TCSETSF, (long)&termios);
65115 sys_close(fd);
65116@@ -515,6 +515,6 @@ void __init prepare_namespace(void)
65117 mount_root();
65118 out:
65119 devtmpfs_mount("dev");
65120- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65121- sys_chroot((const char __user __force *)".");
65122+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65123+ sys_chroot((const char __force_user *)".");
65124 }
65125diff --git a/init/do_mounts.h b/init/do_mounts.h
65126index f5b978a..69dbfe8 100644
65127--- a/init/do_mounts.h
65128+++ b/init/do_mounts.h
65129@@ -15,15 +15,15 @@ extern int root_mountflags;
65130
65131 static inline int create_dev(char *name, dev_t dev)
65132 {
65133- sys_unlink(name);
65134- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65135+ sys_unlink((char __force_user *)name);
65136+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65137 }
65138
65139 #if BITS_PER_LONG == 32
65140 static inline u32 bstat(char *name)
65141 {
65142 struct stat64 stat;
65143- if (sys_stat64(name, &stat) != 0)
65144+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65145 return 0;
65146 if (!S_ISBLK(stat.st_mode))
65147 return 0;
65148@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65149 static inline u32 bstat(char *name)
65150 {
65151 struct stat stat;
65152- if (sys_newstat(name, &stat) != 0)
65153+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65154 return 0;
65155 if (!S_ISBLK(stat.st_mode))
65156 return 0;
65157diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65158index 3098a38..253064e 100644
65159--- a/init/do_mounts_initrd.c
65160+++ b/init/do_mounts_initrd.c
65161@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
65162 create_dev("/dev/root.old", Root_RAM0);
65163 /* mount initrd on rootfs' /root */
65164 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65165- sys_mkdir("/old", 0700);
65166- root_fd = sys_open("/", 0, 0);
65167- old_fd = sys_open("/old", 0, 0);
65168+ sys_mkdir((const char __force_user *)"/old", 0700);
65169+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
65170+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65171 /* move initrd over / and chdir/chroot in initrd root */
65172- sys_chdir("/root");
65173- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65174- sys_chroot(".");
65175+ sys_chdir((const char __force_user *)"/root");
65176+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65177+ sys_chroot((const char __force_user *)".");
65178
65179 /*
65180 * In case that a resume from disk is carried out by linuxrc or one of
65181@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
65182
65183 /* move initrd to rootfs' /old */
65184 sys_fchdir(old_fd);
65185- sys_mount("/", ".", NULL, MS_MOVE, NULL);
65186+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65187 /* switch root and cwd back to / of rootfs */
65188 sys_fchdir(root_fd);
65189- sys_chroot(".");
65190+ sys_chroot((const char __force_user *)".");
65191 sys_close(old_fd);
65192 sys_close(root_fd);
65193
65194 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65195- sys_chdir("/old");
65196+ sys_chdir((const char __force_user *)"/old");
65197 return;
65198 }
65199
65200@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
65201 mount_root();
65202
65203 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65204- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65205+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65206 if (!error)
65207 printk("okay\n");
65208 else {
65209- int fd = sys_open("/dev/root.old", O_RDWR, 0);
65210+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65211 if (error == -ENOENT)
65212 printk("/initrd does not exist. Ignored.\n");
65213 else
65214 printk("failed\n");
65215 printk(KERN_NOTICE "Unmounting old root\n");
65216- sys_umount("/old", MNT_DETACH);
65217+ sys_umount((char __force_user *)"/old", MNT_DETACH);
65218 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65219 if (fd < 0) {
65220 error = fd;
65221@@ -116,11 +116,11 @@ int __init initrd_load(void)
65222 * mounted in the normal path.
65223 */
65224 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65225- sys_unlink("/initrd.image");
65226+ sys_unlink((const char __force_user *)"/initrd.image");
65227 handle_initrd();
65228 return 1;
65229 }
65230 }
65231- sys_unlink("/initrd.image");
65232+ sys_unlink((const char __force_user *)"/initrd.image");
65233 return 0;
65234 }
65235diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65236index 32c4799..c27ee74 100644
65237--- a/init/do_mounts_md.c
65238+++ b/init/do_mounts_md.c
65239@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
65240 partitioned ? "_d" : "", minor,
65241 md_setup_args[ent].device_names);
65242
65243- fd = sys_open(name, 0, 0);
65244+ fd = sys_open((char __force_user *)name, 0, 0);
65245 if (fd < 0) {
65246 printk(KERN_ERR "md: open failed - cannot start "
65247 "array %s\n", name);
65248@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
65249 * array without it
65250 */
65251 sys_close(fd);
65252- fd = sys_open(name, 0, 0);
65253+ fd = sys_open((char __force_user *)name, 0, 0);
65254 sys_ioctl(fd, BLKRRPART, 0);
65255 }
65256 sys_close(fd);
65257@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
65258
65259 wait_for_device_probe();
65260
65261- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
65262+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65263 if (fd >= 0) {
65264 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65265 sys_close(fd);
65266diff --git a/init/initramfs.c b/init/initramfs.c
65267index 2531811..040d4d4 100644
65268--- a/init/initramfs.c
65269+++ b/init/initramfs.c
65270@@ -74,7 +74,7 @@ static void __init free_hash(void)
65271 }
65272 }
65273
65274-static long __init do_utime(char __user *filename, time_t mtime)
65275+static long __init do_utime(__force char __user *filename, time_t mtime)
65276 {
65277 struct timespec t[2];
65278
65279@@ -109,7 +109,7 @@ static void __init dir_utime(void)
65280 struct dir_entry *de, *tmp;
65281 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65282 list_del(&de->list);
65283- do_utime(de->name, de->mtime);
65284+ do_utime((char __force_user *)de->name, de->mtime);
65285 kfree(de->name);
65286 kfree(de);
65287 }
65288@@ -271,7 +271,7 @@ static int __init maybe_link(void)
65289 if (nlink >= 2) {
65290 char *old = find_link(major, minor, ino, mode, collected);
65291 if (old)
65292- return (sys_link(old, collected) < 0) ? -1 : 1;
65293+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
65294 }
65295 return 0;
65296 }
65297@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
65298 {
65299 struct stat st;
65300
65301- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
65302+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
65303 if (S_ISDIR(st.st_mode))
65304- sys_rmdir(path);
65305+ sys_rmdir((char __force_user *)path);
65306 else
65307- sys_unlink(path);
65308+ sys_unlink((char __force_user *)path);
65309 }
65310 }
65311
65312@@ -305,7 +305,7 @@ static int __init do_name(void)
65313 int openflags = O_WRONLY|O_CREAT;
65314 if (ml != 1)
65315 openflags |= O_TRUNC;
65316- wfd = sys_open(collected, openflags, mode);
65317+ wfd = sys_open((char __force_user *)collected, openflags, mode);
65318
65319 if (wfd >= 0) {
65320 sys_fchown(wfd, uid, gid);
65321@@ -317,17 +317,17 @@ static int __init do_name(void)
65322 }
65323 }
65324 } else if (S_ISDIR(mode)) {
65325- sys_mkdir(collected, mode);
65326- sys_chown(collected, uid, gid);
65327- sys_chmod(collected, mode);
65328+ sys_mkdir((char __force_user *)collected, mode);
65329+ sys_chown((char __force_user *)collected, uid, gid);
65330+ sys_chmod((char __force_user *)collected, mode);
65331 dir_add(collected, mtime);
65332 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65333 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65334 if (maybe_link() == 0) {
65335- sys_mknod(collected, mode, rdev);
65336- sys_chown(collected, uid, gid);
65337- sys_chmod(collected, mode);
65338- do_utime(collected, mtime);
65339+ sys_mknod((char __force_user *)collected, mode, rdev);
65340+ sys_chown((char __force_user *)collected, uid, gid);
65341+ sys_chmod((char __force_user *)collected, mode);
65342+ do_utime((char __force_user *)collected, mtime);
65343 }
65344 }
65345 return 0;
65346@@ -336,15 +336,15 @@ static int __init do_name(void)
65347 static int __init do_copy(void)
65348 {
65349 if (count >= body_len) {
65350- sys_write(wfd, victim, body_len);
65351+ sys_write(wfd, (char __force_user *)victim, body_len);
65352 sys_close(wfd);
65353- do_utime(vcollected, mtime);
65354+ do_utime((char __force_user *)vcollected, mtime);
65355 kfree(vcollected);
65356 eat(body_len);
65357 state = SkipIt;
65358 return 0;
65359 } else {
65360- sys_write(wfd, victim, count);
65361+ sys_write(wfd, (char __force_user *)victim, count);
65362 body_len -= count;
65363 eat(count);
65364 return 1;
65365@@ -355,9 +355,9 @@ static int __init do_symlink(void)
65366 {
65367 collected[N_ALIGN(name_len) + body_len] = '\0';
65368 clean_path(collected, 0);
65369- sys_symlink(collected + N_ALIGN(name_len), collected);
65370- sys_lchown(collected, uid, gid);
65371- do_utime(collected, mtime);
65372+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65373+ sys_lchown((char __force_user *)collected, uid, gid);
65374+ do_utime((char __force_user *)collected, mtime);
65375 state = SkipIt;
65376 next_state = Reset;
65377 return 0;
65378diff --git a/init/main.c b/init/main.c
65379index 03b408d..5777f59 100644
65380--- a/init/main.c
65381+++ b/init/main.c
65382@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
65383 extern void tc_init(void);
65384 #endif
65385
65386+extern void grsecurity_init(void);
65387+
65388 /*
65389 * Debug helper: via this flag we know that we are in 'early bootup code'
65390 * where only the boot processor is running with IRQ disabled. This means
65391@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
65392
65393 __setup("reset_devices", set_reset_devices);
65394
65395+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65396+extern char pax_enter_kernel_user[];
65397+extern char pax_exit_kernel_user[];
65398+extern pgdval_t clone_pgd_mask;
65399+#endif
65400+
65401+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65402+static int __init setup_pax_nouderef(char *str)
65403+{
65404+#ifdef CONFIG_X86_32
65405+ unsigned int cpu;
65406+ struct desc_struct *gdt;
65407+
65408+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
65409+ gdt = get_cpu_gdt_table(cpu);
65410+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65411+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65412+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65413+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65414+ }
65415+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65416+#else
65417+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65418+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65419+ clone_pgd_mask = ~(pgdval_t)0UL;
65420+#endif
65421+
65422+ return 0;
65423+}
65424+early_param("pax_nouderef", setup_pax_nouderef);
65425+#endif
65426+
65427+#ifdef CONFIG_PAX_SOFTMODE
65428+int pax_softmode;
65429+
65430+static int __init setup_pax_softmode(char *str)
65431+{
65432+ get_option(&str, &pax_softmode);
65433+ return 1;
65434+}
65435+__setup("pax_softmode=", setup_pax_softmode);
65436+#endif
65437+
65438 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65439 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65440 static const char *panic_later, *panic_param;
65441@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65442 {
65443 int count = preempt_count();
65444 int ret;
65445+ const char *msg1 = "", *msg2 = "";
65446
65447 if (initcall_debug)
65448 ret = do_one_initcall_debug(fn);
65449@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65450 sprintf(msgbuf, "error code %d ", ret);
65451
65452 if (preempt_count() != count) {
65453- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65454+ msg1 = " preemption imbalance";
65455 preempt_count() = count;
65456 }
65457 if (irqs_disabled()) {
65458- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65459+ msg2 = " disabled interrupts";
65460 local_irq_enable();
65461 }
65462- if (msgbuf[0]) {
65463- printk("initcall %pF returned with %s\n", fn, msgbuf);
65464+ if (msgbuf[0] || *msg1 || *msg2) {
65465+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65466 }
65467
65468 return ret;
65469@@ -817,7 +863,7 @@ static int __init kernel_init(void * unused)
65470 do_basic_setup();
65471
65472 /* Open the /dev/console on the rootfs, this should never fail */
65473- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65474+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65475 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65476
65477 (void) sys_dup(0);
65478@@ -830,11 +876,13 @@ static int __init kernel_init(void * unused)
65479 if (!ramdisk_execute_command)
65480 ramdisk_execute_command = "/init";
65481
65482- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65483+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65484 ramdisk_execute_command = NULL;
65485 prepare_namespace();
65486 }
65487
65488+ grsecurity_init();
65489+
65490 /*
65491 * Ok, we have completed the initial bootup, and
65492 * we're essentially up and running. Get rid of the
65493diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65494index ed049ea..6442f7f 100644
65495--- a/ipc/mqueue.c
65496+++ b/ipc/mqueue.c
65497@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65498 mq_bytes = (mq_msg_tblsz +
65499 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65500
65501+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65502 spin_lock(&mq_lock);
65503 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65504 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
65505diff --git a/ipc/msg.c b/ipc/msg.c
65506index 7385de2..a8180e0 100644
65507--- a/ipc/msg.c
65508+++ b/ipc/msg.c
65509@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65510 return security_msg_queue_associate(msq, msgflg);
65511 }
65512
65513+static struct ipc_ops msg_ops = {
65514+ .getnew = newque,
65515+ .associate = msg_security,
65516+ .more_checks = NULL
65517+};
65518+
65519 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65520 {
65521 struct ipc_namespace *ns;
65522- struct ipc_ops msg_ops;
65523 struct ipc_params msg_params;
65524
65525 ns = current->nsproxy->ipc_ns;
65526
65527- msg_ops.getnew = newque;
65528- msg_ops.associate = msg_security;
65529- msg_ops.more_checks = NULL;
65530-
65531 msg_params.key = key;
65532 msg_params.flg = msgflg;
65533
65534diff --git a/ipc/sem.c b/ipc/sem.c
65535index c8e00f8..1135c4e 100644
65536--- a/ipc/sem.c
65537+++ b/ipc/sem.c
65538@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65539 return 0;
65540 }
65541
65542+static struct ipc_ops sem_ops = {
65543+ .getnew = newary,
65544+ .associate = sem_security,
65545+ .more_checks = sem_more_checks
65546+};
65547+
65548 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65549 {
65550 struct ipc_namespace *ns;
65551- struct ipc_ops sem_ops;
65552 struct ipc_params sem_params;
65553
65554 ns = current->nsproxy->ipc_ns;
65555@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65556 if (nsems < 0 || nsems > ns->sc_semmsl)
65557 return -EINVAL;
65558
65559- sem_ops.getnew = newary;
65560- sem_ops.associate = sem_security;
65561- sem_ops.more_checks = sem_more_checks;
65562-
65563 sem_params.key = key;
65564 sem_params.flg = semflg;
65565 sem_params.u.nsems = nsems;
65566@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
65567 int nsems;
65568 struct list_head tasks;
65569
65570+ pax_track_stack();
65571+
65572 sma = sem_lock_check(ns, semid);
65573 if (IS_ERR(sma))
65574 return PTR_ERR(sma);
65575@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
65576 struct ipc_namespace *ns;
65577 struct list_head tasks;
65578
65579+ pax_track_stack();
65580+
65581 ns = current->nsproxy->ipc_ns;
65582
65583 if (nsops < 1 || semid < 0)
65584diff --git a/ipc/shm.c b/ipc/shm.c
65585index 02ecf2c..be05b1e 100644
65586--- a/ipc/shm.c
65587+++ b/ipc/shm.c
65588@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65589 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65590 #endif
65591
65592+#ifdef CONFIG_GRKERNSEC
65593+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65594+ const time_t shm_createtime, const uid_t cuid,
65595+ const int shmid);
65596+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65597+ const time_t shm_createtime);
65598+#endif
65599+
65600 void shm_init_ns(struct ipc_namespace *ns)
65601 {
65602 ns->shm_ctlmax = SHMMAX;
65603@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65604 shp->shm_lprid = 0;
65605 shp->shm_atim = shp->shm_dtim = 0;
65606 shp->shm_ctim = get_seconds();
65607+#ifdef CONFIG_GRKERNSEC
65608+ {
65609+ struct timespec timeval;
65610+ do_posix_clock_monotonic_gettime(&timeval);
65611+
65612+ shp->shm_createtime = timeval.tv_sec;
65613+ }
65614+#endif
65615 shp->shm_segsz = size;
65616 shp->shm_nattch = 0;
65617 shp->shm_file = file;
65618@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65619 return 0;
65620 }
65621
65622+static struct ipc_ops shm_ops = {
65623+ .getnew = newseg,
65624+ .associate = shm_security,
65625+ .more_checks = shm_more_checks
65626+};
65627+
65628 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65629 {
65630 struct ipc_namespace *ns;
65631- struct ipc_ops shm_ops;
65632 struct ipc_params shm_params;
65633
65634 ns = current->nsproxy->ipc_ns;
65635
65636- shm_ops.getnew = newseg;
65637- shm_ops.associate = shm_security;
65638- shm_ops.more_checks = shm_more_checks;
65639-
65640 shm_params.key = key;
65641 shm_params.flg = shmflg;
65642 shm_params.u.size = size;
65643@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
65644 case SHM_LOCK:
65645 case SHM_UNLOCK:
65646 {
65647- struct file *uninitialized_var(shm_file);
65648-
65649 lru_add_drain_all(); /* drain pagevecs to lru lists */
65650
65651 shp = shm_lock_check(ns, shmid);
65652@@ -981,6 +996,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65653 f_mode = FMODE_READ | FMODE_WRITE;
65654 }
65655 if (shmflg & SHM_EXEC) {
65656+
65657+#ifdef CONFIG_PAX_MPROTECT
65658+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
65659+ goto out;
65660+#endif
65661+
65662 prot |= PROT_EXEC;
65663 acc_mode |= S_IXUGO;
65664 }
65665@@ -1004,9 +1025,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65666 if (err)
65667 goto out_unlock;
65668
65669+#ifdef CONFIG_GRKERNSEC
65670+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65671+ shp->shm_perm.cuid, shmid) ||
65672+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65673+ err = -EACCES;
65674+ goto out_unlock;
65675+ }
65676+#endif
65677+
65678 path = shp->shm_file->f_path;
65679 path_get(&path);
65680 shp->shm_nattch++;
65681+#ifdef CONFIG_GRKERNSEC
65682+ shp->shm_lapid = current->pid;
65683+#endif
65684 size = i_size_read(path.dentry->d_inode);
65685 shm_unlock(shp);
65686
65687diff --git a/kernel/acct.c b/kernel/acct.c
65688index fa7eb3d..7faf116 100644
65689--- a/kernel/acct.c
65690+++ b/kernel/acct.c
65691@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65692 */
65693 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65694 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65695- file->f_op->write(file, (char *)&ac,
65696+ file->f_op->write(file, (char __force_user *)&ac,
65697 sizeof(acct_t), &file->f_pos);
65698 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65699 set_fs(fs);
65700diff --git a/kernel/audit.c b/kernel/audit.c
65701index 0a1355c..9359745 100644
65702--- a/kernel/audit.c
65703+++ b/kernel/audit.c
65704@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65705 3) suppressed due to audit_rate_limit
65706 4) suppressed due to audit_backlog_limit
65707 */
65708-static atomic_t audit_lost = ATOMIC_INIT(0);
65709+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65710
65711 /* The netlink socket. */
65712 static struct sock *audit_sock;
65713@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65714 unsigned long now;
65715 int print;
65716
65717- atomic_inc(&audit_lost);
65718+ atomic_inc_unchecked(&audit_lost);
65719
65720 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65721
65722@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65723 printk(KERN_WARNING
65724 "audit: audit_lost=%d audit_rate_limit=%d "
65725 "audit_backlog_limit=%d\n",
65726- atomic_read(&audit_lost),
65727+ atomic_read_unchecked(&audit_lost),
65728 audit_rate_limit,
65729 audit_backlog_limit);
65730 audit_panic(message);
65731@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65732 status_set.pid = audit_pid;
65733 status_set.rate_limit = audit_rate_limit;
65734 status_set.backlog_limit = audit_backlog_limit;
65735- status_set.lost = atomic_read(&audit_lost);
65736+ status_set.lost = atomic_read_unchecked(&audit_lost);
65737 status_set.backlog = skb_queue_len(&audit_skb_queue);
65738 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65739 &status_set, sizeof(status_set));
65740@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
65741 avail = audit_expand(ab,
65742 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
65743 if (!avail)
65744- goto out;
65745+ goto out_va_end;
65746 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
65747 }
65748- va_end(args2);
65749 if (len > 0)
65750 skb_put(skb, len);
65751+out_va_end:
65752+ va_end(args2);
65753 out:
65754 return;
65755 }
65756diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65757index ce4b054..aaa419e 100644
65758--- a/kernel/auditsc.c
65759+++ b/kernel/auditsc.c
65760@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
65761 struct audit_buffer **ab,
65762 struct audit_aux_data_execve *axi)
65763 {
65764- int i;
65765- size_t len, len_sent = 0;
65766+ int i, len;
65767+ size_t len_sent = 0;
65768 const char __user *p;
65769 char *buf;
65770
65771@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65772 }
65773
65774 /* global counter which is incremented every time something logs in */
65775-static atomic_t session_id = ATOMIC_INIT(0);
65776+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65777
65778 /**
65779 * audit_set_loginuid - set a task's audit_context loginuid
65780@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
65781 */
65782 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
65783 {
65784- unsigned int sessionid = atomic_inc_return(&session_id);
65785+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
65786 struct audit_context *context = task->audit_context;
65787
65788 if (context && context->in_syscall) {
65789diff --git a/kernel/capability.c b/kernel/capability.c
65790index 283c529..36ac81e 100644
65791--- a/kernel/capability.c
65792+++ b/kernel/capability.c
65793@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65794 * before modification is attempted and the application
65795 * fails.
65796 */
65797+ if (tocopy > ARRAY_SIZE(kdata))
65798+ return -EFAULT;
65799+
65800 if (copy_to_user(dataptr, kdata, tocopy
65801 * sizeof(struct __user_cap_data_struct))) {
65802 return -EFAULT;
65803@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65804 BUG();
65805 }
65806
65807- if (security_capable(ns, current_cred(), cap) == 0) {
65808+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
65809 current->flags |= PF_SUPERPRIV;
65810 return true;
65811 }
65812@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
65813 }
65814 EXPORT_SYMBOL(ns_capable);
65815
65816+bool ns_capable_nolog(struct user_namespace *ns, int cap)
65817+{
65818+ if (unlikely(!cap_valid(cap))) {
65819+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65820+ BUG();
65821+ }
65822+
65823+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
65824+ current->flags |= PF_SUPERPRIV;
65825+ return true;
65826+ }
65827+ return false;
65828+}
65829+EXPORT_SYMBOL(ns_capable_nolog);
65830+
65831+bool capable_nolog(int cap)
65832+{
65833+ return ns_capable_nolog(&init_user_ns, cap);
65834+}
65835+EXPORT_SYMBOL(capable_nolog);
65836+
65837 /**
65838 * task_ns_capable - Determine whether current task has a superior
65839 * capability targeted at a specific task's user namespace.
65840@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
65841 }
65842 EXPORT_SYMBOL(task_ns_capable);
65843
65844+bool task_ns_capable_nolog(struct task_struct *t, int cap)
65845+{
65846+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
65847+}
65848+EXPORT_SYMBOL(task_ns_capable_nolog);
65849+
65850 /**
65851 * nsown_capable - Check superior capability to one's own user_ns
65852 * @cap: The capability in question
65853diff --git a/kernel/cgroup.c b/kernel/cgroup.c
65854index e4cbdfb..191bec4 100644
65855--- a/kernel/cgroup.c
65856+++ b/kernel/cgroup.c
65857@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
65858 struct hlist_head *hhead;
65859 struct cg_cgroup_link *link;
65860
65861+ pax_track_stack();
65862+
65863 /* First see if we already have a cgroup group that matches
65864 * the desired set */
65865 read_lock(&css_set_lock);
65866diff --git a/kernel/compat.c b/kernel/compat.c
65867index e2435ee..8e82199 100644
65868--- a/kernel/compat.c
65869+++ b/kernel/compat.c
65870@@ -13,6 +13,7 @@
65871
65872 #include <linux/linkage.h>
65873 #include <linux/compat.h>
65874+#include <linux/module.h>
65875 #include <linux/errno.h>
65876 #include <linux/time.h>
65877 #include <linux/signal.h>
65878@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65879 mm_segment_t oldfs;
65880 long ret;
65881
65882- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65883+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65884 oldfs = get_fs();
65885 set_fs(KERNEL_DS);
65886 ret = hrtimer_nanosleep_restart(restart);
65887@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65888 oldfs = get_fs();
65889 set_fs(KERNEL_DS);
65890 ret = hrtimer_nanosleep(&tu,
65891- rmtp ? (struct timespec __user *)&rmt : NULL,
65892+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
65893 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65894 set_fs(oldfs);
65895
65896@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65897 mm_segment_t old_fs = get_fs();
65898
65899 set_fs(KERNEL_DS);
65900- ret = sys_sigpending((old_sigset_t __user *) &s);
65901+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
65902 set_fs(old_fs);
65903 if (ret == 0)
65904 ret = put_user(s, set);
65905@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
65906 old_fs = get_fs();
65907 set_fs(KERNEL_DS);
65908 ret = sys_sigprocmask(how,
65909- set ? (old_sigset_t __user *) &s : NULL,
65910- oset ? (old_sigset_t __user *) &s : NULL);
65911+ set ? (old_sigset_t __force_user *) &s : NULL,
65912+ oset ? (old_sigset_t __force_user *) &s : NULL);
65913 set_fs(old_fs);
65914 if (ret == 0)
65915 if (oset)
65916@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65917 mm_segment_t old_fs = get_fs();
65918
65919 set_fs(KERNEL_DS);
65920- ret = sys_old_getrlimit(resource, &r);
65921+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65922 set_fs(old_fs);
65923
65924 if (!ret) {
65925@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65926 mm_segment_t old_fs = get_fs();
65927
65928 set_fs(KERNEL_DS);
65929- ret = sys_getrusage(who, (struct rusage __user *) &r);
65930+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65931 set_fs(old_fs);
65932
65933 if (ret)
65934@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65935 set_fs (KERNEL_DS);
65936 ret = sys_wait4(pid,
65937 (stat_addr ?
65938- (unsigned int __user *) &status : NULL),
65939- options, (struct rusage __user *) &r);
65940+ (unsigned int __force_user *) &status : NULL),
65941+ options, (struct rusage __force_user *) &r);
65942 set_fs (old_fs);
65943
65944 if (ret > 0) {
65945@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65946 memset(&info, 0, sizeof(info));
65947
65948 set_fs(KERNEL_DS);
65949- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65950- uru ? (struct rusage __user *)&ru : NULL);
65951+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65952+ uru ? (struct rusage __force_user *)&ru : NULL);
65953 set_fs(old_fs);
65954
65955 if ((ret < 0) || (info.si_signo == 0))
65956@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65957 oldfs = get_fs();
65958 set_fs(KERNEL_DS);
65959 err = sys_timer_settime(timer_id, flags,
65960- (struct itimerspec __user *) &newts,
65961- (struct itimerspec __user *) &oldts);
65962+ (struct itimerspec __force_user *) &newts,
65963+ (struct itimerspec __force_user *) &oldts);
65964 set_fs(oldfs);
65965 if (!err && old && put_compat_itimerspec(old, &oldts))
65966 return -EFAULT;
65967@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65968 oldfs = get_fs();
65969 set_fs(KERNEL_DS);
65970 err = sys_timer_gettime(timer_id,
65971- (struct itimerspec __user *) &ts);
65972+ (struct itimerspec __force_user *) &ts);
65973 set_fs(oldfs);
65974 if (!err && put_compat_itimerspec(setting, &ts))
65975 return -EFAULT;
65976@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65977 oldfs = get_fs();
65978 set_fs(KERNEL_DS);
65979 err = sys_clock_settime(which_clock,
65980- (struct timespec __user *) &ts);
65981+ (struct timespec __force_user *) &ts);
65982 set_fs(oldfs);
65983 return err;
65984 }
65985@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65986 oldfs = get_fs();
65987 set_fs(KERNEL_DS);
65988 err = sys_clock_gettime(which_clock,
65989- (struct timespec __user *) &ts);
65990+ (struct timespec __force_user *) &ts);
65991 set_fs(oldfs);
65992 if (!err && put_compat_timespec(&ts, tp))
65993 return -EFAULT;
65994@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65995
65996 oldfs = get_fs();
65997 set_fs(KERNEL_DS);
65998- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65999+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66000 set_fs(oldfs);
66001
66002 err = compat_put_timex(utp, &txc);
66003@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66004 oldfs = get_fs();
66005 set_fs(KERNEL_DS);
66006 err = sys_clock_getres(which_clock,
66007- (struct timespec __user *) &ts);
66008+ (struct timespec __force_user *) &ts);
66009 set_fs(oldfs);
66010 if (!err && tp && put_compat_timespec(&ts, tp))
66011 return -EFAULT;
66012@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66013 long err;
66014 mm_segment_t oldfs;
66015 struct timespec tu;
66016- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66017+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66018
66019- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66020+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66021 oldfs = get_fs();
66022 set_fs(KERNEL_DS);
66023 err = clock_nanosleep_restart(restart);
66024@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66025 oldfs = get_fs();
66026 set_fs(KERNEL_DS);
66027 err = sys_clock_nanosleep(which_clock, flags,
66028- (struct timespec __user *) &in,
66029- (struct timespec __user *) &out);
66030+ (struct timespec __force_user *) &in,
66031+ (struct timespec __force_user *) &out);
66032 set_fs(oldfs);
66033
66034 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66035diff --git a/kernel/configs.c b/kernel/configs.c
66036index 42e8fa0..9e7406b 100644
66037--- a/kernel/configs.c
66038+++ b/kernel/configs.c
66039@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66040 struct proc_dir_entry *entry;
66041
66042 /* create the current config file */
66043+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66044+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66045+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66046+ &ikconfig_file_ops);
66047+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66048+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66049+ &ikconfig_file_ops);
66050+#endif
66051+#else
66052 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66053 &ikconfig_file_ops);
66054+#endif
66055+
66056 if (!entry)
66057 return -ENOMEM;
66058
66059diff --git a/kernel/cred.c b/kernel/cred.c
66060index 8ef31f5..bed28ea 100644
66061--- a/kernel/cred.c
66062+++ b/kernel/cred.c
66063@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
66064 */
66065 void __put_cred(struct cred *cred)
66066 {
66067+ pax_track_stack();
66068+
66069 kdebug("__put_cred(%p{%d,%d})", cred,
66070 atomic_read(&cred->usage),
66071 read_cred_subscribers(cred));
66072@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
66073 {
66074 struct cred *cred;
66075
66076+ pax_track_stack();
66077+
66078 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
66079 atomic_read(&tsk->cred->usage),
66080 read_cred_subscribers(tsk->cred));
66081@@ -204,6 +208,15 @@ void exit_creds(struct task_struct *tsk)
66082 validate_creds(cred);
66083 put_cred(cred);
66084 }
66085+
66086+#ifdef CONFIG_GRKERNSEC_SETXID
66087+ cred = (struct cred *) tsk->delayed_cred;
66088+ if (cred) {
66089+ tsk->delayed_cred = NULL;
66090+ validate_creds(cred);
66091+ put_cred(cred);
66092+ }
66093+#endif
66094 }
66095
66096 /**
66097@@ -220,6 +233,8 @@ const struct cred *get_task_cred(struct task_struct *task)
66098 {
66099 const struct cred *cred;
66100
66101+ pax_track_stack();
66102+
66103 rcu_read_lock();
66104
66105 do {
66106@@ -239,6 +254,8 @@ struct cred *cred_alloc_blank(void)
66107 {
66108 struct cred *new;
66109
66110+ pax_track_stack();
66111+
66112 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
66113 if (!new)
66114 return NULL;
66115@@ -281,12 +298,15 @@ error:
66116 *
66117 * Call commit_creds() or abort_creds() to clean up.
66118 */
66119+
66120 struct cred *prepare_creds(void)
66121 {
66122 struct task_struct *task = current;
66123 const struct cred *old;
66124 struct cred *new;
66125
66126+ pax_track_stack();
66127+
66128 validate_process_creds();
66129
66130 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
66131@@ -333,6 +353,8 @@ struct cred *prepare_exec_creds(void)
66132 struct thread_group_cred *tgcred = NULL;
66133 struct cred *new;
66134
66135+ pax_track_stack();
66136+
66137 #ifdef CONFIG_KEYS
66138 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
66139 if (!tgcred)
66140@@ -385,6 +407,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
66141 struct cred *new;
66142 int ret;
66143
66144+ pax_track_stack();
66145+
66146 if (
66147 #ifdef CONFIG_KEYS
66148 !p->cred->thread_keyring &&
66149@@ -470,11 +494,13 @@ error_put:
66150 * Always returns 0 thus allowing this function to be tail-called at the end
66151 * of, say, sys_setgid().
66152 */
66153-int commit_creds(struct cred *new)
66154+static int __commit_creds(struct cred *new)
66155 {
66156 struct task_struct *task = current;
66157 const struct cred *old = task->real_cred;
66158
66159+ pax_track_stack();
66160+
66161 kdebug("commit_creds(%p{%d,%d})", new,
66162 atomic_read(&new->usage),
66163 read_cred_subscribers(new));
66164@@ -489,6 +515,8 @@ int commit_creds(struct cred *new)
66165
66166 get_cred(new); /* we will require a ref for the subj creds too */
66167
66168+ gr_set_role_label(task, new->uid, new->gid);
66169+
66170 /* dumpability changes */
66171 if (old->euid != new->euid ||
66172 old->egid != new->egid ||
66173@@ -538,6 +566,92 @@ int commit_creds(struct cred *new)
66174 put_cred(old);
66175 return 0;
66176 }
66177+#ifdef CONFIG_GRKERNSEC_SETXID
66178+extern int set_user(struct cred *new);
66179+
66180+void gr_delayed_cred_worker(void)
66181+{
66182+ const struct cred *new = current->delayed_cred;
66183+ struct cred *ncred;
66184+
66185+ current->delayed_cred = NULL;
66186+
66187+ if (current_uid() && new != NULL) {
66188+ // from doing get_cred on it when queueing this
66189+ put_cred(new);
66190+ return;
66191+ } else if (new == NULL)
66192+ return;
66193+
66194+ ncred = prepare_creds();
66195+ if (!ncred)
66196+ goto die;
66197+ // uids
66198+ ncred->uid = new->uid;
66199+ ncred->euid = new->euid;
66200+ ncred->suid = new->suid;
66201+ ncred->fsuid = new->fsuid;
66202+ // gids
66203+ ncred->gid = new->gid;
66204+ ncred->egid = new->egid;
66205+ ncred->sgid = new->sgid;
66206+ ncred->fsgid = new->fsgid;
66207+ // groups
66208+ if (set_groups(ncred, new->group_info) < 0) {
66209+ abort_creds(ncred);
66210+ goto die;
66211+ }
66212+ // caps
66213+ ncred->securebits = new->securebits;
66214+ ncred->cap_inheritable = new->cap_inheritable;
66215+ ncred->cap_permitted = new->cap_permitted;
66216+ ncred->cap_effective = new->cap_effective;
66217+ ncred->cap_bset = new->cap_bset;
66218+
66219+ if (set_user(ncred)) {
66220+ abort_creds(ncred);
66221+ goto die;
66222+ }
66223+
66224+ // from doing get_cred on it when queueing this
66225+ put_cred(new);
66226+
66227+ __commit_creds(ncred);
66228+ return;
66229+die:
66230+ // from doing get_cred on it when queueing this
66231+ put_cred(new);
66232+ do_group_exit(SIGKILL);
66233+}
66234+#endif
66235+
66236+int commit_creds(struct cred *new)
66237+{
66238+#ifdef CONFIG_GRKERNSEC_SETXID
66239+ struct task_struct *t;
66240+
66241+ /* we won't get called with tasklist_lock held for writing
66242+ and interrupts disabled as the cred struct in that case is
66243+ init_cred
66244+ */
66245+ if (grsec_enable_setxid && !current_is_single_threaded() &&
66246+ !current_uid() && new->uid) {
66247+ rcu_read_lock();
66248+ read_lock(&tasklist_lock);
66249+ for (t = next_thread(current); t != current;
66250+ t = next_thread(t)) {
66251+ if (t->delayed_cred == NULL) {
66252+ t->delayed_cred = get_cred(new);
66253+ set_tsk_need_resched(t);
66254+ }
66255+ }
66256+ read_unlock(&tasklist_lock);
66257+ rcu_read_unlock();
66258+ }
66259+#endif
66260+ return __commit_creds(new);
66261+}
66262+
66263 EXPORT_SYMBOL(commit_creds);
66264
66265 /**
66266@@ -549,6 +663,8 @@ EXPORT_SYMBOL(commit_creds);
66267 */
66268 void abort_creds(struct cred *new)
66269 {
66270+ pax_track_stack();
66271+
66272 kdebug("abort_creds(%p{%d,%d})", new,
66273 atomic_read(&new->usage),
66274 read_cred_subscribers(new));
66275@@ -572,6 +688,8 @@ const struct cred *override_creds(const struct cred *new)
66276 {
66277 const struct cred *old = current->cred;
66278
66279+ pax_track_stack();
66280+
66281 kdebug("override_creds(%p{%d,%d})", new,
66282 atomic_read(&new->usage),
66283 read_cred_subscribers(new));
66284@@ -601,6 +719,8 @@ void revert_creds(const struct cred *old)
66285 {
66286 const struct cred *override = current->cred;
66287
66288+ pax_track_stack();
66289+
66290 kdebug("revert_creds(%p{%d,%d})", old,
66291 atomic_read(&old->usage),
66292 read_cred_subscribers(old));
66293@@ -647,6 +767,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
66294 const struct cred *old;
66295 struct cred *new;
66296
66297+ pax_track_stack();
66298+
66299 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
66300 if (!new)
66301 return NULL;
66302@@ -701,6 +823,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
66303 */
66304 int set_security_override(struct cred *new, u32 secid)
66305 {
66306+ pax_track_stack();
66307+
66308 return security_kernel_act_as(new, secid);
66309 }
66310 EXPORT_SYMBOL(set_security_override);
66311@@ -720,6 +844,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
66312 u32 secid;
66313 int ret;
66314
66315+ pax_track_stack();
66316+
66317 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
66318 if (ret < 0)
66319 return ret;
66320diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66321index 0d7c087..01b8cef 100644
66322--- a/kernel/debug/debug_core.c
66323+++ b/kernel/debug/debug_core.c
66324@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66325 */
66326 static atomic_t masters_in_kgdb;
66327 static atomic_t slaves_in_kgdb;
66328-static atomic_t kgdb_break_tasklet_var;
66329+static atomic_unchecked_t kgdb_break_tasklet_var;
66330 atomic_t kgdb_setting_breakpoint;
66331
66332 struct task_struct *kgdb_usethread;
66333@@ -129,7 +129,7 @@ int kgdb_single_step;
66334 static pid_t kgdb_sstep_pid;
66335
66336 /* to keep track of the CPU which is doing the single stepping*/
66337-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66338+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66339
66340 /*
66341 * If you are debugging a problem where roundup (the collection of
66342@@ -542,7 +542,7 @@ return_normal:
66343 * kernel will only try for the value of sstep_tries before
66344 * giving up and continuing on.
66345 */
66346- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66347+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66348 (kgdb_info[cpu].task &&
66349 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66350 atomic_set(&kgdb_active, -1);
66351@@ -636,8 +636,8 @@ cpu_master_loop:
66352 }
66353
66354 kgdb_restore:
66355- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66356- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66357+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66358+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66359 if (kgdb_info[sstep_cpu].task)
66360 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66361 else
66362@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
66363 static void kgdb_tasklet_bpt(unsigned long ing)
66364 {
66365 kgdb_breakpoint();
66366- atomic_set(&kgdb_break_tasklet_var, 0);
66367+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66368 }
66369
66370 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66371
66372 void kgdb_schedule_breakpoint(void)
66373 {
66374- if (atomic_read(&kgdb_break_tasklet_var) ||
66375+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66376 atomic_read(&kgdb_active) != -1 ||
66377 atomic_read(&kgdb_setting_breakpoint))
66378 return;
66379- atomic_inc(&kgdb_break_tasklet_var);
66380+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
66381 tasklet_schedule(&kgdb_tasklet_breakpoint);
66382 }
66383 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
66384diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
66385index 63786e7..0780cac 100644
66386--- a/kernel/debug/kdb/kdb_main.c
66387+++ b/kernel/debug/kdb/kdb_main.c
66388@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
66389 list_for_each_entry(mod, kdb_modules, list) {
66390
66391 kdb_printf("%-20s%8u 0x%p ", mod->name,
66392- mod->core_size, (void *)mod);
66393+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
66394 #ifdef CONFIG_MODULE_UNLOAD
66395 kdb_printf("%4d ", module_refcount(mod));
66396 #endif
66397@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
66398 kdb_printf(" (Loading)");
66399 else
66400 kdb_printf(" (Live)");
66401- kdb_printf(" 0x%p", mod->module_core);
66402+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
66403
66404 #ifdef CONFIG_MODULE_UNLOAD
66405 {
66406diff --git a/kernel/events/core.c b/kernel/events/core.c
66407index 0f85778..0d43716 100644
66408--- a/kernel/events/core.c
66409+++ b/kernel/events/core.c
66410@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
66411 return 0;
66412 }
66413
66414-static atomic64_t perf_event_id;
66415+static atomic64_unchecked_t perf_event_id;
66416
66417 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
66418 enum event_type_t event_type);
66419@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info)
66420
66421 static inline u64 perf_event_count(struct perf_event *event)
66422 {
66423- return local64_read(&event->count) + atomic64_read(&event->child_count);
66424+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
66425 }
66426
66427 static u64 perf_event_read(struct perf_event *event)
66428@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
66429 mutex_lock(&event->child_mutex);
66430 total += perf_event_read(event);
66431 *enabled += event->total_time_enabled +
66432- atomic64_read(&event->child_total_time_enabled);
66433+ atomic64_read_unchecked(&event->child_total_time_enabled);
66434 *running += event->total_time_running +
66435- atomic64_read(&event->child_total_time_running);
66436+ atomic64_read_unchecked(&event->child_total_time_running);
66437
66438 list_for_each_entry(child, &event->child_list, child_list) {
66439 total += perf_event_read(child);
66440@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event)
66441 userpg->offset -= local64_read(&event->hw.prev_count);
66442
66443 userpg->time_enabled = enabled +
66444- atomic64_read(&event->child_total_time_enabled);
66445+ atomic64_read_unchecked(&event->child_total_time_enabled);
66446
66447 userpg->time_running = running +
66448- atomic64_read(&event->child_total_time_running);
66449+ atomic64_read_unchecked(&event->child_total_time_running);
66450
66451 barrier();
66452 ++userpg->lock;
66453@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
66454 values[n++] = perf_event_count(event);
66455 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
66456 values[n++] = enabled +
66457- atomic64_read(&event->child_total_time_enabled);
66458+ atomic64_read_unchecked(&event->child_total_time_enabled);
66459 }
66460 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
66461 values[n++] = running +
66462- atomic64_read(&event->child_total_time_running);
66463+ atomic64_read_unchecked(&event->child_total_time_running);
66464 }
66465 if (read_format & PERF_FORMAT_ID)
66466 values[n++] = primary_event_id(event);
66467@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
66468 * need to add enough zero bytes after the string to handle
66469 * the 64bit alignment we do later.
66470 */
66471- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
66472+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
66473 if (!buf) {
66474 name = strncpy(tmp, "//enomem", sizeof(tmp));
66475 goto got_name;
66476 }
66477- name = d_path(&file->f_path, buf, PATH_MAX);
66478+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
66479 if (IS_ERR(name)) {
66480 name = strncpy(tmp, "//toolong", sizeof(tmp));
66481 goto got_name;
66482@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
66483 event->parent = parent_event;
66484
66485 event->ns = get_pid_ns(current->nsproxy->pid_ns);
66486- event->id = atomic64_inc_return(&perf_event_id);
66487+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
66488
66489 event->state = PERF_EVENT_STATE_INACTIVE;
66490
66491@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event,
66492 /*
66493 * Add back the child's count to the parent's count:
66494 */
66495- atomic64_add(child_val, &parent_event->child_count);
66496- atomic64_add(child_event->total_time_enabled,
66497+ atomic64_add_unchecked(child_val, &parent_event->child_count);
66498+ atomic64_add_unchecked(child_event->total_time_enabled,
66499 &parent_event->child_total_time_enabled);
66500- atomic64_add(child_event->total_time_running,
66501+ atomic64_add_unchecked(child_event->total_time_running,
66502 &parent_event->child_total_time_running);
66503
66504 /*
66505diff --git a/kernel/exit.c b/kernel/exit.c
66506index 9e316ae..b3656d5 100644
66507--- a/kernel/exit.c
66508+++ b/kernel/exit.c
66509@@ -57,6 +57,10 @@
66510 #include <asm/pgtable.h>
66511 #include <asm/mmu_context.h>
66512
66513+#ifdef CONFIG_GRKERNSEC
66514+extern rwlock_t grsec_exec_file_lock;
66515+#endif
66516+
66517 static void exit_mm(struct task_struct * tsk);
66518
66519 static void __unhash_process(struct task_struct *p, bool group_dead)
66520@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
66521 struct task_struct *leader;
66522 int zap_leader;
66523 repeat:
66524+#ifdef CONFIG_NET
66525+ gr_del_task_from_ip_table(p);
66526+#endif
66527+
66528 /* don't need to get the RCU readlock here - the process is dead and
66529 * can't be modifying its own credentials. But shut RCU-lockdep up */
66530 rcu_read_lock();
66531@@ -380,7 +388,7 @@ int allow_signal(int sig)
66532 * know it'll be handled, so that they don't get converted to
66533 * SIGKILL or just silently dropped.
66534 */
66535- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66536+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66537 recalc_sigpending();
66538 spin_unlock_irq(&current->sighand->siglock);
66539 return 0;
66540@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
66541 vsnprintf(current->comm, sizeof(current->comm), name, args);
66542 va_end(args);
66543
66544+#ifdef CONFIG_GRKERNSEC
66545+ write_lock(&grsec_exec_file_lock);
66546+ if (current->exec_file) {
66547+ fput(current->exec_file);
66548+ current->exec_file = NULL;
66549+ }
66550+ write_unlock(&grsec_exec_file_lock);
66551+#endif
66552+
66553+ gr_set_kernel_label(current);
66554+
66555 /*
66556 * If we were started as result of loading a module, close all of the
66557 * user space pages. We don't need them, and if we didn't close them
66558@@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code)
66559 struct task_struct *tsk = current;
66560 int group_dead;
66561
66562+ set_fs(USER_DS);
66563+
66564 profile_task_exit(tsk);
66565
66566 WARN_ON(blk_needs_flush_plug(tsk));
66567@@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code)
66568 * mm_release()->clear_child_tid() from writing to a user-controlled
66569 * kernel address.
66570 */
66571- set_fs(USER_DS);
66572
66573 ptrace_event(PTRACE_EVENT_EXIT, code);
66574
66575@@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code)
66576 tsk->exit_code = code;
66577 taskstats_exit(tsk, group_dead);
66578
66579+ gr_acl_handle_psacct(tsk, code);
66580+ gr_acl_handle_exit();
66581+
66582 exit_mm(tsk);
66583
66584 if (group_dead)
66585diff --git a/kernel/fork.c b/kernel/fork.c
66586index 8e6b6f4..9dccf00 100644
66587--- a/kernel/fork.c
66588+++ b/kernel/fork.c
66589@@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66590 *stackend = STACK_END_MAGIC; /* for overflow detection */
66591
66592 #ifdef CONFIG_CC_STACKPROTECTOR
66593- tsk->stack_canary = get_random_int();
66594+ tsk->stack_canary = pax_get_random_long();
66595 #endif
66596
66597 /*
66598@@ -309,13 +309,77 @@ out:
66599 }
66600
66601 #ifdef CONFIG_MMU
66602+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66603+{
66604+ struct vm_area_struct *tmp;
66605+ unsigned long charge;
66606+ struct mempolicy *pol;
66607+ struct file *file;
66608+
66609+ charge = 0;
66610+ if (mpnt->vm_flags & VM_ACCOUNT) {
66611+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66612+ if (security_vm_enough_memory(len))
66613+ goto fail_nomem;
66614+ charge = len;
66615+ }
66616+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66617+ if (!tmp)
66618+ goto fail_nomem;
66619+ *tmp = *mpnt;
66620+ tmp->vm_mm = mm;
66621+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66622+ pol = mpol_dup(vma_policy(mpnt));
66623+ if (IS_ERR(pol))
66624+ goto fail_nomem_policy;
66625+ vma_set_policy(tmp, pol);
66626+ if (anon_vma_fork(tmp, mpnt))
66627+ goto fail_nomem_anon_vma_fork;
66628+ tmp->vm_flags &= ~VM_LOCKED;
66629+ tmp->vm_next = tmp->vm_prev = NULL;
66630+ tmp->vm_mirror = NULL;
66631+ file = tmp->vm_file;
66632+ if (file) {
66633+ struct inode *inode = file->f_path.dentry->d_inode;
66634+ struct address_space *mapping = file->f_mapping;
66635+
66636+ get_file(file);
66637+ if (tmp->vm_flags & VM_DENYWRITE)
66638+ atomic_dec(&inode->i_writecount);
66639+ mutex_lock(&mapping->i_mmap_mutex);
66640+ if (tmp->vm_flags & VM_SHARED)
66641+ mapping->i_mmap_writable++;
66642+ flush_dcache_mmap_lock(mapping);
66643+ /* insert tmp into the share list, just after mpnt */
66644+ vma_prio_tree_add(tmp, mpnt);
66645+ flush_dcache_mmap_unlock(mapping);
66646+ mutex_unlock(&mapping->i_mmap_mutex);
66647+ }
66648+
66649+ /*
66650+ * Clear hugetlb-related page reserves for children. This only
66651+ * affects MAP_PRIVATE mappings. Faults generated by the child
66652+ * are not guaranteed to succeed, even if read-only
66653+ */
66654+ if (is_vm_hugetlb_page(tmp))
66655+ reset_vma_resv_huge_pages(tmp);
66656+
66657+ return tmp;
66658+
66659+fail_nomem_anon_vma_fork:
66660+ mpol_put(pol);
66661+fail_nomem_policy:
66662+ kmem_cache_free(vm_area_cachep, tmp);
66663+fail_nomem:
66664+ vm_unacct_memory(charge);
66665+ return NULL;
66666+}
66667+
66668 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66669 {
66670 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66671 struct rb_node **rb_link, *rb_parent;
66672 int retval;
66673- unsigned long charge;
66674- struct mempolicy *pol;
66675
66676 down_write(&oldmm->mmap_sem);
66677 flush_cache_dup_mm(oldmm);
66678@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66679 mm->locked_vm = 0;
66680 mm->mmap = NULL;
66681 mm->mmap_cache = NULL;
66682- mm->free_area_cache = oldmm->mmap_base;
66683- mm->cached_hole_size = ~0UL;
66684+ mm->free_area_cache = oldmm->free_area_cache;
66685+ mm->cached_hole_size = oldmm->cached_hole_size;
66686 mm->map_count = 0;
66687 cpumask_clear(mm_cpumask(mm));
66688 mm->mm_rb = RB_ROOT;
66689@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66690
66691 prev = NULL;
66692 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66693- struct file *file;
66694-
66695 if (mpnt->vm_flags & VM_DONTCOPY) {
66696 long pages = vma_pages(mpnt);
66697 mm->total_vm -= pages;
66698@@ -353,53 +415,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66699 -pages);
66700 continue;
66701 }
66702- charge = 0;
66703- if (mpnt->vm_flags & VM_ACCOUNT) {
66704- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66705- if (security_vm_enough_memory(len))
66706- goto fail_nomem;
66707- charge = len;
66708+ tmp = dup_vma(mm, mpnt);
66709+ if (!tmp) {
66710+ retval = -ENOMEM;
66711+ goto out;
66712 }
66713- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66714- if (!tmp)
66715- goto fail_nomem;
66716- *tmp = *mpnt;
66717- INIT_LIST_HEAD(&tmp->anon_vma_chain);
66718- pol = mpol_dup(vma_policy(mpnt));
66719- retval = PTR_ERR(pol);
66720- if (IS_ERR(pol))
66721- goto fail_nomem_policy;
66722- vma_set_policy(tmp, pol);
66723- tmp->vm_mm = mm;
66724- if (anon_vma_fork(tmp, mpnt))
66725- goto fail_nomem_anon_vma_fork;
66726- tmp->vm_flags &= ~VM_LOCKED;
66727- tmp->vm_next = tmp->vm_prev = NULL;
66728- file = tmp->vm_file;
66729- if (file) {
66730- struct inode *inode = file->f_path.dentry->d_inode;
66731- struct address_space *mapping = file->f_mapping;
66732-
66733- get_file(file);
66734- if (tmp->vm_flags & VM_DENYWRITE)
66735- atomic_dec(&inode->i_writecount);
66736- mutex_lock(&mapping->i_mmap_mutex);
66737- if (tmp->vm_flags & VM_SHARED)
66738- mapping->i_mmap_writable++;
66739- flush_dcache_mmap_lock(mapping);
66740- /* insert tmp into the share list, just after mpnt */
66741- vma_prio_tree_add(tmp, mpnt);
66742- flush_dcache_mmap_unlock(mapping);
66743- mutex_unlock(&mapping->i_mmap_mutex);
66744- }
66745-
66746- /*
66747- * Clear hugetlb-related page reserves for children. This only
66748- * affects MAP_PRIVATE mappings. Faults generated by the child
66749- * are not guaranteed to succeed, even if read-only
66750- */
66751- if (is_vm_hugetlb_page(tmp))
66752- reset_vma_resv_huge_pages(tmp);
66753
66754 /*
66755 * Link in the new vma and copy the page table entries.
66756@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66757 if (retval)
66758 goto out;
66759 }
66760+
66761+#ifdef CONFIG_PAX_SEGMEXEC
66762+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66763+ struct vm_area_struct *mpnt_m;
66764+
66765+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66766+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66767+
66768+ if (!mpnt->vm_mirror)
66769+ continue;
66770+
66771+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66772+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66773+ mpnt->vm_mirror = mpnt_m;
66774+ } else {
66775+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66776+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66777+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66778+ mpnt->vm_mirror->vm_mirror = mpnt;
66779+ }
66780+ }
66781+ BUG_ON(mpnt_m);
66782+ }
66783+#endif
66784+
66785 /* a new mm has just been created */
66786 arch_dup_mmap(oldmm, mm);
66787 retval = 0;
66788@@ -430,14 +475,6 @@ out:
66789 flush_tlb_mm(oldmm);
66790 up_write(&oldmm->mmap_sem);
66791 return retval;
66792-fail_nomem_anon_vma_fork:
66793- mpol_put(pol);
66794-fail_nomem_policy:
66795- kmem_cache_free(vm_area_cachep, tmp);
66796-fail_nomem:
66797- retval = -ENOMEM;
66798- vm_unacct_memory(charge);
66799- goto out;
66800 }
66801
66802 static inline int mm_alloc_pgd(struct mm_struct *mm)
66803@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66804 spin_unlock(&fs->lock);
66805 return -EAGAIN;
66806 }
66807- fs->users++;
66808+ atomic_inc(&fs->users);
66809 spin_unlock(&fs->lock);
66810 return 0;
66811 }
66812 tsk->fs = copy_fs_struct(fs);
66813 if (!tsk->fs)
66814 return -ENOMEM;
66815+ gr_set_chroot_entries(tsk, &tsk->fs->root);
66816 return 0;
66817 }
66818
66819@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66820 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66821 #endif
66822 retval = -EAGAIN;
66823+
66824+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66825+
66826 if (atomic_read(&p->real_cred->user->processes) >=
66827 task_rlimit(p, RLIMIT_NPROC)) {
66828 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66829@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66830 if (clone_flags & CLONE_THREAD)
66831 p->tgid = current->tgid;
66832
66833+ gr_copy_label(p);
66834+
66835 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66836 /*
66837 * Clear TID on mm_release()?
66838@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
66839 bad_fork_free:
66840 free_task(p);
66841 fork_out:
66842+ gr_log_forkfail(retval);
66843+
66844 return ERR_PTR(retval);
66845 }
66846
66847@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
66848 if (clone_flags & CLONE_PARENT_SETTID)
66849 put_user(nr, parent_tidptr);
66850
66851+ gr_handle_brute_check();
66852+
66853 if (clone_flags & CLONE_VFORK) {
66854 p->vfork_done = &vfork;
66855 init_completion(&vfork);
66856@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66857 return 0;
66858
66859 /* don't need lock here; in the worst case we'll do useless copy */
66860- if (fs->users == 1)
66861+ if (atomic_read(&fs->users) == 1)
66862 return 0;
66863
66864 *new_fsp = copy_fs_struct(fs);
66865@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66866 fs = current->fs;
66867 spin_lock(&fs->lock);
66868 current->fs = new_fs;
66869- if (--fs->users)
66870+ gr_set_chroot_entries(current, &current->fs->root);
66871+ if (atomic_dec_return(&fs->users))
66872 new_fs = NULL;
66873 else
66874 new_fs = fs;
66875diff --git a/kernel/futex.c b/kernel/futex.c
66876index e6160fa..edf9565 100644
66877--- a/kernel/futex.c
66878+++ b/kernel/futex.c
66879@@ -54,6 +54,7 @@
66880 #include <linux/mount.h>
66881 #include <linux/pagemap.h>
66882 #include <linux/syscalls.h>
66883+#include <linux/ptrace.h>
66884 #include <linux/signal.h>
66885 #include <linux/module.h>
66886 #include <linux/magic.h>
66887@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66888 struct page *page, *page_head;
66889 int err, ro = 0;
66890
66891+#ifdef CONFIG_PAX_SEGMEXEC
66892+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66893+ return -EFAULT;
66894+#endif
66895+
66896 /*
66897 * The futex address must be "naturally" aligned.
66898 */
66899@@ -1875,6 +1881,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
66900 struct futex_q q = futex_q_init;
66901 int ret;
66902
66903+ pax_track_stack();
66904+
66905 if (!bitset)
66906 return -EINVAL;
66907 q.bitset = bitset;
66908@@ -2271,6 +2279,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
66909 struct futex_q q = futex_q_init;
66910 int res, ret;
66911
66912+ pax_track_stack();
66913+
66914 if (!bitset)
66915 return -EINVAL;
66916
66917@@ -2459,6 +2469,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66918 if (!p)
66919 goto err_unlock;
66920 ret = -EPERM;
66921+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66922+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
66923+ goto err_unlock;
66924+#endif
66925 pcred = __task_cred(p);
66926 /* If victim is in different user_ns, then uids are not
66927 comparable, so we must have CAP_SYS_PTRACE */
66928@@ -2724,6 +2738,7 @@ static int __init futex_init(void)
66929 {
66930 u32 curval;
66931 int i;
66932+ mm_segment_t oldfs;
66933
66934 /*
66935 * This will fail and we want it. Some arch implementations do
66936@@ -2735,8 +2750,11 @@ static int __init futex_init(void)
66937 * implementation, the non-functional ones will return
66938 * -ENOSYS.
66939 */
66940+ oldfs = get_fs();
66941+ set_fs(USER_DS);
66942 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66943 futex_cmpxchg_enabled = 1;
66944+ set_fs(oldfs);
66945
66946 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66947 plist_head_init(&futex_queues[i].chain);
66948diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
66949index 5f9e689..582d46d 100644
66950--- a/kernel/futex_compat.c
66951+++ b/kernel/futex_compat.c
66952@@ -10,6 +10,7 @@
66953 #include <linux/compat.h>
66954 #include <linux/nsproxy.h>
66955 #include <linux/futex.h>
66956+#include <linux/ptrace.h>
66957
66958 #include <asm/uaccess.h>
66959
66960@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66961 {
66962 struct compat_robust_list_head __user *head;
66963 unsigned long ret;
66964- const struct cred *cred = current_cred(), *pcred;
66965+ const struct cred *cred = current_cred();
66966+ const struct cred *pcred;
66967
66968 if (!futex_cmpxchg_enabled)
66969 return -ENOSYS;
66970@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66971 if (!p)
66972 goto err_unlock;
66973 ret = -EPERM;
66974+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66975+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
66976+ goto err_unlock;
66977+#endif
66978 pcred = __task_cred(p);
66979 /* If victim is in different user_ns, then uids are not
66980 comparable, so we must have CAP_SYS_PTRACE */
66981diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66982index 9b22d03..6295b62 100644
66983--- a/kernel/gcov/base.c
66984+++ b/kernel/gcov/base.c
66985@@ -102,11 +102,6 @@ void gcov_enable_events(void)
66986 }
66987
66988 #ifdef CONFIG_MODULES
66989-static inline int within(void *addr, void *start, unsigned long size)
66990-{
66991- return ((addr >= start) && (addr < start + size));
66992-}
66993-
66994 /* Update list and generate events when modules are unloaded. */
66995 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66996 void *data)
66997@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66998 prev = NULL;
66999 /* Remove entries located in module from linked list. */
67000 for (info = gcov_info_head; info; info = info->next) {
67001- if (within(info, mod->module_core, mod->core_size)) {
67002+ if (within_module_core_rw((unsigned long)info, mod)) {
67003 if (prev)
67004 prev->next = info->next;
67005 else
67006diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
67007index 2043c08..ec81a69 100644
67008--- a/kernel/hrtimer.c
67009+++ b/kernel/hrtimer.c
67010@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
67011 local_irq_restore(flags);
67012 }
67013
67014-static void run_hrtimer_softirq(struct softirq_action *h)
67015+static void run_hrtimer_softirq(void)
67016 {
67017 hrtimer_peek_ahead_timers();
67018 }
67019diff --git a/kernel/jump_label.c b/kernel/jump_label.c
67020index e6f1f24..6c19597 100644
67021--- a/kernel/jump_label.c
67022+++ b/kernel/jump_label.c
67023@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
67024
67025 size = (((unsigned long)stop - (unsigned long)start)
67026 / sizeof(struct jump_entry));
67027+ pax_open_kernel();
67028 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67029+ pax_close_kernel();
67030 }
67031
67032 static void jump_label_update(struct jump_label_key *key, int enable);
67033@@ -298,10 +300,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
67034 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67035 struct jump_entry *iter;
67036
67037+ pax_open_kernel();
67038 for (iter = iter_start; iter < iter_stop; iter++) {
67039 if (within_module_init(iter->code, mod))
67040 iter->code = 0;
67041 }
67042+ pax_close_kernel();
67043 }
67044
67045 static int
67046diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67047index 079f1d3..a407562 100644
67048--- a/kernel/kallsyms.c
67049+++ b/kernel/kallsyms.c
67050@@ -11,6 +11,9 @@
67051 * Changed the compression method from stem compression to "table lookup"
67052 * compression (see scripts/kallsyms.c for a more complete description)
67053 */
67054+#ifdef CONFIG_GRKERNSEC_HIDESYM
67055+#define __INCLUDED_BY_HIDESYM 1
67056+#endif
67057 #include <linux/kallsyms.h>
67058 #include <linux/module.h>
67059 #include <linux/init.h>
67060@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
67061
67062 static inline int is_kernel_inittext(unsigned long addr)
67063 {
67064+ if (system_state != SYSTEM_BOOTING)
67065+ return 0;
67066+
67067 if (addr >= (unsigned long)_sinittext
67068 && addr <= (unsigned long)_einittext)
67069 return 1;
67070 return 0;
67071 }
67072
67073+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67074+#ifdef CONFIG_MODULES
67075+static inline int is_module_text(unsigned long addr)
67076+{
67077+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67078+ return 1;
67079+
67080+ addr = ktla_ktva(addr);
67081+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67082+}
67083+#else
67084+static inline int is_module_text(unsigned long addr)
67085+{
67086+ return 0;
67087+}
67088+#endif
67089+#endif
67090+
67091 static inline int is_kernel_text(unsigned long addr)
67092 {
67093 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67094@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67095
67096 static inline int is_kernel(unsigned long addr)
67097 {
67098+
67099+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67100+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
67101+ return 1;
67102+
67103+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67104+#else
67105 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67106+#endif
67107+
67108 return 1;
67109 return in_gate_area_no_mm(addr);
67110 }
67111
67112 static int is_ksym_addr(unsigned long addr)
67113 {
67114+
67115+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67116+ if (is_module_text(addr))
67117+ return 0;
67118+#endif
67119+
67120 if (all_var)
67121 return is_kernel(addr);
67122
67123@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67124
67125 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67126 {
67127- iter->name[0] = '\0';
67128 iter->nameoff = get_symbol_offset(new_pos);
67129 iter->pos = new_pos;
67130 }
67131@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
67132 {
67133 struct kallsym_iter *iter = m->private;
67134
67135+#ifdef CONFIG_GRKERNSEC_HIDESYM
67136+ if (current_uid())
67137+ return 0;
67138+#endif
67139+
67140 /* Some debugging symbols have no name. Ignore them. */
67141 if (!iter->name[0])
67142 return 0;
67143@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67144 struct kallsym_iter *iter;
67145 int ret;
67146
67147- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67148+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67149 if (!iter)
67150 return -ENOMEM;
67151 reset_iter(iter, 0);
67152diff --git a/kernel/kexec.c b/kernel/kexec.c
67153index 296fbc8..84cb857 100644
67154--- a/kernel/kexec.c
67155+++ b/kernel/kexec.c
67156@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67157 unsigned long flags)
67158 {
67159 struct compat_kexec_segment in;
67160- struct kexec_segment out, __user *ksegments;
67161+ struct kexec_segment out;
67162+ struct kexec_segment __user *ksegments;
67163 unsigned long i, result;
67164
67165 /* Don't allow clients that don't understand the native
67166diff --git a/kernel/kmod.c b/kernel/kmod.c
67167index a4bea97..7a1ae9a 100644
67168--- a/kernel/kmod.c
67169+++ b/kernel/kmod.c
67170@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
67171 * If module auto-loading support is disabled then this function
67172 * becomes a no-operation.
67173 */
67174-int __request_module(bool wait, const char *fmt, ...)
67175+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67176 {
67177- va_list args;
67178 char module_name[MODULE_NAME_LEN];
67179 unsigned int max_modprobes;
67180 int ret;
67181- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
67182+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
67183 static char *envp[] = { "HOME=/",
67184 "TERM=linux",
67185 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
67186@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
67187 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67188 static int kmod_loop_msg;
67189
67190- va_start(args, fmt);
67191- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67192- va_end(args);
67193+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67194 if (ret >= MODULE_NAME_LEN)
67195 return -ENAMETOOLONG;
67196
67197@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
67198 if (ret)
67199 return ret;
67200
67201+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67202+ if (!current_uid()) {
67203+ /* hack to workaround consolekit/udisks stupidity */
67204+ read_lock(&tasklist_lock);
67205+ if (!strcmp(current->comm, "mount") &&
67206+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67207+ read_unlock(&tasklist_lock);
67208+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67209+ return -EPERM;
67210+ }
67211+ read_unlock(&tasklist_lock);
67212+ }
67213+#endif
67214+
67215 /* If modprobe needs a service that is in a module, we get a recursive
67216 * loop. Limit the number of running kmod threads to max_threads/2 or
67217 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67218@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
67219 atomic_dec(&kmod_concurrent);
67220 return ret;
67221 }
67222+
67223+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67224+{
67225+ va_list args;
67226+ int ret;
67227+
67228+ va_start(args, fmt);
67229+ ret = ____request_module(wait, module_param, fmt, args);
67230+ va_end(args);
67231+
67232+ return ret;
67233+}
67234+
67235+int __request_module(bool wait, const char *fmt, ...)
67236+{
67237+ va_list args;
67238+ int ret;
67239+
67240+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67241+ if (current_uid()) {
67242+ char module_param[MODULE_NAME_LEN];
67243+
67244+ memset(module_param, 0, sizeof(module_param));
67245+
67246+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67247+
67248+ va_start(args, fmt);
67249+ ret = ____request_module(wait, module_param, fmt, args);
67250+ va_end(args);
67251+
67252+ return ret;
67253+ }
67254+#endif
67255+
67256+ va_start(args, fmt);
67257+ ret = ____request_module(wait, NULL, fmt, args);
67258+ va_end(args);
67259+
67260+ return ret;
67261+}
67262+
67263 EXPORT_SYMBOL(__request_module);
67264 #endif /* CONFIG_MODULES */
67265
67266@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
67267 *
67268 * Thus the __user pointer cast is valid here.
67269 */
67270- sys_wait4(pid, (int __user *)&ret, 0, NULL);
67271+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67272
67273 /*
67274 * If ret is 0, either ____call_usermodehelper failed and the
67275diff --git a/kernel/kprobes.c b/kernel/kprobes.c
67276index b30fd54..11821ec 100644
67277--- a/kernel/kprobes.c
67278+++ b/kernel/kprobes.c
67279@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
67280 * kernel image and loaded module images reside. This is required
67281 * so x86_64 can correctly handle the %rip-relative fixups.
67282 */
67283- kip->insns = module_alloc(PAGE_SIZE);
67284+ kip->insns = module_alloc_exec(PAGE_SIZE);
67285 if (!kip->insns) {
67286 kfree(kip);
67287 return NULL;
67288@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
67289 */
67290 if (!list_is_singular(&kip->list)) {
67291 list_del(&kip->list);
67292- module_free(NULL, kip->insns);
67293+ module_free_exec(NULL, kip->insns);
67294 kfree(kip);
67295 }
67296 return 1;
67297@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
67298 {
67299 int i, err = 0;
67300 unsigned long offset = 0, size = 0;
67301- char *modname, namebuf[128];
67302+ char *modname, namebuf[KSYM_NAME_LEN];
67303 const char *symbol_name;
67304 void *addr;
67305 struct kprobe_blackpoint *kb;
67306@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
67307 const char *sym = NULL;
67308 unsigned int i = *(loff_t *) v;
67309 unsigned long offset = 0;
67310- char *modname, namebuf[128];
67311+ char *modname, namebuf[KSYM_NAME_LEN];
67312
67313 head = &kprobe_table[i];
67314 preempt_disable();
67315diff --git a/kernel/lockdep.c b/kernel/lockdep.c
67316index 4479606..4036bea 100644
67317--- a/kernel/lockdep.c
67318+++ b/kernel/lockdep.c
67319@@ -584,6 +584,10 @@ static int static_obj(void *obj)
67320 end = (unsigned long) &_end,
67321 addr = (unsigned long) obj;
67322
67323+#ifdef CONFIG_PAX_KERNEXEC
67324+ start = ktla_ktva(start);
67325+#endif
67326+
67327 /*
67328 * static variable?
67329 */
67330@@ -719,6 +723,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
67331 if (!static_obj(lock->key)) {
67332 debug_locks_off();
67333 printk("INFO: trying to register non-static key.\n");
67334+ printk("lock:%pS key:%pS.\n", lock, lock->key);
67335 printk("the code is fine but needs lockdep annotation.\n");
67336 printk("turning off the locking correctness validator.\n");
67337 dump_stack();
67338@@ -2954,7 +2959,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
67339 if (!class)
67340 return 0;
67341 }
67342- atomic_inc((atomic_t *)&class->ops);
67343+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
67344 if (very_verbose(class)) {
67345 printk("\nacquire class [%p] %s", class->key, class->name);
67346 if (class->name_version > 1)
67347diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
67348index 71edd2f..e0542a5 100644
67349--- a/kernel/lockdep_proc.c
67350+++ b/kernel/lockdep_proc.c
67351@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
67352
67353 static void print_name(struct seq_file *m, struct lock_class *class)
67354 {
67355- char str[128];
67356+ char str[KSYM_NAME_LEN];
67357 const char *name = class->name;
67358
67359 if (!name) {
67360diff --git a/kernel/module.c b/kernel/module.c
67361index 04379f92..fba2faf 100644
67362--- a/kernel/module.c
67363+++ b/kernel/module.c
67364@@ -58,6 +58,7 @@
67365 #include <linux/jump_label.h>
67366 #include <linux/pfn.h>
67367 #include <linux/bsearch.h>
67368+#include <linux/grsecurity.h>
67369
67370 #define CREATE_TRACE_POINTS
67371 #include <trace/events/module.h>
67372@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
67373
67374 /* Bounds of module allocation, for speeding __module_address.
67375 * Protected by module_mutex. */
67376-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67377+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67378+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67379
67380 int register_module_notifier(struct notifier_block * nb)
67381 {
67382@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67383 return true;
67384
67385 list_for_each_entry_rcu(mod, &modules, list) {
67386- struct symsearch arr[] = {
67387+ struct symsearch modarr[] = {
67388 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67389 NOT_GPL_ONLY, false },
67390 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
67391@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67392 #endif
67393 };
67394
67395- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67396+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67397 return true;
67398 }
67399 return false;
67400@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
67401 static int percpu_modalloc(struct module *mod,
67402 unsigned long size, unsigned long align)
67403 {
67404- if (align > PAGE_SIZE) {
67405+ if (align-1 >= PAGE_SIZE) {
67406 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
67407 mod->name, align, PAGE_SIZE);
67408 align = PAGE_SIZE;
67409@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
67410 */
67411 #ifdef CONFIG_SYSFS
67412
67413-#ifdef CONFIG_KALLSYMS
67414+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67415 static inline bool sect_empty(const Elf_Shdr *sect)
67416 {
67417 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
67418@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
67419
67420 static void unset_module_core_ro_nx(struct module *mod)
67421 {
67422- set_page_attributes(mod->module_core + mod->core_text_size,
67423- mod->module_core + mod->core_size,
67424+ set_page_attributes(mod->module_core_rw,
67425+ mod->module_core_rw + mod->core_size_rw,
67426 set_memory_x);
67427- set_page_attributes(mod->module_core,
67428- mod->module_core + mod->core_ro_size,
67429+ set_page_attributes(mod->module_core_rx,
67430+ mod->module_core_rx + mod->core_size_rx,
67431 set_memory_rw);
67432 }
67433
67434 static void unset_module_init_ro_nx(struct module *mod)
67435 {
67436- set_page_attributes(mod->module_init + mod->init_text_size,
67437- mod->module_init + mod->init_size,
67438+ set_page_attributes(mod->module_init_rw,
67439+ mod->module_init_rw + mod->init_size_rw,
67440 set_memory_x);
67441- set_page_attributes(mod->module_init,
67442- mod->module_init + mod->init_ro_size,
67443+ set_page_attributes(mod->module_init_rx,
67444+ mod->module_init_rx + mod->init_size_rx,
67445 set_memory_rw);
67446 }
67447
67448@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
67449
67450 mutex_lock(&module_mutex);
67451 list_for_each_entry_rcu(mod, &modules, list) {
67452- if ((mod->module_core) && (mod->core_text_size)) {
67453- set_page_attributes(mod->module_core,
67454- mod->module_core + mod->core_text_size,
67455+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67456+ set_page_attributes(mod->module_core_rx,
67457+ mod->module_core_rx + mod->core_size_rx,
67458 set_memory_rw);
67459 }
67460- if ((mod->module_init) && (mod->init_text_size)) {
67461- set_page_attributes(mod->module_init,
67462- mod->module_init + mod->init_text_size,
67463+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67464+ set_page_attributes(mod->module_init_rx,
67465+ mod->module_init_rx + mod->init_size_rx,
67466 set_memory_rw);
67467 }
67468 }
67469@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
67470
67471 mutex_lock(&module_mutex);
67472 list_for_each_entry_rcu(mod, &modules, list) {
67473- if ((mod->module_core) && (mod->core_text_size)) {
67474- set_page_attributes(mod->module_core,
67475- mod->module_core + mod->core_text_size,
67476+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
67477+ set_page_attributes(mod->module_core_rx,
67478+ mod->module_core_rx + mod->core_size_rx,
67479 set_memory_ro);
67480 }
67481- if ((mod->module_init) && (mod->init_text_size)) {
67482- set_page_attributes(mod->module_init,
67483- mod->module_init + mod->init_text_size,
67484+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
67485+ set_page_attributes(mod->module_init_rx,
67486+ mod->module_init_rx + mod->init_size_rx,
67487 set_memory_ro);
67488 }
67489 }
67490@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
67491
67492 /* This may be NULL, but that's OK */
67493 unset_module_init_ro_nx(mod);
67494- module_free(mod, mod->module_init);
67495+ module_free(mod, mod->module_init_rw);
67496+ module_free_exec(mod, mod->module_init_rx);
67497 kfree(mod->args);
67498 percpu_modfree(mod);
67499
67500 /* Free lock-classes: */
67501- lockdep_free_key_range(mod->module_core, mod->core_size);
67502+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67503+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67504
67505 /* Finally, free the core (containing the module structure) */
67506 unset_module_core_ro_nx(mod);
67507- module_free(mod, mod->module_core);
67508+ module_free_exec(mod, mod->module_core_rx);
67509+ module_free(mod, mod->module_core_rw);
67510
67511 #ifdef CONFIG_MPU
67512 update_protections(current->mm);
67513@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67514 unsigned int i;
67515 int ret = 0;
67516 const struct kernel_symbol *ksym;
67517+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67518+ int is_fs_load = 0;
67519+ int register_filesystem_found = 0;
67520+ char *p;
67521+
67522+ p = strstr(mod->args, "grsec_modharden_fs");
67523+ if (p) {
67524+ char *endptr = p + strlen("grsec_modharden_fs");
67525+ /* copy \0 as well */
67526+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67527+ is_fs_load = 1;
67528+ }
67529+#endif
67530
67531 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67532 const char *name = info->strtab + sym[i].st_name;
67533
67534+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67535+ /* it's a real shame this will never get ripped and copied
67536+ upstream! ;(
67537+ */
67538+ if (is_fs_load && !strcmp(name, "register_filesystem"))
67539+ register_filesystem_found = 1;
67540+#endif
67541+
67542 switch (sym[i].st_shndx) {
67543 case SHN_COMMON:
67544 /* We compiled with -fno-common. These are not
67545@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67546 ksym = resolve_symbol_wait(mod, info, name);
67547 /* Ok if resolved. */
67548 if (ksym && !IS_ERR(ksym)) {
67549+ pax_open_kernel();
67550 sym[i].st_value = ksym->value;
67551+ pax_close_kernel();
67552 break;
67553 }
67554
67555@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67556 secbase = (unsigned long)mod_percpu(mod);
67557 else
67558 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67559+ pax_open_kernel();
67560 sym[i].st_value += secbase;
67561+ pax_close_kernel();
67562 break;
67563 }
67564 }
67565
67566+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67567+ if (is_fs_load && !register_filesystem_found) {
67568+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67569+ ret = -EPERM;
67570+ }
67571+#endif
67572+
67573 return ret;
67574 }
67575
67576@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67577 || s->sh_entsize != ~0UL
67578 || strstarts(sname, ".init"))
67579 continue;
67580- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67581+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67582+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67583+ else
67584+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67585 DEBUGP("\t%s\n", name);
67586 }
67587- switch (m) {
67588- case 0: /* executable */
67589- mod->core_size = debug_align(mod->core_size);
67590- mod->core_text_size = mod->core_size;
67591- break;
67592- case 1: /* RO: text and ro-data */
67593- mod->core_size = debug_align(mod->core_size);
67594- mod->core_ro_size = mod->core_size;
67595- break;
67596- case 3: /* whole core */
67597- mod->core_size = debug_align(mod->core_size);
67598- break;
67599- }
67600 }
67601
67602 DEBUGP("Init section allocation order:\n");
67603@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67604 || s->sh_entsize != ~0UL
67605 || !strstarts(sname, ".init"))
67606 continue;
67607- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67608- | INIT_OFFSET_MASK);
67609+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67610+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67611+ else
67612+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67613+ s->sh_entsize |= INIT_OFFSET_MASK;
67614 DEBUGP("\t%s\n", sname);
67615 }
67616- switch (m) {
67617- case 0: /* executable */
67618- mod->init_size = debug_align(mod->init_size);
67619- mod->init_text_size = mod->init_size;
67620- break;
67621- case 1: /* RO: text and ro-data */
67622- mod->init_size = debug_align(mod->init_size);
67623- mod->init_ro_size = mod->init_size;
67624- break;
67625- case 3: /* whole init */
67626- mod->init_size = debug_align(mod->init_size);
67627- break;
67628- }
67629 }
67630 }
67631
67632@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67633
67634 /* Put symbol section at end of init part of module. */
67635 symsect->sh_flags |= SHF_ALLOC;
67636- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67637+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67638 info->index.sym) | INIT_OFFSET_MASK;
67639 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
67640
67641@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67642 }
67643
67644 /* Append room for core symbols at end of core part. */
67645- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67646- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67647+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67648+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67649
67650 /* Put string table section at end of init part of module. */
67651 strsect->sh_flags |= SHF_ALLOC;
67652- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67653+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67654 info->index.str) | INIT_OFFSET_MASK;
67655 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
67656
67657 /* Append room for core symbols' strings at end of core part. */
67658- info->stroffs = mod->core_size;
67659+ info->stroffs = mod->core_size_rx;
67660 __set_bit(0, info->strmap);
67661- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
67662+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
67663 }
67664
67665 static void add_kallsyms(struct module *mod, const struct load_info *info)
67666@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67667 /* Make sure we get permanent strtab: don't use info->strtab. */
67668 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67669
67670+ pax_open_kernel();
67671+
67672 /* Set types up while we still have access to sections. */
67673 for (i = 0; i < mod->num_symtab; i++)
67674 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67675
67676- mod->core_symtab = dst = mod->module_core + info->symoffs;
67677+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67678 src = mod->symtab;
67679 *dst = *src;
67680 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
67681@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67682 }
67683 mod->core_num_syms = ndst;
67684
67685- mod->core_strtab = s = mod->module_core + info->stroffs;
67686+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67687 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
67688 if (test_bit(i, info->strmap))
67689 *++s = mod->strtab[i];
67690+
67691+ pax_close_kernel();
67692 }
67693 #else
67694 static inline void layout_symtab(struct module *mod, struct load_info *info)
67695@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
67696 return size == 0 ? NULL : vmalloc_exec(size);
67697 }
67698
67699-static void *module_alloc_update_bounds(unsigned long size)
67700+static void *module_alloc_update_bounds_rw(unsigned long size)
67701 {
67702 void *ret = module_alloc(size);
67703
67704 if (ret) {
67705 mutex_lock(&module_mutex);
67706 /* Update module bounds. */
67707- if ((unsigned long)ret < module_addr_min)
67708- module_addr_min = (unsigned long)ret;
67709- if ((unsigned long)ret + size > module_addr_max)
67710- module_addr_max = (unsigned long)ret + size;
67711+ if ((unsigned long)ret < module_addr_min_rw)
67712+ module_addr_min_rw = (unsigned long)ret;
67713+ if ((unsigned long)ret + size > module_addr_max_rw)
67714+ module_addr_max_rw = (unsigned long)ret + size;
67715+ mutex_unlock(&module_mutex);
67716+ }
67717+ return ret;
67718+}
67719+
67720+static void *module_alloc_update_bounds_rx(unsigned long size)
67721+{
67722+ void *ret = module_alloc_exec(size);
67723+
67724+ if (ret) {
67725+ mutex_lock(&module_mutex);
67726+ /* Update module bounds. */
67727+ if ((unsigned long)ret < module_addr_min_rx)
67728+ module_addr_min_rx = (unsigned long)ret;
67729+ if ((unsigned long)ret + size > module_addr_max_rx)
67730+ module_addr_max_rx = (unsigned long)ret + size;
67731 mutex_unlock(&module_mutex);
67732 }
67733 return ret;
67734@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
67735 static int check_modinfo(struct module *mod, struct load_info *info)
67736 {
67737 const char *modmagic = get_modinfo(info, "vermagic");
67738+ const char *license = get_modinfo(info, "license");
67739 int err;
67740
67741+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67742+ if (!license || !license_is_gpl_compatible(license))
67743+ return -ENOEXEC;
67744+#endif
67745+
67746 /* This is allowed: modprobe --force will invalidate it. */
67747 if (!modmagic) {
67748 err = try_to_force_load(mod, "bad vermagic");
67749@@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67750 }
67751
67752 /* Set up license info based on the info section */
67753- set_license(mod, get_modinfo(info, "license"));
67754+ set_license(mod, license);
67755
67756 return 0;
67757 }
67758@@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info)
67759 void *ptr;
67760
67761 /* Do the allocs. */
67762- ptr = module_alloc_update_bounds(mod->core_size);
67763+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67764 /*
67765 * The pointer to this block is stored in the module structure
67766 * which is inside the block. Just mark it as not being a
67767@@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info)
67768 if (!ptr)
67769 return -ENOMEM;
67770
67771- memset(ptr, 0, mod->core_size);
67772- mod->module_core = ptr;
67773+ memset(ptr, 0, mod->core_size_rw);
67774+ mod->module_core_rw = ptr;
67775
67776- ptr = module_alloc_update_bounds(mod->init_size);
67777+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67778 /*
67779 * The pointer to this block is stored in the module structure
67780 * which is inside the block. This block doesn't need to be
67781 * scanned as it contains data and code that will be freed
67782 * after the module is initialized.
67783 */
67784- kmemleak_ignore(ptr);
67785- if (!ptr && mod->init_size) {
67786- module_free(mod, mod->module_core);
67787+ kmemleak_not_leak(ptr);
67788+ if (!ptr && mod->init_size_rw) {
67789+ module_free(mod, mod->module_core_rw);
67790 return -ENOMEM;
67791 }
67792- memset(ptr, 0, mod->init_size);
67793- mod->module_init = ptr;
67794+ memset(ptr, 0, mod->init_size_rw);
67795+ mod->module_init_rw = ptr;
67796+
67797+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67798+ kmemleak_not_leak(ptr);
67799+ if (!ptr) {
67800+ module_free(mod, mod->module_init_rw);
67801+ module_free(mod, mod->module_core_rw);
67802+ return -ENOMEM;
67803+ }
67804+
67805+ pax_open_kernel();
67806+ memset(ptr, 0, mod->core_size_rx);
67807+ pax_close_kernel();
67808+ mod->module_core_rx = ptr;
67809+
67810+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67811+ kmemleak_not_leak(ptr);
67812+ if (!ptr && mod->init_size_rx) {
67813+ module_free_exec(mod, mod->module_core_rx);
67814+ module_free(mod, mod->module_init_rw);
67815+ module_free(mod, mod->module_core_rw);
67816+ return -ENOMEM;
67817+ }
67818+
67819+ pax_open_kernel();
67820+ memset(ptr, 0, mod->init_size_rx);
67821+ pax_close_kernel();
67822+ mod->module_init_rx = ptr;
67823
67824 /* Transfer each section which specifies SHF_ALLOC */
67825 DEBUGP("final section addresses:\n");
67826@@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info)
67827 if (!(shdr->sh_flags & SHF_ALLOC))
67828 continue;
67829
67830- if (shdr->sh_entsize & INIT_OFFSET_MASK)
67831- dest = mod->module_init
67832- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67833- else
67834- dest = mod->module_core + shdr->sh_entsize;
67835+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67836+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67837+ dest = mod->module_init_rw
67838+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67839+ else
67840+ dest = mod->module_init_rx
67841+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67842+ } else {
67843+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67844+ dest = mod->module_core_rw + shdr->sh_entsize;
67845+ else
67846+ dest = mod->module_core_rx + shdr->sh_entsize;
67847+ }
67848+
67849+ if (shdr->sh_type != SHT_NOBITS) {
67850+
67851+#ifdef CONFIG_PAX_KERNEXEC
67852+#ifdef CONFIG_X86_64
67853+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67854+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67855+#endif
67856+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67857+ pax_open_kernel();
67858+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67859+ pax_close_kernel();
67860+ } else
67861+#endif
67862
67863- if (shdr->sh_type != SHT_NOBITS)
67864 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67865+ }
67866 /* Update sh_addr to point to copy in image. */
67867- shdr->sh_addr = (unsigned long)dest;
67868+
67869+#ifdef CONFIG_PAX_KERNEXEC
67870+ if (shdr->sh_flags & SHF_EXECINSTR)
67871+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
67872+ else
67873+#endif
67874+
67875+ shdr->sh_addr = (unsigned long)dest;
67876 DEBUGP("\t0x%lx %s\n",
67877 shdr->sh_addr, info->secstrings + shdr->sh_name);
67878 }
67879@@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod)
67880 * Do it before processing of module parameters, so the module
67881 * can provide parameter accessor functions of its own.
67882 */
67883- if (mod->module_init)
67884- flush_icache_range((unsigned long)mod->module_init,
67885- (unsigned long)mod->module_init
67886- + mod->init_size);
67887- flush_icache_range((unsigned long)mod->module_core,
67888- (unsigned long)mod->module_core + mod->core_size);
67889+ if (mod->module_init_rx)
67890+ flush_icache_range((unsigned long)mod->module_init_rx,
67891+ (unsigned long)mod->module_init_rx
67892+ + mod->init_size_rx);
67893+ flush_icache_range((unsigned long)mod->module_core_rx,
67894+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
67895
67896 set_fs(old_fs);
67897 }
67898@@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
67899 {
67900 kfree(info->strmap);
67901 percpu_modfree(mod);
67902- module_free(mod, mod->module_init);
67903- module_free(mod, mod->module_core);
67904+ module_free_exec(mod, mod->module_init_rx);
67905+ module_free_exec(mod, mod->module_core_rx);
67906+ module_free(mod, mod->module_init_rw);
67907+ module_free(mod, mod->module_core_rw);
67908 }
67909
67910 int __weak module_finalize(const Elf_Ehdr *hdr,
67911@@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod,
67912 if (err)
67913 goto free_unload;
67914
67915+ /* Now copy in args */
67916+ mod->args = strndup_user(uargs, ~0UL >> 1);
67917+ if (IS_ERR(mod->args)) {
67918+ err = PTR_ERR(mod->args);
67919+ goto free_unload;
67920+ }
67921+
67922 /* Set up MODINFO_ATTR fields */
67923 setup_modinfo(mod, &info);
67924
67925+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67926+ {
67927+ char *p, *p2;
67928+
67929+ if (strstr(mod->args, "grsec_modharden_netdev")) {
67930+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67931+ err = -EPERM;
67932+ goto free_modinfo;
67933+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67934+ p += strlen("grsec_modharden_normal");
67935+ p2 = strstr(p, "_");
67936+ if (p2) {
67937+ *p2 = '\0';
67938+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67939+ *p2 = '_';
67940+ }
67941+ err = -EPERM;
67942+ goto free_modinfo;
67943+ }
67944+ }
67945+#endif
67946+
67947 /* Fix up syms, so that st_value is a pointer to location. */
67948 err = simplify_symbols(mod, &info);
67949 if (err < 0)
67950@@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod,
67951
67952 flush_module_icache(mod);
67953
67954- /* Now copy in args */
67955- mod->args = strndup_user(uargs, ~0UL >> 1);
67956- if (IS_ERR(mod->args)) {
67957- err = PTR_ERR(mod->args);
67958- goto free_arch_cleanup;
67959- }
67960-
67961 /* Mark state as coming so strong_try_module_get() ignores us. */
67962 mod->state = MODULE_STATE_COMING;
67963
67964@@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod,
67965 unlock:
67966 mutex_unlock(&module_mutex);
67967 synchronize_sched();
67968- kfree(mod->args);
67969- free_arch_cleanup:
67970 module_arch_cleanup(mod);
67971 free_modinfo:
67972 free_modinfo(mod);
67973+ kfree(mod->args);
67974 free_unload:
67975 module_unload_free(mod);
67976 free_module:
67977@@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67978 MODULE_STATE_COMING, mod);
67979
67980 /* Set RO and NX regions for core */
67981- set_section_ro_nx(mod->module_core,
67982- mod->core_text_size,
67983- mod->core_ro_size,
67984- mod->core_size);
67985+ set_section_ro_nx(mod->module_core_rx,
67986+ mod->core_size_rx,
67987+ mod->core_size_rx,
67988+ mod->core_size_rx);
67989
67990 /* Set RO and NX regions for init */
67991- set_section_ro_nx(mod->module_init,
67992- mod->init_text_size,
67993- mod->init_ro_size,
67994- mod->init_size);
67995+ set_section_ro_nx(mod->module_init_rx,
67996+ mod->init_size_rx,
67997+ mod->init_size_rx,
67998+ mod->init_size_rx);
67999
68000 do_mod_ctors(mod);
68001 /* Start the module */
68002@@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68003 mod->strtab = mod->core_strtab;
68004 #endif
68005 unset_module_init_ro_nx(mod);
68006- module_free(mod, mod->module_init);
68007- mod->module_init = NULL;
68008- mod->init_size = 0;
68009- mod->init_ro_size = 0;
68010- mod->init_text_size = 0;
68011+ module_free(mod, mod->module_init_rw);
68012+ module_free_exec(mod, mod->module_init_rx);
68013+ mod->module_init_rw = NULL;
68014+ mod->module_init_rx = NULL;
68015+ mod->init_size_rw = 0;
68016+ mod->init_size_rx = 0;
68017 mutex_unlock(&module_mutex);
68018
68019 return 0;
68020@@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod,
68021 unsigned long nextval;
68022
68023 /* At worse, next value is at end of module */
68024- if (within_module_init(addr, mod))
68025- nextval = (unsigned long)mod->module_init+mod->init_text_size;
68026+ if (within_module_init_rx(addr, mod))
68027+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68028+ else if (within_module_init_rw(addr, mod))
68029+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68030+ else if (within_module_core_rx(addr, mod))
68031+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68032+ else if (within_module_core_rw(addr, mod))
68033+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68034 else
68035- nextval = (unsigned long)mod->module_core+mod->core_text_size;
68036+ return NULL;
68037
68038 /* Scan for closest preceding symbol, and next symbol. (ELF
68039 starts real symbols at 1). */
68040@@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p)
68041 char buf[8];
68042
68043 seq_printf(m, "%s %u",
68044- mod->name, mod->init_size + mod->core_size);
68045+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68046 print_unload_info(m, mod);
68047
68048 /* Informative for users. */
68049@@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p)
68050 mod->state == MODULE_STATE_COMING ? "Loading":
68051 "Live");
68052 /* Used by oprofile and other similar tools. */
68053- seq_printf(m, " 0x%pK", mod->module_core);
68054+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68055
68056 /* Taints info */
68057 if (mod->taints)
68058@@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = {
68059
68060 static int __init proc_modules_init(void)
68061 {
68062+#ifndef CONFIG_GRKERNSEC_HIDESYM
68063+#ifdef CONFIG_GRKERNSEC_PROC_USER
68064+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68065+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68066+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68067+#else
68068 proc_create("modules", 0, NULL, &proc_modules_operations);
68069+#endif
68070+#else
68071+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68072+#endif
68073 return 0;
68074 }
68075 module_init(proc_modules_init);
68076@@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr)
68077 {
68078 struct module *mod;
68079
68080- if (addr < module_addr_min || addr > module_addr_max)
68081+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68082+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
68083 return NULL;
68084
68085 list_for_each_entry_rcu(mod, &modules, list)
68086- if (within_module_core(addr, mod)
68087- || within_module_init(addr, mod))
68088+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
68089 return mod;
68090 return NULL;
68091 }
68092@@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr)
68093 */
68094 struct module *__module_text_address(unsigned long addr)
68095 {
68096- struct module *mod = __module_address(addr);
68097+ struct module *mod;
68098+
68099+#ifdef CONFIG_X86_32
68100+ addr = ktla_ktva(addr);
68101+#endif
68102+
68103+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68104+ return NULL;
68105+
68106+ mod = __module_address(addr);
68107+
68108 if (mod) {
68109 /* Make sure it's within the text section. */
68110- if (!within(addr, mod->module_init, mod->init_text_size)
68111- && !within(addr, mod->module_core, mod->core_text_size))
68112+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68113 mod = NULL;
68114 }
68115 return mod;
68116diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68117index 73da83a..fe46e99 100644
68118--- a/kernel/mutex-debug.c
68119+++ b/kernel/mutex-debug.c
68120@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68121 }
68122
68123 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68124- struct thread_info *ti)
68125+ struct task_struct *task)
68126 {
68127 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68128
68129 /* Mark the current thread as blocked on the lock: */
68130- ti->task->blocked_on = waiter;
68131+ task->blocked_on = waiter;
68132 }
68133
68134 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68135- struct thread_info *ti)
68136+ struct task_struct *task)
68137 {
68138 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68139- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68140- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68141- ti->task->blocked_on = NULL;
68142+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
68143+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68144+ task->blocked_on = NULL;
68145
68146 list_del_init(&waiter->list);
68147 waiter->task = NULL;
68148diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68149index 0799fd3..d06ae3b 100644
68150--- a/kernel/mutex-debug.h
68151+++ b/kernel/mutex-debug.h
68152@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68153 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68154 extern void debug_mutex_add_waiter(struct mutex *lock,
68155 struct mutex_waiter *waiter,
68156- struct thread_info *ti);
68157+ struct task_struct *task);
68158 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68159- struct thread_info *ti);
68160+ struct task_struct *task);
68161 extern void debug_mutex_unlock(struct mutex *lock);
68162 extern void debug_mutex_init(struct mutex *lock, const char *name,
68163 struct lock_class_key *key);
68164diff --git a/kernel/mutex.c b/kernel/mutex.c
68165index d607ed5..58d0a52 100644
68166--- a/kernel/mutex.c
68167+++ b/kernel/mutex.c
68168@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68169 spin_lock_mutex(&lock->wait_lock, flags);
68170
68171 debug_mutex_lock_common(lock, &waiter);
68172- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68173+ debug_mutex_add_waiter(lock, &waiter, task);
68174
68175 /* add waiting tasks to the end of the waitqueue (FIFO): */
68176 list_add_tail(&waiter.list, &lock->wait_list);
68177@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68178 * TASK_UNINTERRUPTIBLE case.)
68179 */
68180 if (unlikely(signal_pending_state(state, task))) {
68181- mutex_remove_waiter(lock, &waiter,
68182- task_thread_info(task));
68183+ mutex_remove_waiter(lock, &waiter, task);
68184 mutex_release(&lock->dep_map, 1, ip);
68185 spin_unlock_mutex(&lock->wait_lock, flags);
68186
68187@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68188 done:
68189 lock_acquired(&lock->dep_map, ip);
68190 /* got the lock - rejoice! */
68191- mutex_remove_waiter(lock, &waiter, current_thread_info());
68192+ mutex_remove_waiter(lock, &waiter, task);
68193 mutex_set_owner(lock);
68194
68195 /* set it to 0 if there are no waiters left: */
68196diff --git a/kernel/padata.c b/kernel/padata.c
68197index b91941d..0871d60 100644
68198--- a/kernel/padata.c
68199+++ b/kernel/padata.c
68200@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
68201 padata->pd = pd;
68202 padata->cb_cpu = cb_cpu;
68203
68204- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
68205- atomic_set(&pd->seq_nr, -1);
68206+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
68207+ atomic_set_unchecked(&pd->seq_nr, -1);
68208
68209- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
68210+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
68211
68212 target_cpu = padata_cpu_hash(padata);
68213 queue = per_cpu_ptr(pd->pqueue, target_cpu);
68214@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
68215 padata_init_pqueues(pd);
68216 padata_init_squeues(pd);
68217 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
68218- atomic_set(&pd->seq_nr, -1);
68219+ atomic_set_unchecked(&pd->seq_nr, -1);
68220 atomic_set(&pd->reorder_objects, 0);
68221 atomic_set(&pd->refcnt, 0);
68222 pd->pinst = pinst;
68223diff --git a/kernel/panic.c b/kernel/panic.c
68224index d7bb697..0ff55cc 100644
68225--- a/kernel/panic.c
68226+++ b/kernel/panic.c
68227@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
68228 va_end(args);
68229 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
68230 #ifdef CONFIG_DEBUG_BUGVERBOSE
68231- dump_stack();
68232+ /*
68233+ * Avoid nested stack-dumping if a panic occurs during oops processing
68234+ */
68235+ if (!oops_in_progress)
68236+ dump_stack();
68237 #endif
68238
68239 /*
68240@@ -371,7 +375,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
68241 const char *board;
68242
68243 printk(KERN_WARNING "------------[ cut here ]------------\n");
68244- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68245+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68246 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68247 if (board)
68248 printk(KERN_WARNING "Hardware name: %s\n", board);
68249@@ -426,7 +430,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68250 */
68251 void __stack_chk_fail(void)
68252 {
68253- panic("stack-protector: Kernel stack is corrupted in: %p\n",
68254+ dump_stack();
68255+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68256 __builtin_return_address(0));
68257 }
68258 EXPORT_SYMBOL(__stack_chk_fail);
68259diff --git a/kernel/pid.c b/kernel/pid.c
68260index e432057..a2b2ac5 100644
68261--- a/kernel/pid.c
68262+++ b/kernel/pid.c
68263@@ -33,6 +33,7 @@
68264 #include <linux/rculist.h>
68265 #include <linux/bootmem.h>
68266 #include <linux/hash.h>
68267+#include <linux/security.h>
68268 #include <linux/pid_namespace.h>
68269 #include <linux/init_task.h>
68270 #include <linux/syscalls.h>
68271@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
68272
68273 int pid_max = PID_MAX_DEFAULT;
68274
68275-#define RESERVED_PIDS 300
68276+#define RESERVED_PIDS 500
68277
68278 int pid_max_min = RESERVED_PIDS + 1;
68279 int pid_max_max = PID_MAX_LIMIT;
68280@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
68281 */
68282 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68283 {
68284+ struct task_struct *task;
68285+
68286 rcu_lockdep_assert(rcu_read_lock_held());
68287- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68288+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68289+
68290+ if (gr_pid_is_chrooted(task))
68291+ return NULL;
68292+
68293+ return task;
68294 }
68295
68296 struct task_struct *find_task_by_vpid(pid_t vnr)
68297@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
68298 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68299 }
68300
68301+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68302+{
68303+ rcu_lockdep_assert(rcu_read_lock_held());
68304+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68305+}
68306+
68307 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68308 {
68309 struct pid *pid;
68310diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
68311index 640ded8..3dafb85 100644
68312--- a/kernel/posix-cpu-timers.c
68313+++ b/kernel/posix-cpu-timers.c
68314@@ -6,6 +6,7 @@
68315 #include <linux/posix-timers.h>
68316 #include <linux/errno.h>
68317 #include <linux/math64.h>
68318+#include <linux/security.h>
68319 #include <asm/uaccess.h>
68320 #include <linux/kernel_stat.h>
68321 #include <trace/events/timer.h>
68322@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
68323
68324 static __init int init_posix_cpu_timers(void)
68325 {
68326- struct k_clock process = {
68327+ static struct k_clock process = {
68328 .clock_getres = process_cpu_clock_getres,
68329 .clock_get = process_cpu_clock_get,
68330 .timer_create = process_cpu_timer_create,
68331 .nsleep = process_cpu_nsleep,
68332 .nsleep_restart = process_cpu_nsleep_restart,
68333 };
68334- struct k_clock thread = {
68335+ static struct k_clock thread = {
68336 .clock_getres = thread_cpu_clock_getres,
68337 .clock_get = thread_cpu_clock_get,
68338 .timer_create = thread_cpu_timer_create,
68339diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
68340index 4556182..9335419 100644
68341--- a/kernel/posix-timers.c
68342+++ b/kernel/posix-timers.c
68343@@ -43,6 +43,7 @@
68344 #include <linux/idr.h>
68345 #include <linux/posix-clock.h>
68346 #include <linux/posix-timers.h>
68347+#include <linux/grsecurity.h>
68348 #include <linux/syscalls.h>
68349 #include <linux/wait.h>
68350 #include <linux/workqueue.h>
68351@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
68352 * which we beg off on and pass to do_sys_settimeofday().
68353 */
68354
68355-static struct k_clock posix_clocks[MAX_CLOCKS];
68356+static struct k_clock *posix_clocks[MAX_CLOCKS];
68357
68358 /*
68359 * These ones are defined below.
68360@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
68361 */
68362 static __init int init_posix_timers(void)
68363 {
68364- struct k_clock clock_realtime = {
68365+ static struct k_clock clock_realtime = {
68366 .clock_getres = hrtimer_get_res,
68367 .clock_get = posix_clock_realtime_get,
68368 .clock_set = posix_clock_realtime_set,
68369@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
68370 .timer_get = common_timer_get,
68371 .timer_del = common_timer_del,
68372 };
68373- struct k_clock clock_monotonic = {
68374+ static struct k_clock clock_monotonic = {
68375 .clock_getres = hrtimer_get_res,
68376 .clock_get = posix_ktime_get_ts,
68377 .nsleep = common_nsleep,
68378@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
68379 .timer_get = common_timer_get,
68380 .timer_del = common_timer_del,
68381 };
68382- struct k_clock clock_monotonic_raw = {
68383+ static struct k_clock clock_monotonic_raw = {
68384 .clock_getres = hrtimer_get_res,
68385 .clock_get = posix_get_monotonic_raw,
68386 };
68387- struct k_clock clock_realtime_coarse = {
68388+ static struct k_clock clock_realtime_coarse = {
68389 .clock_getres = posix_get_coarse_res,
68390 .clock_get = posix_get_realtime_coarse,
68391 };
68392- struct k_clock clock_monotonic_coarse = {
68393+ static struct k_clock clock_monotonic_coarse = {
68394 .clock_getres = posix_get_coarse_res,
68395 .clock_get = posix_get_monotonic_coarse,
68396 };
68397- struct k_clock clock_boottime = {
68398+ static struct k_clock clock_boottime = {
68399 .clock_getres = hrtimer_get_res,
68400 .clock_get = posix_get_boottime,
68401 .nsleep = common_nsleep,
68402@@ -272,6 +273,8 @@ static __init int init_posix_timers(void)
68403 .timer_del = common_timer_del,
68404 };
68405
68406+ pax_track_stack();
68407+
68408 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
68409 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
68410 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
68411@@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
68412 return;
68413 }
68414
68415- posix_clocks[clock_id] = *new_clock;
68416+ posix_clocks[clock_id] = new_clock;
68417 }
68418 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
68419
68420@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
68421 return (id & CLOCKFD_MASK) == CLOCKFD ?
68422 &clock_posix_dynamic : &clock_posix_cpu;
68423
68424- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
68425+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
68426 return NULL;
68427- return &posix_clocks[id];
68428+ return posix_clocks[id];
68429 }
68430
68431 static int common_timer_create(struct k_itimer *new_timer)
68432@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
68433 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68434 return -EFAULT;
68435
68436+ /* only the CLOCK_REALTIME clock can be set, all other clocks
68437+ have their clock_set fptr set to a nosettime dummy function
68438+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68439+ call common_clock_set, which calls do_sys_settimeofday, which
68440+ we hook
68441+ */
68442+
68443 return kc->clock_set(which_clock, &new_tp);
68444 }
68445
68446diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
68447index d523593..68197a4 100644
68448--- a/kernel/power/poweroff.c
68449+++ b/kernel/power/poweroff.c
68450@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
68451 .enable_mask = SYSRQ_ENABLE_BOOT,
68452 };
68453
68454-static int pm_sysrq_init(void)
68455+static int __init pm_sysrq_init(void)
68456 {
68457 register_sysrq_key('o', &sysrq_poweroff_op);
68458 return 0;
68459diff --git a/kernel/power/process.c b/kernel/power/process.c
68460index 0cf3a27..5481be4 100644
68461--- a/kernel/power/process.c
68462+++ b/kernel/power/process.c
68463@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
68464 u64 elapsed_csecs64;
68465 unsigned int elapsed_csecs;
68466 bool wakeup = false;
68467+ bool timedout = false;
68468
68469 do_gettimeofday(&start);
68470
68471@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
68472
68473 while (true) {
68474 todo = 0;
68475+ if (time_after(jiffies, end_time))
68476+ timedout = true;
68477 read_lock(&tasklist_lock);
68478 do_each_thread(g, p) {
68479 if (frozen(p) || !freezable(p))
68480@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
68481 * try_to_stop() after schedule() in ptrace/signal
68482 * stop sees TIF_FREEZE.
68483 */
68484- if (!task_is_stopped_or_traced(p) &&
68485- !freezer_should_skip(p))
68486+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
68487 todo++;
68488+ if (timedout) {
68489+ printk(KERN_ERR "Task refusing to freeze:\n");
68490+ sched_show_task(p);
68491+ }
68492+ }
68493 } while_each_thread(g, p);
68494 read_unlock(&tasklist_lock);
68495
68496@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
68497 todo += wq_busy;
68498 }
68499
68500- if (!todo || time_after(jiffies, end_time))
68501+ if (!todo || timedout)
68502 break;
68503
68504 if (pm_wakeup_pending()) {
68505diff --git a/kernel/printk.c b/kernel/printk.c
68506index 28a40d8..2411bec 100644
68507--- a/kernel/printk.c
68508+++ b/kernel/printk.c
68509@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
68510 if (from_file && type != SYSLOG_ACTION_OPEN)
68511 return 0;
68512
68513+#ifdef CONFIG_GRKERNSEC_DMESG
68514+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
68515+ return -EPERM;
68516+#endif
68517+
68518 if (syslog_action_restricted(type)) {
68519 if (capable(CAP_SYSLOG))
68520 return 0;
68521diff --git a/kernel/profile.c b/kernel/profile.c
68522index 961b389..c451353 100644
68523--- a/kernel/profile.c
68524+++ b/kernel/profile.c
68525@@ -39,7 +39,7 @@ struct profile_hit {
68526 /* Oprofile timer tick hook */
68527 static int (*timer_hook)(struct pt_regs *) __read_mostly;
68528
68529-static atomic_t *prof_buffer;
68530+static atomic_unchecked_t *prof_buffer;
68531 static unsigned long prof_len, prof_shift;
68532
68533 int prof_on __read_mostly;
68534@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
68535 hits[i].pc = 0;
68536 continue;
68537 }
68538- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68539+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68540 hits[i].hits = hits[i].pc = 0;
68541 }
68542 }
68543@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68544 * Add the current hit(s) and flush the write-queue out
68545 * to the global buffer:
68546 */
68547- atomic_add(nr_hits, &prof_buffer[pc]);
68548+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
68549 for (i = 0; i < NR_PROFILE_HIT; ++i) {
68550- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68551+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68552 hits[i].pc = hits[i].hits = 0;
68553 }
68554 out:
68555@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68556 {
68557 unsigned long pc;
68558 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68559- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68560+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68561 }
68562 #endif /* !CONFIG_SMP */
68563
68564@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
68565 return -EFAULT;
68566 buf++; p++; count--; read++;
68567 }
68568- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68569+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68570 if (copy_to_user(buf, (void *)pnt, count))
68571 return -EFAULT;
68572 read += count;
68573@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
68574 }
68575 #endif
68576 profile_discard_flip_buffers();
68577- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68578+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68579 return count;
68580 }
68581
68582diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68583index 67d1fdd..1af21e2 100644
68584--- a/kernel/ptrace.c
68585+++ b/kernel/ptrace.c
68586@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
68587 return ret;
68588 }
68589
68590-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
68591+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
68592+ unsigned int log)
68593 {
68594 const struct cred *cred = current_cred(), *tcred;
68595
68596@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
68597 cred->gid == tcred->sgid &&
68598 cred->gid == tcred->gid))
68599 goto ok;
68600- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
68601+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
68602+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
68603 goto ok;
68604 rcu_read_unlock();
68605 return -EPERM;
68606@@ -207,7 +209,9 @@ ok:
68607 smp_rmb();
68608 if (task->mm)
68609 dumpable = get_dumpable(task->mm);
68610- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
68611+ if (!dumpable &&
68612+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
68613+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
68614 return -EPERM;
68615
68616 return security_ptrace_access_check(task, mode);
68617@@ -217,7 +221,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
68618 {
68619 int err;
68620 task_lock(task);
68621- err = __ptrace_may_access(task, mode);
68622+ err = __ptrace_may_access(task, mode, 0);
68623+ task_unlock(task);
68624+ return !err;
68625+}
68626+
68627+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
68628+{
68629+ int err;
68630+ task_lock(task);
68631+ err = __ptrace_may_access(task, mode, 1);
68632 task_unlock(task);
68633 return !err;
68634 }
68635@@ -262,7 +275,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68636 goto out;
68637
68638 task_lock(task);
68639- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
68640+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
68641 task_unlock(task);
68642 if (retval)
68643 goto unlock_creds;
68644@@ -277,7 +290,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68645 task->ptrace = PT_PTRACED;
68646 if (seize)
68647 task->ptrace |= PT_SEIZED;
68648- if (task_ns_capable(task, CAP_SYS_PTRACE))
68649+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
68650 task->ptrace |= PT_PTRACE_CAP;
68651
68652 __ptrace_link(task, current);
68653@@ -472,6 +485,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68654 {
68655 int copied = 0;
68656
68657+ pax_track_stack();
68658+
68659 while (len > 0) {
68660 char buf[128];
68661 int this_len, retval;
68662@@ -483,7 +498,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68663 break;
68664 return -EIO;
68665 }
68666- if (copy_to_user(dst, buf, retval))
68667+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68668 return -EFAULT;
68669 copied += retval;
68670 src += retval;
68671@@ -497,6 +512,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
68672 {
68673 int copied = 0;
68674
68675+ pax_track_stack();
68676+
68677 while (len > 0) {
68678 char buf[128];
68679 int this_len, retval;
68680@@ -680,10 +697,12 @@ int ptrace_request(struct task_struct *child, long request,
68681 bool seized = child->ptrace & PT_SEIZED;
68682 int ret = -EIO;
68683 siginfo_t siginfo, *si;
68684- void __user *datavp = (void __user *) data;
68685+ void __user *datavp = (__force void __user *) data;
68686 unsigned long __user *datalp = datavp;
68687 unsigned long flags;
68688
68689+ pax_track_stack();
68690+
68691 switch (request) {
68692 case PTRACE_PEEKTEXT:
68693 case PTRACE_PEEKDATA:
68694@@ -882,14 +901,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68695 goto out;
68696 }
68697
68698+ if (gr_handle_ptrace(child, request)) {
68699+ ret = -EPERM;
68700+ goto out_put_task_struct;
68701+ }
68702+
68703 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68704 ret = ptrace_attach(child, request, data);
68705 /*
68706 * Some architectures need to do book-keeping after
68707 * a ptrace attach.
68708 */
68709- if (!ret)
68710+ if (!ret) {
68711 arch_ptrace_attach(child);
68712+ gr_audit_ptrace(child);
68713+ }
68714 goto out_put_task_struct;
68715 }
68716
68717@@ -915,7 +941,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68718 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68719 if (copied != sizeof(tmp))
68720 return -EIO;
68721- return put_user(tmp, (unsigned long __user *)data);
68722+ return put_user(tmp, (__force unsigned long __user *)data);
68723 }
68724
68725 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68726@@ -938,6 +964,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
68727 siginfo_t siginfo;
68728 int ret;
68729
68730+ pax_track_stack();
68731+
68732 switch (request) {
68733 case PTRACE_PEEKTEXT:
68734 case PTRACE_PEEKDATA:
68735@@ -1025,14 +1053,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68736 goto out;
68737 }
68738
68739+ if (gr_handle_ptrace(child, request)) {
68740+ ret = -EPERM;
68741+ goto out_put_task_struct;
68742+ }
68743+
68744 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68745 ret = ptrace_attach(child, request, data);
68746 /*
68747 * Some architectures need to do book-keeping after
68748 * a ptrace attach.
68749 */
68750- if (!ret)
68751+ if (!ret) {
68752 arch_ptrace_attach(child);
68753+ gr_audit_ptrace(child);
68754+ }
68755 goto out_put_task_struct;
68756 }
68757
68758diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68759index 98f51b1..30b950c 100644
68760--- a/kernel/rcutorture.c
68761+++ b/kernel/rcutorture.c
68762@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68763 { 0 };
68764 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68765 { 0 };
68766-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68767-static atomic_t n_rcu_torture_alloc;
68768-static atomic_t n_rcu_torture_alloc_fail;
68769-static atomic_t n_rcu_torture_free;
68770-static atomic_t n_rcu_torture_mberror;
68771-static atomic_t n_rcu_torture_error;
68772+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68773+static atomic_unchecked_t n_rcu_torture_alloc;
68774+static atomic_unchecked_t n_rcu_torture_alloc_fail;
68775+static atomic_unchecked_t n_rcu_torture_free;
68776+static atomic_unchecked_t n_rcu_torture_mberror;
68777+static atomic_unchecked_t n_rcu_torture_error;
68778 static long n_rcu_torture_boost_ktrerror;
68779 static long n_rcu_torture_boost_rterror;
68780 static long n_rcu_torture_boost_failure;
68781@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
68782
68783 spin_lock_bh(&rcu_torture_lock);
68784 if (list_empty(&rcu_torture_freelist)) {
68785- atomic_inc(&n_rcu_torture_alloc_fail);
68786+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68787 spin_unlock_bh(&rcu_torture_lock);
68788 return NULL;
68789 }
68790- atomic_inc(&n_rcu_torture_alloc);
68791+ atomic_inc_unchecked(&n_rcu_torture_alloc);
68792 p = rcu_torture_freelist.next;
68793 list_del_init(p);
68794 spin_unlock_bh(&rcu_torture_lock);
68795@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
68796 static void
68797 rcu_torture_free(struct rcu_torture *p)
68798 {
68799- atomic_inc(&n_rcu_torture_free);
68800+ atomic_inc_unchecked(&n_rcu_torture_free);
68801 spin_lock_bh(&rcu_torture_lock);
68802 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68803 spin_unlock_bh(&rcu_torture_lock);
68804@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
68805 i = rp->rtort_pipe_count;
68806 if (i > RCU_TORTURE_PIPE_LEN)
68807 i = RCU_TORTURE_PIPE_LEN;
68808- atomic_inc(&rcu_torture_wcount[i]);
68809+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68810 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68811 rp->rtort_mbtest = 0;
68812 rcu_torture_free(rp);
68813@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68814 i = rp->rtort_pipe_count;
68815 if (i > RCU_TORTURE_PIPE_LEN)
68816 i = RCU_TORTURE_PIPE_LEN;
68817- atomic_inc(&rcu_torture_wcount[i]);
68818+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68819 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68820 rp->rtort_mbtest = 0;
68821 list_del(&rp->rtort_free);
68822@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
68823 i = old_rp->rtort_pipe_count;
68824 if (i > RCU_TORTURE_PIPE_LEN)
68825 i = RCU_TORTURE_PIPE_LEN;
68826- atomic_inc(&rcu_torture_wcount[i]);
68827+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68828 old_rp->rtort_pipe_count++;
68829 cur_ops->deferred_free(old_rp);
68830 }
68831@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused)
68832 return;
68833 }
68834 if (p->rtort_mbtest == 0)
68835- atomic_inc(&n_rcu_torture_mberror);
68836+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68837 spin_lock(&rand_lock);
68838 cur_ops->read_delay(&rand);
68839 n_rcu_torture_timers++;
68840@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
68841 continue;
68842 }
68843 if (p->rtort_mbtest == 0)
68844- atomic_inc(&n_rcu_torture_mberror);
68845+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68846 cur_ops->read_delay(&rand);
68847 preempt_disable();
68848 pipe_count = p->rtort_pipe_count;
68849@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
68850 rcu_torture_current,
68851 rcu_torture_current_version,
68852 list_empty(&rcu_torture_freelist),
68853- atomic_read(&n_rcu_torture_alloc),
68854- atomic_read(&n_rcu_torture_alloc_fail),
68855- atomic_read(&n_rcu_torture_free),
68856- atomic_read(&n_rcu_torture_mberror),
68857+ atomic_read_unchecked(&n_rcu_torture_alloc),
68858+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68859+ atomic_read_unchecked(&n_rcu_torture_free),
68860+ atomic_read_unchecked(&n_rcu_torture_mberror),
68861 n_rcu_torture_boost_ktrerror,
68862 n_rcu_torture_boost_rterror,
68863 n_rcu_torture_boost_failure,
68864 n_rcu_torture_boosts,
68865 n_rcu_torture_timers);
68866- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68867+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68868 n_rcu_torture_boost_ktrerror != 0 ||
68869 n_rcu_torture_boost_rterror != 0 ||
68870 n_rcu_torture_boost_failure != 0)
68871@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
68872 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68873 if (i > 1) {
68874 cnt += sprintf(&page[cnt], "!!! ");
68875- atomic_inc(&n_rcu_torture_error);
68876+ atomic_inc_unchecked(&n_rcu_torture_error);
68877 WARN_ON_ONCE(1);
68878 }
68879 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68880@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
68881 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68882 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68883 cnt += sprintf(&page[cnt], " %d",
68884- atomic_read(&rcu_torture_wcount[i]));
68885+ atomic_read_unchecked(&rcu_torture_wcount[i]));
68886 }
68887 cnt += sprintf(&page[cnt], "\n");
68888 if (cur_ops->stats)
68889@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
68890
68891 if (cur_ops->cleanup)
68892 cur_ops->cleanup();
68893- if (atomic_read(&n_rcu_torture_error))
68894+ if (atomic_read_unchecked(&n_rcu_torture_error))
68895 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68896 else
68897 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68898@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
68899
68900 rcu_torture_current = NULL;
68901 rcu_torture_current_version = 0;
68902- atomic_set(&n_rcu_torture_alloc, 0);
68903- atomic_set(&n_rcu_torture_alloc_fail, 0);
68904- atomic_set(&n_rcu_torture_free, 0);
68905- atomic_set(&n_rcu_torture_mberror, 0);
68906- atomic_set(&n_rcu_torture_error, 0);
68907+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68908+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68909+ atomic_set_unchecked(&n_rcu_torture_free, 0);
68910+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68911+ atomic_set_unchecked(&n_rcu_torture_error, 0);
68912 n_rcu_torture_boost_ktrerror = 0;
68913 n_rcu_torture_boost_rterror = 0;
68914 n_rcu_torture_boost_failure = 0;
68915 n_rcu_torture_boosts = 0;
68916 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68917- atomic_set(&rcu_torture_wcount[i], 0);
68918+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68919 for_each_possible_cpu(cpu) {
68920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68921 per_cpu(rcu_torture_count, cpu)[i] = 0;
68922diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68923index ba06207..85d8ba8 100644
68924--- a/kernel/rcutree.c
68925+++ b/kernel/rcutree.c
68926@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
68927 }
68928 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68929 smp_mb__before_atomic_inc(); /* See above. */
68930- atomic_inc(&rdtp->dynticks);
68931+ atomic_inc_unchecked(&rdtp->dynticks);
68932 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68933- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68934+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68935 local_irq_restore(flags);
68936
68937 /* If the interrupt queued a callback, get out of dyntick mode. */
68938@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
68939 return;
68940 }
68941 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68942- atomic_inc(&rdtp->dynticks);
68943+ atomic_inc_unchecked(&rdtp->dynticks);
68944 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68945 smp_mb__after_atomic_inc(); /* See above. */
68946- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68947+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68948 local_irq_restore(flags);
68949 }
68950
68951@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
68952 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68953
68954 if (rdtp->dynticks_nmi_nesting == 0 &&
68955- (atomic_read(&rdtp->dynticks) & 0x1))
68956+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68957 return;
68958 rdtp->dynticks_nmi_nesting++;
68959 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68960- atomic_inc(&rdtp->dynticks);
68961+ atomic_inc_unchecked(&rdtp->dynticks);
68962 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68963 smp_mb__after_atomic_inc(); /* See above. */
68964- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68965+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68966 }
68967
68968 /**
68969@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
68970 return;
68971 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68972 smp_mb__before_atomic_inc(); /* See above. */
68973- atomic_inc(&rdtp->dynticks);
68974+ atomic_inc_unchecked(&rdtp->dynticks);
68975 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68976- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68977+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68978 }
68979
68980 /**
68981@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
68982 */
68983 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68984 {
68985- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68986+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68987 return 0;
68988 }
68989
68990@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68991 unsigned long curr;
68992 unsigned long snap;
68993
68994- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
68995+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68996 snap = (unsigned long)rdp->dynticks_snap;
68997
68998 /*
68999@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
69000 /*
69001 * Do softirq processing for the current CPU.
69002 */
69003-static void rcu_process_callbacks(struct softirq_action *unused)
69004+static void rcu_process_callbacks(void)
69005 {
69006 __rcu_process_callbacks(&rcu_sched_state,
69007 &__get_cpu_var(rcu_sched_data));
69008diff --git a/kernel/rcutree.h b/kernel/rcutree.h
69009index 01b2ccd..4f5d80a 100644
69010--- a/kernel/rcutree.h
69011+++ b/kernel/rcutree.h
69012@@ -86,7 +86,7 @@
69013 struct rcu_dynticks {
69014 int dynticks_nesting; /* Track irq/process nesting level. */
69015 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69016- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
69017+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
69018 };
69019
69020 /* RCU's kthread states for tracing. */
69021diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
69022index 8aafbb8..2fca109 100644
69023--- a/kernel/rcutree_plugin.h
69024+++ b/kernel/rcutree_plugin.h
69025@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
69026
69027 /* Clean up and exit. */
69028 smp_mb(); /* ensure expedited GP seen before counter increment. */
69029- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69030+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69031 unlock_mb_ret:
69032 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69033 mb_ret:
69034@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
69035
69036 #else /* #ifndef CONFIG_SMP */
69037
69038-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69039-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69040+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69041+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69042
69043 static int synchronize_sched_expedited_cpu_stop(void *data)
69044 {
69045@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
69046 int firstsnap, s, snap, trycount = 0;
69047
69048 /* Note that atomic_inc_return() implies full memory barrier. */
69049- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69050+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69051 get_online_cpus();
69052
69053 /*
69054@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
69055 }
69056
69057 /* Check to see if someone else did our work for us. */
69058- s = atomic_read(&sync_sched_expedited_done);
69059+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69060 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69061 smp_mb(); /* ensure test happens before caller kfree */
69062 return;
69063@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
69064 * grace period works for us.
69065 */
69066 get_online_cpus();
69067- snap = atomic_read(&sync_sched_expedited_started) - 1;
69068+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
69069 smp_mb(); /* ensure read is before try_stop_cpus(). */
69070 }
69071
69072@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
69073 * than we did beat us to the punch.
69074 */
69075 do {
69076- s = atomic_read(&sync_sched_expedited_done);
69077+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69078 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69079 smp_mb(); /* ensure test happens before caller kfree */
69080 break;
69081 }
69082- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69083+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69084
69085 put_online_cpus();
69086 }
69087@@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
69088 for_each_online_cpu(thatcpu) {
69089 if (thatcpu == cpu)
69090 continue;
69091- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
69092+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
69093 thatcpu).dynticks);
69094 smp_mb(); /* Order sampling of snap with end of grace period. */
69095 if ((snap & 0x1) != 0) {
69096diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69097index 3b0c098..43ba2d8 100644
69098--- a/kernel/rcutree_trace.c
69099+++ b/kernel/rcutree_trace.c
69100@@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69101 rdp->qs_pending);
69102 #ifdef CONFIG_NO_HZ
69103 seq_printf(m, " dt=%d/%d/%d df=%lu",
69104- atomic_read(&rdp->dynticks->dynticks),
69105+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69106 rdp->dynticks->dynticks_nesting,
69107 rdp->dynticks->dynticks_nmi_nesting,
69108 rdp->dynticks_fqs);
69109@@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69110 rdp->qs_pending);
69111 #ifdef CONFIG_NO_HZ
69112 seq_printf(m, ",%d,%d,%d,%lu",
69113- atomic_read(&rdp->dynticks->dynticks),
69114+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69115 rdp->dynticks->dynticks_nesting,
69116 rdp->dynticks->dynticks_nmi_nesting,
69117 rdp->dynticks_fqs);
69118diff --git a/kernel/relay.c b/kernel/relay.c
69119index 859ea5a..096e2fe 100644
69120--- a/kernel/relay.c
69121+++ b/kernel/relay.c
69122@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
69123 };
69124 ssize_t ret;
69125
69126+ pax_track_stack();
69127+
69128 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
69129 return 0;
69130 if (splice_grow_spd(pipe, &spd))
69131diff --git a/kernel/resource.c b/kernel/resource.c
69132index c8dc249..f1e2359 100644
69133--- a/kernel/resource.c
69134+++ b/kernel/resource.c
69135@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69136
69137 static int __init ioresources_init(void)
69138 {
69139+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69140+#ifdef CONFIG_GRKERNSEC_PROC_USER
69141+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69142+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69143+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69144+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69145+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69146+#endif
69147+#else
69148 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69149 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69150+#endif
69151 return 0;
69152 }
69153 __initcall(ioresources_init);
69154diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69155index 5c9ccd3..a35e22b 100644
69156--- a/kernel/rtmutex-tester.c
69157+++ b/kernel/rtmutex-tester.c
69158@@ -20,7 +20,7 @@
69159 #define MAX_RT_TEST_MUTEXES 8
69160
69161 static spinlock_t rttest_lock;
69162-static atomic_t rttest_event;
69163+static atomic_unchecked_t rttest_event;
69164
69165 struct test_thread_data {
69166 int opcode;
69167@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69168
69169 case RTTEST_LOCKCONT:
69170 td->mutexes[td->opdata] = 1;
69171- td->event = atomic_add_return(1, &rttest_event);
69172+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69173 return 0;
69174
69175 case RTTEST_RESET:
69176@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69177 return 0;
69178
69179 case RTTEST_RESETEVENT:
69180- atomic_set(&rttest_event, 0);
69181+ atomic_set_unchecked(&rttest_event, 0);
69182 return 0;
69183
69184 default:
69185@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69186 return ret;
69187
69188 td->mutexes[id] = 1;
69189- td->event = atomic_add_return(1, &rttest_event);
69190+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69191 rt_mutex_lock(&mutexes[id]);
69192- td->event = atomic_add_return(1, &rttest_event);
69193+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69194 td->mutexes[id] = 4;
69195 return 0;
69196
69197@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69198 return ret;
69199
69200 td->mutexes[id] = 1;
69201- td->event = atomic_add_return(1, &rttest_event);
69202+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69203 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69204- td->event = atomic_add_return(1, &rttest_event);
69205+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69206 td->mutexes[id] = ret ? 0 : 4;
69207 return ret ? -EINTR : 0;
69208
69209@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69210 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69211 return ret;
69212
69213- td->event = atomic_add_return(1, &rttest_event);
69214+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69215 rt_mutex_unlock(&mutexes[id]);
69216- td->event = atomic_add_return(1, &rttest_event);
69217+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69218 td->mutexes[id] = 0;
69219 return 0;
69220
69221@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69222 break;
69223
69224 td->mutexes[dat] = 2;
69225- td->event = atomic_add_return(1, &rttest_event);
69226+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69227 break;
69228
69229 default:
69230@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69231 return;
69232
69233 td->mutexes[dat] = 3;
69234- td->event = atomic_add_return(1, &rttest_event);
69235+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69236 break;
69237
69238 case RTTEST_LOCKNOWAIT:
69239@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69240 return;
69241
69242 td->mutexes[dat] = 1;
69243- td->event = atomic_add_return(1, &rttest_event);
69244+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69245 return;
69246
69247 default:
69248diff --git a/kernel/sched.c b/kernel/sched.c
69249index b50b0f0..91e9aed 100644
69250--- a/kernel/sched.c
69251+++ b/kernel/sched.c
69252@@ -4254,6 +4254,19 @@ pick_next_task(struct rq *rq)
69253 BUG(); /* the idle class will always have a runnable task */
69254 }
69255
69256+#ifdef CONFIG_GRKERNSEC_SETXID
69257+extern void gr_delayed_cred_worker(void);
69258+static inline void gr_cred_schedule(void)
69259+{
69260+ if (unlikely(current->delayed_cred))
69261+ gr_delayed_cred_worker();
69262+}
69263+#else
69264+static inline void gr_cred_schedule(void)
69265+{
69266+}
69267+#endif
69268+
69269 /*
69270 * __schedule() is the main scheduler function.
69271 */
69272@@ -4264,6 +4277,8 @@ static void __sched __schedule(void)
69273 struct rq *rq;
69274 int cpu;
69275
69276+ pax_track_stack();
69277+
69278 need_resched:
69279 preempt_disable();
69280 cpu = smp_processor_id();
69281@@ -4273,6 +4288,8 @@ need_resched:
69282
69283 schedule_debug(prev);
69284
69285+ gr_cred_schedule();
69286+
69287 if (sched_feat(HRTICK))
69288 hrtick_clear(rq);
69289
69290@@ -4950,6 +4967,8 @@ int can_nice(const struct task_struct *p, const int nice)
69291 /* convert nice value [19,-20] to rlimit style value [1,40] */
69292 int nice_rlim = 20 - nice;
69293
69294+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69295+
69296 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
69297 capable(CAP_SYS_NICE));
69298 }
69299@@ -4983,7 +5002,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69300 if (nice > 19)
69301 nice = 19;
69302
69303- if (increment < 0 && !can_nice(current, nice))
69304+ if (increment < 0 && (!can_nice(current, nice) ||
69305+ gr_handle_chroot_nice()))
69306 return -EPERM;
69307
69308 retval = security_task_setnice(current, nice);
69309@@ -5127,6 +5147,7 @@ recheck:
69310 unsigned long rlim_rtprio =
69311 task_rlimit(p, RLIMIT_RTPRIO);
69312
69313+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
69314 /* can't set/change the rt policy */
69315 if (policy != p->policy && !rlim_rtprio)
69316 return -EPERM;
69317diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
69318index 429242f..d7cca82 100644
69319--- a/kernel/sched_autogroup.c
69320+++ b/kernel/sched_autogroup.c
69321@@ -7,7 +7,7 @@
69322
69323 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69324 static struct autogroup autogroup_default;
69325-static atomic_t autogroup_seq_nr;
69326+static atomic_unchecked_t autogroup_seq_nr;
69327
69328 static void __init autogroup_init(struct task_struct *init_task)
69329 {
69330@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
69331
69332 kref_init(&ag->kref);
69333 init_rwsem(&ag->lock);
69334- ag->id = atomic_inc_return(&autogroup_seq_nr);
69335+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69336 ag->tg = tg;
69337 #ifdef CONFIG_RT_GROUP_SCHED
69338 /*
69339diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
69340index bc8ee99..b6f6492 100644
69341--- a/kernel/sched_fair.c
69342+++ b/kernel/sched_fair.c
69343@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
69344 * run_rebalance_domains is triggered when needed from the scheduler tick.
69345 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
69346 */
69347-static void run_rebalance_domains(struct softirq_action *h)
69348+static void run_rebalance_domains(void)
69349 {
69350 int this_cpu = smp_processor_id();
69351 struct rq *this_rq = cpu_rq(this_cpu);
69352diff --git a/kernel/signal.c b/kernel/signal.c
69353index 195331c..e89634ce 100644
69354--- a/kernel/signal.c
69355+++ b/kernel/signal.c
69356@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
69357
69358 int print_fatal_signals __read_mostly;
69359
69360-static void __user *sig_handler(struct task_struct *t, int sig)
69361+static __sighandler_t sig_handler(struct task_struct *t, int sig)
69362 {
69363 return t->sighand->action[sig - 1].sa.sa_handler;
69364 }
69365
69366-static int sig_handler_ignored(void __user *handler, int sig)
69367+static int sig_handler_ignored(__sighandler_t handler, int sig)
69368 {
69369 /* Is it explicitly or implicitly ignored? */
69370 return handler == SIG_IGN ||
69371@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
69372 static int sig_task_ignored(struct task_struct *t, int sig,
69373 int from_ancestor_ns)
69374 {
69375- void __user *handler;
69376+ __sighandler_t handler;
69377
69378 handler = sig_handler(t, sig);
69379
69380@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
69381 atomic_inc(&user->sigpending);
69382 rcu_read_unlock();
69383
69384+ if (!override_rlimit)
69385+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
69386+
69387 if (override_rlimit ||
69388 atomic_read(&user->sigpending) <=
69389 task_rlimit(t, RLIMIT_SIGPENDING)) {
69390@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
69391
69392 int unhandled_signal(struct task_struct *tsk, int sig)
69393 {
69394- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69395+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69396 if (is_global_init(tsk))
69397 return 1;
69398 if (handler != SIG_IGN && handler != SIG_DFL)
69399@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
69400 }
69401 }
69402
69403+ /* allow glibc communication via tgkill to other threads in our
69404+ thread group */
69405+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69406+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69407+ && gr_handle_signal(t, sig))
69408+ return -EPERM;
69409+
69410 return security_task_kill(t, info, sig, 0);
69411 }
69412
69413@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69414 return send_signal(sig, info, p, 1);
69415 }
69416
69417-static int
69418+int
69419 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69420 {
69421 return send_signal(sig, info, t, 0);
69422@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69423 unsigned long int flags;
69424 int ret, blocked, ignored;
69425 struct k_sigaction *action;
69426+ int is_unhandled = 0;
69427
69428 spin_lock_irqsave(&t->sighand->siglock, flags);
69429 action = &t->sighand->action[sig-1];
69430@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69431 }
69432 if (action->sa.sa_handler == SIG_DFL)
69433 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69434+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69435+ is_unhandled = 1;
69436 ret = specific_send_sig_info(sig, info, t);
69437 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69438
69439+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
69440+ normal operation */
69441+ if (is_unhandled) {
69442+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69443+ gr_handle_crash(t, sig);
69444+ }
69445+
69446 return ret;
69447 }
69448
69449@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69450 ret = check_kill_permission(sig, info, p);
69451 rcu_read_unlock();
69452
69453- if (!ret && sig)
69454+ if (!ret && sig) {
69455 ret = do_send_sig_info(sig, info, p, true);
69456+ if (!ret)
69457+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
69458+ }
69459
69460 return ret;
69461 }
69462@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
69463 {
69464 siginfo_t info;
69465
69466+ pax_track_stack();
69467+
69468 memset(&info, 0, sizeof info);
69469 info.si_signo = signr;
69470 info.si_code = exit_code;
69471@@ -2746,7 +2771,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
69472 int error = -ESRCH;
69473
69474 rcu_read_lock();
69475- p = find_task_by_vpid(pid);
69476+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69477+ /* allow glibc communication via tgkill to other threads in our
69478+ thread group */
69479+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69480+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
69481+ p = find_task_by_vpid_unrestricted(pid);
69482+ else
69483+#endif
69484+ p = find_task_by_vpid(pid);
69485 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69486 error = check_kill_permission(sig, info, p);
69487 /*
69488diff --git a/kernel/smp.c b/kernel/smp.c
69489index fb67dfa..f819e2e 100644
69490--- a/kernel/smp.c
69491+++ b/kernel/smp.c
69492@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
69493 }
69494 EXPORT_SYMBOL(smp_call_function);
69495
69496-void ipi_call_lock(void)
69497+void ipi_call_lock(void) __acquires(call_function.lock)
69498 {
69499 raw_spin_lock(&call_function.lock);
69500 }
69501
69502-void ipi_call_unlock(void)
69503+void ipi_call_unlock(void) __releases(call_function.lock)
69504 {
69505 raw_spin_unlock(&call_function.lock);
69506 }
69507
69508-void ipi_call_lock_irq(void)
69509+void ipi_call_lock_irq(void) __acquires(call_function.lock)
69510 {
69511 raw_spin_lock_irq(&call_function.lock);
69512 }
69513
69514-void ipi_call_unlock_irq(void)
69515+void ipi_call_unlock_irq(void) __releases(call_function.lock)
69516 {
69517 raw_spin_unlock_irq(&call_function.lock);
69518 }
69519diff --git a/kernel/softirq.c b/kernel/softirq.c
69520index fca82c3..1db9690 100644
69521--- a/kernel/softirq.c
69522+++ b/kernel/softirq.c
69523@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
69524
69525 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
69526
69527-char *softirq_to_name[NR_SOFTIRQS] = {
69528+const char * const softirq_to_name[NR_SOFTIRQS] = {
69529 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
69530 "TASKLET", "SCHED", "HRTIMER", "RCU"
69531 };
69532@@ -235,7 +235,7 @@ restart:
69533 kstat_incr_softirqs_this_cpu(vec_nr);
69534
69535 trace_softirq_entry(vec_nr);
69536- h->action(h);
69537+ h->action();
69538 trace_softirq_exit(vec_nr);
69539 if (unlikely(prev_count != preempt_count())) {
69540 printk(KERN_ERR "huh, entered softirq %u %s %p"
69541@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
69542 local_irq_restore(flags);
69543 }
69544
69545-void open_softirq(int nr, void (*action)(struct softirq_action *))
69546+void open_softirq(int nr, void (*action)(void))
69547 {
69548- softirq_vec[nr].action = action;
69549+ pax_open_kernel();
69550+ *(void **)&softirq_vec[nr].action = action;
69551+ pax_close_kernel();
69552 }
69553
69554 /*
69555@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
69556
69557 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
69558
69559-static void tasklet_action(struct softirq_action *a)
69560+static void tasklet_action(void)
69561 {
69562 struct tasklet_struct *list;
69563
69564@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
69565 }
69566 }
69567
69568-static void tasklet_hi_action(struct softirq_action *a)
69569+static void tasklet_hi_action(void)
69570 {
69571 struct tasklet_struct *list;
69572
69573diff --git a/kernel/sys.c b/kernel/sys.c
69574index 1dbbe69..6d0c5d8 100644
69575--- a/kernel/sys.c
69576+++ b/kernel/sys.c
69577@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
69578 error = -EACCES;
69579 goto out;
69580 }
69581+
69582+ if (gr_handle_chroot_setpriority(p, niceval)) {
69583+ error = -EACCES;
69584+ goto out;
69585+ }
69586+
69587 no_nice = security_task_setnice(p, niceval);
69588 if (no_nice) {
69589 error = no_nice;
69590@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
69591 goto error;
69592 }
69593
69594+ if (gr_check_group_change(new->gid, new->egid, -1))
69595+ goto error;
69596+
69597 if (rgid != (gid_t) -1 ||
69598 (egid != (gid_t) -1 && egid != old->gid))
69599 new->sgid = new->egid;
69600@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
69601 old = current_cred();
69602
69603 retval = -EPERM;
69604+
69605+ if (gr_check_group_change(gid, gid, gid))
69606+ goto error;
69607+
69608 if (nsown_capable(CAP_SETGID))
69609 new->gid = new->egid = new->sgid = new->fsgid = gid;
69610 else if (gid == old->gid || gid == old->sgid)
69611@@ -617,7 +630,7 @@ error:
69612 /*
69613 * change the user struct in a credentials set to match the new UID
69614 */
69615-static int set_user(struct cred *new)
69616+int set_user(struct cred *new)
69617 {
69618 struct user_struct *new_user;
69619
69620@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
69621 goto error;
69622 }
69623
69624+ if (gr_check_user_change(new->uid, new->euid, -1))
69625+ goto error;
69626+
69627 if (new->uid != old->uid) {
69628 retval = set_user(new);
69629 if (retval < 0)
69630@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
69631 old = current_cred();
69632
69633 retval = -EPERM;
69634+
69635+ if (gr_check_crash_uid(uid))
69636+ goto error;
69637+ if (gr_check_user_change(uid, uid, uid))
69638+ goto error;
69639+
69640 if (nsown_capable(CAP_SETUID)) {
69641 new->suid = new->uid = uid;
69642 if (uid != old->uid) {
69643@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
69644 goto error;
69645 }
69646
69647+ if (gr_check_user_change(ruid, euid, -1))
69648+ goto error;
69649+
69650 if (ruid != (uid_t) -1) {
69651 new->uid = ruid;
69652 if (ruid != old->uid) {
69653@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69654 goto error;
69655 }
69656
69657+ if (gr_check_group_change(rgid, egid, -1))
69658+ goto error;
69659+
69660 if (rgid != (gid_t) -1)
69661 new->gid = rgid;
69662 if (egid != (gid_t) -1)
69663@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69664 old = current_cred();
69665 old_fsuid = old->fsuid;
69666
69667+ if (gr_check_user_change(-1, -1, uid))
69668+ goto error;
69669+
69670 if (uid == old->uid || uid == old->euid ||
69671 uid == old->suid || uid == old->fsuid ||
69672 nsown_capable(CAP_SETUID)) {
69673@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69674 }
69675 }
69676
69677+error:
69678 abort_creds(new);
69679 return old_fsuid;
69680
69681@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69682 if (gid == old->gid || gid == old->egid ||
69683 gid == old->sgid || gid == old->fsgid ||
69684 nsown_capable(CAP_SETGID)) {
69685+ if (gr_check_group_change(-1, -1, gid))
69686+ goto error;
69687+
69688 if (gid != old_fsgid) {
69689 new->fsgid = gid;
69690 goto change_okay;
69691 }
69692 }
69693
69694+error:
69695 abort_creds(new);
69696 return old_fsgid;
69697
69698@@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
69699 }
69700 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69701 snprintf(buf, len, "2.6.%u%s", v, rest);
69702- ret = copy_to_user(release, buf, len);
69703+ if (len > sizeof(buf))
69704+ ret = -EFAULT;
69705+ else
69706+ ret = copy_to_user(release, buf, len);
69707 }
69708 return ret;
69709 }
69710@@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69711 return -EFAULT;
69712
69713 down_read(&uts_sem);
69714- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69715+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69716 __OLD_UTS_LEN);
69717 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69718- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69719+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69720 __OLD_UTS_LEN);
69721 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69722- error |= __copy_to_user(&name->release, &utsname()->release,
69723+ error |= __copy_to_user(name->release, &utsname()->release,
69724 __OLD_UTS_LEN);
69725 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69726- error |= __copy_to_user(&name->version, &utsname()->version,
69727+ error |= __copy_to_user(name->version, &utsname()->version,
69728 __OLD_UTS_LEN);
69729 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69730- error |= __copy_to_user(&name->machine, &utsname()->machine,
69731+ error |= __copy_to_user(name->machine, &utsname()->machine,
69732 __OLD_UTS_LEN);
69733 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69734 up_read(&uts_sem);
69735@@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69736 error = get_dumpable(me->mm);
69737 break;
69738 case PR_SET_DUMPABLE:
69739- if (arg2 < 0 || arg2 > 1) {
69740+ if (arg2 > 1) {
69741 error = -EINVAL;
69742 break;
69743 }
69744diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69745index 11d65b5..6957b37 100644
69746--- a/kernel/sysctl.c
69747+++ b/kernel/sysctl.c
69748@@ -85,6 +85,13 @@
69749
69750
69751 #if defined(CONFIG_SYSCTL)
69752+#include <linux/grsecurity.h>
69753+#include <linux/grinternal.h>
69754+
69755+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69756+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69757+ const int op);
69758+extern int gr_handle_chroot_sysctl(const int op);
69759
69760 /* External variables not in a header file. */
69761 extern int sysctl_overcommit_memory;
69762@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69763 }
69764
69765 #endif
69766+extern struct ctl_table grsecurity_table[];
69767
69768 static struct ctl_table root_table[];
69769 static struct ctl_table_root sysctl_table_root;
69770@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
69771 int sysctl_legacy_va_layout;
69772 #endif
69773
69774+#ifdef CONFIG_PAX_SOFTMODE
69775+static ctl_table pax_table[] = {
69776+ {
69777+ .procname = "softmode",
69778+ .data = &pax_softmode,
69779+ .maxlen = sizeof(unsigned int),
69780+ .mode = 0600,
69781+ .proc_handler = &proc_dointvec,
69782+ },
69783+
69784+ { }
69785+};
69786+#endif
69787+
69788 /* The default sysctl tables: */
69789
69790 static struct ctl_table root_table[] = {
69791@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
69792 #endif
69793
69794 static struct ctl_table kern_table[] = {
69795+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69796+ {
69797+ .procname = "grsecurity",
69798+ .mode = 0500,
69799+ .child = grsecurity_table,
69800+ },
69801+#endif
69802+
69803+#ifdef CONFIG_PAX_SOFTMODE
69804+ {
69805+ .procname = "pax",
69806+ .mode = 0500,
69807+ .child = pax_table,
69808+ },
69809+#endif
69810+
69811 {
69812 .procname = "sched_child_runs_first",
69813 .data = &sysctl_sched_child_runs_first,
69814@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
69815 .data = &modprobe_path,
69816 .maxlen = KMOD_PATH_LEN,
69817 .mode = 0644,
69818- .proc_handler = proc_dostring,
69819+ .proc_handler = proc_dostring_modpriv,
69820 },
69821 {
69822 .procname = "modules_disabled",
69823@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
69824 .extra1 = &zero,
69825 .extra2 = &one,
69826 },
69827+#endif
69828 {
69829 .procname = "kptr_restrict",
69830 .data = &kptr_restrict,
69831 .maxlen = sizeof(int),
69832 .mode = 0644,
69833 .proc_handler = proc_dmesg_restrict,
69834+#ifdef CONFIG_GRKERNSEC_HIDESYM
69835+ .extra1 = &two,
69836+#else
69837 .extra1 = &zero,
69838+#endif
69839 .extra2 = &two,
69840 },
69841-#endif
69842 {
69843 .procname = "ngroups_max",
69844 .data = &ngroups_max,
69845@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
69846 .proc_handler = proc_dointvec_minmax,
69847 .extra1 = &zero,
69848 },
69849+ {
69850+ .procname = "heap_stack_gap",
69851+ .data = &sysctl_heap_stack_gap,
69852+ .maxlen = sizeof(sysctl_heap_stack_gap),
69853+ .mode = 0644,
69854+ .proc_handler = proc_doulongvec_minmax,
69855+ },
69856 #else
69857 {
69858 .procname = "nr_trim_pages",
69859@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
69860 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69861 {
69862 int mode;
69863+ int error;
69864+
69865+ if (table->parent != NULL && table->parent->procname != NULL &&
69866+ table->procname != NULL &&
69867+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69868+ return -EACCES;
69869+ if (gr_handle_chroot_sysctl(op))
69870+ return -EACCES;
69871+ error = gr_handle_sysctl(table, op);
69872+ if (error)
69873+ return error;
69874
69875 if (root->permissions)
69876 mode = root->permissions(root, current->nsproxy, table);
69877@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write,
69878 buffer, lenp, ppos);
69879 }
69880
69881+int proc_dostring_modpriv(struct ctl_table *table, int write,
69882+ void __user *buffer, size_t *lenp, loff_t *ppos)
69883+{
69884+ if (write && !capable(CAP_SYS_MODULE))
69885+ return -EPERM;
69886+
69887+ return _proc_do_string(table->data, table->maxlen, write,
69888+ buffer, lenp, ppos);
69889+}
69890+
69891 static size_t proc_skip_spaces(char **buf)
69892 {
69893 size_t ret;
69894@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69895 len = strlen(tmp);
69896 if (len > *size)
69897 len = *size;
69898+ if (len > sizeof(tmp))
69899+ len = sizeof(tmp);
69900 if (copy_to_user(*buf, tmp, len))
69901 return -EFAULT;
69902 *size -= len;
69903@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69904 *i = val;
69905 } else {
69906 val = convdiv * (*i) / convmul;
69907- if (!first)
69908+ if (!first) {
69909 err = proc_put_char(&buffer, &left, '\t');
69910+ if (err)
69911+ break;
69912+ }
69913 err = proc_put_long(&buffer, &left, val, false);
69914 if (err)
69915 break;
69916@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write,
69917 return -ENOSYS;
69918 }
69919
69920+int proc_dostring_modpriv(struct ctl_table *table, int write,
69921+ void __user *buffer, size_t *lenp, loff_t *ppos)
69922+{
69923+ return -ENOSYS;
69924+}
69925+
69926 int proc_dointvec(struct ctl_table *table, int write,
69927 void __user *buffer, size_t *lenp, loff_t *ppos)
69928 {
69929@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69930 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69931 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69932 EXPORT_SYMBOL(proc_dostring);
69933+EXPORT_SYMBOL(proc_dostring_modpriv);
69934 EXPORT_SYMBOL(proc_doulongvec_minmax);
69935 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69936 EXPORT_SYMBOL(register_sysctl_table);
69937diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69938index 2ce1b30..82bf0a4 100644
69939--- a/kernel/sysctl_binary.c
69940+++ b/kernel/sysctl_binary.c
69941@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69942 int i;
69943
69944 set_fs(KERNEL_DS);
69945- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69946+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69947 set_fs(old_fs);
69948 if (result < 0)
69949 goto out_kfree;
69950@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69951 }
69952
69953 set_fs(KERNEL_DS);
69954- result = vfs_write(file, buffer, str - buffer, &pos);
69955+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69956 set_fs(old_fs);
69957 if (result < 0)
69958 goto out_kfree;
69959@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69960 int i;
69961
69962 set_fs(KERNEL_DS);
69963- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69964+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69965 set_fs(old_fs);
69966 if (result < 0)
69967 goto out_kfree;
69968@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69969 }
69970
69971 set_fs(KERNEL_DS);
69972- result = vfs_write(file, buffer, str - buffer, &pos);
69973+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69974 set_fs(old_fs);
69975 if (result < 0)
69976 goto out_kfree;
69977@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69978 int i;
69979
69980 set_fs(KERNEL_DS);
69981- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69982+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69983 set_fs(old_fs);
69984 if (result < 0)
69985 goto out;
69986@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69987 __le16 dnaddr;
69988
69989 set_fs(KERNEL_DS);
69990- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69991+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69992 set_fs(old_fs);
69993 if (result < 0)
69994 goto out;
69995@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69996 le16_to_cpu(dnaddr) & 0x3ff);
69997
69998 set_fs(KERNEL_DS);
69999- result = vfs_write(file, buf, len, &pos);
70000+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70001 set_fs(old_fs);
70002 if (result < 0)
70003 goto out;
70004diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
70005index 362da65..ab8ef8c 100644
70006--- a/kernel/sysctl_check.c
70007+++ b/kernel/sysctl_check.c
70008@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
70009 set_fail(&fail, table, "Directory with extra2");
70010 } else {
70011 if ((table->proc_handler == proc_dostring) ||
70012+ (table->proc_handler == proc_dostring_modpriv) ||
70013 (table->proc_handler == proc_dointvec) ||
70014 (table->proc_handler == proc_dointvec_minmax) ||
70015 (table->proc_handler == proc_dointvec_jiffies) ||
70016diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70017index e660464..c8b9e67 100644
70018--- a/kernel/taskstats.c
70019+++ b/kernel/taskstats.c
70020@@ -27,9 +27,12 @@
70021 #include <linux/cgroup.h>
70022 #include <linux/fs.h>
70023 #include <linux/file.h>
70024+#include <linux/grsecurity.h>
70025 #include <net/genetlink.h>
70026 #include <linux/atomic.h>
70027
70028+extern int gr_is_taskstats_denied(int pid);
70029+
70030 /*
70031 * Maximum length of a cpumask that can be specified in
70032 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70033@@ -556,6 +559,9 @@ err:
70034
70035 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70036 {
70037+ if (gr_is_taskstats_denied(current->pid))
70038+ return -EACCES;
70039+
70040 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70041 return cmd_attr_register_cpumask(info);
70042 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
70043diff --git a/kernel/time.c b/kernel/time.c
70044index d776062..fa8d186 100644
70045--- a/kernel/time.c
70046+++ b/kernel/time.c
70047@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70048 return error;
70049
70050 if (tz) {
70051+ /* we log in do_settimeofday called below, so don't log twice
70052+ */
70053+ if (!tv)
70054+ gr_log_timechange();
70055+
70056 /* SMP safe, global irq locking makes it work. */
70057 sys_tz = *tz;
70058 update_vsyscall_tz();
70059diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
70060index 8b70c76..923e9f5 100644
70061--- a/kernel/time/alarmtimer.c
70062+++ b/kernel/time/alarmtimer.c
70063@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
70064 {
70065 int error = 0;
70066 int i;
70067- struct k_clock alarm_clock = {
70068+ static struct k_clock alarm_clock = {
70069 .clock_getres = alarm_clock_getres,
70070 .clock_get = alarm_clock_get,
70071 .timer_create = alarm_timer_create,
70072diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
70073index 7a90d02..6d8585a 100644
70074--- a/kernel/time/tick-broadcast.c
70075+++ b/kernel/time/tick-broadcast.c
70076@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
70077 * then clear the broadcast bit.
70078 */
70079 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70080- int cpu = smp_processor_id();
70081+ cpu = smp_processor_id();
70082
70083 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70084 tick_broadcast_clear_oneshot(cpu);
70085diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
70086index 6f9798b..f8c4087 100644
70087--- a/kernel/time/timekeeping.c
70088+++ b/kernel/time/timekeeping.c
70089@@ -14,6 +14,7 @@
70090 #include <linux/init.h>
70091 #include <linux/mm.h>
70092 #include <linux/sched.h>
70093+#include <linux/grsecurity.h>
70094 #include <linux/syscore_ops.h>
70095 #include <linux/clocksource.h>
70096 #include <linux/jiffies.h>
70097@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
70098 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70099 return -EINVAL;
70100
70101+ gr_log_timechange();
70102+
70103 write_seqlock_irqsave(&xtime_lock, flags);
70104
70105 timekeeping_forward_now();
70106diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70107index 3258455..f35227d 100644
70108--- a/kernel/time/timer_list.c
70109+++ b/kernel/time/timer_list.c
70110@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
70111
70112 static void print_name_offset(struct seq_file *m, void *sym)
70113 {
70114+#ifdef CONFIG_GRKERNSEC_HIDESYM
70115+ SEQ_printf(m, "<%p>", NULL);
70116+#else
70117 char symname[KSYM_NAME_LEN];
70118
70119 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70120 SEQ_printf(m, "<%pK>", sym);
70121 else
70122 SEQ_printf(m, "%s", symname);
70123+#endif
70124 }
70125
70126 static void
70127@@ -112,7 +116,11 @@ next_one:
70128 static void
70129 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70130 {
70131+#ifdef CONFIG_GRKERNSEC_HIDESYM
70132+ SEQ_printf(m, " .base: %p\n", NULL);
70133+#else
70134 SEQ_printf(m, " .base: %pK\n", base);
70135+#endif
70136 SEQ_printf(m, " .index: %d\n",
70137 base->index);
70138 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70139@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
70140 {
70141 struct proc_dir_entry *pe;
70142
70143+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70144+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70145+#else
70146 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70147+#endif
70148 if (!pe)
70149 return -ENOMEM;
70150 return 0;
70151diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
70152index a5d0a3a..60c7948 100644
70153--- a/kernel/time/timer_stats.c
70154+++ b/kernel/time/timer_stats.c
70155@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70156 static unsigned long nr_entries;
70157 static struct entry entries[MAX_ENTRIES];
70158
70159-static atomic_t overflow_count;
70160+static atomic_unchecked_t overflow_count;
70161
70162 /*
70163 * The entries are in a hash-table, for fast lookup:
70164@@ -140,7 +140,7 @@ static void reset_entries(void)
70165 nr_entries = 0;
70166 memset(entries, 0, sizeof(entries));
70167 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70168- atomic_set(&overflow_count, 0);
70169+ atomic_set_unchecked(&overflow_count, 0);
70170 }
70171
70172 static struct entry *alloc_entry(void)
70173@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70174 if (likely(entry))
70175 entry->count++;
70176 else
70177- atomic_inc(&overflow_count);
70178+ atomic_inc_unchecked(&overflow_count);
70179
70180 out_unlock:
70181 raw_spin_unlock_irqrestore(lock, flags);
70182@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70183
70184 static void print_name_offset(struct seq_file *m, unsigned long addr)
70185 {
70186+#ifdef CONFIG_GRKERNSEC_HIDESYM
70187+ seq_printf(m, "<%p>", NULL);
70188+#else
70189 char symname[KSYM_NAME_LEN];
70190
70191 if (lookup_symbol_name(addr, symname) < 0)
70192 seq_printf(m, "<%p>", (void *)addr);
70193 else
70194 seq_printf(m, "%s", symname);
70195+#endif
70196 }
70197
70198 static int tstats_show(struct seq_file *m, void *v)
70199@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
70200
70201 seq_puts(m, "Timer Stats Version: v0.2\n");
70202 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70203- if (atomic_read(&overflow_count))
70204+ if (atomic_read_unchecked(&overflow_count))
70205 seq_printf(m, "Overflow: %d entries\n",
70206- atomic_read(&overflow_count));
70207+ atomic_read_unchecked(&overflow_count));
70208
70209 for (i = 0; i < nr_entries; i++) {
70210 entry = entries + i;
70211@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70212 {
70213 struct proc_dir_entry *pe;
70214
70215+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70216+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70217+#else
70218 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70219+#endif
70220 if (!pe)
70221 return -ENOMEM;
70222 return 0;
70223diff --git a/kernel/timer.c b/kernel/timer.c
70224index 8cff361..0fb5cd8 100644
70225--- a/kernel/timer.c
70226+++ b/kernel/timer.c
70227@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
70228 /*
70229 * This function runs timers and the timer-tq in bottom half context.
70230 */
70231-static void run_timer_softirq(struct softirq_action *h)
70232+static void run_timer_softirq(void)
70233 {
70234 struct tvec_base *base = __this_cpu_read(tvec_bases);
70235
70236diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
70237index 7c910a5..8b72104 100644
70238--- a/kernel/trace/blktrace.c
70239+++ b/kernel/trace/blktrace.c
70240@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
70241 struct blk_trace *bt = filp->private_data;
70242 char buf[16];
70243
70244- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70245+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70246
70247 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70248 }
70249@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
70250 return 1;
70251
70252 bt = buf->chan->private_data;
70253- atomic_inc(&bt->dropped);
70254+ atomic_inc_unchecked(&bt->dropped);
70255 return 0;
70256 }
70257
70258@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
70259
70260 bt->dir = dir;
70261 bt->dev = dev;
70262- atomic_set(&bt->dropped, 0);
70263+ atomic_set_unchecked(&bt->dropped, 0);
70264
70265 ret = -EIO;
70266 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70267diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
70268index 48d3762..3b61fce 100644
70269--- a/kernel/trace/ftrace.c
70270+++ b/kernel/trace/ftrace.c
70271@@ -1584,12 +1584,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
70272 if (unlikely(ftrace_disabled))
70273 return 0;
70274
70275+ ret = ftrace_arch_code_modify_prepare();
70276+ FTRACE_WARN_ON(ret);
70277+ if (ret)
70278+ return 0;
70279+
70280 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70281+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70282 if (ret) {
70283 ftrace_bug(ret, ip);
70284- return 0;
70285 }
70286- return 1;
70287+ return ret ? 0 : 1;
70288 }
70289
70290 /*
70291@@ -2606,7 +2611,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
70292
70293 int
70294 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70295- void *data)
70296+ void *data)
70297 {
70298 struct ftrace_func_probe *entry;
70299 struct ftrace_page *pg;
70300diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
70301index 17a2d44..85907e2 100644
70302--- a/kernel/trace/trace.c
70303+++ b/kernel/trace/trace.c
70304@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
70305 size_t rem;
70306 unsigned int i;
70307
70308+ pax_track_stack();
70309+
70310 if (splice_grow_spd(pipe, &spd))
70311 return -ENOMEM;
70312
70313@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
70314 int entries, size, i;
70315 size_t ret;
70316
70317+ pax_track_stack();
70318+
70319 if (splice_grow_spd(pipe, &spd))
70320 return -ENOMEM;
70321
70322@@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = {
70323 };
70324 #endif
70325
70326-static struct dentry *d_tracer;
70327-
70328 struct dentry *tracing_init_dentry(void)
70329 {
70330+ static struct dentry *d_tracer;
70331 static int once;
70332
70333 if (d_tracer)
70334@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
70335 return d_tracer;
70336 }
70337
70338-static struct dentry *d_percpu;
70339-
70340 struct dentry *tracing_dentry_percpu(void)
70341 {
70342+ static struct dentry *d_percpu;
70343 static int once;
70344 struct dentry *d_tracer;
70345
70346diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
70347index c212a7f..7b02394 100644
70348--- a/kernel/trace/trace_events.c
70349+++ b/kernel/trace/trace_events.c
70350@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
70351 struct ftrace_module_file_ops {
70352 struct list_head list;
70353 struct module *mod;
70354- struct file_operations id;
70355- struct file_operations enable;
70356- struct file_operations format;
70357- struct file_operations filter;
70358 };
70359
70360 static struct ftrace_module_file_ops *
70361@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
70362
70363 file_ops->mod = mod;
70364
70365- file_ops->id = ftrace_event_id_fops;
70366- file_ops->id.owner = mod;
70367-
70368- file_ops->enable = ftrace_enable_fops;
70369- file_ops->enable.owner = mod;
70370-
70371- file_ops->filter = ftrace_event_filter_fops;
70372- file_ops->filter.owner = mod;
70373-
70374- file_ops->format = ftrace_event_format_fops;
70375- file_ops->format.owner = mod;
70376+ pax_open_kernel();
70377+ *(void **)&mod->trace_id.owner = mod;
70378+ *(void **)&mod->trace_enable.owner = mod;
70379+ *(void **)&mod->trace_filter.owner = mod;
70380+ *(void **)&mod->trace_format.owner = mod;
70381+ pax_close_kernel();
70382
70383 list_add(&file_ops->list, &ftrace_module_file_list);
70384
70385@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
70386
70387 for_each_event(call, start, end) {
70388 __trace_add_event_call(*call, mod,
70389- &file_ops->id, &file_ops->enable,
70390- &file_ops->filter, &file_ops->format);
70391+ &mod->trace_id, &mod->trace_enable,
70392+ &mod->trace_filter, &mod->trace_format);
70393 }
70394 }
70395
70396diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
70397index 00d527c..7c5b1a3 100644
70398--- a/kernel/trace/trace_kprobe.c
70399+++ b/kernel/trace/trace_kprobe.c
70400@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70401 long ret;
70402 int maxlen = get_rloc_len(*(u32 *)dest);
70403 u8 *dst = get_rloc_data(dest);
70404- u8 *src = addr;
70405+ const u8 __user *src = (const u8 __force_user *)addr;
70406 mm_segment_t old_fs = get_fs();
70407 if (!maxlen)
70408 return;
70409@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70410 pagefault_disable();
70411 do
70412 ret = __copy_from_user_inatomic(dst++, src++, 1);
70413- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
70414+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
70415 dst[-1] = '\0';
70416 pagefault_enable();
70417 set_fs(old_fs);
70418@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70419 ((u8 *)get_rloc_data(dest))[0] = '\0';
70420 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
70421 } else
70422- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
70423+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
70424 get_rloc_offs(*(u32 *)dest));
70425 }
70426 /* Return the length of string -- including null terminal byte */
70427@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
70428 set_fs(KERNEL_DS);
70429 pagefault_disable();
70430 do {
70431- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
70432+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
70433 len++;
70434 } while (c && ret == 0 && len < MAX_STRING_SIZE);
70435 pagefault_enable();
70436diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
70437index fd3c8aa..5f324a6 100644
70438--- a/kernel/trace/trace_mmiotrace.c
70439+++ b/kernel/trace/trace_mmiotrace.c
70440@@ -24,7 +24,7 @@ struct header_iter {
70441 static struct trace_array *mmio_trace_array;
70442 static bool overrun_detected;
70443 static unsigned long prev_overruns;
70444-static atomic_t dropped_count;
70445+static atomic_unchecked_t dropped_count;
70446
70447 static void mmio_reset_data(struct trace_array *tr)
70448 {
70449@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
70450
70451 static unsigned long count_overruns(struct trace_iterator *iter)
70452 {
70453- unsigned long cnt = atomic_xchg(&dropped_count, 0);
70454+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70455 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70456
70457 if (over > prev_overruns)
70458@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
70459 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70460 sizeof(*entry), 0, pc);
70461 if (!event) {
70462- atomic_inc(&dropped_count);
70463+ atomic_inc_unchecked(&dropped_count);
70464 return;
70465 }
70466 entry = ring_buffer_event_data(event);
70467@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
70468 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70469 sizeof(*entry), 0, pc);
70470 if (!event) {
70471- atomic_inc(&dropped_count);
70472+ atomic_inc_unchecked(&dropped_count);
70473 return;
70474 }
70475 entry = ring_buffer_event_data(event);
70476diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
70477index 5199930..26c73a0 100644
70478--- a/kernel/trace/trace_output.c
70479+++ b/kernel/trace/trace_output.c
70480@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
70481
70482 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70483 if (!IS_ERR(p)) {
70484- p = mangle_path(s->buffer + s->len, p, "\n");
70485+ p = mangle_path(s->buffer + s->len, p, "\n\\");
70486 if (p) {
70487 s->len = p - s->buffer;
70488 return 1;
70489diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
70490index 77575b3..6e623d1 100644
70491--- a/kernel/trace/trace_stack.c
70492+++ b/kernel/trace/trace_stack.c
70493@@ -50,7 +50,7 @@ static inline void check_stack(void)
70494 return;
70495
70496 /* we do not handle interrupt stacks yet */
70497- if (!object_is_on_stack(&this_size))
70498+ if (!object_starts_on_stack(&this_size))
70499 return;
70500
70501 local_irq_save(flags);
70502diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
70503index 209b379..7f76423 100644
70504--- a/kernel/trace/trace_workqueue.c
70505+++ b/kernel/trace/trace_workqueue.c
70506@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
70507 int cpu;
70508 pid_t pid;
70509 /* Can be inserted from interrupt or user context, need to be atomic */
70510- atomic_t inserted;
70511+ atomic_unchecked_t inserted;
70512 /*
70513 * Don't need to be atomic, works are serialized in a single workqueue thread
70514 * on a single CPU.
70515@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
70516 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
70517 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
70518 if (node->pid == wq_thread->pid) {
70519- atomic_inc(&node->inserted);
70520+ atomic_inc_unchecked(&node->inserted);
70521 goto found;
70522 }
70523 }
70524@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
70525 tsk = get_pid_task(pid, PIDTYPE_PID);
70526 if (tsk) {
70527 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
70528- atomic_read(&cws->inserted), cws->executed,
70529+ atomic_read_unchecked(&cws->inserted), cws->executed,
70530 tsk->comm);
70531 put_task_struct(tsk);
70532 }
70533diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
70534index c0cb9c4..f33aa89 100644
70535--- a/lib/Kconfig.debug
70536+++ b/lib/Kconfig.debug
70537@@ -1091,6 +1091,7 @@ config LATENCYTOP
70538 depends on DEBUG_KERNEL
70539 depends on STACKTRACE_SUPPORT
70540 depends on PROC_FS
70541+ depends on !GRKERNSEC_HIDESYM
70542 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
70543 select KALLSYMS
70544 select KALLSYMS_ALL
70545diff --git a/lib/bitmap.c b/lib/bitmap.c
70546index 2f4412e..a557e27 100644
70547--- a/lib/bitmap.c
70548+++ b/lib/bitmap.c
70549@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
70550 {
70551 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70552 u32 chunk;
70553- const char __user *ubuf = buf;
70554+ const char __user *ubuf = (const char __force_user *)buf;
70555
70556 bitmap_zero(maskp, nmaskbits);
70557
70558@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
70559 {
70560 if (!access_ok(VERIFY_READ, ubuf, ulen))
70561 return -EFAULT;
70562- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
70563+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
70564 }
70565 EXPORT_SYMBOL(bitmap_parse_user);
70566
70567@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
70568 {
70569 unsigned a, b;
70570 int c, old_c, totaldigits;
70571- const char __user *ubuf = buf;
70572+ const char __user *ubuf = (const char __force_user *)buf;
70573 int exp_digit, in_range;
70574
70575 totaldigits = c = 0;
70576@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf,
70577 {
70578 if (!access_ok(VERIFY_READ, ubuf, ulen))
70579 return -EFAULT;
70580- return __bitmap_parselist((const char *)ubuf,
70581+ return __bitmap_parselist((const char __force_kernel *)ubuf,
70582 ulen, 1, maskp, nmaskbits);
70583 }
70584 EXPORT_SYMBOL(bitmap_parselist_user);
70585diff --git a/lib/bug.c b/lib/bug.c
70586index 1955209..cbbb2ad 100644
70587--- a/lib/bug.c
70588+++ b/lib/bug.c
70589@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
70590 return BUG_TRAP_TYPE_NONE;
70591
70592 bug = find_bug(bugaddr);
70593+ if (!bug)
70594+ return BUG_TRAP_TYPE_NONE;
70595
70596 file = NULL;
70597 line = 0;
70598diff --git a/lib/debugobjects.c b/lib/debugobjects.c
70599index a78b7c6..2c73084 100644
70600--- a/lib/debugobjects.c
70601+++ b/lib/debugobjects.c
70602@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
70603 if (limit > 4)
70604 return;
70605
70606- is_on_stack = object_is_on_stack(addr);
70607+ is_on_stack = object_starts_on_stack(addr);
70608 if (is_on_stack == onstack)
70609 return;
70610
70611diff --git a/lib/devres.c b/lib/devres.c
70612index 7c0e953..f642b5c 100644
70613--- a/lib/devres.c
70614+++ b/lib/devres.c
70615@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
70616 void devm_iounmap(struct device *dev, void __iomem *addr)
70617 {
70618 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70619- (void *)addr));
70620+ (void __force *)addr));
70621 iounmap(addr);
70622 }
70623 EXPORT_SYMBOL(devm_iounmap);
70624@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
70625 {
70626 ioport_unmap(addr);
70627 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70628- devm_ioport_map_match, (void *)addr));
70629+ devm_ioport_map_match, (void __force *)addr));
70630 }
70631 EXPORT_SYMBOL(devm_ioport_unmap);
70632
70633diff --git a/lib/dma-debug.c b/lib/dma-debug.c
70634index db07bfd..719b5ab 100644
70635--- a/lib/dma-debug.c
70636+++ b/lib/dma-debug.c
70637@@ -870,7 +870,7 @@ out:
70638
70639 static void check_for_stack(struct device *dev, void *addr)
70640 {
70641- if (object_is_on_stack(addr))
70642+ if (object_starts_on_stack(addr))
70643 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70644 "stack [addr=%p]\n", addr);
70645 }
70646diff --git a/lib/extable.c b/lib/extable.c
70647index 4cac81e..63e9b8f 100644
70648--- a/lib/extable.c
70649+++ b/lib/extable.c
70650@@ -13,6 +13,7 @@
70651 #include <linux/init.h>
70652 #include <linux/sort.h>
70653 #include <asm/uaccess.h>
70654+#include <asm/pgtable.h>
70655
70656 #ifndef ARCH_HAS_SORT_EXTABLE
70657 /*
70658@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
70659 void sort_extable(struct exception_table_entry *start,
70660 struct exception_table_entry *finish)
70661 {
70662+ pax_open_kernel();
70663 sort(start, finish - start, sizeof(struct exception_table_entry),
70664 cmp_ex, NULL);
70665+ pax_close_kernel();
70666 }
70667
70668 #ifdef CONFIG_MODULES
70669diff --git a/lib/inflate.c b/lib/inflate.c
70670index 013a761..c28f3fc 100644
70671--- a/lib/inflate.c
70672+++ b/lib/inflate.c
70673@@ -269,7 +269,7 @@ static void free(void *where)
70674 malloc_ptr = free_mem_ptr;
70675 }
70676 #else
70677-#define malloc(a) kmalloc(a, GFP_KERNEL)
70678+#define malloc(a) kmalloc((a), GFP_KERNEL)
70679 #define free(a) kfree(a)
70680 #endif
70681
70682diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70683index bd2bea9..6b3c95e 100644
70684--- a/lib/is_single_threaded.c
70685+++ b/lib/is_single_threaded.c
70686@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70687 struct task_struct *p, *t;
70688 bool ret;
70689
70690+ if (!mm)
70691+ return true;
70692+
70693 if (atomic_read(&task->signal->live) != 1)
70694 return false;
70695
70696diff --git a/lib/kref.c b/lib/kref.c
70697index 3efb882..8492f4c 100644
70698--- a/lib/kref.c
70699+++ b/lib/kref.c
70700@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
70701 */
70702 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
70703 {
70704- WARN_ON(release == NULL);
70705+ BUG_ON(release == NULL);
70706 WARN_ON(release == (void (*)(struct kref *))kfree);
70707
70708 if (atomic_dec_and_test(&kref->refcount)) {
70709diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70710index a2f9da5..3bcadb6 100644
70711--- a/lib/radix-tree.c
70712+++ b/lib/radix-tree.c
70713@@ -80,7 +80,7 @@ struct radix_tree_preload {
70714 int nr;
70715 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70716 };
70717-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70718+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70719
70720 static inline void *ptr_to_indirect(void *ptr)
70721 {
70722diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70723index d7222a9..2172edc 100644
70724--- a/lib/vsprintf.c
70725+++ b/lib/vsprintf.c
70726@@ -16,6 +16,9 @@
70727 * - scnprintf and vscnprintf
70728 */
70729
70730+#ifdef CONFIG_GRKERNSEC_HIDESYM
70731+#define __INCLUDED_BY_HIDESYM 1
70732+#endif
70733 #include <stdarg.h>
70734 #include <linux/module.h>
70735 #include <linux/types.h>
70736@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70737 char sym[KSYM_SYMBOL_LEN];
70738 if (ext == 'B')
70739 sprint_backtrace(sym, value);
70740- else if (ext != 'f' && ext != 's')
70741+ else if (ext != 'f' && ext != 's' && ext != 'a')
70742 sprint_symbol(sym, value);
70743 else
70744 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70745@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
70746 return string(buf, end, uuid, spec);
70747 }
70748
70749+#ifdef CONFIG_GRKERNSEC_HIDESYM
70750+int kptr_restrict __read_mostly = 2;
70751+#else
70752 int kptr_restrict __read_mostly;
70753+#endif
70754
70755 /*
70756 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70757@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
70758 * - 'S' For symbolic direct pointers with offset
70759 * - 's' For symbolic direct pointers without offset
70760 * - 'B' For backtraced symbolic direct pointers with offset
70761+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70762+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70763 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70764 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70765 * - 'M' For a 6-byte MAC address, it prints the address in the
70766@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70767 {
70768 if (!ptr && *fmt != 'K') {
70769 /*
70770- * Print (null) with the same width as a pointer so it makes
70771+ * Print (nil) with the same width as a pointer so it makes
70772 * tabular output look nice.
70773 */
70774 if (spec.field_width == -1)
70775 spec.field_width = 2 * sizeof(void *);
70776- return string(buf, end, "(null)", spec);
70777+ return string(buf, end, "(nil)", spec);
70778 }
70779
70780 switch (*fmt) {
70781@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70782 /* Fallthrough */
70783 case 'S':
70784 case 's':
70785+#ifdef CONFIG_GRKERNSEC_HIDESYM
70786+ break;
70787+#else
70788+ return symbol_string(buf, end, ptr, spec, *fmt);
70789+#endif
70790+ case 'A':
70791+ case 'a':
70792 case 'B':
70793 return symbol_string(buf, end, ptr, spec, *fmt);
70794 case 'R':
70795@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70796 typeof(type) value; \
70797 if (sizeof(type) == 8) { \
70798 args = PTR_ALIGN(args, sizeof(u32)); \
70799- *(u32 *)&value = *(u32 *)args; \
70800- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70801+ *(u32 *)&value = *(const u32 *)args; \
70802+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70803 } else { \
70804 args = PTR_ALIGN(args, sizeof(type)); \
70805- value = *(typeof(type) *)args; \
70806+ value = *(const typeof(type) *)args; \
70807 } \
70808 args += sizeof(type); \
70809 value; \
70810@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70811 case FORMAT_TYPE_STR: {
70812 const char *str_arg = args;
70813 args += strlen(str_arg) + 1;
70814- str = string(str, end, (char *)str_arg, spec);
70815+ str = string(str, end, str_arg, spec);
70816 break;
70817 }
70818
70819diff --git a/localversion-grsec b/localversion-grsec
70820new file mode 100644
70821index 0000000..7cd6065
70822--- /dev/null
70823+++ b/localversion-grsec
70824@@ -0,0 +1 @@
70825+-grsec
70826diff --git a/mm/Kconfig b/mm/Kconfig
70827index f2f1ca1..0645f06 100644
70828--- a/mm/Kconfig
70829+++ b/mm/Kconfig
70830@@ -238,10 +238,10 @@ config KSM
70831 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70832
70833 config DEFAULT_MMAP_MIN_ADDR
70834- int "Low address space to protect from user allocation"
70835+ int "Low address space to protect from user allocation"
70836 depends on MMU
70837- default 4096
70838- help
70839+ default 65536
70840+ help
70841 This is the portion of low virtual memory which should be protected
70842 from userspace allocation. Keeping a user from writing to low pages
70843 can help reduce the impact of kernel NULL pointer bugs.
70844diff --git a/mm/filemap.c b/mm/filemap.c
70845index 0eedbf8..b108990 100644
70846--- a/mm/filemap.c
70847+++ b/mm/filemap.c
70848@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70849 struct address_space *mapping = file->f_mapping;
70850
70851 if (!mapping->a_ops->readpage)
70852- return -ENOEXEC;
70853+ return -ENODEV;
70854 file_accessed(file);
70855 vma->vm_ops = &generic_file_vm_ops;
70856 vma->vm_flags |= VM_CAN_NONLINEAR;
70857@@ -2173,6 +2173,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70858 *pos = i_size_read(inode);
70859
70860 if (limit != RLIM_INFINITY) {
70861+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70862 if (*pos >= limit) {
70863 send_sig(SIGXFSZ, current, 0);
70864 return -EFBIG;
70865diff --git a/mm/fremap.c b/mm/fremap.c
70866index b8e0e2d..076e171 100644
70867--- a/mm/fremap.c
70868+++ b/mm/fremap.c
70869@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70870 retry:
70871 vma = find_vma(mm, start);
70872
70873+#ifdef CONFIG_PAX_SEGMEXEC
70874+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70875+ goto out;
70876+#endif
70877+
70878 /*
70879 * Make sure the vma is shared, that it supports prefaulting,
70880 * and that the remapped range is valid and fully within
70881diff --git a/mm/highmem.c b/mm/highmem.c
70882index 5ef672c..d7660f4 100644
70883--- a/mm/highmem.c
70884+++ b/mm/highmem.c
70885@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70886 * So no dangers, even with speculative execution.
70887 */
70888 page = pte_page(pkmap_page_table[i]);
70889+ pax_open_kernel();
70890 pte_clear(&init_mm, (unsigned long)page_address(page),
70891 &pkmap_page_table[i]);
70892-
70893+ pax_close_kernel();
70894 set_page_address(page, NULL);
70895 need_flush = 1;
70896 }
70897@@ -186,9 +187,11 @@ start:
70898 }
70899 }
70900 vaddr = PKMAP_ADDR(last_pkmap_nr);
70901+
70902+ pax_open_kernel();
70903 set_pte_at(&init_mm, vaddr,
70904 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70905-
70906+ pax_close_kernel();
70907 pkmap_count[last_pkmap_nr] = 1;
70908 set_page_address(page, (void *)vaddr);
70909
70910diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70911index d819d93..468e18f 100644
70912--- a/mm/huge_memory.c
70913+++ b/mm/huge_memory.c
70914@@ -702,7 +702,7 @@ out:
70915 * run pte_offset_map on the pmd, if an huge pmd could
70916 * materialize from under us from a different thread.
70917 */
70918- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70919+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70920 return VM_FAULT_OOM;
70921 /* if an huge pmd materialized from under us just retry later */
70922 if (unlikely(pmd_trans_huge(*pmd)))
70923@@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
70924
70925 for (i = 0; i < HPAGE_PMD_NR; i++) {
70926 copy_user_highpage(pages[i], page + i,
70927- haddr + PAGE_SHIFT*i, vma);
70928+ haddr + PAGE_SIZE*i, vma);
70929 __SetPageUptodate(pages[i]);
70930 cond_resched();
70931 }
70932diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70933index 2316840..b418671 100644
70934--- a/mm/hugetlb.c
70935+++ b/mm/hugetlb.c
70936@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70937 return 1;
70938 }
70939
70940+#ifdef CONFIG_PAX_SEGMEXEC
70941+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70942+{
70943+ struct mm_struct *mm = vma->vm_mm;
70944+ struct vm_area_struct *vma_m;
70945+ unsigned long address_m;
70946+ pte_t *ptep_m;
70947+
70948+ vma_m = pax_find_mirror_vma(vma);
70949+ if (!vma_m)
70950+ return;
70951+
70952+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70953+ address_m = address + SEGMEXEC_TASK_SIZE;
70954+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70955+ get_page(page_m);
70956+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
70957+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70958+}
70959+#endif
70960+
70961 /*
70962 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70963 */
70964@@ -2450,6 +2471,11 @@ retry_avoidcopy:
70965 make_huge_pte(vma, new_page, 1));
70966 page_remove_rmap(old_page);
70967 hugepage_add_new_anon_rmap(new_page, vma, address);
70968+
70969+#ifdef CONFIG_PAX_SEGMEXEC
70970+ pax_mirror_huge_pte(vma, address, new_page);
70971+#endif
70972+
70973 /* Make the old page be freed below */
70974 new_page = old_page;
70975 mmu_notifier_invalidate_range_end(mm,
70976@@ -2601,6 +2627,10 @@ retry:
70977 && (vma->vm_flags & VM_SHARED)));
70978 set_huge_pte_at(mm, address, ptep, new_pte);
70979
70980+#ifdef CONFIG_PAX_SEGMEXEC
70981+ pax_mirror_huge_pte(vma, address, page);
70982+#endif
70983+
70984 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70985 /* Optimization, do the COW without a second fault */
70986 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70987@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70988 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70989 struct hstate *h = hstate_vma(vma);
70990
70991+#ifdef CONFIG_PAX_SEGMEXEC
70992+ struct vm_area_struct *vma_m;
70993+#endif
70994+
70995 ptep = huge_pte_offset(mm, address);
70996 if (ptep) {
70997 entry = huge_ptep_get(ptep);
70998@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70999 VM_FAULT_SET_HINDEX(h - hstates);
71000 }
71001
71002+#ifdef CONFIG_PAX_SEGMEXEC
71003+ vma_m = pax_find_mirror_vma(vma);
71004+ if (vma_m) {
71005+ unsigned long address_m;
71006+
71007+ if (vma->vm_start > vma_m->vm_start) {
71008+ address_m = address;
71009+ address -= SEGMEXEC_TASK_SIZE;
71010+ vma = vma_m;
71011+ h = hstate_vma(vma);
71012+ } else
71013+ address_m = address + SEGMEXEC_TASK_SIZE;
71014+
71015+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71016+ return VM_FAULT_OOM;
71017+ address_m &= HPAGE_MASK;
71018+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71019+ }
71020+#endif
71021+
71022 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71023 if (!ptep)
71024 return VM_FAULT_OOM;
71025diff --git a/mm/internal.h b/mm/internal.h
71026index 2189af4..f2ca332 100644
71027--- a/mm/internal.h
71028+++ b/mm/internal.h
71029@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
71030 * in mm/page_alloc.c
71031 */
71032 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71033+extern void free_compound_page(struct page *page);
71034 extern void prep_compound_page(struct page *page, unsigned long order);
71035 #ifdef CONFIG_MEMORY_FAILURE
71036 extern bool is_free_buddy_page(struct page *page);
71037diff --git a/mm/kmemleak.c b/mm/kmemleak.c
71038index d6880f5..ed77913 100644
71039--- a/mm/kmemleak.c
71040+++ b/mm/kmemleak.c
71041@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
71042
71043 for (i = 0; i < object->trace_len; i++) {
71044 void *ptr = (void *)object->trace[i];
71045- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71046+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71047 }
71048 }
71049
71050diff --git a/mm/maccess.c b/mm/maccess.c
71051index 4cee182..e00511d 100644
71052--- a/mm/maccess.c
71053+++ b/mm/maccess.c
71054@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
71055 set_fs(KERNEL_DS);
71056 pagefault_disable();
71057 ret = __copy_from_user_inatomic(dst,
71058- (__force const void __user *)src, size);
71059+ (const void __force_user *)src, size);
71060 pagefault_enable();
71061 set_fs(old_fs);
71062
71063@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
71064
71065 set_fs(KERNEL_DS);
71066 pagefault_disable();
71067- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71068+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71069 pagefault_enable();
71070 set_fs(old_fs);
71071
71072diff --git a/mm/madvise.c b/mm/madvise.c
71073index 74bf193..feb6fd3 100644
71074--- a/mm/madvise.c
71075+++ b/mm/madvise.c
71076@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
71077 pgoff_t pgoff;
71078 unsigned long new_flags = vma->vm_flags;
71079
71080+#ifdef CONFIG_PAX_SEGMEXEC
71081+ struct vm_area_struct *vma_m;
71082+#endif
71083+
71084 switch (behavior) {
71085 case MADV_NORMAL:
71086 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71087@@ -110,6 +114,13 @@ success:
71088 /*
71089 * vm_flags is protected by the mmap_sem held in write mode.
71090 */
71091+
71092+#ifdef CONFIG_PAX_SEGMEXEC
71093+ vma_m = pax_find_mirror_vma(vma);
71094+ if (vma_m)
71095+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71096+#endif
71097+
71098 vma->vm_flags = new_flags;
71099
71100 out:
71101@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71102 struct vm_area_struct ** prev,
71103 unsigned long start, unsigned long end)
71104 {
71105+
71106+#ifdef CONFIG_PAX_SEGMEXEC
71107+ struct vm_area_struct *vma_m;
71108+#endif
71109+
71110 *prev = vma;
71111 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71112 return -EINVAL;
71113@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71114 zap_page_range(vma, start, end - start, &details);
71115 } else
71116 zap_page_range(vma, start, end - start, NULL);
71117+
71118+#ifdef CONFIG_PAX_SEGMEXEC
71119+ vma_m = pax_find_mirror_vma(vma);
71120+ if (vma_m) {
71121+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71122+ struct zap_details details = {
71123+ .nonlinear_vma = vma_m,
71124+ .last_index = ULONG_MAX,
71125+ };
71126+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71127+ } else
71128+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71129+ }
71130+#endif
71131+
71132 return 0;
71133 }
71134
71135@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
71136 if (end < start)
71137 goto out;
71138
71139+#ifdef CONFIG_PAX_SEGMEXEC
71140+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71141+ if (end > SEGMEXEC_TASK_SIZE)
71142+ goto out;
71143+ } else
71144+#endif
71145+
71146+ if (end > TASK_SIZE)
71147+ goto out;
71148+
71149 error = 0;
71150 if (end == start)
71151 goto out;
71152diff --git a/mm/memory-failure.c b/mm/memory-failure.c
71153index 2b43ba0..fc09657 100644
71154--- a/mm/memory-failure.c
71155+++ b/mm/memory-failure.c
71156@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
71157
71158 int sysctl_memory_failure_recovery __read_mostly = 1;
71159
71160-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71161+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71162
71163 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71164
71165@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
71166 si.si_signo = SIGBUS;
71167 si.si_errno = 0;
71168 si.si_code = BUS_MCEERR_AO;
71169- si.si_addr = (void *)addr;
71170+ si.si_addr = (void __user *)addr;
71171 #ifdef __ARCH_SI_TRAPNO
71172 si.si_trapno = trapno;
71173 #endif
71174@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71175 }
71176
71177 nr_pages = 1 << compound_trans_order(hpage);
71178- atomic_long_add(nr_pages, &mce_bad_pages);
71179+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71180
71181 /*
71182 * We need/can do nothing about count=0 pages.
71183@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71184 if (!PageHWPoison(hpage)
71185 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71186 || (p != hpage && TestSetPageHWPoison(hpage))) {
71187- atomic_long_sub(nr_pages, &mce_bad_pages);
71188+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71189 return 0;
71190 }
71191 set_page_hwpoison_huge_page(hpage);
71192@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71193 }
71194 if (hwpoison_filter(p)) {
71195 if (TestClearPageHWPoison(p))
71196- atomic_long_sub(nr_pages, &mce_bad_pages);
71197+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71198 unlock_page(hpage);
71199 put_page(hpage);
71200 return 0;
71201@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
71202 return 0;
71203 }
71204 if (TestClearPageHWPoison(p))
71205- atomic_long_sub(nr_pages, &mce_bad_pages);
71206+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71207 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71208 return 0;
71209 }
71210@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
71211 */
71212 if (TestClearPageHWPoison(page)) {
71213 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71214- atomic_long_sub(nr_pages, &mce_bad_pages);
71215+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71216 freeit = 1;
71217 if (PageHuge(page))
71218 clear_page_hwpoison_huge_page(page);
71219@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71220 }
71221 done:
71222 if (!PageHWPoison(hpage))
71223- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71224+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71225 set_page_hwpoison_huge_page(hpage);
71226 dequeue_hwpoisoned_huge_page(hpage);
71227 /* keep elevated page count for bad page */
71228@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags)
71229 return ret;
71230
71231 done:
71232- atomic_long_add(1, &mce_bad_pages);
71233+ atomic_long_add_unchecked(1, &mce_bad_pages);
71234 SetPageHWPoison(page);
71235 /* keep elevated page count for bad page */
71236 return ret;
71237diff --git a/mm/memory.c b/mm/memory.c
71238index b2b8731..6080174 100644
71239--- a/mm/memory.c
71240+++ b/mm/memory.c
71241@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71242 return;
71243
71244 pmd = pmd_offset(pud, start);
71245+
71246+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71247 pud_clear(pud);
71248 pmd_free_tlb(tlb, pmd, start);
71249+#endif
71250+
71251 }
71252
71253 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71254@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71255 if (end - 1 > ceiling - 1)
71256 return;
71257
71258+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71259 pud = pud_offset(pgd, start);
71260 pgd_clear(pgd);
71261 pud_free_tlb(tlb, pud, start);
71262+#endif
71263+
71264 }
71265
71266 /*
71267@@ -1566,12 +1573,6 @@ no_page_table:
71268 return page;
71269 }
71270
71271-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71272-{
71273- return stack_guard_page_start(vma, addr) ||
71274- stack_guard_page_end(vma, addr+PAGE_SIZE);
71275-}
71276-
71277 /**
71278 * __get_user_pages() - pin user pages in memory
71279 * @tsk: task_struct of target task
71280@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71281 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71282 i = 0;
71283
71284- do {
71285+ while (nr_pages) {
71286 struct vm_area_struct *vma;
71287
71288- vma = find_extend_vma(mm, start);
71289+ vma = find_vma(mm, start);
71290 if (!vma && in_gate_area(mm, start)) {
71291 unsigned long pg = start & PAGE_MASK;
71292 pgd_t *pgd;
71293@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71294 goto next_page;
71295 }
71296
71297- if (!vma ||
71298+ if (!vma || start < vma->vm_start ||
71299 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71300 !(vm_flags & vma->vm_flags))
71301 return i ? : -EFAULT;
71302@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71303 int ret;
71304 unsigned int fault_flags = 0;
71305
71306- /* For mlock, just skip the stack guard page. */
71307- if (foll_flags & FOLL_MLOCK) {
71308- if (stack_guard_page(vma, start))
71309- goto next_page;
71310- }
71311 if (foll_flags & FOLL_WRITE)
71312 fault_flags |= FAULT_FLAG_WRITE;
71313 if (nonblocking)
71314@@ -1800,7 +1796,7 @@ next_page:
71315 start += PAGE_SIZE;
71316 nr_pages--;
71317 } while (nr_pages && start < vma->vm_end);
71318- } while (nr_pages);
71319+ }
71320 return i;
71321 }
71322 EXPORT_SYMBOL(__get_user_pages);
71323@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
71324 page_add_file_rmap(page);
71325 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71326
71327+#ifdef CONFIG_PAX_SEGMEXEC
71328+ pax_mirror_file_pte(vma, addr, page, ptl);
71329+#endif
71330+
71331 retval = 0;
71332 pte_unmap_unlock(pte, ptl);
71333 return retval;
71334@@ -2041,10 +2041,22 @@ out:
71335 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71336 struct page *page)
71337 {
71338+
71339+#ifdef CONFIG_PAX_SEGMEXEC
71340+ struct vm_area_struct *vma_m;
71341+#endif
71342+
71343 if (addr < vma->vm_start || addr >= vma->vm_end)
71344 return -EFAULT;
71345 if (!page_count(page))
71346 return -EINVAL;
71347+
71348+#ifdef CONFIG_PAX_SEGMEXEC
71349+ vma_m = pax_find_mirror_vma(vma);
71350+ if (vma_m)
71351+ vma_m->vm_flags |= VM_INSERTPAGE;
71352+#endif
71353+
71354 vma->vm_flags |= VM_INSERTPAGE;
71355 return insert_page(vma, addr, page, vma->vm_page_prot);
71356 }
71357@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
71358 unsigned long pfn)
71359 {
71360 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71361+ BUG_ON(vma->vm_mirror);
71362
71363 if (addr < vma->vm_start || addr >= vma->vm_end)
71364 return -EFAULT;
71365@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
71366 copy_user_highpage(dst, src, va, vma);
71367 }
71368
71369+#ifdef CONFIG_PAX_SEGMEXEC
71370+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71371+{
71372+ struct mm_struct *mm = vma->vm_mm;
71373+ spinlock_t *ptl;
71374+ pte_t *pte, entry;
71375+
71376+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71377+ entry = *pte;
71378+ if (!pte_present(entry)) {
71379+ if (!pte_none(entry)) {
71380+ BUG_ON(pte_file(entry));
71381+ free_swap_and_cache(pte_to_swp_entry(entry));
71382+ pte_clear_not_present_full(mm, address, pte, 0);
71383+ }
71384+ } else {
71385+ struct page *page;
71386+
71387+ flush_cache_page(vma, address, pte_pfn(entry));
71388+ entry = ptep_clear_flush(vma, address, pte);
71389+ BUG_ON(pte_dirty(entry));
71390+ page = vm_normal_page(vma, address, entry);
71391+ if (page) {
71392+ update_hiwater_rss(mm);
71393+ if (PageAnon(page))
71394+ dec_mm_counter_fast(mm, MM_ANONPAGES);
71395+ else
71396+ dec_mm_counter_fast(mm, MM_FILEPAGES);
71397+ page_remove_rmap(page);
71398+ page_cache_release(page);
71399+ }
71400+ }
71401+ pte_unmap_unlock(pte, ptl);
71402+}
71403+
71404+/* PaX: if vma is mirrored, synchronize the mirror's PTE
71405+ *
71406+ * the ptl of the lower mapped page is held on entry and is not released on exit
71407+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71408+ */
71409+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71410+{
71411+ struct mm_struct *mm = vma->vm_mm;
71412+ unsigned long address_m;
71413+ spinlock_t *ptl_m;
71414+ struct vm_area_struct *vma_m;
71415+ pmd_t *pmd_m;
71416+ pte_t *pte_m, entry_m;
71417+
71418+ BUG_ON(!page_m || !PageAnon(page_m));
71419+
71420+ vma_m = pax_find_mirror_vma(vma);
71421+ if (!vma_m)
71422+ return;
71423+
71424+ BUG_ON(!PageLocked(page_m));
71425+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71426+ address_m = address + SEGMEXEC_TASK_SIZE;
71427+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71428+ pte_m = pte_offset_map(pmd_m, address_m);
71429+ ptl_m = pte_lockptr(mm, pmd_m);
71430+ if (ptl != ptl_m) {
71431+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71432+ if (!pte_none(*pte_m))
71433+ goto out;
71434+ }
71435+
71436+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71437+ page_cache_get(page_m);
71438+ page_add_anon_rmap(page_m, vma_m, address_m);
71439+ inc_mm_counter_fast(mm, MM_ANONPAGES);
71440+ set_pte_at(mm, address_m, pte_m, entry_m);
71441+ update_mmu_cache(vma_m, address_m, entry_m);
71442+out:
71443+ if (ptl != ptl_m)
71444+ spin_unlock(ptl_m);
71445+ pte_unmap(pte_m);
71446+ unlock_page(page_m);
71447+}
71448+
71449+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71450+{
71451+ struct mm_struct *mm = vma->vm_mm;
71452+ unsigned long address_m;
71453+ spinlock_t *ptl_m;
71454+ struct vm_area_struct *vma_m;
71455+ pmd_t *pmd_m;
71456+ pte_t *pte_m, entry_m;
71457+
71458+ BUG_ON(!page_m || PageAnon(page_m));
71459+
71460+ vma_m = pax_find_mirror_vma(vma);
71461+ if (!vma_m)
71462+ return;
71463+
71464+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71465+ address_m = address + SEGMEXEC_TASK_SIZE;
71466+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71467+ pte_m = pte_offset_map(pmd_m, address_m);
71468+ ptl_m = pte_lockptr(mm, pmd_m);
71469+ if (ptl != ptl_m) {
71470+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71471+ if (!pte_none(*pte_m))
71472+ goto out;
71473+ }
71474+
71475+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71476+ page_cache_get(page_m);
71477+ page_add_file_rmap(page_m);
71478+ inc_mm_counter_fast(mm, MM_FILEPAGES);
71479+ set_pte_at(mm, address_m, pte_m, entry_m);
71480+ update_mmu_cache(vma_m, address_m, entry_m);
71481+out:
71482+ if (ptl != ptl_m)
71483+ spin_unlock(ptl_m);
71484+ pte_unmap(pte_m);
71485+}
71486+
71487+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71488+{
71489+ struct mm_struct *mm = vma->vm_mm;
71490+ unsigned long address_m;
71491+ spinlock_t *ptl_m;
71492+ struct vm_area_struct *vma_m;
71493+ pmd_t *pmd_m;
71494+ pte_t *pte_m, entry_m;
71495+
71496+ vma_m = pax_find_mirror_vma(vma);
71497+ if (!vma_m)
71498+ return;
71499+
71500+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71501+ address_m = address + SEGMEXEC_TASK_SIZE;
71502+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71503+ pte_m = pte_offset_map(pmd_m, address_m);
71504+ ptl_m = pte_lockptr(mm, pmd_m);
71505+ if (ptl != ptl_m) {
71506+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71507+ if (!pte_none(*pte_m))
71508+ goto out;
71509+ }
71510+
71511+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
71512+ set_pte_at(mm, address_m, pte_m, entry_m);
71513+out:
71514+ if (ptl != ptl_m)
71515+ spin_unlock(ptl_m);
71516+ pte_unmap(pte_m);
71517+}
71518+
71519+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
71520+{
71521+ struct page *page_m;
71522+ pte_t entry;
71523+
71524+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
71525+ goto out;
71526+
71527+ entry = *pte;
71528+ page_m = vm_normal_page(vma, address, entry);
71529+ if (!page_m)
71530+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
71531+ else if (PageAnon(page_m)) {
71532+ if (pax_find_mirror_vma(vma)) {
71533+ pte_unmap_unlock(pte, ptl);
71534+ lock_page(page_m);
71535+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
71536+ if (pte_same(entry, *pte))
71537+ pax_mirror_anon_pte(vma, address, page_m, ptl);
71538+ else
71539+ unlock_page(page_m);
71540+ }
71541+ } else
71542+ pax_mirror_file_pte(vma, address, page_m, ptl);
71543+
71544+out:
71545+ pte_unmap_unlock(pte, ptl);
71546+}
71547+#endif
71548+
71549 /*
71550 * This routine handles present pages, when users try to write
71551 * to a shared page. It is done by copying the page to a new address
71552@@ -2656,6 +2849,12 @@ gotten:
71553 */
71554 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71555 if (likely(pte_same(*page_table, orig_pte))) {
71556+
71557+#ifdef CONFIG_PAX_SEGMEXEC
71558+ if (pax_find_mirror_vma(vma))
71559+ BUG_ON(!trylock_page(new_page));
71560+#endif
71561+
71562 if (old_page) {
71563 if (!PageAnon(old_page)) {
71564 dec_mm_counter_fast(mm, MM_FILEPAGES);
71565@@ -2707,6 +2906,10 @@ gotten:
71566 page_remove_rmap(old_page);
71567 }
71568
71569+#ifdef CONFIG_PAX_SEGMEXEC
71570+ pax_mirror_anon_pte(vma, address, new_page, ptl);
71571+#endif
71572+
71573 /* Free the old page.. */
71574 new_page = old_page;
71575 ret |= VM_FAULT_WRITE;
71576@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71577 swap_free(entry);
71578 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
71579 try_to_free_swap(page);
71580+
71581+#ifdef CONFIG_PAX_SEGMEXEC
71582+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
71583+#endif
71584+
71585 unlock_page(page);
71586 if (swapcache) {
71587 /*
71588@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71589
71590 /* No need to invalidate - it was non-present before */
71591 update_mmu_cache(vma, address, page_table);
71592+
71593+#ifdef CONFIG_PAX_SEGMEXEC
71594+ pax_mirror_anon_pte(vma, address, page, ptl);
71595+#endif
71596+
71597 unlock:
71598 pte_unmap_unlock(page_table, ptl);
71599 out:
71600@@ -3028,40 +3241,6 @@ out_release:
71601 }
71602
71603 /*
71604- * This is like a special single-page "expand_{down|up}wards()",
71605- * except we must first make sure that 'address{-|+}PAGE_SIZE'
71606- * doesn't hit another vma.
71607- */
71608-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
71609-{
71610- address &= PAGE_MASK;
71611- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
71612- struct vm_area_struct *prev = vma->vm_prev;
71613-
71614- /*
71615- * Is there a mapping abutting this one below?
71616- *
71617- * That's only ok if it's the same stack mapping
71618- * that has gotten split..
71619- */
71620- if (prev && prev->vm_end == address)
71621- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71622-
71623- expand_downwards(vma, address - PAGE_SIZE);
71624- }
71625- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71626- struct vm_area_struct *next = vma->vm_next;
71627-
71628- /* As VM_GROWSDOWN but s/below/above/ */
71629- if (next && next->vm_start == address + PAGE_SIZE)
71630- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71631-
71632- expand_upwards(vma, address + PAGE_SIZE);
71633- }
71634- return 0;
71635-}
71636-
71637-/*
71638 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71639 * but allow concurrent faults), and pte mapped but not yet locked.
71640 * We return with mmap_sem still held, but pte unmapped and unlocked.
71641@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71642 unsigned long address, pte_t *page_table, pmd_t *pmd,
71643 unsigned int flags)
71644 {
71645- struct page *page;
71646+ struct page *page = NULL;
71647 spinlock_t *ptl;
71648 pte_t entry;
71649
71650- pte_unmap(page_table);
71651-
71652- /* Check if we need to add a guard page to the stack */
71653- if (check_stack_guard_page(vma, address) < 0)
71654- return VM_FAULT_SIGBUS;
71655-
71656- /* Use the zero-page for reads */
71657 if (!(flags & FAULT_FLAG_WRITE)) {
71658 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71659 vma->vm_page_prot));
71660- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71661+ ptl = pte_lockptr(mm, pmd);
71662+ spin_lock(ptl);
71663 if (!pte_none(*page_table))
71664 goto unlock;
71665 goto setpte;
71666 }
71667
71668 /* Allocate our own private page. */
71669+ pte_unmap(page_table);
71670+
71671 if (unlikely(anon_vma_prepare(vma)))
71672 goto oom;
71673 page = alloc_zeroed_user_highpage_movable(vma, address);
71674@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71675 if (!pte_none(*page_table))
71676 goto release;
71677
71678+#ifdef CONFIG_PAX_SEGMEXEC
71679+ if (pax_find_mirror_vma(vma))
71680+ BUG_ON(!trylock_page(page));
71681+#endif
71682+
71683 inc_mm_counter_fast(mm, MM_ANONPAGES);
71684 page_add_new_anon_rmap(page, vma, address);
71685 setpte:
71686@@ -3116,6 +3296,12 @@ setpte:
71687
71688 /* No need to invalidate - it was non-present before */
71689 update_mmu_cache(vma, address, page_table);
71690+
71691+#ifdef CONFIG_PAX_SEGMEXEC
71692+ if (page)
71693+ pax_mirror_anon_pte(vma, address, page, ptl);
71694+#endif
71695+
71696 unlock:
71697 pte_unmap_unlock(page_table, ptl);
71698 return 0;
71699@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71700 */
71701 /* Only go through if we didn't race with anybody else... */
71702 if (likely(pte_same(*page_table, orig_pte))) {
71703+
71704+#ifdef CONFIG_PAX_SEGMEXEC
71705+ if (anon && pax_find_mirror_vma(vma))
71706+ BUG_ON(!trylock_page(page));
71707+#endif
71708+
71709 flush_icache_page(vma, page);
71710 entry = mk_pte(page, vma->vm_page_prot);
71711 if (flags & FAULT_FLAG_WRITE)
71712@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71713
71714 /* no need to invalidate: a not-present page won't be cached */
71715 update_mmu_cache(vma, address, page_table);
71716+
71717+#ifdef CONFIG_PAX_SEGMEXEC
71718+ if (anon)
71719+ pax_mirror_anon_pte(vma, address, page, ptl);
71720+ else
71721+ pax_mirror_file_pte(vma, address, page, ptl);
71722+#endif
71723+
71724 } else {
71725 if (cow_page)
71726 mem_cgroup_uncharge_page(cow_page);
71727@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
71728 if (flags & FAULT_FLAG_WRITE)
71729 flush_tlb_fix_spurious_fault(vma, address);
71730 }
71731+
71732+#ifdef CONFIG_PAX_SEGMEXEC
71733+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71734+ return 0;
71735+#endif
71736+
71737 unlock:
71738 pte_unmap_unlock(pte, ptl);
71739 return 0;
71740@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71741 pmd_t *pmd;
71742 pte_t *pte;
71743
71744+#ifdef CONFIG_PAX_SEGMEXEC
71745+ struct vm_area_struct *vma_m;
71746+#endif
71747+
71748 __set_current_state(TASK_RUNNING);
71749
71750 count_vm_event(PGFAULT);
71751@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71752 if (unlikely(is_vm_hugetlb_page(vma)))
71753 return hugetlb_fault(mm, vma, address, flags);
71754
71755+#ifdef CONFIG_PAX_SEGMEXEC
71756+ vma_m = pax_find_mirror_vma(vma);
71757+ if (vma_m) {
71758+ unsigned long address_m;
71759+ pgd_t *pgd_m;
71760+ pud_t *pud_m;
71761+ pmd_t *pmd_m;
71762+
71763+ if (vma->vm_start > vma_m->vm_start) {
71764+ address_m = address;
71765+ address -= SEGMEXEC_TASK_SIZE;
71766+ vma = vma_m;
71767+ } else
71768+ address_m = address + SEGMEXEC_TASK_SIZE;
71769+
71770+ pgd_m = pgd_offset(mm, address_m);
71771+ pud_m = pud_alloc(mm, pgd_m, address_m);
71772+ if (!pud_m)
71773+ return VM_FAULT_OOM;
71774+ pmd_m = pmd_alloc(mm, pud_m, address_m);
71775+ if (!pmd_m)
71776+ return VM_FAULT_OOM;
71777+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71778+ return VM_FAULT_OOM;
71779+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71780+ }
71781+#endif
71782+
71783 pgd = pgd_offset(mm, address);
71784 pud = pud_alloc(mm, pgd, address);
71785 if (!pud)
71786@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71787 * run pte_offset_map on the pmd, if an huge pmd could
71788 * materialize from under us from a different thread.
71789 */
71790- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71791+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71792 return VM_FAULT_OOM;
71793 /* if an huge pmd materialized from under us just retry later */
71794 if (unlikely(pmd_trans_huge(*pmd)))
71795@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
71796 gate_vma.vm_start = FIXADDR_USER_START;
71797 gate_vma.vm_end = FIXADDR_USER_END;
71798 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71799- gate_vma.vm_page_prot = __P101;
71800+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71801 /*
71802 * Make sure the vDSO gets into every core dump.
71803 * Dumping its contents makes post-mortem fully interpretable later
71804diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71805index 2775fd0..f2b1c49 100644
71806--- a/mm/mempolicy.c
71807+++ b/mm/mempolicy.c
71808@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71809 unsigned long vmstart;
71810 unsigned long vmend;
71811
71812+#ifdef CONFIG_PAX_SEGMEXEC
71813+ struct vm_area_struct *vma_m;
71814+#endif
71815+
71816 vma = find_vma_prev(mm, start, &prev);
71817 if (!vma || vma->vm_start > start)
71818 return -EFAULT;
71819@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71820 err = policy_vma(vma, new_pol);
71821 if (err)
71822 goto out;
71823+
71824+#ifdef CONFIG_PAX_SEGMEXEC
71825+ vma_m = pax_find_mirror_vma(vma);
71826+ if (vma_m) {
71827+ err = policy_vma(vma_m, new_pol);
71828+ if (err)
71829+ goto out;
71830+ }
71831+#endif
71832+
71833 }
71834
71835 out:
71836@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71837
71838 if (end < start)
71839 return -EINVAL;
71840+
71841+#ifdef CONFIG_PAX_SEGMEXEC
71842+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71843+ if (end > SEGMEXEC_TASK_SIZE)
71844+ return -EINVAL;
71845+ } else
71846+#endif
71847+
71848+ if (end > TASK_SIZE)
71849+ return -EINVAL;
71850+
71851 if (end == start)
71852 return 0;
71853
71854@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71855 if (!mm)
71856 goto out;
71857
71858+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71859+ if (mm != current->mm &&
71860+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71861+ err = -EPERM;
71862+ goto out;
71863+ }
71864+#endif
71865+
71866 /*
71867 * Check if this process has the right to modify the specified
71868 * process. The right exists if the process has administrative
71869@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71870 rcu_read_lock();
71871 tcred = __task_cred(task);
71872 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71873- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71874- !capable(CAP_SYS_NICE)) {
71875+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71876 rcu_read_unlock();
71877 err = -EPERM;
71878 goto out;
71879diff --git a/mm/migrate.c b/mm/migrate.c
71880index 14d0a6a..0360908 100644
71881--- a/mm/migrate.c
71882+++ b/mm/migrate.c
71883@@ -866,9 +866,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
71884
71885 if (anon_vma)
71886 put_anon_vma(anon_vma);
71887-out:
71888 unlock_page(hpage);
71889
71890+out:
71891 if (rc != -EAGAIN) {
71892 list_del(&hpage->lru);
71893 put_page(hpage);
71894@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
71895 unsigned long chunk_start;
71896 int err;
71897
71898+ pax_track_stack();
71899+
71900 task_nodes = cpuset_mems_allowed(task);
71901
71902 err = -ENOMEM;
71903@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71904 if (!mm)
71905 return -EINVAL;
71906
71907+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71908+ if (mm != current->mm &&
71909+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71910+ err = -EPERM;
71911+ goto out;
71912+ }
71913+#endif
71914+
71915 /*
71916 * Check if this process has the right to modify the specified
71917 * process. The right exists if the process has administrative
71918@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71919 rcu_read_lock();
71920 tcred = __task_cred(task);
71921 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71922- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71923- !capable(CAP_SYS_NICE)) {
71924+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71925 rcu_read_unlock();
71926 err = -EPERM;
71927 goto out;
71928diff --git a/mm/mlock.c b/mm/mlock.c
71929index 048260c..57f4a4e 100644
71930--- a/mm/mlock.c
71931+++ b/mm/mlock.c
71932@@ -13,6 +13,7 @@
71933 #include <linux/pagemap.h>
71934 #include <linux/mempolicy.h>
71935 #include <linux/syscalls.h>
71936+#include <linux/security.h>
71937 #include <linux/sched.h>
71938 #include <linux/module.h>
71939 #include <linux/rmap.h>
71940@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71941 return -EINVAL;
71942 if (end == start)
71943 return 0;
71944+ if (end > TASK_SIZE)
71945+ return -EINVAL;
71946+
71947 vma = find_vma_prev(current->mm, start, &prev);
71948 if (!vma || vma->vm_start > start)
71949 return -ENOMEM;
71950@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71951 for (nstart = start ; ; ) {
71952 vm_flags_t newflags;
71953
71954+#ifdef CONFIG_PAX_SEGMEXEC
71955+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71956+ break;
71957+#endif
71958+
71959 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71960
71961 newflags = vma->vm_flags | VM_LOCKED;
71962@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71963 lock_limit >>= PAGE_SHIFT;
71964
71965 /* check against resource limits */
71966+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71967 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71968 error = do_mlock(start, len, 1);
71969 up_write(&current->mm->mmap_sem);
71970@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71971 static int do_mlockall(int flags)
71972 {
71973 struct vm_area_struct * vma, * prev = NULL;
71974- unsigned int def_flags = 0;
71975
71976 if (flags & MCL_FUTURE)
71977- def_flags = VM_LOCKED;
71978- current->mm->def_flags = def_flags;
71979+ current->mm->def_flags |= VM_LOCKED;
71980+ else
71981+ current->mm->def_flags &= ~VM_LOCKED;
71982 if (flags == MCL_FUTURE)
71983 goto out;
71984
71985 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71986 vm_flags_t newflags;
71987
71988+#ifdef CONFIG_PAX_SEGMEXEC
71989+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71990+ break;
71991+#endif
71992+
71993+ BUG_ON(vma->vm_end > TASK_SIZE);
71994 newflags = vma->vm_flags | VM_LOCKED;
71995 if (!(flags & MCL_CURRENT))
71996 newflags &= ~VM_LOCKED;
71997@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71998 lock_limit >>= PAGE_SHIFT;
71999
72000 ret = -ENOMEM;
72001+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72002 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72003 capable(CAP_IPC_LOCK))
72004 ret = do_mlockall(flags);
72005diff --git a/mm/mmap.c b/mm/mmap.c
72006index a65efd4..17d61ff 100644
72007--- a/mm/mmap.c
72008+++ b/mm/mmap.c
72009@@ -46,6 +46,16 @@
72010 #define arch_rebalance_pgtables(addr, len) (addr)
72011 #endif
72012
72013+static inline void verify_mm_writelocked(struct mm_struct *mm)
72014+{
72015+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72016+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72017+ up_read(&mm->mmap_sem);
72018+ BUG();
72019+ }
72020+#endif
72021+}
72022+
72023 static void unmap_region(struct mm_struct *mm,
72024 struct vm_area_struct *vma, struct vm_area_struct *prev,
72025 unsigned long start, unsigned long end);
72026@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
72027 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72028 *
72029 */
72030-pgprot_t protection_map[16] = {
72031+pgprot_t protection_map[16] __read_only = {
72032 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72033 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72034 };
72035
72036-pgprot_t vm_get_page_prot(unsigned long vm_flags)
72037+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72038 {
72039- return __pgprot(pgprot_val(protection_map[vm_flags &
72040+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72041 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72042 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72043+
72044+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72045+ if (!(__supported_pte_mask & _PAGE_NX) &&
72046+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72047+ (vm_flags & (VM_READ | VM_WRITE)))
72048+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72049+#endif
72050+
72051+ return prot;
72052 }
72053 EXPORT_SYMBOL(vm_get_page_prot);
72054
72055 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72056 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72057 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72058+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72059 /*
72060 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72061 * other variables. It can be updated by several CPUs frequently.
72062@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72063 struct vm_area_struct *next = vma->vm_next;
72064
72065 might_sleep();
72066+ BUG_ON(vma->vm_mirror);
72067 if (vma->vm_ops && vma->vm_ops->close)
72068 vma->vm_ops->close(vma);
72069 if (vma->vm_file) {
72070@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72071 * not page aligned -Ram Gupta
72072 */
72073 rlim = rlimit(RLIMIT_DATA);
72074+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72075 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72076 (mm->end_data - mm->start_data) > rlim)
72077 goto out;
72078@@ -689,6 +711,12 @@ static int
72079 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72080 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72081 {
72082+
72083+#ifdef CONFIG_PAX_SEGMEXEC
72084+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72085+ return 0;
72086+#endif
72087+
72088 if (is_mergeable_vma(vma, file, vm_flags) &&
72089 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72090 if (vma->vm_pgoff == vm_pgoff)
72091@@ -708,6 +736,12 @@ static int
72092 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72093 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72094 {
72095+
72096+#ifdef CONFIG_PAX_SEGMEXEC
72097+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72098+ return 0;
72099+#endif
72100+
72101 if (is_mergeable_vma(vma, file, vm_flags) &&
72102 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72103 pgoff_t vm_pglen;
72104@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72105 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72106 struct vm_area_struct *prev, unsigned long addr,
72107 unsigned long end, unsigned long vm_flags,
72108- struct anon_vma *anon_vma, struct file *file,
72109+ struct anon_vma *anon_vma, struct file *file,
72110 pgoff_t pgoff, struct mempolicy *policy)
72111 {
72112 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72113 struct vm_area_struct *area, *next;
72114 int err;
72115
72116+#ifdef CONFIG_PAX_SEGMEXEC
72117+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72118+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72119+
72120+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72121+#endif
72122+
72123 /*
72124 * We later require that vma->vm_flags == vm_flags,
72125 * so this tests vma->vm_flags & VM_SPECIAL, too.
72126@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72127 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72128 next = next->vm_next;
72129
72130+#ifdef CONFIG_PAX_SEGMEXEC
72131+ if (prev)
72132+ prev_m = pax_find_mirror_vma(prev);
72133+ if (area)
72134+ area_m = pax_find_mirror_vma(area);
72135+ if (next)
72136+ next_m = pax_find_mirror_vma(next);
72137+#endif
72138+
72139 /*
72140 * Can it merge with the predecessor?
72141 */
72142@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72143 /* cases 1, 6 */
72144 err = vma_adjust(prev, prev->vm_start,
72145 next->vm_end, prev->vm_pgoff, NULL);
72146- } else /* cases 2, 5, 7 */
72147+
72148+#ifdef CONFIG_PAX_SEGMEXEC
72149+ if (!err && prev_m)
72150+ err = vma_adjust(prev_m, prev_m->vm_start,
72151+ next_m->vm_end, prev_m->vm_pgoff, NULL);
72152+#endif
72153+
72154+ } else { /* cases 2, 5, 7 */
72155 err = vma_adjust(prev, prev->vm_start,
72156 end, prev->vm_pgoff, NULL);
72157+
72158+#ifdef CONFIG_PAX_SEGMEXEC
72159+ if (!err && prev_m)
72160+ err = vma_adjust(prev_m, prev_m->vm_start,
72161+ end_m, prev_m->vm_pgoff, NULL);
72162+#endif
72163+
72164+ }
72165 if (err)
72166 return NULL;
72167 khugepaged_enter_vma_merge(prev);
72168@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72169 mpol_equal(policy, vma_policy(next)) &&
72170 can_vma_merge_before(next, vm_flags,
72171 anon_vma, file, pgoff+pglen)) {
72172- if (prev && addr < prev->vm_end) /* case 4 */
72173+ if (prev && addr < prev->vm_end) { /* case 4 */
72174 err = vma_adjust(prev, prev->vm_start,
72175 addr, prev->vm_pgoff, NULL);
72176- else /* cases 3, 8 */
72177+
72178+#ifdef CONFIG_PAX_SEGMEXEC
72179+ if (!err && prev_m)
72180+ err = vma_adjust(prev_m, prev_m->vm_start,
72181+ addr_m, prev_m->vm_pgoff, NULL);
72182+#endif
72183+
72184+ } else { /* cases 3, 8 */
72185 err = vma_adjust(area, addr, next->vm_end,
72186 next->vm_pgoff - pglen, NULL);
72187+
72188+#ifdef CONFIG_PAX_SEGMEXEC
72189+ if (!err && area_m)
72190+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
72191+ next_m->vm_pgoff - pglen, NULL);
72192+#endif
72193+
72194+ }
72195 if (err)
72196 return NULL;
72197 khugepaged_enter_vma_merge(area);
72198@@ -921,14 +1001,11 @@ none:
72199 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72200 struct file *file, long pages)
72201 {
72202- const unsigned long stack_flags
72203- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72204-
72205 if (file) {
72206 mm->shared_vm += pages;
72207 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72208 mm->exec_vm += pages;
72209- } else if (flags & stack_flags)
72210+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72211 mm->stack_vm += pages;
72212 if (flags & (VM_RESERVED|VM_IO))
72213 mm->reserved_vm += pages;
72214@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72215 * (the exception is when the underlying filesystem is noexec
72216 * mounted, in which case we dont add PROT_EXEC.)
72217 */
72218- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72219+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72220 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72221 prot |= PROT_EXEC;
72222
72223@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72224 /* Obtain the address to map to. we verify (or select) it and ensure
72225 * that it represents a valid section of the address space.
72226 */
72227- addr = get_unmapped_area(file, addr, len, pgoff, flags);
72228+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72229 if (addr & ~PAGE_MASK)
72230 return addr;
72231
72232@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72233 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72234 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72235
72236+#ifdef CONFIG_PAX_MPROTECT
72237+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72238+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72239+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72240+ gr_log_rwxmmap(file);
72241+
72242+#ifdef CONFIG_PAX_EMUPLT
72243+ vm_flags &= ~VM_EXEC;
72244+#else
72245+ return -EPERM;
72246+#endif
72247+
72248+ }
72249+
72250+ if (!(vm_flags & VM_EXEC))
72251+ vm_flags &= ~VM_MAYEXEC;
72252+#else
72253+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72254+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72255+#endif
72256+ else
72257+ vm_flags &= ~VM_MAYWRITE;
72258+ }
72259+#endif
72260+
72261+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72262+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72263+ vm_flags &= ~VM_PAGEEXEC;
72264+#endif
72265+
72266 if (flags & MAP_LOCKED)
72267 if (!can_do_mlock())
72268 return -EPERM;
72269@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72270 locked += mm->locked_vm;
72271 lock_limit = rlimit(RLIMIT_MEMLOCK);
72272 lock_limit >>= PAGE_SHIFT;
72273+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72274 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72275 return -EAGAIN;
72276 }
72277@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72278 if (error)
72279 return error;
72280
72281+ if (!gr_acl_handle_mmap(file, prot))
72282+ return -EACCES;
72283+
72284 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72285 }
72286 EXPORT_SYMBOL(do_mmap_pgoff);
72287@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
72288 vm_flags_t vm_flags = vma->vm_flags;
72289
72290 /* If it was private or non-writable, the write bit is already clear */
72291- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72292+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72293 return 0;
72294
72295 /* The backer wishes to know when pages are first written to? */
72296@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
72297 unsigned long charged = 0;
72298 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72299
72300+#ifdef CONFIG_PAX_SEGMEXEC
72301+ struct vm_area_struct *vma_m = NULL;
72302+#endif
72303+
72304+ /*
72305+ * mm->mmap_sem is required to protect against another thread
72306+ * changing the mappings in case we sleep.
72307+ */
72308+ verify_mm_writelocked(mm);
72309+
72310 /* Clear old maps */
72311 error = -ENOMEM;
72312-munmap_back:
72313 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72314 if (vma && vma->vm_start < addr + len) {
72315 if (do_munmap(mm, addr, len))
72316 return -ENOMEM;
72317- goto munmap_back;
72318+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72319+ BUG_ON(vma && vma->vm_start < addr + len);
72320 }
72321
72322 /* Check against address space limit. */
72323@@ -1258,6 +1379,16 @@ munmap_back:
72324 goto unacct_error;
72325 }
72326
72327+#ifdef CONFIG_PAX_SEGMEXEC
72328+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72329+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72330+ if (!vma_m) {
72331+ error = -ENOMEM;
72332+ goto free_vma;
72333+ }
72334+ }
72335+#endif
72336+
72337 vma->vm_mm = mm;
72338 vma->vm_start = addr;
72339 vma->vm_end = addr + len;
72340@@ -1281,6 +1412,19 @@ munmap_back:
72341 error = file->f_op->mmap(file, vma);
72342 if (error)
72343 goto unmap_and_free_vma;
72344+
72345+#ifdef CONFIG_PAX_SEGMEXEC
72346+ if (vma_m && (vm_flags & VM_EXECUTABLE))
72347+ added_exe_file_vma(mm);
72348+#endif
72349+
72350+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72351+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72352+ vma->vm_flags |= VM_PAGEEXEC;
72353+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72354+ }
72355+#endif
72356+
72357 if (vm_flags & VM_EXECUTABLE)
72358 added_exe_file_vma(mm);
72359
72360@@ -1316,6 +1460,11 @@ munmap_back:
72361 vma_link(mm, vma, prev, rb_link, rb_parent);
72362 file = vma->vm_file;
72363
72364+#ifdef CONFIG_PAX_SEGMEXEC
72365+ if (vma_m)
72366+ BUG_ON(pax_mirror_vma(vma_m, vma));
72367+#endif
72368+
72369 /* Once vma denies write, undo our temporary denial count */
72370 if (correct_wcount)
72371 atomic_inc(&inode->i_writecount);
72372@@ -1324,6 +1473,7 @@ out:
72373
72374 mm->total_vm += len >> PAGE_SHIFT;
72375 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72376+ track_exec_limit(mm, addr, addr + len, vm_flags);
72377 if (vm_flags & VM_LOCKED) {
72378 if (!mlock_vma_pages_range(vma, addr, addr + len))
72379 mm->locked_vm += (len >> PAGE_SHIFT);
72380@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
72381 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72382 charged = 0;
72383 free_vma:
72384+
72385+#ifdef CONFIG_PAX_SEGMEXEC
72386+ if (vma_m)
72387+ kmem_cache_free(vm_area_cachep, vma_m);
72388+#endif
72389+
72390 kmem_cache_free(vm_area_cachep, vma);
72391 unacct_error:
72392 if (charged)
72393@@ -1348,6 +1504,44 @@ unacct_error:
72394 return error;
72395 }
72396
72397+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72398+{
72399+ if (!vma) {
72400+#ifdef CONFIG_STACK_GROWSUP
72401+ if (addr > sysctl_heap_stack_gap)
72402+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72403+ else
72404+ vma = find_vma(current->mm, 0);
72405+ if (vma && (vma->vm_flags & VM_GROWSUP))
72406+ return false;
72407+#endif
72408+ return true;
72409+ }
72410+
72411+ if (addr + len > vma->vm_start)
72412+ return false;
72413+
72414+ if (vma->vm_flags & VM_GROWSDOWN)
72415+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72416+#ifdef CONFIG_STACK_GROWSUP
72417+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72418+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72419+#endif
72420+
72421+ return true;
72422+}
72423+
72424+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72425+{
72426+ if (vma->vm_start < len)
72427+ return -ENOMEM;
72428+ if (!(vma->vm_flags & VM_GROWSDOWN))
72429+ return vma->vm_start - len;
72430+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
72431+ return vma->vm_start - len - sysctl_heap_stack_gap;
72432+ return -ENOMEM;
72433+}
72434+
72435 /* Get an address range which is currently unmapped.
72436 * For shmat() with addr=0.
72437 *
72438@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
72439 if (flags & MAP_FIXED)
72440 return addr;
72441
72442+#ifdef CONFIG_PAX_RANDMMAP
72443+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72444+#endif
72445+
72446 if (addr) {
72447 addr = PAGE_ALIGN(addr);
72448- vma = find_vma(mm, addr);
72449- if (TASK_SIZE - len >= addr &&
72450- (!vma || addr + len <= vma->vm_start))
72451- return addr;
72452+ if (TASK_SIZE - len >= addr) {
72453+ vma = find_vma(mm, addr);
72454+ if (check_heap_stack_gap(vma, addr, len))
72455+ return addr;
72456+ }
72457 }
72458 if (len > mm->cached_hole_size) {
72459- start_addr = addr = mm->free_area_cache;
72460+ start_addr = addr = mm->free_area_cache;
72461 } else {
72462- start_addr = addr = TASK_UNMAPPED_BASE;
72463- mm->cached_hole_size = 0;
72464+ start_addr = addr = mm->mmap_base;
72465+ mm->cached_hole_size = 0;
72466 }
72467
72468 full_search:
72469@@ -1396,34 +1595,40 @@ full_search:
72470 * Start a new search - just in case we missed
72471 * some holes.
72472 */
72473- if (start_addr != TASK_UNMAPPED_BASE) {
72474- addr = TASK_UNMAPPED_BASE;
72475- start_addr = addr;
72476+ if (start_addr != mm->mmap_base) {
72477+ start_addr = addr = mm->mmap_base;
72478 mm->cached_hole_size = 0;
72479 goto full_search;
72480 }
72481 return -ENOMEM;
72482 }
72483- if (!vma || addr + len <= vma->vm_start) {
72484- /*
72485- * Remember the place where we stopped the search:
72486- */
72487- mm->free_area_cache = addr + len;
72488- return addr;
72489- }
72490+ if (check_heap_stack_gap(vma, addr, len))
72491+ break;
72492 if (addr + mm->cached_hole_size < vma->vm_start)
72493 mm->cached_hole_size = vma->vm_start - addr;
72494 addr = vma->vm_end;
72495 }
72496+
72497+ /*
72498+ * Remember the place where we stopped the search:
72499+ */
72500+ mm->free_area_cache = addr + len;
72501+ return addr;
72502 }
72503 #endif
72504
72505 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
72506 {
72507+
72508+#ifdef CONFIG_PAX_SEGMEXEC
72509+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72510+ return;
72511+#endif
72512+
72513 /*
72514 * Is this a new hole at the lowest possible address?
72515 */
72516- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
72517+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
72518 mm->free_area_cache = addr;
72519 mm->cached_hole_size = ~0UL;
72520 }
72521@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72522 {
72523 struct vm_area_struct *vma;
72524 struct mm_struct *mm = current->mm;
72525- unsigned long addr = addr0;
72526+ unsigned long base = mm->mmap_base, addr = addr0;
72527
72528 /* requested length too big for entire address space */
72529 if (len > TASK_SIZE)
72530@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72531 if (flags & MAP_FIXED)
72532 return addr;
72533
72534+#ifdef CONFIG_PAX_RANDMMAP
72535+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72536+#endif
72537+
72538 /* requesting a specific address */
72539 if (addr) {
72540 addr = PAGE_ALIGN(addr);
72541- vma = find_vma(mm, addr);
72542- if (TASK_SIZE - len >= addr &&
72543- (!vma || addr + len <= vma->vm_start))
72544- return addr;
72545+ if (TASK_SIZE - len >= addr) {
72546+ vma = find_vma(mm, addr);
72547+ if (check_heap_stack_gap(vma, addr, len))
72548+ return addr;
72549+ }
72550 }
72551
72552 /* check if free_area_cache is useful for us */
72553@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72554 /* make sure it can fit in the remaining address space */
72555 if (addr > len) {
72556 vma = find_vma(mm, addr-len);
72557- if (!vma || addr <= vma->vm_start)
72558+ if (check_heap_stack_gap(vma, addr - len, len))
72559 /* remember the address as a hint for next time */
72560 return (mm->free_area_cache = addr-len);
72561 }
72562@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72563 * return with success:
72564 */
72565 vma = find_vma(mm, addr);
72566- if (!vma || addr+len <= vma->vm_start)
72567+ if (check_heap_stack_gap(vma, addr, len))
72568 /* remember the address as a hint for next time */
72569 return (mm->free_area_cache = addr);
72570
72571@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72572 mm->cached_hole_size = vma->vm_start - addr;
72573
72574 /* try just below the current vma->vm_start */
72575- addr = vma->vm_start-len;
72576- } while (len < vma->vm_start);
72577+ addr = skip_heap_stack_gap(vma, len);
72578+ } while (!IS_ERR_VALUE(addr));
72579
72580 bottomup:
72581 /*
72582@@ -1507,13 +1717,21 @@ bottomup:
72583 * can happen with large stack limits and large mmap()
72584 * allocations.
72585 */
72586+ mm->mmap_base = TASK_UNMAPPED_BASE;
72587+
72588+#ifdef CONFIG_PAX_RANDMMAP
72589+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72590+ mm->mmap_base += mm->delta_mmap;
72591+#endif
72592+
72593+ mm->free_area_cache = mm->mmap_base;
72594 mm->cached_hole_size = ~0UL;
72595- mm->free_area_cache = TASK_UNMAPPED_BASE;
72596 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
72597 /*
72598 * Restore the topdown base:
72599 */
72600- mm->free_area_cache = mm->mmap_base;
72601+ mm->mmap_base = base;
72602+ mm->free_area_cache = base;
72603 mm->cached_hole_size = ~0UL;
72604
72605 return addr;
72606@@ -1522,6 +1740,12 @@ bottomup:
72607
72608 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72609 {
72610+
72611+#ifdef CONFIG_PAX_SEGMEXEC
72612+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72613+ return;
72614+#endif
72615+
72616 /*
72617 * Is this a new hole at the highest possible address?
72618 */
72619@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72620 mm->free_area_cache = addr;
72621
72622 /* dont allow allocations above current base */
72623- if (mm->free_area_cache > mm->mmap_base)
72624+ if (mm->free_area_cache > mm->mmap_base) {
72625 mm->free_area_cache = mm->mmap_base;
72626+ mm->cached_hole_size = ~0UL;
72627+ }
72628 }
72629
72630 unsigned long
72631@@ -1638,6 +1864,28 @@ out:
72632 return prev ? prev->vm_next : vma;
72633 }
72634
72635+#ifdef CONFIG_PAX_SEGMEXEC
72636+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72637+{
72638+ struct vm_area_struct *vma_m;
72639+
72640+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72641+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72642+ BUG_ON(vma->vm_mirror);
72643+ return NULL;
72644+ }
72645+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72646+ vma_m = vma->vm_mirror;
72647+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72648+ BUG_ON(vma->vm_file != vma_m->vm_file);
72649+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
72650+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72651+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72652+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72653+ return vma_m;
72654+}
72655+#endif
72656+
72657 /*
72658 * Verify that the stack growth is acceptable and
72659 * update accounting. This is shared with both the
72660@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72661 return -ENOMEM;
72662
72663 /* Stack limit test */
72664+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
72665 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72666 return -ENOMEM;
72667
72668@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72669 locked = mm->locked_vm + grow;
72670 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72671 limit >>= PAGE_SHIFT;
72672+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72673 if (locked > limit && !capable(CAP_IPC_LOCK))
72674 return -ENOMEM;
72675 }
72676@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72677 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72678 * vma is the last one with address > vma->vm_end. Have to extend vma.
72679 */
72680+#ifndef CONFIG_IA64
72681+static
72682+#endif
72683 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72684 {
72685 int error;
72686+ bool locknext;
72687
72688 if (!(vma->vm_flags & VM_GROWSUP))
72689 return -EFAULT;
72690
72691+ /* Also guard against wrapping around to address 0. */
72692+ if (address < PAGE_ALIGN(address+1))
72693+ address = PAGE_ALIGN(address+1);
72694+ else
72695+ return -ENOMEM;
72696+
72697 /*
72698 * We must make sure the anon_vma is allocated
72699 * so that the anon_vma locking is not a noop.
72700 */
72701 if (unlikely(anon_vma_prepare(vma)))
72702 return -ENOMEM;
72703+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72704+ if (locknext && anon_vma_prepare(vma->vm_next))
72705+ return -ENOMEM;
72706 vma_lock_anon_vma(vma);
72707+ if (locknext)
72708+ vma_lock_anon_vma(vma->vm_next);
72709
72710 /*
72711 * vma->vm_start/vm_end cannot change under us because the caller
72712 * is required to hold the mmap_sem in read mode. We need the
72713- * anon_vma lock to serialize against concurrent expand_stacks.
72714- * Also guard against wrapping around to address 0.
72715+ * anon_vma locks to serialize against concurrent expand_stacks
72716+ * and expand_upwards.
72717 */
72718- if (address < PAGE_ALIGN(address+4))
72719- address = PAGE_ALIGN(address+4);
72720- else {
72721- vma_unlock_anon_vma(vma);
72722- return -ENOMEM;
72723- }
72724 error = 0;
72725
72726 /* Somebody else might have raced and expanded it already */
72727- if (address > vma->vm_end) {
72728+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72729+ error = -ENOMEM;
72730+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72731 unsigned long size, grow;
72732
72733 size = address - vma->vm_start;
72734@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72735 }
72736 }
72737 }
72738+ if (locknext)
72739+ vma_unlock_anon_vma(vma->vm_next);
72740 vma_unlock_anon_vma(vma);
72741 khugepaged_enter_vma_merge(vma);
72742 return error;
72743@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
72744 unsigned long address)
72745 {
72746 int error;
72747+ bool lockprev = false;
72748+ struct vm_area_struct *prev;
72749
72750 /*
72751 * We must make sure the anon_vma is allocated
72752@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
72753 if (error)
72754 return error;
72755
72756+ prev = vma->vm_prev;
72757+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72758+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72759+#endif
72760+ if (lockprev && anon_vma_prepare(prev))
72761+ return -ENOMEM;
72762+ if (lockprev)
72763+ vma_lock_anon_vma(prev);
72764+
72765 vma_lock_anon_vma(vma);
72766
72767 /*
72768@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
72769 */
72770
72771 /* Somebody else might have raced and expanded it already */
72772- if (address < vma->vm_start) {
72773+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72774+ error = -ENOMEM;
72775+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72776 unsigned long size, grow;
72777
72778+#ifdef CONFIG_PAX_SEGMEXEC
72779+ struct vm_area_struct *vma_m;
72780+
72781+ vma_m = pax_find_mirror_vma(vma);
72782+#endif
72783+
72784 size = vma->vm_end - address;
72785 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72786
72787@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
72788 if (!error) {
72789 vma->vm_start = address;
72790 vma->vm_pgoff -= grow;
72791+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72792+
72793+#ifdef CONFIG_PAX_SEGMEXEC
72794+ if (vma_m) {
72795+ vma_m->vm_start -= grow << PAGE_SHIFT;
72796+ vma_m->vm_pgoff -= grow;
72797+ }
72798+#endif
72799+
72800 perf_event_mmap(vma);
72801 }
72802 }
72803 }
72804 vma_unlock_anon_vma(vma);
72805+ if (lockprev)
72806+ vma_unlock_anon_vma(prev);
72807 khugepaged_enter_vma_merge(vma);
72808 return error;
72809 }
72810@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72811 do {
72812 long nrpages = vma_pages(vma);
72813
72814+#ifdef CONFIG_PAX_SEGMEXEC
72815+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72816+ vma = remove_vma(vma);
72817+ continue;
72818+ }
72819+#endif
72820+
72821 mm->total_vm -= nrpages;
72822 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72823 vma = remove_vma(vma);
72824@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72825 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72826 vma->vm_prev = NULL;
72827 do {
72828+
72829+#ifdef CONFIG_PAX_SEGMEXEC
72830+ if (vma->vm_mirror) {
72831+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72832+ vma->vm_mirror->vm_mirror = NULL;
72833+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
72834+ vma->vm_mirror = NULL;
72835+ }
72836+#endif
72837+
72838 rb_erase(&vma->vm_rb, &mm->mm_rb);
72839 mm->map_count--;
72840 tail_vma = vma;
72841@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72842 struct vm_area_struct *new;
72843 int err = -ENOMEM;
72844
72845+#ifdef CONFIG_PAX_SEGMEXEC
72846+ struct vm_area_struct *vma_m, *new_m = NULL;
72847+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72848+#endif
72849+
72850 if (is_vm_hugetlb_page(vma) && (addr &
72851 ~(huge_page_mask(hstate_vma(vma)))))
72852 return -EINVAL;
72853
72854+#ifdef CONFIG_PAX_SEGMEXEC
72855+ vma_m = pax_find_mirror_vma(vma);
72856+#endif
72857+
72858 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72859 if (!new)
72860 goto out_err;
72861
72862+#ifdef CONFIG_PAX_SEGMEXEC
72863+ if (vma_m) {
72864+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72865+ if (!new_m) {
72866+ kmem_cache_free(vm_area_cachep, new);
72867+ goto out_err;
72868+ }
72869+ }
72870+#endif
72871+
72872 /* most fields are the same, copy all, and then fixup */
72873 *new = *vma;
72874
72875@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72876 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72877 }
72878
72879+#ifdef CONFIG_PAX_SEGMEXEC
72880+ if (vma_m) {
72881+ *new_m = *vma_m;
72882+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
72883+ new_m->vm_mirror = new;
72884+ new->vm_mirror = new_m;
72885+
72886+ if (new_below)
72887+ new_m->vm_end = addr_m;
72888+ else {
72889+ new_m->vm_start = addr_m;
72890+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72891+ }
72892+ }
72893+#endif
72894+
72895 pol = mpol_dup(vma_policy(vma));
72896 if (IS_ERR(pol)) {
72897 err = PTR_ERR(pol);
72898@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72899 else
72900 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72901
72902+#ifdef CONFIG_PAX_SEGMEXEC
72903+ if (!err && vma_m) {
72904+ if (anon_vma_clone(new_m, vma_m))
72905+ goto out_free_mpol;
72906+
72907+ mpol_get(pol);
72908+ vma_set_policy(new_m, pol);
72909+
72910+ if (new_m->vm_file) {
72911+ get_file(new_m->vm_file);
72912+ if (vma_m->vm_flags & VM_EXECUTABLE)
72913+ added_exe_file_vma(mm);
72914+ }
72915+
72916+ if (new_m->vm_ops && new_m->vm_ops->open)
72917+ new_m->vm_ops->open(new_m);
72918+
72919+ if (new_below)
72920+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72921+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72922+ else
72923+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72924+
72925+ if (err) {
72926+ if (new_m->vm_ops && new_m->vm_ops->close)
72927+ new_m->vm_ops->close(new_m);
72928+ if (new_m->vm_file) {
72929+ if (vma_m->vm_flags & VM_EXECUTABLE)
72930+ removed_exe_file_vma(mm);
72931+ fput(new_m->vm_file);
72932+ }
72933+ mpol_put(pol);
72934+ }
72935+ }
72936+#endif
72937+
72938 /* Success. */
72939 if (!err)
72940 return 0;
72941@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72942 removed_exe_file_vma(mm);
72943 fput(new->vm_file);
72944 }
72945- unlink_anon_vmas(new);
72946 out_free_mpol:
72947 mpol_put(pol);
72948 out_free_vma:
72949+
72950+#ifdef CONFIG_PAX_SEGMEXEC
72951+ if (new_m) {
72952+ unlink_anon_vmas(new_m);
72953+ kmem_cache_free(vm_area_cachep, new_m);
72954+ }
72955+#endif
72956+
72957+ unlink_anon_vmas(new);
72958 kmem_cache_free(vm_area_cachep, new);
72959 out_err:
72960 return err;
72961@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72962 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72963 unsigned long addr, int new_below)
72964 {
72965+
72966+#ifdef CONFIG_PAX_SEGMEXEC
72967+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72968+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72969+ if (mm->map_count >= sysctl_max_map_count-1)
72970+ return -ENOMEM;
72971+ } else
72972+#endif
72973+
72974 if (mm->map_count >= sysctl_max_map_count)
72975 return -ENOMEM;
72976
72977@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72978 * work. This now handles partial unmappings.
72979 * Jeremy Fitzhardinge <jeremy@goop.org>
72980 */
72981+#ifdef CONFIG_PAX_SEGMEXEC
72982 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72983 {
72984+ int ret = __do_munmap(mm, start, len);
72985+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72986+ return ret;
72987+
72988+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72989+}
72990+
72991+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72992+#else
72993+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72994+#endif
72995+{
72996 unsigned long end;
72997 struct vm_area_struct *vma, *prev, *last;
72998
72999+ /*
73000+ * mm->mmap_sem is required to protect against another thread
73001+ * changing the mappings in case we sleep.
73002+ */
73003+ verify_mm_writelocked(mm);
73004+
73005 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73006 return -EINVAL;
73007
73008@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73009 /* Fix up all other VM information */
73010 remove_vma_list(mm, vma);
73011
73012+ track_exec_limit(mm, start, end, 0UL);
73013+
73014 return 0;
73015 }
73016
73017@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73018
73019 profile_munmap(addr);
73020
73021+#ifdef CONFIG_PAX_SEGMEXEC
73022+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73023+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
73024+ return -EINVAL;
73025+#endif
73026+
73027 down_write(&mm->mmap_sem);
73028 ret = do_munmap(mm, addr, len);
73029 up_write(&mm->mmap_sem);
73030 return ret;
73031 }
73032
73033-static inline void verify_mm_writelocked(struct mm_struct *mm)
73034-{
73035-#ifdef CONFIG_DEBUG_VM
73036- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73037- WARN_ON(1);
73038- up_read(&mm->mmap_sem);
73039- }
73040-#endif
73041-}
73042-
73043 /*
73044 * this is really a simplified "do_mmap". it only handles
73045 * anonymous maps. eventually we may be able to do some
73046@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73047 struct rb_node ** rb_link, * rb_parent;
73048 pgoff_t pgoff = addr >> PAGE_SHIFT;
73049 int error;
73050+ unsigned long charged;
73051
73052 len = PAGE_ALIGN(len);
73053 if (!len)
73054@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73055
73056 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73057
73058+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73059+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73060+ flags &= ~VM_EXEC;
73061+
73062+#ifdef CONFIG_PAX_MPROTECT
73063+ if (mm->pax_flags & MF_PAX_MPROTECT)
73064+ flags &= ~VM_MAYEXEC;
73065+#endif
73066+
73067+ }
73068+#endif
73069+
73070 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73071 if (error & ~PAGE_MASK)
73072 return error;
73073
73074+ charged = len >> PAGE_SHIFT;
73075+
73076 /*
73077 * mlock MCL_FUTURE?
73078 */
73079 if (mm->def_flags & VM_LOCKED) {
73080 unsigned long locked, lock_limit;
73081- locked = len >> PAGE_SHIFT;
73082+ locked = charged;
73083 locked += mm->locked_vm;
73084 lock_limit = rlimit(RLIMIT_MEMLOCK);
73085 lock_limit >>= PAGE_SHIFT;
73086@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73087 /*
73088 * Clear old maps. this also does some error checking for us
73089 */
73090- munmap_back:
73091 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73092 if (vma && vma->vm_start < addr + len) {
73093 if (do_munmap(mm, addr, len))
73094 return -ENOMEM;
73095- goto munmap_back;
73096+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73097+ BUG_ON(vma && vma->vm_start < addr + len);
73098 }
73099
73100 /* Check against address space limits *after* clearing old maps... */
73101- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73102+ if (!may_expand_vm(mm, charged))
73103 return -ENOMEM;
73104
73105 if (mm->map_count > sysctl_max_map_count)
73106 return -ENOMEM;
73107
73108- if (security_vm_enough_memory(len >> PAGE_SHIFT))
73109+ if (security_vm_enough_memory(charged))
73110 return -ENOMEM;
73111
73112 /* Can we just expand an old private anonymous mapping? */
73113@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73114 */
73115 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73116 if (!vma) {
73117- vm_unacct_memory(len >> PAGE_SHIFT);
73118+ vm_unacct_memory(charged);
73119 return -ENOMEM;
73120 }
73121
73122@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73123 vma_link(mm, vma, prev, rb_link, rb_parent);
73124 out:
73125 perf_event_mmap(vma);
73126- mm->total_vm += len >> PAGE_SHIFT;
73127+ mm->total_vm += charged;
73128 if (flags & VM_LOCKED) {
73129 if (!mlock_vma_pages_range(vma, addr, addr + len))
73130- mm->locked_vm += (len >> PAGE_SHIFT);
73131+ mm->locked_vm += charged;
73132 }
73133+ track_exec_limit(mm, addr, addr + len, flags);
73134 return addr;
73135 }
73136
73137@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
73138 * Walk the list again, actually closing and freeing it,
73139 * with preemption enabled, without holding any MM locks.
73140 */
73141- while (vma)
73142+ while (vma) {
73143+ vma->vm_mirror = NULL;
73144 vma = remove_vma(vma);
73145+ }
73146
73147 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
73148 }
73149@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73150 struct vm_area_struct * __vma, * prev;
73151 struct rb_node ** rb_link, * rb_parent;
73152
73153+#ifdef CONFIG_PAX_SEGMEXEC
73154+ struct vm_area_struct *vma_m = NULL;
73155+#endif
73156+
73157+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
73158+ return -EPERM;
73159+
73160 /*
73161 * The vm_pgoff of a purely anonymous vma should be irrelevant
73162 * until its first write fault, when page's anon_vma and index
73163@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73164 if ((vma->vm_flags & VM_ACCOUNT) &&
73165 security_vm_enough_memory_mm(mm, vma_pages(vma)))
73166 return -ENOMEM;
73167+
73168+#ifdef CONFIG_PAX_SEGMEXEC
73169+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73170+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73171+ if (!vma_m)
73172+ return -ENOMEM;
73173+ }
73174+#endif
73175+
73176 vma_link(mm, vma, prev, rb_link, rb_parent);
73177+
73178+#ifdef CONFIG_PAX_SEGMEXEC
73179+ if (vma_m)
73180+ BUG_ON(pax_mirror_vma(vma_m, vma));
73181+#endif
73182+
73183 return 0;
73184 }
73185
73186@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73187 struct rb_node **rb_link, *rb_parent;
73188 struct mempolicy *pol;
73189
73190+ BUG_ON(vma->vm_mirror);
73191+
73192 /*
73193 * If anonymous vma has not yet been faulted, update new pgoff
73194 * to match new location, to increase its chance of merging.
73195@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73196 return NULL;
73197 }
73198
73199+#ifdef CONFIG_PAX_SEGMEXEC
73200+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73201+{
73202+ struct vm_area_struct *prev_m;
73203+ struct rb_node **rb_link_m, *rb_parent_m;
73204+ struct mempolicy *pol_m;
73205+
73206+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73207+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73208+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73209+ *vma_m = *vma;
73210+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73211+ if (anon_vma_clone(vma_m, vma))
73212+ return -ENOMEM;
73213+ pol_m = vma_policy(vma_m);
73214+ mpol_get(pol_m);
73215+ vma_set_policy(vma_m, pol_m);
73216+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73217+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73218+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73219+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73220+ if (vma_m->vm_file)
73221+ get_file(vma_m->vm_file);
73222+ if (vma_m->vm_ops && vma_m->vm_ops->open)
73223+ vma_m->vm_ops->open(vma_m);
73224+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73225+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73226+ vma_m->vm_mirror = vma;
73227+ vma->vm_mirror = vma_m;
73228+ return 0;
73229+}
73230+#endif
73231+
73232 /*
73233 * Return true if the calling process may expand its vm space by the passed
73234 * number of pages
73235@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
73236 unsigned long lim;
73237
73238 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
73239-
73240+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73241 if (cur + npages > lim)
73242 return 0;
73243 return 1;
73244@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
73245 vma->vm_start = addr;
73246 vma->vm_end = addr + len;
73247
73248+#ifdef CONFIG_PAX_MPROTECT
73249+ if (mm->pax_flags & MF_PAX_MPROTECT) {
73250+#ifndef CONFIG_PAX_MPROTECT_COMPAT
73251+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73252+ return -EPERM;
73253+ if (!(vm_flags & VM_EXEC))
73254+ vm_flags &= ~VM_MAYEXEC;
73255+#else
73256+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73257+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73258+#endif
73259+ else
73260+ vm_flags &= ~VM_MAYWRITE;
73261+ }
73262+#endif
73263+
73264 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73265 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73266
73267diff --git a/mm/mprotect.c b/mm/mprotect.c
73268index 5a688a2..27e031c 100644
73269--- a/mm/mprotect.c
73270+++ b/mm/mprotect.c
73271@@ -23,10 +23,16 @@
73272 #include <linux/mmu_notifier.h>
73273 #include <linux/migrate.h>
73274 #include <linux/perf_event.h>
73275+
73276+#ifdef CONFIG_PAX_MPROTECT
73277+#include <linux/elf.h>
73278+#endif
73279+
73280 #include <asm/uaccess.h>
73281 #include <asm/pgtable.h>
73282 #include <asm/cacheflush.h>
73283 #include <asm/tlbflush.h>
73284+#include <asm/mmu_context.h>
73285
73286 #ifndef pgprot_modify
73287 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
73288@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
73289 flush_tlb_range(vma, start, end);
73290 }
73291
73292+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73293+/* called while holding the mmap semaphor for writing except stack expansion */
73294+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73295+{
73296+ unsigned long oldlimit, newlimit = 0UL;
73297+
73298+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
73299+ return;
73300+
73301+ spin_lock(&mm->page_table_lock);
73302+ oldlimit = mm->context.user_cs_limit;
73303+ if ((prot & VM_EXEC) && oldlimit < end)
73304+ /* USER_CS limit moved up */
73305+ newlimit = end;
73306+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73307+ /* USER_CS limit moved down */
73308+ newlimit = start;
73309+
73310+ if (newlimit) {
73311+ mm->context.user_cs_limit = newlimit;
73312+
73313+#ifdef CONFIG_SMP
73314+ wmb();
73315+ cpus_clear(mm->context.cpu_user_cs_mask);
73316+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73317+#endif
73318+
73319+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73320+ }
73321+ spin_unlock(&mm->page_table_lock);
73322+ if (newlimit == end) {
73323+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
73324+
73325+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
73326+ if (is_vm_hugetlb_page(vma))
73327+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73328+ else
73329+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73330+ }
73331+}
73332+#endif
73333+
73334 int
73335 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73336 unsigned long start, unsigned long end, unsigned long newflags)
73337@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73338 int error;
73339 int dirty_accountable = 0;
73340
73341+#ifdef CONFIG_PAX_SEGMEXEC
73342+ struct vm_area_struct *vma_m = NULL;
73343+ unsigned long start_m, end_m;
73344+
73345+ start_m = start + SEGMEXEC_TASK_SIZE;
73346+ end_m = end + SEGMEXEC_TASK_SIZE;
73347+#endif
73348+
73349 if (newflags == oldflags) {
73350 *pprev = vma;
73351 return 0;
73352 }
73353
73354+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73355+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73356+
73357+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73358+ return -ENOMEM;
73359+
73360+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73361+ return -ENOMEM;
73362+ }
73363+
73364 /*
73365 * If we make a private mapping writable we increase our commit;
73366 * but (without finer accounting) cannot reduce our commit if we
73367@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73368 }
73369 }
73370
73371+#ifdef CONFIG_PAX_SEGMEXEC
73372+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73373+ if (start != vma->vm_start) {
73374+ error = split_vma(mm, vma, start, 1);
73375+ if (error)
73376+ goto fail;
73377+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73378+ *pprev = (*pprev)->vm_next;
73379+ }
73380+
73381+ if (end != vma->vm_end) {
73382+ error = split_vma(mm, vma, end, 0);
73383+ if (error)
73384+ goto fail;
73385+ }
73386+
73387+ if (pax_find_mirror_vma(vma)) {
73388+ error = __do_munmap(mm, start_m, end_m - start_m);
73389+ if (error)
73390+ goto fail;
73391+ } else {
73392+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73393+ if (!vma_m) {
73394+ error = -ENOMEM;
73395+ goto fail;
73396+ }
73397+ vma->vm_flags = newflags;
73398+ error = pax_mirror_vma(vma_m, vma);
73399+ if (error) {
73400+ vma->vm_flags = oldflags;
73401+ goto fail;
73402+ }
73403+ }
73404+ }
73405+#endif
73406+
73407 /*
73408 * First try to merge with previous and/or next vma.
73409 */
73410@@ -204,9 +306,21 @@ success:
73411 * vm_flags and vm_page_prot are protected by the mmap_sem
73412 * held in write mode.
73413 */
73414+
73415+#ifdef CONFIG_PAX_SEGMEXEC
73416+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73417+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73418+#endif
73419+
73420 vma->vm_flags = newflags;
73421+
73422+#ifdef CONFIG_PAX_MPROTECT
73423+ if (mm->binfmt && mm->binfmt->handle_mprotect)
73424+ mm->binfmt->handle_mprotect(vma, newflags);
73425+#endif
73426+
73427 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73428- vm_get_page_prot(newflags));
73429+ vm_get_page_prot(vma->vm_flags));
73430
73431 if (vma_wants_writenotify(vma)) {
73432 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
73433@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73434 end = start + len;
73435 if (end <= start)
73436 return -ENOMEM;
73437+
73438+#ifdef CONFIG_PAX_SEGMEXEC
73439+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73440+ if (end > SEGMEXEC_TASK_SIZE)
73441+ return -EINVAL;
73442+ } else
73443+#endif
73444+
73445+ if (end > TASK_SIZE)
73446+ return -EINVAL;
73447+
73448 if (!arch_validate_prot(prot))
73449 return -EINVAL;
73450
73451@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73452 /*
73453 * Does the application expect PROT_READ to imply PROT_EXEC:
73454 */
73455- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73456+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73457 prot |= PROT_EXEC;
73458
73459 vm_flags = calc_vm_prot_bits(prot);
73460@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73461 if (start > vma->vm_start)
73462 prev = vma;
73463
73464+#ifdef CONFIG_PAX_MPROTECT
73465+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73466+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
73467+#endif
73468+
73469 for (nstart = start ; ; ) {
73470 unsigned long newflags;
73471
73472@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73473
73474 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73475 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73476+ if (prot & (PROT_WRITE | PROT_EXEC))
73477+ gr_log_rwxmprotect(vma->vm_file);
73478+
73479+ error = -EACCES;
73480+ goto out;
73481+ }
73482+
73483+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73484 error = -EACCES;
73485 goto out;
73486 }
73487@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73488 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
73489 if (error)
73490 goto out;
73491+
73492+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
73493+
73494 nstart = tmp;
73495
73496 if (nstart < prev->vm_end)
73497diff --git a/mm/mremap.c b/mm/mremap.c
73498index 506fa44..ccc0ba9 100644
73499--- a/mm/mremap.c
73500+++ b/mm/mremap.c
73501@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
73502 continue;
73503 pte = ptep_clear_flush(vma, old_addr, old_pte);
73504 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
73505+
73506+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73507+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
73508+ pte = pte_exprotect(pte);
73509+#endif
73510+
73511 set_pte_at(mm, new_addr, new_pte, pte);
73512 }
73513
73514@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
73515 if (is_vm_hugetlb_page(vma))
73516 goto Einval;
73517
73518+#ifdef CONFIG_PAX_SEGMEXEC
73519+ if (pax_find_mirror_vma(vma))
73520+ goto Einval;
73521+#endif
73522+
73523 /* We can't remap across vm area boundaries */
73524 if (old_len > vma->vm_end - addr)
73525 goto Efault;
73526@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr,
73527 unsigned long ret = -EINVAL;
73528 unsigned long charged = 0;
73529 unsigned long map_flags;
73530+ unsigned long pax_task_size = TASK_SIZE;
73531
73532 if (new_addr & ~PAGE_MASK)
73533 goto out;
73534
73535- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
73536+#ifdef CONFIG_PAX_SEGMEXEC
73537+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73538+ pax_task_size = SEGMEXEC_TASK_SIZE;
73539+#endif
73540+
73541+ pax_task_size -= PAGE_SIZE;
73542+
73543+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
73544 goto out;
73545
73546 /* Check if the location we're moving into overlaps the
73547 * old location at all, and fail if it does.
73548 */
73549- if ((new_addr <= addr) && (new_addr+new_len) > addr)
73550- goto out;
73551-
73552- if ((addr <= new_addr) && (addr+old_len) > new_addr)
73553+ if (addr + old_len > new_addr && new_addr + new_len > addr)
73554 goto out;
73555
73556 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73557@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr,
73558 struct vm_area_struct *vma;
73559 unsigned long ret = -EINVAL;
73560 unsigned long charged = 0;
73561+ unsigned long pax_task_size = TASK_SIZE;
73562
73563 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
73564 goto out;
73565@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr,
73566 if (!new_len)
73567 goto out;
73568
73569+#ifdef CONFIG_PAX_SEGMEXEC
73570+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73571+ pax_task_size = SEGMEXEC_TASK_SIZE;
73572+#endif
73573+
73574+ pax_task_size -= PAGE_SIZE;
73575+
73576+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
73577+ old_len > pax_task_size || addr > pax_task_size-old_len)
73578+ goto out;
73579+
73580 if (flags & MREMAP_FIXED) {
73581 if (flags & MREMAP_MAYMOVE)
73582 ret = mremap_to(addr, old_len, new_addr, new_len);
73583@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr,
73584 addr + new_len);
73585 }
73586 ret = addr;
73587+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73588 goto out;
73589 }
73590 }
73591@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr,
73592 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73593 if (ret)
73594 goto out;
73595+
73596+ map_flags = vma->vm_flags;
73597 ret = move_vma(vma, addr, old_len, new_len, new_addr);
73598+ if (!(ret & ~PAGE_MASK)) {
73599+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
73600+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
73601+ }
73602 }
73603 out:
73604 if (ret & ~PAGE_MASK)
73605diff --git a/mm/nobootmem.c b/mm/nobootmem.c
73606index 6e93dc7..c98df0c 100644
73607--- a/mm/nobootmem.c
73608+++ b/mm/nobootmem.c
73609@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
73610 unsigned long __init free_all_memory_core_early(int nodeid)
73611 {
73612 int i;
73613- u64 start, end;
73614+ u64 start, end, startrange, endrange;
73615 unsigned long count = 0;
73616- struct range *range = NULL;
73617+ struct range *range = NULL, rangerange = { 0, 0 };
73618 int nr_range;
73619
73620 nr_range = get_free_all_memory_range(&range, nodeid);
73621+ startrange = __pa(range) >> PAGE_SHIFT;
73622+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
73623
73624 for (i = 0; i < nr_range; i++) {
73625 start = range[i].start;
73626 end = range[i].end;
73627+ if (start <= endrange && startrange < end) {
73628+ BUG_ON(rangerange.start | rangerange.end);
73629+ rangerange = range[i];
73630+ continue;
73631+ }
73632 count += end - start;
73633 __free_pages_memory(start, end);
73634 }
73635+ start = rangerange.start;
73636+ end = rangerange.end;
73637+ count += end - start;
73638+ __free_pages_memory(start, end);
73639
73640 return count;
73641 }
73642diff --git a/mm/nommu.c b/mm/nommu.c
73643index 4358032..e79b99f 100644
73644--- a/mm/nommu.c
73645+++ b/mm/nommu.c
73646@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
73647 int sysctl_overcommit_ratio = 50; /* default is 50% */
73648 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73649 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73650-int heap_stack_gap = 0;
73651
73652 atomic_long_t mmap_pages_allocated;
73653
73654@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
73655 EXPORT_SYMBOL(find_vma);
73656
73657 /*
73658- * find a VMA
73659- * - we don't extend stack VMAs under NOMMU conditions
73660- */
73661-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73662-{
73663- return find_vma(mm, addr);
73664-}
73665-
73666-/*
73667 * expand a stack to a given address
73668 * - not supported under NOMMU conditions
73669 */
73670@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73671
73672 /* most fields are the same, copy all, and then fixup */
73673 *new = *vma;
73674+ INIT_LIST_HEAD(&new->anon_vma_chain);
73675 *region = *vma->vm_region;
73676 new->vm_region = region;
73677
73678diff --git a/mm/page_alloc.c b/mm/page_alloc.c
73679index e8fae15..18c0442 100644
73680--- a/mm/page_alloc.c
73681+++ b/mm/page_alloc.c
73682@@ -340,7 +340,7 @@ out:
73683 * This usage means that zero-order pages may not be compound.
73684 */
73685
73686-static void free_compound_page(struct page *page)
73687+void free_compound_page(struct page *page)
73688 {
73689 __free_pages_ok(page, compound_order(page));
73690 }
73691@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73692 int i;
73693 int bad = 0;
73694
73695+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73696+ unsigned long index = 1UL << order;
73697+#endif
73698+
73699 trace_mm_page_free_direct(page, order);
73700 kmemcheck_free_shadow(page, order);
73701
73702@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73703 debug_check_no_obj_freed(page_address(page),
73704 PAGE_SIZE << order);
73705 }
73706+
73707+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73708+ for (; index; --index)
73709+ sanitize_highpage(page + index - 1);
73710+#endif
73711+
73712 arch_free_page(page, order);
73713 kernel_map_pages(page, 1 << order, 0);
73714
73715@@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73716 arch_alloc_page(page, order);
73717 kernel_map_pages(page, 1 << order, 1);
73718
73719+#ifndef CONFIG_PAX_MEMORY_SANITIZE
73720 if (gfp_flags & __GFP_ZERO)
73721 prep_zero_page(page, order, gfp_flags);
73722+#endif
73723
73724 if (order && (gfp_flags & __GFP_COMP))
73725 prep_compound_page(page, order);
73726@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter)
73727 int cpu;
73728 struct zone *zone;
73729
73730+ pax_track_stack();
73731+
73732 for_each_populated_zone(zone) {
73733 if (skip_free_areas_node(filter, zone_to_nid(zone)))
73734 continue;
73735@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73736 unsigned long pfn;
73737
73738 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73739+#ifdef CONFIG_X86_32
73740+ /* boot failures in VMware 8 on 32bit vanilla since
73741+ this change */
73742+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73743+#else
73744 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73745+#endif
73746 return 1;
73747 }
73748 return 0;
73749diff --git a/mm/percpu.c b/mm/percpu.c
73750index 0ae7a09..613118e 100644
73751--- a/mm/percpu.c
73752+++ b/mm/percpu.c
73753@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73754 static unsigned int pcpu_high_unit_cpu __read_mostly;
73755
73756 /* the address of the first chunk which starts with the kernel static area */
73757-void *pcpu_base_addr __read_mostly;
73758+void *pcpu_base_addr __read_only;
73759 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73760
73761 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73762diff --git a/mm/rmap.c b/mm/rmap.c
73763index 8005080..198c2cd 100644
73764--- a/mm/rmap.c
73765+++ b/mm/rmap.c
73766@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73767 struct anon_vma *anon_vma = vma->anon_vma;
73768 struct anon_vma_chain *avc;
73769
73770+#ifdef CONFIG_PAX_SEGMEXEC
73771+ struct anon_vma_chain *avc_m = NULL;
73772+#endif
73773+
73774 might_sleep();
73775 if (unlikely(!anon_vma)) {
73776 struct mm_struct *mm = vma->vm_mm;
73777@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73778 if (!avc)
73779 goto out_enomem;
73780
73781+#ifdef CONFIG_PAX_SEGMEXEC
73782+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73783+ if (!avc_m)
73784+ goto out_enomem_free_avc;
73785+#endif
73786+
73787 anon_vma = find_mergeable_anon_vma(vma);
73788 allocated = NULL;
73789 if (!anon_vma) {
73790@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73791 /* page_table_lock to protect against threads */
73792 spin_lock(&mm->page_table_lock);
73793 if (likely(!vma->anon_vma)) {
73794+
73795+#ifdef CONFIG_PAX_SEGMEXEC
73796+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73797+
73798+ if (vma_m) {
73799+ BUG_ON(vma_m->anon_vma);
73800+ vma_m->anon_vma = anon_vma;
73801+ avc_m->anon_vma = anon_vma;
73802+ avc_m->vma = vma;
73803+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73804+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
73805+ avc_m = NULL;
73806+ }
73807+#endif
73808+
73809 vma->anon_vma = anon_vma;
73810 avc->anon_vma = anon_vma;
73811 avc->vma = vma;
73812@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73813
73814 if (unlikely(allocated))
73815 put_anon_vma(allocated);
73816+
73817+#ifdef CONFIG_PAX_SEGMEXEC
73818+ if (unlikely(avc_m))
73819+ anon_vma_chain_free(avc_m);
73820+#endif
73821+
73822 if (unlikely(avc))
73823 anon_vma_chain_free(avc);
73824 }
73825 return 0;
73826
73827 out_enomem_free_avc:
73828+
73829+#ifdef CONFIG_PAX_SEGMEXEC
73830+ if (avc_m)
73831+ anon_vma_chain_free(avc_m);
73832+#endif
73833+
73834 anon_vma_chain_free(avc);
73835 out_enomem:
73836 return -ENOMEM;
73837@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73838 * Attach the anon_vmas from src to dst.
73839 * Returns 0 on success, -ENOMEM on failure.
73840 */
73841-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73842+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73843 {
73844 struct anon_vma_chain *avc, *pavc;
73845 struct anon_vma *root = NULL;
73846@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73847 * the corresponding VMA in the parent process is attached to.
73848 * Returns 0 on success, non-zero on failure.
73849 */
73850-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73851+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73852 {
73853 struct anon_vma_chain *avc;
73854 struct anon_vma *anon_vma;
73855diff --git a/mm/shmem.c b/mm/shmem.c
73856index 32f6763..431c405 100644
73857--- a/mm/shmem.c
73858+++ b/mm/shmem.c
73859@@ -31,7 +31,7 @@
73860 #include <linux/module.h>
73861 #include <linux/swap.h>
73862
73863-static struct vfsmount *shm_mnt;
73864+struct vfsmount *shm_mnt;
73865
73866 #ifdef CONFIG_SHMEM
73867 /*
73868@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73869 #define BOGO_DIRENT_SIZE 20
73870
73871 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73872-#define SHORT_SYMLINK_LEN 128
73873+#define SHORT_SYMLINK_LEN 64
73874
73875 struct shmem_xattr {
73876 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73877@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
73878 struct mempolicy mpol, *spol;
73879 struct vm_area_struct pvma;
73880
73881+ pax_track_stack();
73882+
73883 spol = mpol_cond_copy(&mpol,
73884 mpol_shared_policy_lookup(&info->policy, index));
73885
73886@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73887 int err = -ENOMEM;
73888
73889 /* Round up to L1_CACHE_BYTES to resist false sharing */
73890- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73891- L1_CACHE_BYTES), GFP_KERNEL);
73892+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73893 if (!sbinfo)
73894 return -ENOMEM;
73895
73896diff --git a/mm/slab.c b/mm/slab.c
73897index 893c76d..a742de2 100644
73898--- a/mm/slab.c
73899+++ b/mm/slab.c
73900@@ -151,7 +151,7 @@
73901
73902 /* Legal flag mask for kmem_cache_create(). */
73903 #if DEBUG
73904-# define CREATE_MASK (SLAB_RED_ZONE | \
73905+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73906 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73907 SLAB_CACHE_DMA | \
73908 SLAB_STORE_USER | \
73909@@ -159,7 +159,7 @@
73910 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73911 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73912 #else
73913-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73914+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73915 SLAB_CACHE_DMA | \
73916 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73917 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73918@@ -288,7 +288,7 @@ struct kmem_list3 {
73919 * Need this for bootstrapping a per node allocator.
73920 */
73921 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73922-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73923+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73924 #define CACHE_CACHE 0
73925 #define SIZE_AC MAX_NUMNODES
73926 #define SIZE_L3 (2 * MAX_NUMNODES)
73927@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73928 if ((x)->max_freeable < i) \
73929 (x)->max_freeable = i; \
73930 } while (0)
73931-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73932-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73933-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73934-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73935+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73936+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73937+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73938+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73939 #else
73940 #define STATS_INC_ACTIVE(x) do { } while (0)
73941 #define STATS_DEC_ACTIVE(x) do { } while (0)
73942@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73943 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73944 */
73945 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73946- const struct slab *slab, void *obj)
73947+ const struct slab *slab, const void *obj)
73948 {
73949 u32 offset = (obj - slab->s_mem);
73950 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73951@@ -564,7 +564,7 @@ struct cache_names {
73952 static struct cache_names __initdata cache_names[] = {
73953 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73954 #include <linux/kmalloc_sizes.h>
73955- {NULL,}
73956+ {NULL}
73957 #undef CACHE
73958 };
73959
73960@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
73961 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73962 sizes[INDEX_AC].cs_size,
73963 ARCH_KMALLOC_MINALIGN,
73964- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73965+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73966 NULL);
73967
73968 if (INDEX_AC != INDEX_L3) {
73969@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
73970 kmem_cache_create(names[INDEX_L3].name,
73971 sizes[INDEX_L3].cs_size,
73972 ARCH_KMALLOC_MINALIGN,
73973- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73974+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73975 NULL);
73976 }
73977
73978@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
73979 sizes->cs_cachep = kmem_cache_create(names->name,
73980 sizes->cs_size,
73981 ARCH_KMALLOC_MINALIGN,
73982- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73983+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73984 NULL);
73985 }
73986 #ifdef CONFIG_ZONE_DMA
73987@@ -4327,10 +4327,10 @@ static int s_show(struct seq_file *m, void *p)
73988 }
73989 /* cpu stats */
73990 {
73991- unsigned long allochit = atomic_read(&cachep->allochit);
73992- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73993- unsigned long freehit = atomic_read(&cachep->freehit);
73994- unsigned long freemiss = atomic_read(&cachep->freemiss);
73995+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73996+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73997+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73998+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73999
74000 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74001 allochit, allocmiss, freehit, freemiss);
74002@@ -4587,15 +4587,70 @@ static const struct file_operations proc_slabstats_operations = {
74003
74004 static int __init slab_proc_init(void)
74005 {
74006- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
74007+ mode_t gr_mode = S_IRUGO;
74008+
74009+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74010+ gr_mode = S_IRUSR;
74011+#endif
74012+
74013+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
74014 #ifdef CONFIG_DEBUG_SLAB_LEAK
74015- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74016+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
74017 #endif
74018 return 0;
74019 }
74020 module_init(slab_proc_init);
74021 #endif
74022
74023+void check_object_size(const void *ptr, unsigned long n, bool to)
74024+{
74025+
74026+#ifdef CONFIG_PAX_USERCOPY
74027+ struct page *page;
74028+ struct kmem_cache *cachep = NULL;
74029+ struct slab *slabp;
74030+ unsigned int objnr;
74031+ unsigned long offset;
74032+ const char *type;
74033+
74034+ if (!n)
74035+ return;
74036+
74037+ type = "<null>";
74038+ if (ZERO_OR_NULL_PTR(ptr))
74039+ goto report;
74040+
74041+ if (!virt_addr_valid(ptr))
74042+ return;
74043+
74044+ page = virt_to_head_page(ptr);
74045+
74046+ type = "<process stack>";
74047+ if (!PageSlab(page)) {
74048+ if (object_is_on_stack(ptr, n) == -1)
74049+ goto report;
74050+ return;
74051+ }
74052+
74053+ cachep = page_get_cache(page);
74054+ type = cachep->name;
74055+ if (!(cachep->flags & SLAB_USERCOPY))
74056+ goto report;
74057+
74058+ slabp = page_get_slab(page);
74059+ objnr = obj_to_index(cachep, slabp, ptr);
74060+ BUG_ON(objnr >= cachep->num);
74061+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74062+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74063+ return;
74064+
74065+report:
74066+ pax_report_usercopy(ptr, n, to, type);
74067+#endif
74068+
74069+}
74070+EXPORT_SYMBOL(check_object_size);
74071+
74072 /**
74073 * ksize - get the actual amount of memory allocated for a given object
74074 * @objp: Pointer to the object
74075diff --git a/mm/slob.c b/mm/slob.c
74076index bf39181..727f7a3 100644
74077--- a/mm/slob.c
74078+++ b/mm/slob.c
74079@@ -29,7 +29,7 @@
74080 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74081 * alloc_pages() directly, allocating compound pages so the page order
74082 * does not have to be separately tracked, and also stores the exact
74083- * allocation size in page->private so that it can be used to accurately
74084+ * allocation size in slob_page->size so that it can be used to accurately
74085 * provide ksize(). These objects are detected in kfree() because slob_page()
74086 * is false for them.
74087 *
74088@@ -58,6 +58,7 @@
74089 */
74090
74091 #include <linux/kernel.h>
74092+#include <linux/sched.h>
74093 #include <linux/slab.h>
74094 #include <linux/mm.h>
74095 #include <linux/swap.h> /* struct reclaim_state */
74096@@ -102,7 +103,8 @@ struct slob_page {
74097 unsigned long flags; /* mandatory */
74098 atomic_t _count; /* mandatory */
74099 slobidx_t units; /* free units left in page */
74100- unsigned long pad[2];
74101+ unsigned long pad[1];
74102+ unsigned long size; /* size when >=PAGE_SIZE */
74103 slob_t *free; /* first free slob_t in page */
74104 struct list_head list; /* linked list of free pages */
74105 };
74106@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
74107 */
74108 static inline int is_slob_page(struct slob_page *sp)
74109 {
74110- return PageSlab((struct page *)sp);
74111+ return PageSlab((struct page *)sp) && !sp->size;
74112 }
74113
74114 static inline void set_slob_page(struct slob_page *sp)
74115@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
74116
74117 static inline struct slob_page *slob_page(const void *addr)
74118 {
74119- return (struct slob_page *)virt_to_page(addr);
74120+ return (struct slob_page *)virt_to_head_page(addr);
74121 }
74122
74123 /*
74124@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
74125 /*
74126 * Return the size of a slob block.
74127 */
74128-static slobidx_t slob_units(slob_t *s)
74129+static slobidx_t slob_units(const slob_t *s)
74130 {
74131 if (s->units > 0)
74132 return s->units;
74133@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
74134 /*
74135 * Return the next free slob block pointer after this one.
74136 */
74137-static slob_t *slob_next(slob_t *s)
74138+static slob_t *slob_next(const slob_t *s)
74139 {
74140 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74141 slobidx_t next;
74142@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
74143 /*
74144 * Returns true if s is the last free block in its page.
74145 */
74146-static int slob_last(slob_t *s)
74147+static int slob_last(const slob_t *s)
74148 {
74149 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74150 }
74151@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
74152 if (!page)
74153 return NULL;
74154
74155+ set_slob_page(page);
74156 return page_address(page);
74157 }
74158
74159@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
74160 if (!b)
74161 return NULL;
74162 sp = slob_page(b);
74163- set_slob_page(sp);
74164
74165 spin_lock_irqsave(&slob_lock, flags);
74166 sp->units = SLOB_UNITS(PAGE_SIZE);
74167 sp->free = b;
74168+ sp->size = 0;
74169 INIT_LIST_HEAD(&sp->list);
74170 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74171 set_slob_page_free(sp, slob_list);
74172@@ -476,10 +479,9 @@ out:
74173 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74174 */
74175
74176-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74177+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74178 {
74179- unsigned int *m;
74180- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74181+ slob_t *m;
74182 void *ret;
74183
74184 gfp &= gfp_allowed_mask;
74185@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74186
74187 if (!m)
74188 return NULL;
74189- *m = size;
74190+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74191+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74192+ m[0].units = size;
74193+ m[1].units = align;
74194 ret = (void *)m + align;
74195
74196 trace_kmalloc_node(_RET_IP_, ret,
74197@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74198 gfp |= __GFP_COMP;
74199 ret = slob_new_pages(gfp, order, node);
74200 if (ret) {
74201- struct page *page;
74202- page = virt_to_page(ret);
74203- page->private = size;
74204+ struct slob_page *sp;
74205+ sp = slob_page(ret);
74206+ sp->size = size;
74207 }
74208
74209 trace_kmalloc_node(_RET_IP_, ret,
74210 size, PAGE_SIZE << order, gfp, node);
74211 }
74212
74213- kmemleak_alloc(ret, size, 1, gfp);
74214+ return ret;
74215+}
74216+
74217+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74218+{
74219+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74220+ void *ret = __kmalloc_node_align(size, gfp, node, align);
74221+
74222+ if (!ZERO_OR_NULL_PTR(ret))
74223+ kmemleak_alloc(ret, size, 1, gfp);
74224 return ret;
74225 }
74226 EXPORT_SYMBOL(__kmalloc_node);
74227@@ -533,13 +547,92 @@ void kfree(const void *block)
74228 sp = slob_page(block);
74229 if (is_slob_page(sp)) {
74230 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74231- unsigned int *m = (unsigned int *)(block - align);
74232- slob_free(m, *m + align);
74233- } else
74234+ slob_t *m = (slob_t *)(block - align);
74235+ slob_free(m, m[0].units + align);
74236+ } else {
74237+ clear_slob_page(sp);
74238+ free_slob_page(sp);
74239+ sp->size = 0;
74240 put_page(&sp->page);
74241+ }
74242 }
74243 EXPORT_SYMBOL(kfree);
74244
74245+void check_object_size(const void *ptr, unsigned long n, bool to)
74246+{
74247+
74248+#ifdef CONFIG_PAX_USERCOPY
74249+ struct slob_page *sp;
74250+ const slob_t *free;
74251+ const void *base;
74252+ unsigned long flags;
74253+ const char *type;
74254+
74255+ if (!n)
74256+ return;
74257+
74258+ type = "<null>";
74259+ if (ZERO_OR_NULL_PTR(ptr))
74260+ goto report;
74261+
74262+ if (!virt_addr_valid(ptr))
74263+ return;
74264+
74265+ type = "<process stack>";
74266+ sp = slob_page(ptr);
74267+ if (!PageSlab((struct page*)sp)) {
74268+ if (object_is_on_stack(ptr, n) == -1)
74269+ goto report;
74270+ return;
74271+ }
74272+
74273+ type = "<slob>";
74274+ if (sp->size) {
74275+ base = page_address(&sp->page);
74276+ if (base <= ptr && n <= sp->size - (ptr - base))
74277+ return;
74278+ goto report;
74279+ }
74280+
74281+ /* some tricky double walking to find the chunk */
74282+ spin_lock_irqsave(&slob_lock, flags);
74283+ base = (void *)((unsigned long)ptr & PAGE_MASK);
74284+ free = sp->free;
74285+
74286+ while (!slob_last(free) && (void *)free <= ptr) {
74287+ base = free + slob_units(free);
74288+ free = slob_next(free);
74289+ }
74290+
74291+ while (base < (void *)free) {
74292+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74293+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
74294+ int offset;
74295+
74296+ if (ptr < base + align)
74297+ break;
74298+
74299+ offset = ptr - base - align;
74300+ if (offset >= m) {
74301+ base += size;
74302+ continue;
74303+ }
74304+
74305+ if (n > m - offset)
74306+ break;
74307+
74308+ spin_unlock_irqrestore(&slob_lock, flags);
74309+ return;
74310+ }
74311+
74312+ spin_unlock_irqrestore(&slob_lock, flags);
74313+report:
74314+ pax_report_usercopy(ptr, n, to, type);
74315+#endif
74316+
74317+}
74318+EXPORT_SYMBOL(check_object_size);
74319+
74320 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74321 size_t ksize(const void *block)
74322 {
74323@@ -552,10 +645,10 @@ size_t ksize(const void *block)
74324 sp = slob_page(block);
74325 if (is_slob_page(sp)) {
74326 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74327- unsigned int *m = (unsigned int *)(block - align);
74328- return SLOB_UNITS(*m) * SLOB_UNIT;
74329+ slob_t *m = (slob_t *)(block - align);
74330+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74331 } else
74332- return sp->page.private;
74333+ return sp->size;
74334 }
74335 EXPORT_SYMBOL(ksize);
74336
74337@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74338 {
74339 struct kmem_cache *c;
74340
74341+#ifdef CONFIG_PAX_USERCOPY
74342+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
74343+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74344+#else
74345 c = slob_alloc(sizeof(struct kmem_cache),
74346 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74347+#endif
74348
74349 if (c) {
74350 c->name = name;
74351@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
74352
74353 lockdep_trace_alloc(flags);
74354
74355+#ifdef CONFIG_PAX_USERCOPY
74356+ b = __kmalloc_node_align(c->size, flags, node, c->align);
74357+#else
74358 if (c->size < PAGE_SIZE) {
74359 b = slob_alloc(c->size, flags, c->align, node);
74360 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74361 SLOB_UNITS(c->size) * SLOB_UNIT,
74362 flags, node);
74363 } else {
74364+ struct slob_page *sp;
74365+
74366 b = slob_new_pages(flags, get_order(c->size), node);
74367+ sp = slob_page(b);
74368+ sp->size = c->size;
74369 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74370 PAGE_SIZE << get_order(c->size),
74371 flags, node);
74372 }
74373+#endif
74374
74375 if (c->ctor)
74376 c->ctor(b);
74377@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
74378
74379 static void __kmem_cache_free(void *b, int size)
74380 {
74381- if (size < PAGE_SIZE)
74382+ struct slob_page *sp = slob_page(b);
74383+
74384+ if (is_slob_page(sp))
74385 slob_free(b, size);
74386- else
74387+ else {
74388+ clear_slob_page(sp);
74389+ free_slob_page(sp);
74390+ sp->size = 0;
74391 slob_free_pages(b, get_order(size));
74392+ }
74393 }
74394
74395 static void kmem_rcu_free(struct rcu_head *head)
74396@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
74397
74398 void kmem_cache_free(struct kmem_cache *c, void *b)
74399 {
74400+ int size = c->size;
74401+
74402+#ifdef CONFIG_PAX_USERCOPY
74403+ if (size + c->align < PAGE_SIZE) {
74404+ size += c->align;
74405+ b -= c->align;
74406+ }
74407+#endif
74408+
74409 kmemleak_free_recursive(b, c->flags);
74410 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74411 struct slob_rcu *slob_rcu;
74412- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
74413- slob_rcu->size = c->size;
74414+ slob_rcu = b + (size - sizeof(struct slob_rcu));
74415+ slob_rcu->size = size;
74416 call_rcu(&slob_rcu->head, kmem_rcu_free);
74417 } else {
74418- __kmem_cache_free(b, c->size);
74419+ __kmem_cache_free(b, size);
74420 }
74421
74422+#ifdef CONFIG_PAX_USERCOPY
74423+ trace_kfree(_RET_IP_, b);
74424+#else
74425 trace_kmem_cache_free(_RET_IP_, b);
74426+#endif
74427+
74428 }
74429 EXPORT_SYMBOL(kmem_cache_free);
74430
74431diff --git a/mm/slub.c b/mm/slub.c
74432index f73234d..ce9940d 100644
74433--- a/mm/slub.c
74434+++ b/mm/slub.c
74435@@ -208,7 +208,7 @@ struct track {
74436
74437 enum track_item { TRACK_ALLOC, TRACK_FREE };
74438
74439-#ifdef CONFIG_SYSFS
74440+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74441 static int sysfs_slab_add(struct kmem_cache *);
74442 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74443 static void sysfs_slab_remove(struct kmem_cache *);
74444@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
74445 if (!t->addr)
74446 return;
74447
74448- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74449+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74450 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
74451 #ifdef CONFIG_STACKTRACE
74452 {
74453@@ -2461,6 +2461,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
74454
74455 page = virt_to_head_page(x);
74456
74457+ BUG_ON(!PageSlab(page));
74458+
74459 slab_free(s, page, x, _RET_IP_);
74460
74461 trace_kmem_cache_free(_RET_IP_, x);
74462@@ -2494,7 +2496,7 @@ static int slub_min_objects;
74463 * Merge control. If this is set then no merging of slab caches will occur.
74464 * (Could be removed. This was introduced to pacify the merge skeptics.)
74465 */
74466-static int slub_nomerge;
74467+static int slub_nomerge = 1;
74468
74469 /*
74470 * Calculate the order of allocation given an slab object size.
74471@@ -2917,7 +2919,7 @@ static int kmem_cache_open(struct kmem_cache *s,
74472 * list to avoid pounding the page allocator excessively.
74473 */
74474 set_min_partial(s, ilog2(s->size));
74475- s->refcount = 1;
74476+ atomic_set(&s->refcount, 1);
74477 #ifdef CONFIG_NUMA
74478 s->remote_node_defrag_ratio = 1000;
74479 #endif
74480@@ -3022,8 +3024,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
74481 void kmem_cache_destroy(struct kmem_cache *s)
74482 {
74483 down_write(&slub_lock);
74484- s->refcount--;
74485- if (!s->refcount) {
74486+ if (atomic_dec_and_test(&s->refcount)) {
74487 list_del(&s->list);
74488 if (kmem_cache_close(s)) {
74489 printk(KERN_ERR "SLUB %s: %s called for cache that "
74490@@ -3233,6 +3234,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
74491 EXPORT_SYMBOL(__kmalloc_node);
74492 #endif
74493
74494+void check_object_size(const void *ptr, unsigned long n, bool to)
74495+{
74496+
74497+#ifdef CONFIG_PAX_USERCOPY
74498+ struct page *page;
74499+ struct kmem_cache *s = NULL;
74500+ unsigned long offset;
74501+ const char *type;
74502+
74503+ if (!n)
74504+ return;
74505+
74506+ type = "<null>";
74507+ if (ZERO_OR_NULL_PTR(ptr))
74508+ goto report;
74509+
74510+ if (!virt_addr_valid(ptr))
74511+ return;
74512+
74513+ page = virt_to_head_page(ptr);
74514+
74515+ type = "<process stack>";
74516+ if (!PageSlab(page)) {
74517+ if (object_is_on_stack(ptr, n) == -1)
74518+ goto report;
74519+ return;
74520+ }
74521+
74522+ s = page->slab;
74523+ type = s->name;
74524+ if (!(s->flags & SLAB_USERCOPY))
74525+ goto report;
74526+
74527+ offset = (ptr - page_address(page)) % s->size;
74528+ if (offset <= s->objsize && n <= s->objsize - offset)
74529+ return;
74530+
74531+report:
74532+ pax_report_usercopy(ptr, n, to, type);
74533+#endif
74534+
74535+}
74536+EXPORT_SYMBOL(check_object_size);
74537+
74538 size_t ksize(const void *object)
74539 {
74540 struct page *page;
74541@@ -3507,7 +3552,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
74542 int node;
74543
74544 list_add(&s->list, &slab_caches);
74545- s->refcount = -1;
74546+ atomic_set(&s->refcount, -1);
74547
74548 for_each_node_state(node, N_NORMAL_MEMORY) {
74549 struct kmem_cache_node *n = get_node(s, node);
74550@@ -3624,17 +3669,17 @@ void __init kmem_cache_init(void)
74551
74552 /* Caches that are not of the two-to-the-power-of size */
74553 if (KMALLOC_MIN_SIZE <= 32) {
74554- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
74555+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
74556 caches++;
74557 }
74558
74559 if (KMALLOC_MIN_SIZE <= 64) {
74560- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
74561+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74562 caches++;
74563 }
74564
74565 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74566- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74567+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74568 caches++;
74569 }
74570
74571@@ -3702,7 +3747,7 @@ static int slab_unmergeable(struct kmem_cache *s)
74572 /*
74573 * We may have set a slab to be unmergeable during bootstrap.
74574 */
74575- if (s->refcount < 0)
74576+ if (atomic_read(&s->refcount) < 0)
74577 return 1;
74578
74579 return 0;
74580@@ -3761,7 +3806,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74581 down_write(&slub_lock);
74582 s = find_mergeable(size, align, flags, name, ctor);
74583 if (s) {
74584- s->refcount++;
74585+ atomic_inc(&s->refcount);
74586 /*
74587 * Adjust the object sizes so that we clear
74588 * the complete object on kzalloc.
74589@@ -3770,7 +3815,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74590 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
74591
74592 if (sysfs_slab_alias(s, name)) {
74593- s->refcount--;
74594+ atomic_dec(&s->refcount);
74595 goto err;
74596 }
74597 up_write(&slub_lock);
74598@@ -3898,7 +3943,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
74599 }
74600 #endif
74601
74602-#ifdef CONFIG_SYSFS
74603+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74604 static int count_inuse(struct page *page)
74605 {
74606 return page->inuse;
74607@@ -4285,12 +4330,12 @@ static void resiliency_test(void)
74608 validate_slab_cache(kmalloc_caches[9]);
74609 }
74610 #else
74611-#ifdef CONFIG_SYSFS
74612+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74613 static void resiliency_test(void) {};
74614 #endif
74615 #endif
74616
74617-#ifdef CONFIG_SYSFS
74618+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74619 enum slab_stat_type {
74620 SL_ALL, /* All slabs */
74621 SL_PARTIAL, /* Only partially allocated slabs */
74622@@ -4500,7 +4545,7 @@ SLAB_ATTR_RO(ctor);
74623
74624 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74625 {
74626- return sprintf(buf, "%d\n", s->refcount - 1);
74627+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74628 }
74629 SLAB_ATTR_RO(aliases);
74630
74631@@ -5030,6 +5075,7 @@ static char *create_unique_id(struct kmem_cache *s)
74632 return name;
74633 }
74634
74635+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74636 static int sysfs_slab_add(struct kmem_cache *s)
74637 {
74638 int err;
74639@@ -5092,6 +5138,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
74640 kobject_del(&s->kobj);
74641 kobject_put(&s->kobj);
74642 }
74643+#endif
74644
74645 /*
74646 * Need to buffer aliases during bootup until sysfs becomes
74647@@ -5105,6 +5152,7 @@ struct saved_alias {
74648
74649 static struct saved_alias *alias_list;
74650
74651+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74652 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74653 {
74654 struct saved_alias *al;
74655@@ -5127,6 +5175,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74656 alias_list = al;
74657 return 0;
74658 }
74659+#endif
74660
74661 static int __init slab_sysfs_init(void)
74662 {
74663@@ -5262,7 +5311,13 @@ static const struct file_operations proc_slabinfo_operations = {
74664
74665 static int __init slab_proc_init(void)
74666 {
74667- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
74668+ mode_t gr_mode = S_IRUGO;
74669+
74670+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74671+ gr_mode = S_IRUSR;
74672+#endif
74673+
74674+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
74675 return 0;
74676 }
74677 module_init(slab_proc_init);
74678diff --git a/mm/swap.c b/mm/swap.c
74679index 87627f1..8a9eb34 100644
74680--- a/mm/swap.c
74681+++ b/mm/swap.c
74682@@ -31,6 +31,7 @@
74683 #include <linux/backing-dev.h>
74684 #include <linux/memcontrol.h>
74685 #include <linux/gfp.h>
74686+#include <linux/hugetlb.h>
74687
74688 #include "internal.h"
74689
74690@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
74691
74692 __page_cache_release(page);
74693 dtor = get_compound_page_dtor(page);
74694+ if (!PageHuge(page))
74695+ BUG_ON(dtor != free_compound_page);
74696 (*dtor)(page);
74697 }
74698
74699diff --git a/mm/swapfile.c b/mm/swapfile.c
74700index 17bc224..1677059 100644
74701--- a/mm/swapfile.c
74702+++ b/mm/swapfile.c
74703@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
74704
74705 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74706 /* Activity counter to indicate that a swapon or swapoff has occurred */
74707-static atomic_t proc_poll_event = ATOMIC_INIT(0);
74708+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74709
74710 static inline unsigned char swap_count(unsigned char ent)
74711 {
74712@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74713 }
74714 filp_close(swap_file, NULL);
74715 err = 0;
74716- atomic_inc(&proc_poll_event);
74717+ atomic_inc_unchecked(&proc_poll_event);
74718 wake_up_interruptible(&proc_poll_wait);
74719
74720 out_dput:
74721@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74722
74723 poll_wait(file, &proc_poll_wait, wait);
74724
74725- if (seq->poll_event != atomic_read(&proc_poll_event)) {
74726- seq->poll_event = atomic_read(&proc_poll_event);
74727+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74728+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74729 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74730 }
74731
74732@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74733 return ret;
74734
74735 seq = file->private_data;
74736- seq->poll_event = atomic_read(&proc_poll_event);
74737+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74738 return 0;
74739 }
74740
74741@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74742 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74743
74744 mutex_unlock(&swapon_mutex);
74745- atomic_inc(&proc_poll_event);
74746+ atomic_inc_unchecked(&proc_poll_event);
74747 wake_up_interruptible(&proc_poll_wait);
74748
74749 if (S_ISREG(inode->i_mode))
74750diff --git a/mm/util.c b/mm/util.c
74751index 88ea1bd..0f1dfdb 100644
74752--- a/mm/util.c
74753+++ b/mm/util.c
74754@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
74755 * allocated buffer. Use this if you don't want to free the buffer immediately
74756 * like, for example, with RCU.
74757 */
74758+#undef __krealloc
74759 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
74760 {
74761 void *ret;
74762@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
74763 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
74764 * %NULL pointer, the object pointed to is freed.
74765 */
74766+#undef krealloc
74767 void *krealloc(const void *p, size_t new_size, gfp_t flags)
74768 {
74769 void *ret;
74770@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74771 void arch_pick_mmap_layout(struct mm_struct *mm)
74772 {
74773 mm->mmap_base = TASK_UNMAPPED_BASE;
74774+
74775+#ifdef CONFIG_PAX_RANDMMAP
74776+ if (mm->pax_flags & MF_PAX_RANDMMAP)
74777+ mm->mmap_base += mm->delta_mmap;
74778+#endif
74779+
74780 mm->get_unmapped_area = arch_get_unmapped_area;
74781 mm->unmap_area = arch_unmap_area;
74782 }
74783diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74784index 3a65d6f7..39d5e33 100644
74785--- a/mm/vmalloc.c
74786+++ b/mm/vmalloc.c
74787@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74788
74789 pte = pte_offset_kernel(pmd, addr);
74790 do {
74791- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74792- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74793+
74794+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74795+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74796+ BUG_ON(!pte_exec(*pte));
74797+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74798+ continue;
74799+ }
74800+#endif
74801+
74802+ {
74803+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74804+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74805+ }
74806 } while (pte++, addr += PAGE_SIZE, addr != end);
74807 }
74808
74809@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74810 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74811 {
74812 pte_t *pte;
74813+ int ret = -ENOMEM;
74814
74815 /*
74816 * nr is a running index into the array which helps higher level
74817@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74818 pte = pte_alloc_kernel(pmd, addr);
74819 if (!pte)
74820 return -ENOMEM;
74821+
74822+ pax_open_kernel();
74823 do {
74824 struct page *page = pages[*nr];
74825
74826- if (WARN_ON(!pte_none(*pte)))
74827- return -EBUSY;
74828- if (WARN_ON(!page))
74829- return -ENOMEM;
74830+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74831+ if (pgprot_val(prot) & _PAGE_NX)
74832+#endif
74833+
74834+ if (WARN_ON(!pte_none(*pte))) {
74835+ ret = -EBUSY;
74836+ goto out;
74837+ }
74838+ if (WARN_ON(!page)) {
74839+ ret = -ENOMEM;
74840+ goto out;
74841+ }
74842 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74843 (*nr)++;
74844 } while (pte++, addr += PAGE_SIZE, addr != end);
74845- return 0;
74846+ ret = 0;
74847+out:
74848+ pax_close_kernel();
74849+ return ret;
74850 }
74851
74852 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74853@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74854 * and fall back on vmalloc() if that fails. Others
74855 * just put it in the vmalloc space.
74856 */
74857-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74858+#ifdef CONFIG_MODULES
74859+#ifdef MODULES_VADDR
74860 unsigned long addr = (unsigned long)x;
74861 if (addr >= MODULES_VADDR && addr < MODULES_END)
74862 return 1;
74863 #endif
74864+
74865+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74866+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74867+ return 1;
74868+#endif
74869+
74870+#endif
74871+
74872 return is_vmalloc_addr(x);
74873 }
74874
74875@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74876
74877 if (!pgd_none(*pgd)) {
74878 pud_t *pud = pud_offset(pgd, addr);
74879+#ifdef CONFIG_X86
74880+ if (!pud_large(*pud))
74881+#endif
74882 if (!pud_none(*pud)) {
74883 pmd_t *pmd = pmd_offset(pud, addr);
74884+#ifdef CONFIG_X86
74885+ if (!pmd_large(*pmd))
74886+#endif
74887 if (!pmd_none(*pmd)) {
74888 pte_t *ptep, pte;
74889
74890@@ -1290,10 +1330,20 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74891 unsigned long align, unsigned long flags, unsigned long start,
74892 unsigned long end, int node, gfp_t gfp_mask, void *caller)
74893 {
74894- static struct vmap_area *va;
74895+ struct vmap_area *va;
74896 struct vm_struct *area;
74897
74898 BUG_ON(in_interrupt());
74899+
74900+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74901+ if (flags & VM_KERNEXEC) {
74902+ if (start != VMALLOC_START || end != VMALLOC_END)
74903+ return NULL;
74904+ start = (unsigned long)MODULES_EXEC_VADDR;
74905+ end = (unsigned long)MODULES_EXEC_END;
74906+ }
74907+#endif
74908+
74909 if (flags & VM_IOREMAP) {
74910 int bit = fls(size);
74911
74912@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
74913 if (count > totalram_pages)
74914 return NULL;
74915
74916+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74917+ if (!(pgprot_val(prot) & _PAGE_NX))
74918+ flags |= VM_KERNEXEC;
74919+#endif
74920+
74921 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74922 __builtin_return_address(0));
74923 if (!area)
74924@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74925 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74926 return NULL;
74927
74928+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74929+ if (!(pgprot_val(prot) & _PAGE_NX))
74930+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74931+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74932+ else
74933+#endif
74934+
74935 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74936 start, end, node, gfp_mask, caller);
74937
74938@@ -1674,6 +1736,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
74939 gfp_mask, prot, node, caller);
74940 }
74941
74942+#undef __vmalloc
74943 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
74944 {
74945 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
74946@@ -1697,6 +1760,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
74947 * For tight control over page level allocator and protection flags
74948 * use __vmalloc() instead.
74949 */
74950+#undef vmalloc
74951 void *vmalloc(unsigned long size)
74952 {
74953 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
74954@@ -1713,6 +1777,7 @@ EXPORT_SYMBOL(vmalloc);
74955 * For tight control over page level allocator and protection flags
74956 * use __vmalloc() instead.
74957 */
74958+#undef vzalloc
74959 void *vzalloc(unsigned long size)
74960 {
74961 return __vmalloc_node_flags(size, -1,
74962@@ -1727,6 +1792,7 @@ EXPORT_SYMBOL(vzalloc);
74963 * The resulting memory area is zeroed so it can be mapped to userspace
74964 * without leaking data.
74965 */
74966+#undef vmalloc_user
74967 void *vmalloc_user(unsigned long size)
74968 {
74969 struct vm_struct *area;
74970@@ -1754,6 +1820,7 @@ EXPORT_SYMBOL(vmalloc_user);
74971 * For tight control over page level allocator and protection flags
74972 * use __vmalloc() instead.
74973 */
74974+#undef vmalloc_node
74975 void *vmalloc_node(unsigned long size, int node)
74976 {
74977 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
74978@@ -1773,6 +1840,7 @@ EXPORT_SYMBOL(vmalloc_node);
74979 * For tight control over page level allocator and protection flags
74980 * use __vmalloc_node() instead.
74981 */
74982+#undef vzalloc_node
74983 void *vzalloc_node(unsigned long size, int node)
74984 {
74985 return __vmalloc_node_flags(size, node,
74986@@ -1795,10 +1863,10 @@ EXPORT_SYMBOL(vzalloc_node);
74987 * For tight control over page level allocator and protection flags
74988 * use __vmalloc() instead.
74989 */
74990-
74991+#undef vmalloc_exec
74992 void *vmalloc_exec(unsigned long size)
74993 {
74994- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74995+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74996 -1, __builtin_return_address(0));
74997 }
74998
74999@@ -1817,6 +1885,7 @@ void *vmalloc_exec(unsigned long size)
75000 * Allocate enough 32bit PA addressable pages to cover @size from the
75001 * page level allocator and map them into contiguous kernel virtual space.
75002 */
75003+#undef vmalloc_32
75004 void *vmalloc_32(unsigned long size)
75005 {
75006 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
75007@@ -1831,6 +1900,7 @@ EXPORT_SYMBOL(vmalloc_32);
75008 * The resulting memory area is 32bit addressable and zeroed so it can be
75009 * mapped to userspace without leaking data.
75010 */
75011+#undef vmalloc_32_user
75012 void *vmalloc_32_user(unsigned long size)
75013 {
75014 struct vm_struct *area;
75015@@ -2093,6 +2163,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
75016 unsigned long uaddr = vma->vm_start;
75017 unsigned long usize = vma->vm_end - vma->vm_start;
75018
75019+ BUG_ON(vma->vm_mirror);
75020+
75021 if ((PAGE_SIZE-1) & (unsigned long)addr)
75022 return -EINVAL;
75023
75024diff --git a/mm/vmstat.c b/mm/vmstat.c
75025index d52b13d..381d1ac 100644
75026--- a/mm/vmstat.c
75027+++ b/mm/vmstat.c
75028@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
75029 *
75030 * vm_stat contains the global counters
75031 */
75032-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75033+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75034 EXPORT_SYMBOL(vm_stat);
75035
75036 #ifdef CONFIG_SMP
75037@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
75038 v = p->vm_stat_diff[i];
75039 p->vm_stat_diff[i] = 0;
75040 local_irq_restore(flags);
75041- atomic_long_add(v, &zone->vm_stat[i]);
75042+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75043 global_diff[i] += v;
75044 #ifdef CONFIG_NUMA
75045 /* 3 seconds idle till flush */
75046@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
75047
75048 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75049 if (global_diff[i])
75050- atomic_long_add(global_diff[i], &vm_stat[i]);
75051+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75052 }
75053
75054 #endif
75055@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
75056 start_cpu_timer(cpu);
75057 #endif
75058 #ifdef CONFIG_PROC_FS
75059- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75060- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75061- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75062- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75063+ {
75064+ mode_t gr_mode = S_IRUGO;
75065+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75066+ gr_mode = S_IRUSR;
75067+#endif
75068+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75069+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75070+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75071+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75072+#else
75073+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75074+#endif
75075+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75076+ }
75077 #endif
75078 return 0;
75079 }
75080diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
75081index 8970ba1..e3361fe 100644
75082--- a/net/8021q/vlan.c
75083+++ b/net/8021q/vlan.c
75084@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
75085 err = -EPERM;
75086 if (!capable(CAP_NET_ADMIN))
75087 break;
75088- if ((args.u.name_type >= 0) &&
75089- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75090+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75091 struct vlan_net *vn;
75092
75093 vn = net_generic(net, vlan_net_id);
75094diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
75095index fdfdb57..38d368c 100644
75096--- a/net/9p/trans_fd.c
75097+++ b/net/9p/trans_fd.c
75098@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
75099 oldfs = get_fs();
75100 set_fs(get_ds());
75101 /* The cast to a user pointer is valid due to the set_fs() */
75102- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75103+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75104 set_fs(oldfs);
75105
75106 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75107diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
75108index e317583..3c8aeaf 100644
75109--- a/net/9p/trans_virtio.c
75110+++ b/net/9p/trans_virtio.c
75111@@ -327,7 +327,7 @@ req_retry_pinned:
75112 } else {
75113 char *pbuf;
75114 if (req->tc->pubuf)
75115- pbuf = (__force char *) req->tc->pubuf;
75116+ pbuf = (char __force_kernel *) req->tc->pubuf;
75117 else
75118 pbuf = req->tc->pkbuf;
75119 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
75120@@ -357,7 +357,7 @@ req_retry_pinned:
75121 } else {
75122 char *pbuf;
75123 if (req->tc->pubuf)
75124- pbuf = (__force char *) req->tc->pubuf;
75125+ pbuf = (char __force_kernel *) req->tc->pubuf;
75126 else
75127 pbuf = req->tc->pkbuf;
75128
75129diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
75130index f41f026..fe76ea8 100644
75131--- a/net/atm/atm_misc.c
75132+++ b/net/atm/atm_misc.c
75133@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
75134 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75135 return 1;
75136 atm_return(vcc, truesize);
75137- atomic_inc(&vcc->stats->rx_drop);
75138+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75139 return 0;
75140 }
75141 EXPORT_SYMBOL(atm_charge);
75142@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
75143 }
75144 }
75145 atm_return(vcc, guess);
75146- atomic_inc(&vcc->stats->rx_drop);
75147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75148 return NULL;
75149 }
75150 EXPORT_SYMBOL(atm_alloc_charge);
75151@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
75152
75153 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75154 {
75155-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75156+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75157 __SONET_ITEMS
75158 #undef __HANDLE_ITEM
75159 }
75160@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
75161
75162 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75163 {
75164-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75165+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75166 __SONET_ITEMS
75167 #undef __HANDLE_ITEM
75168 }
75169diff --git a/net/atm/lec.h b/net/atm/lec.h
75170index dfc0719..47c5322 100644
75171--- a/net/atm/lec.h
75172+++ b/net/atm/lec.h
75173@@ -48,7 +48,7 @@ struct lane2_ops {
75174 const u8 *tlvs, u32 sizeoftlvs);
75175 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75176 const u8 *tlvs, u32 sizeoftlvs);
75177-};
75178+} __no_const;
75179
75180 /*
75181 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
75182diff --git a/net/atm/mpc.h b/net/atm/mpc.h
75183index 0919a88..a23d54e 100644
75184--- a/net/atm/mpc.h
75185+++ b/net/atm/mpc.h
75186@@ -33,7 +33,7 @@ struct mpoa_client {
75187 struct mpc_parameters parameters; /* parameters for this client */
75188
75189 const struct net_device_ops *old_ops;
75190- struct net_device_ops new_ops;
75191+ net_device_ops_no_const new_ops;
75192 };
75193
75194
75195diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
75196index d1b2d9a..7cc2219 100644
75197--- a/net/atm/mpoa_caches.c
75198+++ b/net/atm/mpoa_caches.c
75199@@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client)
75200 struct timeval now;
75201 struct k_message msg;
75202
75203+ pax_track_stack();
75204+
75205 do_gettimeofday(&now);
75206
75207 read_lock_bh(&client->ingress_lock);
75208diff --git a/net/atm/proc.c b/net/atm/proc.c
75209index 0d020de..011c7bb 100644
75210--- a/net/atm/proc.c
75211+++ b/net/atm/proc.c
75212@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
75213 const struct k_atm_aal_stats *stats)
75214 {
75215 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
75216- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
75217- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
75218- atomic_read(&stats->rx_drop));
75219+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75220+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75221+ atomic_read_unchecked(&stats->rx_drop));
75222 }
75223
75224 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75225diff --git a/net/atm/resources.c b/net/atm/resources.c
75226index 23f45ce..c748f1a 100644
75227--- a/net/atm/resources.c
75228+++ b/net/atm/resources.c
75229@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
75230 static void copy_aal_stats(struct k_atm_aal_stats *from,
75231 struct atm_aal_stats *to)
75232 {
75233-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75234+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75235 __AAL_STAT_ITEMS
75236 #undef __HANDLE_ITEM
75237 }
75238@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
75239 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75240 struct atm_aal_stats *to)
75241 {
75242-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75243+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75244 __AAL_STAT_ITEMS
75245 #undef __HANDLE_ITEM
75246 }
75247diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
75248index db7aacf..991e539 100644
75249--- a/net/batman-adv/hard-interface.c
75250+++ b/net/batman-adv/hard-interface.c
75251@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
75252 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
75253 dev_add_pack(&hard_iface->batman_adv_ptype);
75254
75255- atomic_set(&hard_iface->seqno, 1);
75256- atomic_set(&hard_iface->frag_seqno, 1);
75257+ atomic_set_unchecked(&hard_iface->seqno, 1);
75258+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
75259 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
75260 hard_iface->net_dev->name);
75261
75262diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
75263index 0f32c81..82d1895 100644
75264--- a/net/batman-adv/routing.c
75265+++ b/net/batman-adv/routing.c
75266@@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
75267 return;
75268
75269 /* could be changed by schedule_own_packet() */
75270- if_incoming_seqno = atomic_read(&if_incoming->seqno);
75271+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
75272
75273 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
75274
75275diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
75276index 58d1447..2a66c8c 100644
75277--- a/net/batman-adv/send.c
75278+++ b/net/batman-adv/send.c
75279@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
75280
75281 /* change sequence number to network order */
75282 batman_packet->seqno =
75283- htonl((uint32_t)atomic_read(&hard_iface->seqno));
75284+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75285
75286 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
75287 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
75288@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
75289 else
75290 batman_packet->gw_flags = NO_FLAGS;
75291
75292- atomic_inc(&hard_iface->seqno);
75293+ atomic_inc_unchecked(&hard_iface->seqno);
75294
75295 slide_own_bcast_window(hard_iface);
75296 send_time = own_send_time(bat_priv);
75297diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
75298index 05dd351..2ecd19b 100644
75299--- a/net/batman-adv/soft-interface.c
75300+++ b/net/batman-adv/soft-interface.c
75301@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
75302
75303 /* set broadcast sequence number */
75304 bcast_packet->seqno =
75305- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
75306+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
75307
75308 add_bcast_packet_to_list(bat_priv, skb, 1);
75309
75310@@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name)
75311 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
75312
75313 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
75314- atomic_set(&bat_priv->bcast_seqno, 1);
75315+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
75316 atomic_set(&bat_priv->ttvn, 0);
75317 atomic_set(&bat_priv->tt_local_changes, 0);
75318 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
75319diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
75320index 51a0db7..b8a62be 100644
75321--- a/net/batman-adv/types.h
75322+++ b/net/batman-adv/types.h
75323@@ -38,8 +38,8 @@ struct hard_iface {
75324 int16_t if_num;
75325 char if_status;
75326 struct net_device *net_dev;
75327- atomic_t seqno;
75328- atomic_t frag_seqno;
75329+ atomic_unchecked_t seqno;
75330+ atomic_unchecked_t frag_seqno;
75331 unsigned char *packet_buff;
75332 int packet_len;
75333 struct kobject *hardif_obj;
75334@@ -153,7 +153,7 @@ struct bat_priv {
75335 atomic_t orig_interval; /* uint */
75336 atomic_t hop_penalty; /* uint */
75337 atomic_t log_level; /* uint */
75338- atomic_t bcast_seqno;
75339+ atomic_unchecked_t bcast_seqno;
75340 atomic_t bcast_queue_left;
75341 atomic_t batman_queue_left;
75342 atomic_t ttvn; /* tranlation table version number */
75343diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
75344index 32b125f..f1447e0 100644
75345--- a/net/batman-adv/unicast.c
75346+++ b/net/batman-adv/unicast.c
75347@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
75348 frag1->flags = UNI_FRAG_HEAD | large_tail;
75349 frag2->flags = large_tail;
75350
75351- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
75352+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
75353 frag1->seqno = htons(seqno - 1);
75354 frag2->seqno = htons(seqno);
75355
75356diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
75357index ea7f031..0615edc 100644
75358--- a/net/bluetooth/hci_conn.c
75359+++ b/net/bluetooth/hci_conn.c
75360@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
75361 cp.handle = cpu_to_le16(conn->handle);
75362 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
75363 cp.ediv = ediv;
75364- memcpy(cp.rand, rand, sizeof(rand));
75365+ memcpy(cp.rand, rand, sizeof(cp.rand));
75366
75367 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
75368 }
75369@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
75370 memset(&cp, 0, sizeof(cp));
75371
75372 cp.handle = cpu_to_le16(conn->handle);
75373- memcpy(cp.ltk, ltk, sizeof(ltk));
75374+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
75375
75376 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
75377 }
75378diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
75379index b3bdb48..7ad90ac 100644
75380--- a/net/bluetooth/l2cap_core.c
75381+++ b/net/bluetooth/l2cap_core.c
75382@@ -2145,7 +2145,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
75383 void *ptr = req->data;
75384 int type, olen;
75385 unsigned long val;
75386- struct l2cap_conf_rfc rfc;
75387+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
75388
75389 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
75390
75391@@ -2169,8 +2169,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
75392 break;
75393
75394 case L2CAP_CONF_RFC:
75395- if (olen == sizeof(rfc))
75396- memcpy(&rfc, (void *)val, olen);
75397+ if (olen != sizeof(rfc))
75398+ break;
75399+
75400+ memcpy(&rfc, (void *)val, olen);
75401
75402 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
75403 rfc.mode != chan->mode)
75404@@ -2258,12 +2260,24 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
75405
75406 switch (type) {
75407 case L2CAP_CONF_RFC:
75408- if (olen == sizeof(rfc))
75409- memcpy(&rfc, (void *)val, olen);
75410+ if (olen != sizeof(rfc))
75411+ break;
75412+
75413+ memcpy(&rfc, (void *)val, olen);
75414 goto done;
75415 }
75416 }
75417
75418+ /* Use sane default values in case a misbehaving remote device
75419+ * did not send an RFC option.
75420+ */
75421+ rfc.mode = chan->mode;
75422+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
75423+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
75424+ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
75425+
75426+ BT_ERR("Expected RFC option was not found, using defaults");
75427+
75428 done:
75429 switch (rfc.mode) {
75430 case L2CAP_MODE_ERTM:
75431diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
75432index e79ff75..215b57d 100644
75433--- a/net/bridge/br_multicast.c
75434+++ b/net/bridge/br_multicast.c
75435@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
75436 nexthdr = ip6h->nexthdr;
75437 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
75438
75439- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
75440+ if (nexthdr != IPPROTO_ICMPV6)
75441 return 0;
75442
75443 /* Okay, we found ICMPv6 header */
75444diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
75445index 5864cc4..94cab18 100644
75446--- a/net/bridge/netfilter/ebtables.c
75447+++ b/net/bridge/netfilter/ebtables.c
75448@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
75449 tmp.valid_hooks = t->table->valid_hooks;
75450 }
75451 mutex_unlock(&ebt_mutex);
75452- if (copy_to_user(user, &tmp, *len) != 0){
75453+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
75454 BUGPRINT("c2u Didn't work\n");
75455 ret = -EFAULT;
75456 break;
75457@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t,
75458 int ret;
75459 void __user *pos;
75460
75461+ pax_track_stack();
75462+
75463 memset(&tinfo, 0, sizeof(tinfo));
75464
75465 if (cmd == EBT_SO_GET_ENTRIES) {
75466diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
75467index a986280..13444a1 100644
75468--- a/net/caif/caif_socket.c
75469+++ b/net/caif/caif_socket.c
75470@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
75471 #ifdef CONFIG_DEBUG_FS
75472 struct debug_fs_counter {
75473 atomic_t caif_nr_socks;
75474- atomic_t caif_sock_create;
75475- atomic_t num_connect_req;
75476- atomic_t num_connect_resp;
75477- atomic_t num_connect_fail_resp;
75478- atomic_t num_disconnect;
75479- atomic_t num_remote_shutdown_ind;
75480- atomic_t num_tx_flow_off_ind;
75481- atomic_t num_tx_flow_on_ind;
75482- atomic_t num_rx_flow_off;
75483- atomic_t num_rx_flow_on;
75484+ atomic_unchecked_t caif_sock_create;
75485+ atomic_unchecked_t num_connect_req;
75486+ atomic_unchecked_t num_connect_resp;
75487+ atomic_unchecked_t num_connect_fail_resp;
75488+ atomic_unchecked_t num_disconnect;
75489+ atomic_unchecked_t num_remote_shutdown_ind;
75490+ atomic_unchecked_t num_tx_flow_off_ind;
75491+ atomic_unchecked_t num_tx_flow_on_ind;
75492+ atomic_unchecked_t num_rx_flow_off;
75493+ atomic_unchecked_t num_rx_flow_on;
75494 };
75495 static struct debug_fs_counter cnt;
75496 #define dbfs_atomic_inc(v) atomic_inc_return(v)
75497+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
75498 #define dbfs_atomic_dec(v) atomic_dec_return(v)
75499 #else
75500 #define dbfs_atomic_inc(v) 0
75501@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75502 atomic_read(&cf_sk->sk.sk_rmem_alloc),
75503 sk_rcvbuf_lowwater(cf_sk));
75504 set_rx_flow_off(cf_sk);
75505- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75506+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75507 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75508 }
75509
75510@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75511 set_rx_flow_off(cf_sk);
75512 if (net_ratelimit())
75513 pr_debug("sending flow OFF due to rmem_schedule\n");
75514- dbfs_atomic_inc(&cnt.num_rx_flow_off);
75515+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75516 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75517 }
75518 skb->dev = NULL;
75519@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
75520 switch (flow) {
75521 case CAIF_CTRLCMD_FLOW_ON_IND:
75522 /* OK from modem to start sending again */
75523- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
75524+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
75525 set_tx_flow_on(cf_sk);
75526 cf_sk->sk.sk_state_change(&cf_sk->sk);
75527 break;
75528
75529 case CAIF_CTRLCMD_FLOW_OFF_IND:
75530 /* Modem asks us to shut up */
75531- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
75532+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
75533 set_tx_flow_off(cf_sk);
75534 cf_sk->sk.sk_state_change(&cf_sk->sk);
75535 break;
75536@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75537 /* We're now connected */
75538 caif_client_register_refcnt(&cf_sk->layer,
75539 cfsk_hold, cfsk_put);
75540- dbfs_atomic_inc(&cnt.num_connect_resp);
75541+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
75542 cf_sk->sk.sk_state = CAIF_CONNECTED;
75543 set_tx_flow_on(cf_sk);
75544 cf_sk->sk.sk_state_change(&cf_sk->sk);
75545@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75546
75547 case CAIF_CTRLCMD_INIT_FAIL_RSP:
75548 /* Connect request failed */
75549- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
75550+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
75551 cf_sk->sk.sk_err = ECONNREFUSED;
75552 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
75553 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75554@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75555
75556 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
75557 /* Modem has closed this connection, or device is down. */
75558- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
75559+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
75560 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75561 cf_sk->sk.sk_err = ECONNRESET;
75562 set_rx_flow_on(cf_sk);
75563@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
75564 return;
75565
75566 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
75567- dbfs_atomic_inc(&cnt.num_rx_flow_on);
75568+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
75569 set_rx_flow_on(cf_sk);
75570 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
75571 }
75572@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
75573 /*ifindex = id of the interface.*/
75574 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
75575
75576- dbfs_atomic_inc(&cnt.num_connect_req);
75577+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
75578 cf_sk->layer.receive = caif_sktrecv_cb;
75579
75580 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
75581@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
75582 spin_unlock_bh(&sk->sk_receive_queue.lock);
75583 sock->sk = NULL;
75584
75585- dbfs_atomic_inc(&cnt.num_disconnect);
75586+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
75587
75588 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
75589 if (cf_sk->debugfs_socket_dir != NULL)
75590@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
75591 cf_sk->conn_req.protocol = protocol;
75592 /* Increase the number of sockets created. */
75593 dbfs_atomic_inc(&cnt.caif_nr_socks);
75594- num = dbfs_atomic_inc(&cnt.caif_sock_create);
75595+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
75596 #ifdef CONFIG_DEBUG_FS
75597 if (!IS_ERR(debugfsdir)) {
75598
75599diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
75600index e22671b..6598ea0 100644
75601--- a/net/caif/cfctrl.c
75602+++ b/net/caif/cfctrl.c
75603@@ -9,6 +9,7 @@
75604 #include <linux/stddef.h>
75605 #include <linux/spinlock.h>
75606 #include <linux/slab.h>
75607+#include <linux/sched.h>
75608 #include <net/caif/caif_layer.h>
75609 #include <net/caif/cfpkt.h>
75610 #include <net/caif/cfctrl.h>
75611@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
75612 dev_info.id = 0xff;
75613 memset(this, 0, sizeof(*this));
75614 cfsrvl_init(&this->serv, 0, &dev_info, false);
75615- atomic_set(&this->req_seq_no, 1);
75616- atomic_set(&this->rsp_seq_no, 1);
75617+ atomic_set_unchecked(&this->req_seq_no, 1);
75618+ atomic_set_unchecked(&this->rsp_seq_no, 1);
75619 this->serv.layer.receive = cfctrl_recv;
75620 sprintf(this->serv.layer.name, "ctrl");
75621 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
75622@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
75623 struct cfctrl_request_info *req)
75624 {
75625 spin_lock_bh(&ctrl->info_list_lock);
75626- atomic_inc(&ctrl->req_seq_no);
75627- req->sequence_no = atomic_read(&ctrl->req_seq_no);
75628+ atomic_inc_unchecked(&ctrl->req_seq_no);
75629+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
75630 list_add_tail(&req->list, &ctrl->list);
75631 spin_unlock_bh(&ctrl->info_list_lock);
75632 }
75633@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
75634 if (p != first)
75635 pr_warn("Requests are not received in order\n");
75636
75637- atomic_set(&ctrl->rsp_seq_no,
75638+ atomic_set_unchecked(&ctrl->rsp_seq_no,
75639 p->sequence_no);
75640 list_del(&p->list);
75641 goto out;
75642@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
75643 struct cfctrl *cfctrl = container_obj(layer);
75644 struct cfctrl_request_info rsp, *req;
75645
75646+ pax_track_stack();
75647
75648 cfpkt_extr_head(pkt, &cmdrsp, 1);
75649 cmd = cmdrsp & CFCTRL_CMD_MASK;
75650diff --git a/net/compat.c b/net/compat.c
75651index c578d93..257fab7 100644
75652--- a/net/compat.c
75653+++ b/net/compat.c
75654@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
75655 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75656 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75657 return -EFAULT;
75658- kmsg->msg_name = compat_ptr(tmp1);
75659- kmsg->msg_iov = compat_ptr(tmp2);
75660- kmsg->msg_control = compat_ptr(tmp3);
75661+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75662+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75663+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75664 return 0;
75665 }
75666
75667@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75668
75669 if (kern_msg->msg_namelen) {
75670 if (mode == VERIFY_READ) {
75671- int err = move_addr_to_kernel(kern_msg->msg_name,
75672+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
75673 kern_msg->msg_namelen,
75674 kern_address);
75675 if (err < 0)
75676@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75677 kern_msg->msg_name = NULL;
75678
75679 tot_len = iov_from_user_compat_to_kern(kern_iov,
75680- (struct compat_iovec __user *)kern_msg->msg_iov,
75681+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
75682 kern_msg->msg_iovlen);
75683 if (tot_len >= 0)
75684 kern_msg->msg_iov = kern_iov;
75685@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75686
75687 #define CMSG_COMPAT_FIRSTHDR(msg) \
75688 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75689- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75690+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75691 (struct compat_cmsghdr __user *)NULL)
75692
75693 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75694 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75695 (ucmlen) <= (unsigned long) \
75696 ((mhdr)->msg_controllen - \
75697- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75698+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75699
75700 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75701 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75702 {
75703 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75704- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75705+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75706 msg->msg_controllen)
75707 return NULL;
75708 return (struct compat_cmsghdr __user *)ptr;
75709@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75710 {
75711 struct compat_timeval ctv;
75712 struct compat_timespec cts[3];
75713- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75714+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75715 struct compat_cmsghdr cmhdr;
75716 int cmlen;
75717
75718@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75719
75720 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75721 {
75722- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75723+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75724 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75725 int fdnum = scm->fp->count;
75726 struct file **fp = scm->fp->fp;
75727@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
75728 return -EFAULT;
75729 old_fs = get_fs();
75730 set_fs(KERNEL_DS);
75731- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
75732+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
75733 set_fs(old_fs);
75734
75735 return err;
75736@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
75737 len = sizeof(ktime);
75738 old_fs = get_fs();
75739 set_fs(KERNEL_DS);
75740- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75741+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75742 set_fs(old_fs);
75743
75744 if (!err) {
75745@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75746 case MCAST_JOIN_GROUP:
75747 case MCAST_LEAVE_GROUP:
75748 {
75749- struct compat_group_req __user *gr32 = (void *)optval;
75750+ struct compat_group_req __user *gr32 = (void __user *)optval;
75751 struct group_req __user *kgr =
75752 compat_alloc_user_space(sizeof(struct group_req));
75753 u32 interface;
75754@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75755 case MCAST_BLOCK_SOURCE:
75756 case MCAST_UNBLOCK_SOURCE:
75757 {
75758- struct compat_group_source_req __user *gsr32 = (void *)optval;
75759+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75760 struct group_source_req __user *kgsr = compat_alloc_user_space(
75761 sizeof(struct group_source_req));
75762 u32 interface;
75763@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75764 }
75765 case MCAST_MSFILTER:
75766 {
75767- struct compat_group_filter __user *gf32 = (void *)optval;
75768+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75769 struct group_filter __user *kgf;
75770 u32 interface, fmode, numsrc;
75771
75772@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
75773 char __user *optval, int __user *optlen,
75774 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75775 {
75776- struct compat_group_filter __user *gf32 = (void *)optval;
75777+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75778 struct group_filter __user *kgf;
75779 int __user *koptlen;
75780 u32 interface, fmode, numsrc;
75781diff --git a/net/core/datagram.c b/net/core/datagram.c
75782index 18ac112..fe95ed9 100644
75783--- a/net/core/datagram.c
75784+++ b/net/core/datagram.c
75785@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
75786 }
75787
75788 kfree_skb(skb);
75789- atomic_inc(&sk->sk_drops);
75790+ atomic_inc_unchecked(&sk->sk_drops);
75791 sk_mem_reclaim_partial(sk);
75792
75793 return err;
75794diff --git a/net/core/dev.c b/net/core/dev.c
75795index ae5cf2d..2c950a1 100644
75796--- a/net/core/dev.c
75797+++ b/net/core/dev.c
75798@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name)
75799 if (no_module && capable(CAP_NET_ADMIN))
75800 no_module = request_module("netdev-%s", name);
75801 if (no_module && capable(CAP_SYS_MODULE)) {
75802+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75803+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
75804+#else
75805 if (!request_module("%s", name))
75806 pr_err("Loading kernel module for a network device "
75807 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75808 "instead\n", name);
75809+#endif
75810 }
75811 }
75812 EXPORT_SYMBOL(dev_load);
75813@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75814
75815 struct dev_gso_cb {
75816 void (*destructor)(struct sk_buff *skb);
75817-};
75818+} __no_const;
75819
75820 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75821
75822@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
75823 }
75824 EXPORT_SYMBOL(netif_rx_ni);
75825
75826-static void net_tx_action(struct softirq_action *h)
75827+static void net_tx_action(void)
75828 {
75829 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75830
75831@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi)
75832 }
75833 EXPORT_SYMBOL(netif_napi_del);
75834
75835-static void net_rx_action(struct softirq_action *h)
75836+static void net_rx_action(void)
75837 {
75838 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75839 unsigned long time_limit = jiffies + 2;
75840diff --git a/net/core/flow.c b/net/core/flow.c
75841index d6968e5..1690d9d 100644
75842--- a/net/core/flow.c
75843+++ b/net/core/flow.c
75844@@ -61,7 +61,7 @@ struct flow_cache {
75845 struct timer_list rnd_timer;
75846 };
75847
75848-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75849+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75850 EXPORT_SYMBOL(flow_cache_genid);
75851 static struct flow_cache flow_cache_global;
75852 static struct kmem_cache *flow_cachep __read_mostly;
75853@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75854
75855 static int flow_entry_valid(struct flow_cache_entry *fle)
75856 {
75857- if (atomic_read(&flow_cache_genid) != fle->genid)
75858+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75859 return 0;
75860 if (fle->object && !fle->object->ops->check(fle->object))
75861 return 0;
75862@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75863 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75864 fcp->hash_count++;
75865 }
75866- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75867+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75868 flo = fle->object;
75869 if (!flo)
75870 goto ret_object;
75871@@ -280,7 +280,7 @@ nocache:
75872 }
75873 flo = resolver(net, key, family, dir, flo, ctx);
75874 if (fle) {
75875- fle->genid = atomic_read(&flow_cache_genid);
75876+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
75877 if (!IS_ERR(flo))
75878 fle->object = flo;
75879 else
75880diff --git a/net/core/iovec.c b/net/core/iovec.c
75881index c40f27e..7f49254 100644
75882--- a/net/core/iovec.c
75883+++ b/net/core/iovec.c
75884@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75885 if (m->msg_namelen) {
75886 if (mode == VERIFY_READ) {
75887 void __user *namep;
75888- namep = (void __user __force *) m->msg_name;
75889+ namep = (void __force_user *) m->msg_name;
75890 err = move_addr_to_kernel(namep, m->msg_namelen,
75891 address);
75892 if (err < 0)
75893@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75894 }
75895
75896 size = m->msg_iovlen * sizeof(struct iovec);
75897- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75898+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75899 return -EFAULT;
75900
75901 m->msg_iov = iov;
75902diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
75903index 1683e5d..f3621f6 100644
75904--- a/net/core/net-sysfs.c
75905+++ b/net/core/net-sysfs.c
75906@@ -664,11 +664,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
75907 if (count) {
75908 int i;
75909
75910- if (count > 1<<30) {
75911- /* Enforce a limit to prevent overflow */
75912+ if (count > INT_MAX)
75913 return -EINVAL;
75914- }
75915 count = roundup_pow_of_two(count);
75916+ if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
75917+ / sizeof(struct rps_dev_flow)) {
75918+ /* Enforce a limit to prevent overflow */
75919+ return -EINVAL;
75920+ }
75921 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
75922 if (!table)
75923 return -ENOMEM;
75924diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75925index 99d9e95..209bae2 100644
75926--- a/net/core/rtnetlink.c
75927+++ b/net/core/rtnetlink.c
75928@@ -57,7 +57,7 @@ struct rtnl_link {
75929 rtnl_doit_func doit;
75930 rtnl_dumpit_func dumpit;
75931 rtnl_calcit_func calcit;
75932-};
75933+} __no_const;
75934
75935 static DEFINE_MUTEX(rtnl_mutex);
75936 static u16 min_ifinfo_dump_size;
75937diff --git a/net/core/scm.c b/net/core/scm.c
75938index 811b53f..5d6c343 100644
75939--- a/net/core/scm.c
75940+++ b/net/core/scm.c
75941@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
75942 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75943 {
75944 struct cmsghdr __user *cm
75945- = (__force struct cmsghdr __user *)msg->msg_control;
75946+ = (struct cmsghdr __force_user *)msg->msg_control;
75947 struct cmsghdr cmhdr;
75948 int cmlen = CMSG_LEN(len);
75949 int err;
75950@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75951 err = -EFAULT;
75952 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75953 goto out;
75954- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75955+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75956 goto out;
75957 cmlen = CMSG_SPACE(len);
75958 if (msg->msg_controllen < cmlen)
75959@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
75960 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75961 {
75962 struct cmsghdr __user *cm
75963- = (__force struct cmsghdr __user*)msg->msg_control;
75964+ = (struct cmsghdr __force_user *)msg->msg_control;
75965
75966 int fdmax = 0;
75967 int fdnum = scm->fp->count;
75968@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75969 if (fdnum < fdmax)
75970 fdmax = fdnum;
75971
75972- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75973+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75974 i++, cmfptr++)
75975 {
75976 int new_fd;
75977diff --git a/net/core/skbuff.c b/net/core/skbuff.c
75978index 387703f..035abcf 100644
75979--- a/net/core/skbuff.c
75980+++ b/net/core/skbuff.c
75981@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
75982 struct sock *sk = skb->sk;
75983 int ret = 0;
75984
75985+ pax_track_stack();
75986+
75987 if (splice_grow_spd(pipe, &spd))
75988 return -ENOMEM;
75989
75990diff --git a/net/core/sock.c b/net/core/sock.c
75991index 11d67b3..df26d4b 100644
75992--- a/net/core/sock.c
75993+++ b/net/core/sock.c
75994@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75995 */
75996 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75997 (unsigned)sk->sk_rcvbuf) {
75998- atomic_inc(&sk->sk_drops);
75999+ atomic_inc_unchecked(&sk->sk_drops);
76000 trace_sock_rcvqueue_full(sk, skb);
76001 return -ENOMEM;
76002 }
76003@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76004 return err;
76005
76006 if (!sk_rmem_schedule(sk, skb->truesize)) {
76007- atomic_inc(&sk->sk_drops);
76008+ atomic_inc_unchecked(&sk->sk_drops);
76009 return -ENOBUFS;
76010 }
76011
76012@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76013 skb_dst_force(skb);
76014
76015 spin_lock_irqsave(&list->lock, flags);
76016- skb->dropcount = atomic_read(&sk->sk_drops);
76017+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76018 __skb_queue_tail(list, skb);
76019 spin_unlock_irqrestore(&list->lock, flags);
76020
76021@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76022 skb->dev = NULL;
76023
76024 if (sk_rcvqueues_full(sk, skb)) {
76025- atomic_inc(&sk->sk_drops);
76026+ atomic_inc_unchecked(&sk->sk_drops);
76027 goto discard_and_relse;
76028 }
76029 if (nested)
76030@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76031 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76032 } else if (sk_add_backlog(sk, skb)) {
76033 bh_unlock_sock(sk);
76034- atomic_inc(&sk->sk_drops);
76035+ atomic_inc_unchecked(&sk->sk_drops);
76036 goto discard_and_relse;
76037 }
76038
76039@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76040 if (len > sizeof(peercred))
76041 len = sizeof(peercred);
76042 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76043- if (copy_to_user(optval, &peercred, len))
76044+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76045 return -EFAULT;
76046 goto lenout;
76047 }
76048@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76049 return -ENOTCONN;
76050 if (lv < len)
76051 return -EINVAL;
76052- if (copy_to_user(optval, address, len))
76053+ if (len > sizeof(address) || copy_to_user(optval, address, len))
76054 return -EFAULT;
76055 goto lenout;
76056 }
76057@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76058
76059 if (len > lv)
76060 len = lv;
76061- if (copy_to_user(optval, &v, len))
76062+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
76063 return -EFAULT;
76064 lenout:
76065 if (put_user(len, optlen))
76066@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
76067 */
76068 smp_wmb();
76069 atomic_set(&sk->sk_refcnt, 1);
76070- atomic_set(&sk->sk_drops, 0);
76071+ atomic_set_unchecked(&sk->sk_drops, 0);
76072 }
76073 EXPORT_SYMBOL(sock_init_data);
76074
76075diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
76076index 02e75d1..9a57a7c 100644
76077--- a/net/decnet/sysctl_net_decnet.c
76078+++ b/net/decnet/sysctl_net_decnet.c
76079@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
76080
76081 if (len > *lenp) len = *lenp;
76082
76083- if (copy_to_user(buffer, addr, len))
76084+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
76085 return -EFAULT;
76086
76087 *lenp = len;
76088@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
76089
76090 if (len > *lenp) len = *lenp;
76091
76092- if (copy_to_user(buffer, devname, len))
76093+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
76094 return -EFAULT;
76095
76096 *lenp = len;
76097diff --git a/net/econet/Kconfig b/net/econet/Kconfig
76098index 39a2d29..f39c0fe 100644
76099--- a/net/econet/Kconfig
76100+++ b/net/econet/Kconfig
76101@@ -4,7 +4,7 @@
76102
76103 config ECONET
76104 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
76105- depends on EXPERIMENTAL && INET
76106+ depends on EXPERIMENTAL && INET && BROKEN
76107 ---help---
76108 Econet is a fairly old and slow networking protocol mainly used by
76109 Acorn computers to access file and print servers. It uses native
76110diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
76111index 92fc5f6..b790d91 100644
76112--- a/net/ipv4/fib_frontend.c
76113+++ b/net/ipv4/fib_frontend.c
76114@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
76115 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76116 fib_sync_up(dev);
76117 #endif
76118- atomic_inc(&net->ipv4.dev_addr_genid);
76119+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76120 rt_cache_flush(dev_net(dev), -1);
76121 break;
76122 case NETDEV_DOWN:
76123 fib_del_ifaddr(ifa, NULL);
76124- atomic_inc(&net->ipv4.dev_addr_genid);
76125+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76126 if (ifa->ifa_dev->ifa_list == NULL) {
76127 /* Last address was deleted from this interface.
76128 * Disable IP.
76129@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
76130 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76131 fib_sync_up(dev);
76132 #endif
76133- atomic_inc(&net->ipv4.dev_addr_genid);
76134+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76135 rt_cache_flush(dev_net(dev), -1);
76136 break;
76137 case NETDEV_DOWN:
76138diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
76139index 80106d8..232e898 100644
76140--- a/net/ipv4/fib_semantics.c
76141+++ b/net/ipv4/fib_semantics.c
76142@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
76143 nh->nh_saddr = inet_select_addr(nh->nh_dev,
76144 nh->nh_gw,
76145 nh->nh_parent->fib_scope);
76146- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
76147+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
76148
76149 return nh->nh_saddr;
76150 }
76151diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
76152index 389a2e6..ac1c1de 100644
76153--- a/net/ipv4/inet_diag.c
76154+++ b/net/ipv4/inet_diag.c
76155@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
76156 r->idiag_retrans = 0;
76157
76158 r->id.idiag_if = sk->sk_bound_dev_if;
76159+
76160+#ifdef CONFIG_GRKERNSEC_HIDESYM
76161+ r->id.idiag_cookie[0] = 0;
76162+ r->id.idiag_cookie[1] = 0;
76163+#else
76164 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
76165 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
76166+#endif
76167
76168 r->id.idiag_sport = inet->inet_sport;
76169 r->id.idiag_dport = inet->inet_dport;
76170@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
76171 r->idiag_family = tw->tw_family;
76172 r->idiag_retrans = 0;
76173 r->id.idiag_if = tw->tw_bound_dev_if;
76174+
76175+#ifdef CONFIG_GRKERNSEC_HIDESYM
76176+ r->id.idiag_cookie[0] = 0;
76177+ r->id.idiag_cookie[1] = 0;
76178+#else
76179 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
76180 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
76181+#endif
76182+
76183 r->id.idiag_sport = tw->tw_sport;
76184 r->id.idiag_dport = tw->tw_dport;
76185 r->id.idiag_src[0] = tw->tw_rcv_saddr;
76186@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
76187 if (sk == NULL)
76188 goto unlock;
76189
76190+#ifndef CONFIG_GRKERNSEC_HIDESYM
76191 err = -ESTALE;
76192 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
76193 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
76194 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
76195 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
76196 goto out;
76197+#endif
76198
76199 err = -ENOMEM;
76200 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
76201@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
76202 r->idiag_retrans = req->retrans;
76203
76204 r->id.idiag_if = sk->sk_bound_dev_if;
76205+
76206+#ifdef CONFIG_GRKERNSEC_HIDESYM
76207+ r->id.idiag_cookie[0] = 0;
76208+ r->id.idiag_cookie[1] = 0;
76209+#else
76210 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
76211 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
76212+#endif
76213
76214 tmo = req->expires - jiffies;
76215 if (tmo < 0)
76216diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
76217index 984ec65..97ac518 100644
76218--- a/net/ipv4/inet_hashtables.c
76219+++ b/net/ipv4/inet_hashtables.c
76220@@ -18,12 +18,15 @@
76221 #include <linux/sched.h>
76222 #include <linux/slab.h>
76223 #include <linux/wait.h>
76224+#include <linux/security.h>
76225
76226 #include <net/inet_connection_sock.h>
76227 #include <net/inet_hashtables.h>
76228 #include <net/secure_seq.h>
76229 #include <net/ip.h>
76230
76231+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
76232+
76233 /*
76234 * Allocate and initialize a new local port bind bucket.
76235 * The bindhash mutex for snum's hash chain must be held here.
76236@@ -530,6 +533,8 @@ ok:
76237 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
76238 spin_unlock(&head->lock);
76239
76240+ gr_update_task_in_ip_table(current, inet_sk(sk));
76241+
76242 if (tw) {
76243 inet_twsk_deschedule(tw, death_row);
76244 while (twrefcnt) {
76245diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
76246index 86f13c67..0bce60f 100644
76247--- a/net/ipv4/inetpeer.c
76248+++ b/net/ipv4/inetpeer.c
76249@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
76250 unsigned int sequence;
76251 int invalidated, gccnt = 0;
76252
76253+ pax_track_stack();
76254+
76255 /* Attempt a lockless lookup first.
76256 * Because of a concurrent writer, we might not find an existing entry.
76257 */
76258@@ -436,8 +438,8 @@ relookup:
76259 if (p) {
76260 p->daddr = *daddr;
76261 atomic_set(&p->refcnt, 1);
76262- atomic_set(&p->rid, 0);
76263- atomic_set(&p->ip_id_count,
76264+ atomic_set_unchecked(&p->rid, 0);
76265+ atomic_set_unchecked(&p->ip_id_count,
76266 (daddr->family == AF_INET) ?
76267 secure_ip_id(daddr->addr.a4) :
76268 secure_ipv6_id(daddr->addr.a6));
76269diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
76270index 0e0ab98..2ed7dd5 100644
76271--- a/net/ipv4/ip_fragment.c
76272+++ b/net/ipv4/ip_fragment.c
76273@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
76274 return 0;
76275
76276 start = qp->rid;
76277- end = atomic_inc_return(&peer->rid);
76278+ end = atomic_inc_return_unchecked(&peer->rid);
76279 qp->rid = end;
76280
76281 rc = qp->q.fragments && (end - start) > max;
76282diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
76283index 8905e92..0b179fb 100644
76284--- a/net/ipv4/ip_sockglue.c
76285+++ b/net/ipv4/ip_sockglue.c
76286@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76287 int val;
76288 int len;
76289
76290+ pax_track_stack();
76291+
76292 if (level != SOL_IP)
76293 return -EOPNOTSUPP;
76294
76295@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76296 len = min_t(unsigned int, len, opt->optlen);
76297 if (put_user(len, optlen))
76298 return -EFAULT;
76299- if (copy_to_user(optval, opt->__data, len))
76300+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76301+ copy_to_user(optval, opt->__data, len))
76302 return -EFAULT;
76303 return 0;
76304 }
76305@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76306 if (sk->sk_type != SOCK_STREAM)
76307 return -ENOPROTOOPT;
76308
76309- msg.msg_control = optval;
76310+ msg.msg_control = (void __force_kernel *)optval;
76311 msg.msg_controllen = len;
76312 msg.msg_flags = flags;
76313
76314diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
76315index 004bb74..8d4a58c 100644
76316--- a/net/ipv4/ipconfig.c
76317+++ b/net/ipv4/ipconfig.c
76318@@ -317,7 +317,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
76319
76320 mm_segment_t oldfs = get_fs();
76321 set_fs(get_ds());
76322- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76323+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76324 set_fs(oldfs);
76325 return res;
76326 }
76327@@ -328,7 +328,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
76328
76329 mm_segment_t oldfs = get_fs();
76330 set_fs(get_ds());
76331- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76332+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76333 set_fs(oldfs);
76334 return res;
76335 }
76336@@ -339,7 +339,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
76337
76338 mm_segment_t oldfs = get_fs();
76339 set_fs(get_ds());
76340- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76341+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76342 set_fs(oldfs);
76343 return res;
76344 }
76345diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76346index 076b7c8..9c8d038 100644
76347--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
76348+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76349@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
76350
76351 *len = 0;
76352
76353- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
76354+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
76355 if (*octets == NULL) {
76356 if (net_ratelimit())
76357 pr_notice("OOM in bsalg (%d)\n", __LINE__);
76358diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
76359index 39b403f..8e6a0a8 100644
76360--- a/net/ipv4/ping.c
76361+++ b/net/ipv4/ping.c
76362@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
76363 sk_rmem_alloc_get(sp),
76364 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76365 atomic_read(&sp->sk_refcnt), sp,
76366- atomic_read(&sp->sk_drops), len);
76367+ atomic_read_unchecked(&sp->sk_drops), len);
76368 }
76369
76370 static int ping_seq_show(struct seq_file *seq, void *v)
76371diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
76372index 61714bd..c9cee6d 100644
76373--- a/net/ipv4/raw.c
76374+++ b/net/ipv4/raw.c
76375@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
76376 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76377 {
76378 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76379- atomic_inc(&sk->sk_drops);
76380+ atomic_inc_unchecked(&sk->sk_drops);
76381 kfree_skb(skb);
76382 return NET_RX_DROP;
76383 }
76384@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
76385
76386 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76387 {
76388+ struct icmp_filter filter;
76389+
76390 if (optlen > sizeof(struct icmp_filter))
76391 optlen = sizeof(struct icmp_filter);
76392- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76393+ if (copy_from_user(&filter, optval, optlen))
76394 return -EFAULT;
76395+ raw_sk(sk)->filter = filter;
76396 return 0;
76397 }
76398
76399 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76400 {
76401 int len, ret = -EFAULT;
76402+ struct icmp_filter filter;
76403
76404 if (get_user(len, optlen))
76405 goto out;
76406@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
76407 if (len > sizeof(struct icmp_filter))
76408 len = sizeof(struct icmp_filter);
76409 ret = -EFAULT;
76410- if (put_user(len, optlen) ||
76411- copy_to_user(optval, &raw_sk(sk)->filter, len))
76412+ filter = raw_sk(sk)->filter;
76413+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
76414 goto out;
76415 ret = 0;
76416 out: return ret;
76417@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76418 sk_wmem_alloc_get(sp),
76419 sk_rmem_alloc_get(sp),
76420 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76421- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76422+ atomic_read(&sp->sk_refcnt),
76423+#ifdef CONFIG_GRKERNSEC_HIDESYM
76424+ NULL,
76425+#else
76426+ sp,
76427+#endif
76428+ atomic_read_unchecked(&sp->sk_drops));
76429 }
76430
76431 static int raw_seq_show(struct seq_file *seq, void *v)
76432diff --git a/net/ipv4/route.c b/net/ipv4/route.c
76433index b563854..e03f8a6 100644
76434--- a/net/ipv4/route.c
76435+++ b/net/ipv4/route.c
76436@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
76437
76438 static inline int rt_genid(struct net *net)
76439 {
76440- return atomic_read(&net->ipv4.rt_genid);
76441+ return atomic_read_unchecked(&net->ipv4.rt_genid);
76442 }
76443
76444 #ifdef CONFIG_PROC_FS
76445@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
76446 unsigned char shuffle;
76447
76448 get_random_bytes(&shuffle, sizeof(shuffle));
76449- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76450+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
76451 redirect_genid++;
76452 }
76453
76454@@ -3015,7 +3015,7 @@ static int rt_fill_info(struct net *net,
76455 error = rt->dst.error;
76456 if (peer) {
76457 inet_peer_refcheck(rt->peer);
76458- id = atomic_read(&peer->ip_id_count) & 0xffff;
76459+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
76460 if (peer->tcp_ts_stamp) {
76461 ts = peer->tcp_ts;
76462 tsage = get_seconds() - peer->tcp_ts_stamp;
76463diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
76464index 46febca..98b73a4 100644
76465--- a/net/ipv4/tcp.c
76466+++ b/net/ipv4/tcp.c
76467@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
76468 int val;
76469 int err = 0;
76470
76471+ pax_track_stack();
76472+
76473 /* These are data/string values, all the others are ints */
76474 switch (optname) {
76475 case TCP_CONGESTION: {
76476@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
76477 struct tcp_sock *tp = tcp_sk(sk);
76478 int val, len;
76479
76480+ pax_track_stack();
76481+
76482 if (get_user(len, optlen))
76483 return -EFAULT;
76484
76485diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
76486index 7963e03..c44f5d0 100644
76487--- a/net/ipv4/tcp_ipv4.c
76488+++ b/net/ipv4/tcp_ipv4.c
76489@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
76490 int sysctl_tcp_low_latency __read_mostly;
76491 EXPORT_SYMBOL(sysctl_tcp_low_latency);
76492
76493+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76494+extern int grsec_enable_blackhole;
76495+#endif
76496
76497 #ifdef CONFIG_TCP_MD5SIG
76498 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
76499@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
76500 return 0;
76501
76502 reset:
76503+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76504+ if (!grsec_enable_blackhole)
76505+#endif
76506 tcp_v4_send_reset(rsk, skb);
76507 discard:
76508 kfree_skb(skb);
76509@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
76510 TCP_SKB_CB(skb)->sacked = 0;
76511
76512 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76513- if (!sk)
76514+ if (!sk) {
76515+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76516+ ret = 1;
76517+#endif
76518 goto no_tcp_socket;
76519-
76520+ }
76521 process:
76522- if (sk->sk_state == TCP_TIME_WAIT)
76523+ if (sk->sk_state == TCP_TIME_WAIT) {
76524+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76525+ ret = 2;
76526+#endif
76527 goto do_time_wait;
76528+ }
76529
76530 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
76531 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76532@@ -1739,6 +1752,10 @@ no_tcp_socket:
76533 bad_packet:
76534 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76535 } else {
76536+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76537+ if (!grsec_enable_blackhole || (ret == 1 &&
76538+ (skb->dev->flags & IFF_LOOPBACK)))
76539+#endif
76540 tcp_v4_send_reset(NULL, skb);
76541 }
76542
76543@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
76544 0, /* non standard timer */
76545 0, /* open_requests have no inode */
76546 atomic_read(&sk->sk_refcnt),
76547+#ifdef CONFIG_GRKERNSEC_HIDESYM
76548+ NULL,
76549+#else
76550 req,
76551+#endif
76552 len);
76553 }
76554
76555@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
76556 sock_i_uid(sk),
76557 icsk->icsk_probes_out,
76558 sock_i_ino(sk),
76559- atomic_read(&sk->sk_refcnt), sk,
76560+ atomic_read(&sk->sk_refcnt),
76561+#ifdef CONFIG_GRKERNSEC_HIDESYM
76562+ NULL,
76563+#else
76564+ sk,
76565+#endif
76566 jiffies_to_clock_t(icsk->icsk_rto),
76567 jiffies_to_clock_t(icsk->icsk_ack.ato),
76568 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
76569@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
76570 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
76571 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76572 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76573- atomic_read(&tw->tw_refcnt), tw, len);
76574+ atomic_read(&tw->tw_refcnt),
76575+#ifdef CONFIG_GRKERNSEC_HIDESYM
76576+ NULL,
76577+#else
76578+ tw,
76579+#endif
76580+ len);
76581 }
76582
76583 #define TMPSZ 150
76584diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
76585index 0ce3d06..e182e59 100644
76586--- a/net/ipv4/tcp_minisocks.c
76587+++ b/net/ipv4/tcp_minisocks.c
76588@@ -27,6 +27,10 @@
76589 #include <net/inet_common.h>
76590 #include <net/xfrm.h>
76591
76592+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76593+extern int grsec_enable_blackhole;
76594+#endif
76595+
76596 int sysctl_tcp_syncookies __read_mostly = 1;
76597 EXPORT_SYMBOL(sysctl_tcp_syncookies);
76598
76599@@ -750,6 +754,10 @@ listen_overflow:
76600
76601 embryonic_reset:
76602 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
76603+
76604+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76605+ if (!grsec_enable_blackhole)
76606+#endif
76607 if (!(flg & TCP_FLAG_RST))
76608 req->rsk_ops->send_reset(sk, skb);
76609
76610diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
76611index 882e0b0..2eba47f 100644
76612--- a/net/ipv4/tcp_output.c
76613+++ b/net/ipv4/tcp_output.c
76614@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
76615 int mss;
76616 int s_data_desired = 0;
76617
76618+ pax_track_stack();
76619+
76620 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
76621 s_data_desired = cvp->s_data_desired;
76622 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
76623diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
76624index 85ee7eb..53277ab 100644
76625--- a/net/ipv4/tcp_probe.c
76626+++ b/net/ipv4/tcp_probe.c
76627@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
76628 if (cnt + width >= len)
76629 break;
76630
76631- if (copy_to_user(buf + cnt, tbuf, width))
76632+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
76633 return -EFAULT;
76634 cnt += width;
76635 }
76636diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
76637index ecd44b0..b32fba6 100644
76638--- a/net/ipv4/tcp_timer.c
76639+++ b/net/ipv4/tcp_timer.c
76640@@ -22,6 +22,10 @@
76641 #include <linux/gfp.h>
76642 #include <net/tcp.h>
76643
76644+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76645+extern int grsec_lastack_retries;
76646+#endif
76647+
76648 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76649 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76650 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
76651@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
76652 }
76653 }
76654
76655+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76656+ if ((sk->sk_state == TCP_LAST_ACK) &&
76657+ (grsec_lastack_retries > 0) &&
76658+ (grsec_lastack_retries < retry_until))
76659+ retry_until = grsec_lastack_retries;
76660+#endif
76661+
76662 if (retransmits_timed_out(sk, retry_until,
76663 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
76664 /* Has it gone just too far? */
76665diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
76666index 1b5a193..bd354b0 100644
76667--- a/net/ipv4/udp.c
76668+++ b/net/ipv4/udp.c
76669@@ -86,6 +86,7 @@
76670 #include <linux/types.h>
76671 #include <linux/fcntl.h>
76672 #include <linux/module.h>
76673+#include <linux/security.h>
76674 #include <linux/socket.h>
76675 #include <linux/sockios.h>
76676 #include <linux/igmp.h>
76677@@ -108,6 +109,10 @@
76678 #include <trace/events/udp.h>
76679 #include "udp_impl.h"
76680
76681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76682+extern int grsec_enable_blackhole;
76683+#endif
76684+
76685 struct udp_table udp_table __read_mostly;
76686 EXPORT_SYMBOL(udp_table);
76687
76688@@ -565,6 +570,9 @@ found:
76689 return s;
76690 }
76691
76692+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76693+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76694+
76695 /*
76696 * This routine is called by the ICMP module when it gets some
76697 * sort of error condition. If err < 0 then the socket should
76698@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
76699 dport = usin->sin_port;
76700 if (dport == 0)
76701 return -EINVAL;
76702+
76703+ err = gr_search_udp_sendmsg(sk, usin);
76704+ if (err)
76705+ return err;
76706 } else {
76707 if (sk->sk_state != TCP_ESTABLISHED)
76708 return -EDESTADDRREQ;
76709+
76710+ err = gr_search_udp_sendmsg(sk, NULL);
76711+ if (err)
76712+ return err;
76713+
76714 daddr = inet->inet_daddr;
76715 dport = inet->inet_dport;
76716 /* Open fast path for connected socket.
76717@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
76718 udp_lib_checksum_complete(skb)) {
76719 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76720 IS_UDPLITE(sk));
76721- atomic_inc(&sk->sk_drops);
76722+ atomic_inc_unchecked(&sk->sk_drops);
76723 __skb_unlink(skb, rcvq);
76724 __skb_queue_tail(&list_kill, skb);
76725 }
76726@@ -1185,6 +1202,10 @@ try_again:
76727 if (!skb)
76728 goto out;
76729
76730+ err = gr_search_udp_recvmsg(sk, skb);
76731+ if (err)
76732+ goto out_free;
76733+
76734 ulen = skb->len - sizeof(struct udphdr);
76735 if (len > ulen)
76736 len = ulen;
76737@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76738
76739 drop:
76740 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76741- atomic_inc(&sk->sk_drops);
76742+ atomic_inc_unchecked(&sk->sk_drops);
76743 kfree_skb(skb);
76744 return -1;
76745 }
76746@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76747 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
76748
76749 if (!skb1) {
76750- atomic_inc(&sk->sk_drops);
76751+ atomic_inc_unchecked(&sk->sk_drops);
76752 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76753 IS_UDPLITE(sk));
76754 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76755@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76756 goto csum_error;
76757
76758 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76759+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76760+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76761+#endif
76762 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76763
76764 /*
76765@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
76766 sk_wmem_alloc_get(sp),
76767 sk_rmem_alloc_get(sp),
76768 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76769- atomic_read(&sp->sk_refcnt), sp,
76770- atomic_read(&sp->sk_drops), len);
76771+ atomic_read(&sp->sk_refcnt),
76772+#ifdef CONFIG_GRKERNSEC_HIDESYM
76773+ NULL,
76774+#else
76775+ sp,
76776+#endif
76777+ atomic_read_unchecked(&sp->sk_drops), len);
76778 }
76779
76780 int udp4_seq_show(struct seq_file *seq, void *v)
76781diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
76782index 12368c5..fbf899f 100644
76783--- a/net/ipv6/addrconf.c
76784+++ b/net/ipv6/addrconf.c
76785@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
76786 p.iph.ihl = 5;
76787 p.iph.protocol = IPPROTO_IPV6;
76788 p.iph.ttl = 64;
76789- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76790+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76791
76792 if (ops->ndo_do_ioctl) {
76793 mm_segment_t oldfs = get_fs();
76794diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
76795index 8a58e8c..8b5e631 100644
76796--- a/net/ipv6/inet6_connection_sock.c
76797+++ b/net/ipv6/inet6_connection_sock.c
76798@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76799 #ifdef CONFIG_XFRM
76800 {
76801 struct rt6_info *rt = (struct rt6_info *)dst;
76802- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76803+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76804 }
76805 #endif
76806 }
76807@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76808 #ifdef CONFIG_XFRM
76809 if (dst) {
76810 struct rt6_info *rt = (struct rt6_info *)dst;
76811- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76812+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76813 __sk_dst_reset(sk);
76814 dst = NULL;
76815 }
76816diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
76817index 2fbda5f..26ed683 100644
76818--- a/net/ipv6/ipv6_sockglue.c
76819+++ b/net/ipv6/ipv6_sockglue.c
76820@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
76821 int val, valbool;
76822 int retv = -ENOPROTOOPT;
76823
76824+ pax_track_stack();
76825+
76826 if (optval == NULL)
76827 val=0;
76828 else {
76829@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76830 int len;
76831 int val;
76832
76833+ pax_track_stack();
76834+
76835 if (ip6_mroute_opt(optname))
76836 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
76837
76838@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76839 if (sk->sk_type != SOCK_STREAM)
76840 return -ENOPROTOOPT;
76841
76842- msg.msg_control = optval;
76843+ msg.msg_control = (void __force_kernel *)optval;
76844 msg.msg_controllen = len;
76845 msg.msg_flags = flags;
76846
76847diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76848index 343852e..c92bd15 100644
76849--- a/net/ipv6/raw.c
76850+++ b/net/ipv6/raw.c
76851@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
76852 {
76853 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
76854 skb_checksum_complete(skb)) {
76855- atomic_inc(&sk->sk_drops);
76856+ atomic_inc_unchecked(&sk->sk_drops);
76857 kfree_skb(skb);
76858 return NET_RX_DROP;
76859 }
76860@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76861 struct raw6_sock *rp = raw6_sk(sk);
76862
76863 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76864- atomic_inc(&sk->sk_drops);
76865+ atomic_inc_unchecked(&sk->sk_drops);
76866 kfree_skb(skb);
76867 return NET_RX_DROP;
76868 }
76869@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76870
76871 if (inet->hdrincl) {
76872 if (skb_checksum_complete(skb)) {
76873- atomic_inc(&sk->sk_drops);
76874+ atomic_inc_unchecked(&sk->sk_drops);
76875 kfree_skb(skb);
76876 return NET_RX_DROP;
76877 }
76878@@ -601,7 +601,7 @@ out:
76879 return err;
76880 }
76881
76882-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76883+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76884 struct flowi6 *fl6, struct dst_entry **dstp,
76885 unsigned int flags)
76886 {
76887@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
76888 u16 proto;
76889 int err;
76890
76891+ pax_track_stack();
76892+
76893 /* Rough check on arithmetic overflow,
76894 better check is made in ip6_append_data().
76895 */
76896@@ -909,12 +911,15 @@ do_confirm:
76897 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76898 char __user *optval, int optlen)
76899 {
76900+ struct icmp6_filter filter;
76901+
76902 switch (optname) {
76903 case ICMPV6_FILTER:
76904 if (optlen > sizeof(struct icmp6_filter))
76905 optlen = sizeof(struct icmp6_filter);
76906- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76907+ if (copy_from_user(&filter, optval, optlen))
76908 return -EFAULT;
76909+ raw6_sk(sk)->filter = filter;
76910 return 0;
76911 default:
76912 return -ENOPROTOOPT;
76913@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76914 char __user *optval, int __user *optlen)
76915 {
76916 int len;
76917+ struct icmp6_filter filter;
76918
76919 switch (optname) {
76920 case ICMPV6_FILTER:
76921@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76922 len = sizeof(struct icmp6_filter);
76923 if (put_user(len, optlen))
76924 return -EFAULT;
76925- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76926+ filter = raw6_sk(sk)->filter;
76927+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
76928 return -EFAULT;
76929 return 0;
76930 default:
76931@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76932 0, 0L, 0,
76933 sock_i_uid(sp), 0,
76934 sock_i_ino(sp),
76935- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76936+ atomic_read(&sp->sk_refcnt),
76937+#ifdef CONFIG_GRKERNSEC_HIDESYM
76938+ NULL,
76939+#else
76940+ sp,
76941+#endif
76942+ atomic_read_unchecked(&sp->sk_drops));
76943 }
76944
76945 static int raw6_seq_show(struct seq_file *seq, void *v)
76946diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76947index 7b8fc57..c6185da 100644
76948--- a/net/ipv6/tcp_ipv6.c
76949+++ b/net/ipv6/tcp_ipv6.c
76950@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76951 }
76952 #endif
76953
76954+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76955+extern int grsec_enable_blackhole;
76956+#endif
76957+
76958 static void tcp_v6_hash(struct sock *sk)
76959 {
76960 if (sk->sk_state != TCP_CLOSE) {
76961@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76962 return 0;
76963
76964 reset:
76965+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76966+ if (!grsec_enable_blackhole)
76967+#endif
76968 tcp_v6_send_reset(sk, skb);
76969 discard:
76970 if (opt_skb)
76971@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76972 TCP_SKB_CB(skb)->sacked = 0;
76973
76974 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76975- if (!sk)
76976+ if (!sk) {
76977+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76978+ ret = 1;
76979+#endif
76980 goto no_tcp_socket;
76981+ }
76982
76983 process:
76984- if (sk->sk_state == TCP_TIME_WAIT)
76985+ if (sk->sk_state == TCP_TIME_WAIT) {
76986+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76987+ ret = 2;
76988+#endif
76989 goto do_time_wait;
76990+ }
76991
76992 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76993 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76994@@ -1779,6 +1794,10 @@ no_tcp_socket:
76995 bad_packet:
76996 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76997 } else {
76998+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76999+ if (!grsec_enable_blackhole || (ret == 1 &&
77000+ (skb->dev->flags & IFF_LOOPBACK)))
77001+#endif
77002 tcp_v6_send_reset(NULL, skb);
77003 }
77004
77005@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq,
77006 uid,
77007 0, /* non standard timer */
77008 0, /* open_requests have no inode */
77009- 0, req);
77010+ 0,
77011+#ifdef CONFIG_GRKERNSEC_HIDESYM
77012+ NULL
77013+#else
77014+ req
77015+#endif
77016+ );
77017 }
77018
77019 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77020@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77021 sock_i_uid(sp),
77022 icsk->icsk_probes_out,
77023 sock_i_ino(sp),
77024- atomic_read(&sp->sk_refcnt), sp,
77025+ atomic_read(&sp->sk_refcnt),
77026+#ifdef CONFIG_GRKERNSEC_HIDESYM
77027+ NULL,
77028+#else
77029+ sp,
77030+#endif
77031 jiffies_to_clock_t(icsk->icsk_rto),
77032 jiffies_to_clock_t(icsk->icsk_ack.ato),
77033 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
77034@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq,
77035 dest->s6_addr32[2], dest->s6_addr32[3], destp,
77036 tw->tw_substate, 0, 0,
77037 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77038- atomic_read(&tw->tw_refcnt), tw);
77039+ atomic_read(&tw->tw_refcnt),
77040+#ifdef CONFIG_GRKERNSEC_HIDESYM
77041+ NULL
77042+#else
77043+ tw
77044+#endif
77045+ );
77046 }
77047
77048 static int tcp6_seq_show(struct seq_file *seq, void *v)
77049diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
77050index bb95e8e..ae0ee80 100644
77051--- a/net/ipv6/udp.c
77052+++ b/net/ipv6/udp.c
77053@@ -50,6 +50,10 @@
77054 #include <linux/seq_file.h>
77055 #include "udp_impl.h"
77056
77057+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77058+extern int grsec_enable_blackhole;
77059+#endif
77060+
77061 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
77062 {
77063 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
77064@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
77065
77066 return 0;
77067 drop:
77068- atomic_inc(&sk->sk_drops);
77069+ atomic_inc_unchecked(&sk->sk_drops);
77070 drop_no_sk_drops_inc:
77071 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77072 kfree_skb(skb);
77073@@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77074 continue;
77075 }
77076 drop:
77077- atomic_inc(&sk->sk_drops);
77078+ atomic_inc_unchecked(&sk->sk_drops);
77079 UDP6_INC_STATS_BH(sock_net(sk),
77080 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
77081 UDP6_INC_STATS_BH(sock_net(sk),
77082@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77083 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
77084 proto == IPPROTO_UDPLITE);
77085
77086+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77087+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77088+#endif
77089 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
77090
77091 kfree_skb(skb);
77092@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77093 if (!sock_owned_by_user(sk))
77094 udpv6_queue_rcv_skb(sk, skb);
77095 else if (sk_add_backlog(sk, skb)) {
77096- atomic_inc(&sk->sk_drops);
77097+ atomic_inc_unchecked(&sk->sk_drops);
77098 bh_unlock_sock(sk);
77099 sock_put(sk);
77100 goto discard;
77101@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
77102 0, 0L, 0,
77103 sock_i_uid(sp), 0,
77104 sock_i_ino(sp),
77105- atomic_read(&sp->sk_refcnt), sp,
77106- atomic_read(&sp->sk_drops));
77107+ atomic_read(&sp->sk_refcnt),
77108+#ifdef CONFIG_GRKERNSEC_HIDESYM
77109+ NULL,
77110+#else
77111+ sp,
77112+#endif
77113+ atomic_read_unchecked(&sp->sk_drops));
77114 }
77115
77116 int udp6_seq_show(struct seq_file *seq, void *v)
77117diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
77118index b3cc8b3..baa02d0 100644
77119--- a/net/irda/ircomm/ircomm_tty.c
77120+++ b/net/irda/ircomm/ircomm_tty.c
77121@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77122 add_wait_queue(&self->open_wait, &wait);
77123
77124 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
77125- __FILE__,__LINE__, tty->driver->name, self->open_count );
77126+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77127
77128 /* As far as I can see, we protect open_count - Jean II */
77129 spin_lock_irqsave(&self->spinlock, flags);
77130 if (!tty_hung_up_p(filp)) {
77131 extra_count = 1;
77132- self->open_count--;
77133+ local_dec(&self->open_count);
77134 }
77135 spin_unlock_irqrestore(&self->spinlock, flags);
77136- self->blocked_open++;
77137+ local_inc(&self->blocked_open);
77138
77139 while (1) {
77140 if (tty->termios->c_cflag & CBAUD) {
77141@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77142 }
77143
77144 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
77145- __FILE__,__LINE__, tty->driver->name, self->open_count );
77146+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77147
77148 schedule();
77149 }
77150@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77151 if (extra_count) {
77152 /* ++ is not atomic, so this should be protected - Jean II */
77153 spin_lock_irqsave(&self->spinlock, flags);
77154- self->open_count++;
77155+ local_inc(&self->open_count);
77156 spin_unlock_irqrestore(&self->spinlock, flags);
77157 }
77158- self->blocked_open--;
77159+ local_dec(&self->blocked_open);
77160
77161 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
77162- __FILE__,__LINE__, tty->driver->name, self->open_count);
77163+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
77164
77165 if (!retval)
77166 self->flags |= ASYNC_NORMAL_ACTIVE;
77167@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
77168 }
77169 /* ++ is not atomic, so this should be protected - Jean II */
77170 spin_lock_irqsave(&self->spinlock, flags);
77171- self->open_count++;
77172+ local_inc(&self->open_count);
77173
77174 tty->driver_data = self;
77175 self->tty = tty;
77176 spin_unlock_irqrestore(&self->spinlock, flags);
77177
77178 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
77179- self->line, self->open_count);
77180+ self->line, local_read(&self->open_count));
77181
77182 /* Not really used by us, but lets do it anyway */
77183 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
77184@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77185 return;
77186 }
77187
77188- if ((tty->count == 1) && (self->open_count != 1)) {
77189+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
77190 /*
77191 * Uh, oh. tty->count is 1, which means that the tty
77192 * structure will be freed. state->count should always
77193@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77194 */
77195 IRDA_DEBUG(0, "%s(), bad serial port count; "
77196 "tty->count is 1, state->count is %d\n", __func__ ,
77197- self->open_count);
77198- self->open_count = 1;
77199+ local_read(&self->open_count));
77200+ local_set(&self->open_count, 1);
77201 }
77202
77203- if (--self->open_count < 0) {
77204+ if (local_dec_return(&self->open_count) < 0) {
77205 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77206- __func__, self->line, self->open_count);
77207- self->open_count = 0;
77208+ __func__, self->line, local_read(&self->open_count));
77209+ local_set(&self->open_count, 0);
77210 }
77211- if (self->open_count) {
77212+ if (local_read(&self->open_count)) {
77213 spin_unlock_irqrestore(&self->spinlock, flags);
77214
77215 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
77216@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77217 tty->closing = 0;
77218 self->tty = NULL;
77219
77220- if (self->blocked_open) {
77221+ if (local_read(&self->blocked_open)) {
77222 if (self->close_delay)
77223 schedule_timeout_interruptible(self->close_delay);
77224 wake_up_interruptible(&self->open_wait);
77225@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
77226 spin_lock_irqsave(&self->spinlock, flags);
77227 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77228 self->tty = NULL;
77229- self->open_count = 0;
77230+ local_set(&self->open_count, 0);
77231 spin_unlock_irqrestore(&self->spinlock, flags);
77232
77233 wake_up_interruptible(&self->open_wait);
77234@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
77235 seq_putc(m, '\n');
77236
77237 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77238- seq_printf(m, "Open count: %d\n", self->open_count);
77239+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
77240 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77241 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77242
77243diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
77244index e2013e4..edfc1e3 100644
77245--- a/net/iucv/af_iucv.c
77246+++ b/net/iucv/af_iucv.c
77247@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk)
77248
77249 write_lock_bh(&iucv_sk_list.lock);
77250
77251- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77252+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77253 while (__iucv_get_sock_by_name(name)) {
77254 sprintf(name, "%08x",
77255- atomic_inc_return(&iucv_sk_list.autobind_name));
77256+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77257 }
77258
77259 write_unlock_bh(&iucv_sk_list.lock);
77260diff --git a/net/key/af_key.c b/net/key/af_key.c
77261index 1e733e9..c84de2f 100644
77262--- a/net/key/af_key.c
77263+++ b/net/key/af_key.c
77264@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
77265 struct xfrm_migrate m[XFRM_MAX_DEPTH];
77266 struct xfrm_kmaddress k;
77267
77268+ pax_track_stack();
77269+
77270 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
77271 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
77272 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
77273@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
77274 static u32 get_acqseq(void)
77275 {
77276 u32 res;
77277- static atomic_t acqseq;
77278+ static atomic_unchecked_t acqseq;
77279
77280 do {
77281- res = atomic_inc_return(&acqseq);
77282+ res = atomic_inc_return_unchecked(&acqseq);
77283 } while (!res);
77284 return res;
77285 }
77286diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
77287index 956b7e4..f01d328 100644
77288--- a/net/lapb/lapb_iface.c
77289+++ b/net/lapb/lapb_iface.c
77290@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
77291 goto out;
77292
77293 lapb->dev = dev;
77294- lapb->callbacks = *callbacks;
77295+ lapb->callbacks = callbacks;
77296
77297 __lapb_insert_cb(lapb);
77298
77299@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
77300
77301 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
77302 {
77303- if (lapb->callbacks.connect_confirmation)
77304- lapb->callbacks.connect_confirmation(lapb->dev, reason);
77305+ if (lapb->callbacks->connect_confirmation)
77306+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
77307 }
77308
77309 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
77310 {
77311- if (lapb->callbacks.connect_indication)
77312- lapb->callbacks.connect_indication(lapb->dev, reason);
77313+ if (lapb->callbacks->connect_indication)
77314+ lapb->callbacks->connect_indication(lapb->dev, reason);
77315 }
77316
77317 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
77318 {
77319- if (lapb->callbacks.disconnect_confirmation)
77320- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
77321+ if (lapb->callbacks->disconnect_confirmation)
77322+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
77323 }
77324
77325 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
77326 {
77327- if (lapb->callbacks.disconnect_indication)
77328- lapb->callbacks.disconnect_indication(lapb->dev, reason);
77329+ if (lapb->callbacks->disconnect_indication)
77330+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
77331 }
77332
77333 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
77334 {
77335- if (lapb->callbacks.data_indication)
77336- return lapb->callbacks.data_indication(lapb->dev, skb);
77337+ if (lapb->callbacks->data_indication)
77338+ return lapb->callbacks->data_indication(lapb->dev, skb);
77339
77340 kfree_skb(skb);
77341 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
77342@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
77343 {
77344 int used = 0;
77345
77346- if (lapb->callbacks.data_transmit) {
77347- lapb->callbacks.data_transmit(lapb->dev, skb);
77348+ if (lapb->callbacks->data_transmit) {
77349+ lapb->callbacks->data_transmit(lapb->dev, skb);
77350 used = 1;
77351 }
77352
77353diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
77354index a01d213..6a1f1ab 100644
77355--- a/net/mac80211/debugfs_sta.c
77356+++ b/net/mac80211/debugfs_sta.c
77357@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
77358 struct tid_ampdu_rx *tid_rx;
77359 struct tid_ampdu_tx *tid_tx;
77360
77361+ pax_track_stack();
77362+
77363 rcu_read_lock();
77364
77365 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
77366@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
77367 struct sta_info *sta = file->private_data;
77368 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
77369
77370+ pax_track_stack();
77371+
77372 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
77373 htc->ht_supported ? "" : "not ");
77374 if (htc->ht_supported) {
77375diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
77376index 9fab144..7f0fc14 100644
77377--- a/net/mac80211/ieee80211_i.h
77378+++ b/net/mac80211/ieee80211_i.h
77379@@ -27,6 +27,7 @@
77380 #include <net/ieee80211_radiotap.h>
77381 #include <net/cfg80211.h>
77382 #include <net/mac80211.h>
77383+#include <asm/local.h>
77384 #include "key.h"
77385 #include "sta_info.h"
77386
77387@@ -754,7 +755,7 @@ struct ieee80211_local {
77388 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
77389 spinlock_t queue_stop_reason_lock;
77390
77391- int open_count;
77392+ local_t open_count;
77393 int monitors, cooked_mntrs;
77394 /* number of interfaces with corresponding FIF_ flags */
77395 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
77396diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
77397index 556e7e6..120dcaf 100644
77398--- a/net/mac80211/iface.c
77399+++ b/net/mac80211/iface.c
77400@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77401 break;
77402 }
77403
77404- if (local->open_count == 0) {
77405+ if (local_read(&local->open_count) == 0) {
77406 res = drv_start(local);
77407 if (res)
77408 goto err_del_bss;
77409@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77410 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
77411
77412 if (!is_valid_ether_addr(dev->dev_addr)) {
77413- if (!local->open_count)
77414+ if (!local_read(&local->open_count))
77415 drv_stop(local);
77416 return -EADDRNOTAVAIL;
77417 }
77418@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77419 mutex_unlock(&local->mtx);
77420
77421 if (coming_up)
77422- local->open_count++;
77423+ local_inc(&local->open_count);
77424
77425 if (hw_reconf_flags) {
77426 ieee80211_hw_config(local, hw_reconf_flags);
77427@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77428 err_del_interface:
77429 drv_remove_interface(local, &sdata->vif);
77430 err_stop:
77431- if (!local->open_count)
77432+ if (!local_read(&local->open_count))
77433 drv_stop(local);
77434 err_del_bss:
77435 sdata->bss = NULL;
77436@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77437 }
77438
77439 if (going_down)
77440- local->open_count--;
77441+ local_dec(&local->open_count);
77442
77443 switch (sdata->vif.type) {
77444 case NL80211_IFTYPE_AP_VLAN:
77445@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77446
77447 ieee80211_recalc_ps(local, -1);
77448
77449- if (local->open_count == 0) {
77450+ if (local_read(&local->open_count) == 0) {
77451 if (local->ops->napi_poll)
77452 napi_disable(&local->napi);
77453 ieee80211_clear_tx_pending(local);
77454diff --git a/net/mac80211/main.c b/net/mac80211/main.c
77455index 3d90dad..36884d5 100644
77456--- a/net/mac80211/main.c
77457+++ b/net/mac80211/main.c
77458@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
77459 local->hw.conf.power_level = power;
77460 }
77461
77462- if (changed && local->open_count) {
77463+ if (changed && local_read(&local->open_count)) {
77464 ret = drv_config(local, changed);
77465 /*
77466 * Goal:
77467diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
77468index 0f48368..d48e688 100644
77469--- a/net/mac80211/mlme.c
77470+++ b/net/mac80211/mlme.c
77471@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
77472 bool have_higher_than_11mbit = false;
77473 u16 ap_ht_cap_flags;
77474
77475+ pax_track_stack();
77476+
77477 /* AssocResp and ReassocResp have identical structure */
77478
77479 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
77480diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
77481index 6326d34..7225f61 100644
77482--- a/net/mac80211/pm.c
77483+++ b/net/mac80211/pm.c
77484@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77485 struct ieee80211_sub_if_data *sdata;
77486 struct sta_info *sta;
77487
77488- if (!local->open_count)
77489+ if (!local_read(&local->open_count))
77490 goto suspend;
77491
77492 ieee80211_scan_cancel(local);
77493@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77494 cancel_work_sync(&local->dynamic_ps_enable_work);
77495 del_timer_sync(&local->dynamic_ps_timer);
77496
77497- local->wowlan = wowlan && local->open_count;
77498+ local->wowlan = wowlan && local_read(&local->open_count);
77499 if (local->wowlan) {
77500 int err = drv_suspend(local, wowlan);
77501 if (err < 0) {
77502@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77503 }
77504
77505 /* stop hardware - this must stop RX */
77506- if (local->open_count)
77507+ if (local_read(&local->open_count))
77508 ieee80211_stop_device(local);
77509
77510 suspend:
77511diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
77512index 3d5a2cb..b17ad48 100644
77513--- a/net/mac80211/rate.c
77514+++ b/net/mac80211/rate.c
77515@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
77516
77517 ASSERT_RTNL();
77518
77519- if (local->open_count)
77520+ if (local_read(&local->open_count))
77521 return -EBUSY;
77522
77523 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
77524diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
77525index 4851e9e..d860e05 100644
77526--- a/net/mac80211/rc80211_pid_debugfs.c
77527+++ b/net/mac80211/rc80211_pid_debugfs.c
77528@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
77529
77530 spin_unlock_irqrestore(&events->lock, status);
77531
77532- if (copy_to_user(buf, pb, p))
77533+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
77534 return -EFAULT;
77535
77536 return p;
77537diff --git a/net/mac80211/util.c b/net/mac80211/util.c
77538index fd031e8..84fbfcf 100644
77539--- a/net/mac80211/util.c
77540+++ b/net/mac80211/util.c
77541@@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
77542 drv_set_coverage_class(local, hw->wiphy->coverage_class);
77543
77544 /* everything else happens only if HW was up & running */
77545- if (!local->open_count)
77546+ if (!local_read(&local->open_count))
77547 goto wake_up;
77548
77549 /*
77550diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
77551index 32bff6d..d0cf986 100644
77552--- a/net/netfilter/Kconfig
77553+++ b/net/netfilter/Kconfig
77554@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
77555
77556 To compile it as a module, choose M here. If unsure, say N.
77557
77558+config NETFILTER_XT_MATCH_GRADM
77559+ tristate '"gradm" match support'
77560+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77561+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77562+ ---help---
77563+ The gradm match allows to match on grsecurity RBAC being enabled.
77564+ It is useful when iptables rules are applied early on bootup to
77565+ prevent connections to the machine (except from a trusted host)
77566+ while the RBAC system is disabled.
77567+
77568 config NETFILTER_XT_MATCH_HASHLIMIT
77569 tristate '"hashlimit" match support'
77570 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
77571diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
77572index 1a02853..5d8c22e 100644
77573--- a/net/netfilter/Makefile
77574+++ b/net/netfilter/Makefile
77575@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
77576 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
77577 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
77578 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77579+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77580 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77581 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77582 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77583diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
77584index 12571fb..fb73976 100644
77585--- a/net/netfilter/ipvs/ip_vs_conn.c
77586+++ b/net/netfilter/ipvs/ip_vs_conn.c
77587@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
77588 /* Increase the refcnt counter of the dest */
77589 atomic_inc(&dest->refcnt);
77590
77591- conn_flags = atomic_read(&dest->conn_flags);
77592+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
77593 if (cp->protocol != IPPROTO_UDP)
77594 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
77595 /* Bind with the destination and its corresponding transmitter */
77596@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
77597 atomic_set(&cp->refcnt, 1);
77598
77599 atomic_set(&cp->n_control, 0);
77600- atomic_set(&cp->in_pkts, 0);
77601+ atomic_set_unchecked(&cp->in_pkts, 0);
77602
77603 atomic_inc(&ipvs->conn_count);
77604 if (flags & IP_VS_CONN_F_NO_CPORT)
77605@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
77606
77607 /* Don't drop the entry if its number of incoming packets is not
77608 located in [0, 8] */
77609- i = atomic_read(&cp->in_pkts);
77610+ i = atomic_read_unchecked(&cp->in_pkts);
77611 if (i > 8 || i < 0) return 0;
77612
77613 if (!todrop_rate[i]) return 0;
77614diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
77615index 4f77bb1..5d0bc26 100644
77616--- a/net/netfilter/ipvs/ip_vs_core.c
77617+++ b/net/netfilter/ipvs/ip_vs_core.c
77618@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
77619 ret = cp->packet_xmit(skb, cp, pd->pp);
77620 /* do not touch skb anymore */
77621
77622- atomic_inc(&cp->in_pkts);
77623+ atomic_inc_unchecked(&cp->in_pkts);
77624 ip_vs_conn_put(cp);
77625 return ret;
77626 }
77627@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
77628 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
77629 pkts = sysctl_sync_threshold(ipvs);
77630 else
77631- pkts = atomic_add_return(1, &cp->in_pkts);
77632+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77633
77634 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
77635 cp->protocol == IPPROTO_SCTP) {
77636diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
77637index e3be48b..d658c8c 100644
77638--- a/net/netfilter/ipvs/ip_vs_ctl.c
77639+++ b/net/netfilter/ipvs/ip_vs_ctl.c
77640@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
77641 ip_vs_rs_hash(ipvs, dest);
77642 write_unlock_bh(&ipvs->rs_lock);
77643 }
77644- atomic_set(&dest->conn_flags, conn_flags);
77645+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
77646
77647 /* bind the service */
77648 if (!dest->svc) {
77649@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77650 " %-7s %-6d %-10d %-10d\n",
77651 &dest->addr.in6,
77652 ntohs(dest->port),
77653- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77654+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77655 atomic_read(&dest->weight),
77656 atomic_read(&dest->activeconns),
77657 atomic_read(&dest->inactconns));
77658@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77659 "%-7s %-6d %-10d %-10d\n",
77660 ntohl(dest->addr.ip),
77661 ntohs(dest->port),
77662- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77663+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77664 atomic_read(&dest->weight),
77665 atomic_read(&dest->activeconns),
77666 atomic_read(&dest->inactconns));
77667@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
77668 struct ip_vs_dest_user_kern udest;
77669 struct netns_ipvs *ipvs = net_ipvs(net);
77670
77671+ pax_track_stack();
77672+
77673 if (!capable(CAP_NET_ADMIN))
77674 return -EPERM;
77675
77676@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
77677
77678 entry.addr = dest->addr.ip;
77679 entry.port = dest->port;
77680- entry.conn_flags = atomic_read(&dest->conn_flags);
77681+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77682 entry.weight = atomic_read(&dest->weight);
77683 entry.u_threshold = dest->u_threshold;
77684 entry.l_threshold = dest->l_threshold;
77685@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
77686 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77687
77688 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77689- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77690+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77691 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77692 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77693 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
77694diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
77695index 3cdd479..116afa8 100644
77696--- a/net/netfilter/ipvs/ip_vs_sync.c
77697+++ b/net/netfilter/ipvs/ip_vs_sync.c
77698@@ -649,7 +649,7 @@ control:
77699 * i.e only increment in_pkts for Templates.
77700 */
77701 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
77702- int pkts = atomic_add_return(1, &cp->in_pkts);
77703+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77704
77705 if (pkts % sysctl_sync_period(ipvs) != 1)
77706 return;
77707@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
77708
77709 if (opt)
77710 memcpy(&cp->in_seq, opt, sizeof(*opt));
77711- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77712+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77713 cp->state = state;
77714 cp->old_state = cp->state;
77715 /*
77716diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
77717index ee319a4..8a285ee 100644
77718--- a/net/netfilter/ipvs/ip_vs_xmit.c
77719+++ b/net/netfilter/ipvs/ip_vs_xmit.c
77720@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
77721 else
77722 rc = NF_ACCEPT;
77723 /* do not touch skb anymore */
77724- atomic_inc(&cp->in_pkts);
77725+ atomic_inc_unchecked(&cp->in_pkts);
77726 goto out;
77727 }
77728
77729@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
77730 else
77731 rc = NF_ACCEPT;
77732 /* do not touch skb anymore */
77733- atomic_inc(&cp->in_pkts);
77734+ atomic_inc_unchecked(&cp->in_pkts);
77735 goto out;
77736 }
77737
77738diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
77739index 7dec88a..0996ce3 100644
77740--- a/net/netfilter/nf_conntrack_netlink.c
77741+++ b/net/netfilter/nf_conntrack_netlink.c
77742@@ -135,7 +135,7 @@ nla_put_failure:
77743 static inline int
77744 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
77745 {
77746- long timeout = (ct->timeout.expires - jiffies) / HZ;
77747+ long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
77748
77749 if (timeout < 0)
77750 timeout = 0;
77751@@ -1638,7 +1638,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
77752 const struct nf_conntrack_expect *exp)
77753 {
77754 struct nf_conn *master = exp->master;
77755- long timeout = (exp->timeout.expires - jiffies) / HZ;
77756+ long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
77757 struct nf_conn_help *help;
77758
77759 if (timeout < 0)
77760diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
77761index 2d8158a..5dca296 100644
77762--- a/net/netfilter/nfnetlink_log.c
77763+++ b/net/netfilter/nfnetlink_log.c
77764@@ -70,7 +70,7 @@ struct nfulnl_instance {
77765 };
77766
77767 static DEFINE_SPINLOCK(instances_lock);
77768-static atomic_t global_seq;
77769+static atomic_unchecked_t global_seq;
77770
77771 #define INSTANCE_BUCKETS 16
77772 static struct hlist_head instance_table[INSTANCE_BUCKETS];
77773@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst,
77774 /* global sequence number */
77775 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77776 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77777- htonl(atomic_inc_return(&global_seq)));
77778+ htonl(atomic_inc_return_unchecked(&global_seq)));
77779
77780 if (data_len) {
77781 struct nlattr *nla;
77782diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
77783new file mode 100644
77784index 0000000..6905327
77785--- /dev/null
77786+++ b/net/netfilter/xt_gradm.c
77787@@ -0,0 +1,51 @@
77788+/*
77789+ * gradm match for netfilter
77790